diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..2b4a5fccdaf --- /dev/null +++ b/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +# Code of Conduct + +Please read the [Go Community Code of Conduct](https://golang.org/conduct). diff --git a/.github/ISSUE_TEMPLATE b/.github/ISSUE_TEMPLATE index f626ddbf0b8..d3c3a2d262f 100644 --- a/.github/ISSUE_TEMPLATE +++ b/.github/ISSUE_TEMPLATE @@ -1,8 +1,12 @@ Please answer these questions before submitting your issue. Thanks! + ### What version of Go are you using (`go version`)? +### Does this issue reproduce with the latest release? + + ### What operating system and processor architecture are you using (`go env`)? @@ -18,4 +22,3 @@ A link on play.golang.org is best. ### What did you see instead? - diff --git a/.gitignore b/.gitignore index 8b2f36b9f1b..39723909c71 100644 --- a/.gitignore +++ b/.gitignore @@ -31,6 +31,7 @@ _testmain.go /pkg/ /src/*.*/ /src/cmd/cgo/zdefaultcc.go +/src/cmd/dist/dist /src/cmd/go/internal/cfg/zdefaultcc.go /src/cmd/go/internal/cfg/zosarch.go /src/cmd/internal/objabi/zbootstrap.go @@ -43,3 +44,8 @@ _testmain.go /test/pass.out /test/run.out /test/times.out + +# This file includes artifacts of Go build that should not be checked in. +# For files created by specific development environment (e.g. editor), +# use alternative ways to exclude files from git. +# For example, set up .git/info/exclude or use a global .gitignore. 
diff --git a/api/except.txt b/api/except.txt index fbabd18a810..f8cd0faf646 100644 --- a/api/except.txt +++ b/api/except.txt @@ -1,4 +1,5 @@ pkg encoding/json, method (*RawMessage) MarshalJSON() ([]uint8, error) +pkg math/big, const MaxBase = 36 pkg math/big, type Word uintptr pkg net, func ListenUnixgram(string, *UnixAddr) (*UDPConn, error) pkg os (linux-arm), const O_SYNC = 4096 @@ -343,3 +344,4 @@ pkg syscall (openbsd-386), const SYS_KILL = 37 pkg syscall (openbsd-386-cgo), const SYS_KILL = 37 pkg syscall (openbsd-amd64), const SYS_KILL = 37 pkg syscall (openbsd-amd64-cgo), const SYS_KILL = 37 +pkg unicode, const Version = "9.0.0" diff --git a/api/next.txt b/api/next.txt index e69de29bb2d..1394a0d491a 100644 --- a/api/next.txt +++ b/api/next.txt @@ -0,0 +1,641 @@ +pkg archive/tar, const FormatGNU = 8 +pkg archive/tar, const FormatGNU Format +pkg archive/tar, const FormatPAX = 4 +pkg archive/tar, const FormatPAX Format +pkg archive/tar, const FormatUSTAR = 2 +pkg archive/tar, const FormatUSTAR Format +pkg archive/tar, const FormatUnknown = 0 +pkg archive/tar, const FormatUnknown Format +pkg archive/tar, method (*Header) DetectSparseHoles(*os.File) error +pkg archive/tar, method (*Header) PunchSparseHoles(*os.File) error +pkg archive/tar, method (*Reader) WriteTo(io.Writer) (int64, error) +pkg archive/tar, method (*Writer) ReadFrom(io.Reader) (int64, error) +pkg archive/tar, method (Format) String() string +pkg archive/tar, type Format int +pkg archive/tar, type Header struct, Format Format +pkg archive/tar, type Header struct, PAXRecords map[string]string +pkg archive/tar, type Header struct, SparseHoles []SparseEntry +pkg archive/tar, type SparseEntry struct +pkg archive/tar, type SparseEntry struct, Length int64 +pkg archive/tar, type SparseEntry struct, Offset int64 +pkg archive/zip, type FileHeader struct, Modified time.Time +pkg archive/zip, type FileHeader struct, NonUTF8 bool +pkg archive/zip, type Writer struct, Comment string +pkg bufio, method (*Reader) 
Size() int +pkg bufio, method (*Writer) Size() int +pkg crypto/tls, const ECDSAWithSHA1 = 515 +pkg crypto/tls, const ECDSAWithSHA1 SignatureScheme +pkg crypto/x509, const CANotAuthorizedForExtKeyUsage = 9 +pkg crypto/x509, const CANotAuthorizedForExtKeyUsage InvalidReason +pkg crypto/x509, const NameConstraintsWithoutSANs = 6 +pkg crypto/x509, const NameConstraintsWithoutSANs InvalidReason +pkg crypto/x509, const TooManyConstraints = 8 +pkg crypto/x509, const TooManyConstraints InvalidReason +pkg crypto/x509, const UnconstrainedName = 7 +pkg crypto/x509, const UnconstrainedName InvalidReason +pkg crypto/x509, func MarshalPKCS8PrivateKey(interface{}) ([]uint8, error) +pkg crypto/x509, method (PublicKeyAlgorithm) String() string +pkg crypto/x509, type Certificate struct, ExcludedEmailAddresses []string +pkg crypto/x509, type Certificate struct, ExcludedIPRanges []*net.IPNet +pkg crypto/x509, type Certificate struct, ExcludedURIDomains []string +pkg crypto/x509, type Certificate struct, PermittedEmailAddresses []string +pkg crypto/x509, type Certificate struct, PermittedIPRanges []*net.IPNet +pkg crypto/x509, type Certificate struct, PermittedURIDomains []string +pkg crypto/x509, type Certificate struct, URIs []*url.URL +pkg crypto/x509, type CertificateInvalidError struct, Detail string +pkg crypto/x509, type CertificateRequest struct, URIs []*url.URL +pkg crypto/x509, type VerifyOptions struct, MaxConstraintComparisions int +pkg crypto/x509/pkix, method (Name) String() string +pkg crypto/x509/pkix, method (RDNSequence) String() string +pkg database/sql, func OpenDB(driver.Connector) *DB +pkg database/sql/driver, type Connector interface { Connect, Driver } +pkg database/sql/driver, type Connector interface, Connect(context.Context) (Conn, error) +pkg database/sql/driver, type Connector interface, Driver() Driver +pkg database/sql/driver, type SessionResetter interface { ResetSession } +pkg database/sql/driver, type SessionResetter interface, 
ResetSession(context.Context) error +pkg debug/elf, const R_386_16 = 20 +pkg debug/elf, const R_386_16 R_386 +pkg debug/elf, const R_386_32PLT = 11 +pkg debug/elf, const R_386_32PLT R_386 +pkg debug/elf, const R_386_8 = 22 +pkg debug/elf, const R_386_8 R_386 +pkg debug/elf, const R_386_GOT32X = 43 +pkg debug/elf, const R_386_GOT32X R_386 +pkg debug/elf, const R_386_IRELATIVE = 42 +pkg debug/elf, const R_386_IRELATIVE R_386 +pkg debug/elf, const R_386_PC16 = 21 +pkg debug/elf, const R_386_PC16 R_386 +pkg debug/elf, const R_386_PC8 = 23 +pkg debug/elf, const R_386_PC8 R_386 +pkg debug/elf, const R_386_SIZE32 = 38 +pkg debug/elf, const R_386_SIZE32 R_386 +pkg debug/elf, const R_386_TLS_DESC = 41 +pkg debug/elf, const R_386_TLS_DESC R_386 +pkg debug/elf, const R_386_TLS_DESC_CALL = 40 +pkg debug/elf, const R_386_TLS_DESC_CALL R_386 +pkg debug/elf, const R_386_TLS_GOTDESC = 39 +pkg debug/elf, const R_386_TLS_GOTDESC R_386 +pkg debug/elf, const R_AARCH64_LD64_GOTOFF_LO15 = 310 +pkg debug/elf, const R_AARCH64_LD64_GOTOFF_LO15 R_AARCH64 +pkg debug/elf, const R_AARCH64_LD64_GOTPAGE_LO15 = 313 +pkg debug/elf, const R_AARCH64_LD64_GOTPAGE_LO15 R_AARCH64 +pkg debug/elf, const R_AARCH64_TLSGD_ADR_PREL21 = 512 +pkg debug/elf, const R_AARCH64_TLSGD_ADR_PREL21 R_AARCH64 +pkg debug/elf, const R_AARCH64_TLSGD_MOVW_G0_NC = 516 +pkg debug/elf, const R_AARCH64_TLSGD_MOVW_G0_NC R_AARCH64 +pkg debug/elf, const R_AARCH64_TLSGD_MOVW_G1 = 515 +pkg debug/elf, const R_AARCH64_TLSGD_MOVW_G1 R_AARCH64 +pkg debug/elf, const R_AARCH64_TLSLD_ADR_PAGE21 = 518 +pkg debug/elf, const R_AARCH64_TLSLD_ADR_PAGE21 R_AARCH64 +pkg debug/elf, const R_AARCH64_TLSLD_ADR_PREL21 = 517 +pkg debug/elf, const R_AARCH64_TLSLD_ADR_PREL21 R_AARCH64 +pkg debug/elf, const R_AARCH64_TLSLD_LDST128_DTPREL_LO12 = 572 +pkg debug/elf, const R_AARCH64_TLSLD_LDST128_DTPREL_LO12 R_AARCH64 +pkg debug/elf, const R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC = 573 +pkg debug/elf, const R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC R_AARCH64 +pkg 
debug/elf, const R_AARCH64_TLSLE_LDST128_TPREL_LO12 = 570 +pkg debug/elf, const R_AARCH64_TLSLE_LDST128_TPREL_LO12 R_AARCH64 +pkg debug/elf, const R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC = 571 +pkg debug/elf, const R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC R_AARCH64 +pkg debug/elf, const R_ARM_ABS32_NOI = 55 +pkg debug/elf, const R_ARM_ABS32_NOI R_ARM +pkg debug/elf, const R_ARM_ALU_PCREL_15_8 = 33 +pkg debug/elf, const R_ARM_ALU_PCREL_15_8 R_ARM +pkg debug/elf, const R_ARM_ALU_PCREL_23_15 = 34 +pkg debug/elf, const R_ARM_ALU_PCREL_23_15 R_ARM +pkg debug/elf, const R_ARM_ALU_PCREL_7_0 = 32 +pkg debug/elf, const R_ARM_ALU_PCREL_7_0 R_ARM +pkg debug/elf, const R_ARM_ALU_PC_G0 = 58 +pkg debug/elf, const R_ARM_ALU_PC_G0 R_ARM +pkg debug/elf, const R_ARM_ALU_PC_G0_NC = 57 +pkg debug/elf, const R_ARM_ALU_PC_G0_NC R_ARM +pkg debug/elf, const R_ARM_ALU_PC_G1 = 60 +pkg debug/elf, const R_ARM_ALU_PC_G1 R_ARM +pkg debug/elf, const R_ARM_ALU_PC_G1_NC = 59 +pkg debug/elf, const R_ARM_ALU_PC_G1_NC R_ARM +pkg debug/elf, const R_ARM_ALU_PC_G2 = 61 +pkg debug/elf, const R_ARM_ALU_PC_G2 R_ARM +pkg debug/elf, const R_ARM_ALU_SBREL_19_12_NC = 36 +pkg debug/elf, const R_ARM_ALU_SBREL_19_12_NC R_ARM +pkg debug/elf, const R_ARM_ALU_SBREL_27_20_CK = 37 +pkg debug/elf, const R_ARM_ALU_SBREL_27_20_CK R_ARM +pkg debug/elf, const R_ARM_ALU_SB_G0 = 71 +pkg debug/elf, const R_ARM_ALU_SB_G0 R_ARM +pkg debug/elf, const R_ARM_ALU_SB_G0_NC = 70 +pkg debug/elf, const R_ARM_ALU_SB_G0_NC R_ARM +pkg debug/elf, const R_ARM_ALU_SB_G1 = 73 +pkg debug/elf, const R_ARM_ALU_SB_G1 R_ARM +pkg debug/elf, const R_ARM_ALU_SB_G1_NC = 72 +pkg debug/elf, const R_ARM_ALU_SB_G1_NC R_ARM +pkg debug/elf, const R_ARM_ALU_SB_G2 = 74 +pkg debug/elf, const R_ARM_ALU_SB_G2 R_ARM +pkg debug/elf, const R_ARM_BASE_ABS = 31 +pkg debug/elf, const R_ARM_BASE_ABS R_ARM +pkg debug/elf, const R_ARM_CALL = 28 +pkg debug/elf, const R_ARM_CALL R_ARM +pkg debug/elf, const R_ARM_GOTOFF12 = 98 +pkg debug/elf, const R_ARM_GOTOFF12 R_ARM +pkg 
debug/elf, const R_ARM_GOTRELAX = 99 +pkg debug/elf, const R_ARM_GOTRELAX R_ARM +pkg debug/elf, const R_ARM_GOT_ABS = 95 +pkg debug/elf, const R_ARM_GOT_ABS R_ARM +pkg debug/elf, const R_ARM_GOT_BREL12 = 97 +pkg debug/elf, const R_ARM_GOT_BREL12 R_ARM +pkg debug/elf, const R_ARM_GOT_PREL = 96 +pkg debug/elf, const R_ARM_GOT_PREL R_ARM +pkg debug/elf, const R_ARM_IRELATIVE = 160 +pkg debug/elf, const R_ARM_IRELATIVE R_ARM +pkg debug/elf, const R_ARM_JUMP24 = 29 +pkg debug/elf, const R_ARM_JUMP24 R_ARM +pkg debug/elf, const R_ARM_LDC_PC_G0 = 67 +pkg debug/elf, const R_ARM_LDC_PC_G0 R_ARM +pkg debug/elf, const R_ARM_LDC_PC_G1 = 68 +pkg debug/elf, const R_ARM_LDC_PC_G1 R_ARM +pkg debug/elf, const R_ARM_LDC_PC_G2 = 69 +pkg debug/elf, const R_ARM_LDC_PC_G2 R_ARM +pkg debug/elf, const R_ARM_LDC_SB_G0 = 81 +pkg debug/elf, const R_ARM_LDC_SB_G0 R_ARM +pkg debug/elf, const R_ARM_LDC_SB_G1 = 82 +pkg debug/elf, const R_ARM_LDC_SB_G1 R_ARM +pkg debug/elf, const R_ARM_LDC_SB_G2 = 83 +pkg debug/elf, const R_ARM_LDC_SB_G2 R_ARM +pkg debug/elf, const R_ARM_LDRS_PC_G0 = 64 +pkg debug/elf, const R_ARM_LDRS_PC_G0 R_ARM +pkg debug/elf, const R_ARM_LDRS_PC_G1 = 65 +pkg debug/elf, const R_ARM_LDRS_PC_G1 R_ARM +pkg debug/elf, const R_ARM_LDRS_PC_G2 = 66 +pkg debug/elf, const R_ARM_LDRS_PC_G2 R_ARM +pkg debug/elf, const R_ARM_LDRS_SB_G0 = 78 +pkg debug/elf, const R_ARM_LDRS_SB_G0 R_ARM +pkg debug/elf, const R_ARM_LDRS_SB_G1 = 79 +pkg debug/elf, const R_ARM_LDRS_SB_G1 R_ARM +pkg debug/elf, const R_ARM_LDRS_SB_G2 = 80 +pkg debug/elf, const R_ARM_LDRS_SB_G2 R_ARM +pkg debug/elf, const R_ARM_LDR_PC_G1 = 62 +pkg debug/elf, const R_ARM_LDR_PC_G1 R_ARM +pkg debug/elf, const R_ARM_LDR_PC_G2 = 63 +pkg debug/elf, const R_ARM_LDR_PC_G2 R_ARM +pkg debug/elf, const R_ARM_LDR_SBREL_11_10_NC = 35 +pkg debug/elf, const R_ARM_LDR_SBREL_11_10_NC R_ARM +pkg debug/elf, const R_ARM_LDR_SB_G0 = 75 +pkg debug/elf, const R_ARM_LDR_SB_G0 R_ARM +pkg debug/elf, const R_ARM_LDR_SB_G1 = 76 +pkg debug/elf, const 
R_ARM_LDR_SB_G1 R_ARM +pkg debug/elf, const R_ARM_LDR_SB_G2 = 77 +pkg debug/elf, const R_ARM_LDR_SB_G2 R_ARM +pkg debug/elf, const R_ARM_ME_TOO = 128 +pkg debug/elf, const R_ARM_ME_TOO R_ARM +pkg debug/elf, const R_ARM_MOVT_ABS = 44 +pkg debug/elf, const R_ARM_MOVT_ABS R_ARM +pkg debug/elf, const R_ARM_MOVT_BREL = 85 +pkg debug/elf, const R_ARM_MOVT_BREL R_ARM +pkg debug/elf, const R_ARM_MOVT_PREL = 46 +pkg debug/elf, const R_ARM_MOVT_PREL R_ARM +pkg debug/elf, const R_ARM_MOVW_ABS_NC = 43 +pkg debug/elf, const R_ARM_MOVW_ABS_NC R_ARM +pkg debug/elf, const R_ARM_MOVW_BREL = 86 +pkg debug/elf, const R_ARM_MOVW_BREL R_ARM +pkg debug/elf, const R_ARM_MOVW_BREL_NC = 84 +pkg debug/elf, const R_ARM_MOVW_BREL_NC R_ARM +pkg debug/elf, const R_ARM_MOVW_PREL_NC = 45 +pkg debug/elf, const R_ARM_MOVW_PREL_NC R_ARM +pkg debug/elf, const R_ARM_PLT32_ABS = 94 +pkg debug/elf, const R_ARM_PLT32_ABS R_ARM +pkg debug/elf, const R_ARM_PREL31 = 42 +pkg debug/elf, const R_ARM_PREL31 R_ARM +pkg debug/elf, const R_ARM_PRIVATE_0 = 112 +pkg debug/elf, const R_ARM_PRIVATE_0 R_ARM +pkg debug/elf, const R_ARM_PRIVATE_1 = 113 +pkg debug/elf, const R_ARM_PRIVATE_1 R_ARM +pkg debug/elf, const R_ARM_PRIVATE_10 = 122 +pkg debug/elf, const R_ARM_PRIVATE_10 R_ARM +pkg debug/elf, const R_ARM_PRIVATE_11 = 123 +pkg debug/elf, const R_ARM_PRIVATE_11 R_ARM +pkg debug/elf, const R_ARM_PRIVATE_12 = 124 +pkg debug/elf, const R_ARM_PRIVATE_12 R_ARM +pkg debug/elf, const R_ARM_PRIVATE_13 = 125 +pkg debug/elf, const R_ARM_PRIVATE_13 R_ARM +pkg debug/elf, const R_ARM_PRIVATE_14 = 126 +pkg debug/elf, const R_ARM_PRIVATE_14 R_ARM +pkg debug/elf, const R_ARM_PRIVATE_15 = 127 +pkg debug/elf, const R_ARM_PRIVATE_15 R_ARM +pkg debug/elf, const R_ARM_PRIVATE_2 = 114 +pkg debug/elf, const R_ARM_PRIVATE_2 R_ARM +pkg debug/elf, const R_ARM_PRIVATE_3 = 115 +pkg debug/elf, const R_ARM_PRIVATE_3 R_ARM +pkg debug/elf, const R_ARM_PRIVATE_4 = 116 +pkg debug/elf, const R_ARM_PRIVATE_4 R_ARM +pkg debug/elf, const R_ARM_PRIVATE_5 
= 117 +pkg debug/elf, const R_ARM_PRIVATE_5 R_ARM +pkg debug/elf, const R_ARM_PRIVATE_6 = 118 +pkg debug/elf, const R_ARM_PRIVATE_6 R_ARM +pkg debug/elf, const R_ARM_PRIVATE_7 = 119 +pkg debug/elf, const R_ARM_PRIVATE_7 R_ARM +pkg debug/elf, const R_ARM_PRIVATE_8 = 120 +pkg debug/elf, const R_ARM_PRIVATE_8 R_ARM +pkg debug/elf, const R_ARM_PRIVATE_9 = 121 +pkg debug/elf, const R_ARM_PRIVATE_9 R_ARM +pkg debug/elf, const R_ARM_REL32_NOI = 56 +pkg debug/elf, const R_ARM_REL32_NOI R_ARM +pkg debug/elf, const R_ARM_RXPC25 = 249 +pkg debug/elf, const R_ARM_RXPC25 R_ARM +pkg debug/elf, const R_ARM_SBREL31 = 39 +pkg debug/elf, const R_ARM_SBREL31 R_ARM +pkg debug/elf, const R_ARM_TARGET1 = 38 +pkg debug/elf, const R_ARM_TARGET1 R_ARM +pkg debug/elf, const R_ARM_TARGET2 = 41 +pkg debug/elf, const R_ARM_TARGET2 R_ARM +pkg debug/elf, const R_ARM_THM_ALU_ABS_G0_NC = 132 +pkg debug/elf, const R_ARM_THM_ALU_ABS_G0_NC R_ARM +pkg debug/elf, const R_ARM_THM_ALU_ABS_G1_NC = 133 +pkg debug/elf, const R_ARM_THM_ALU_ABS_G1_NC R_ARM +pkg debug/elf, const R_ARM_THM_ALU_ABS_G2_NC = 134 +pkg debug/elf, const R_ARM_THM_ALU_ABS_G2_NC R_ARM +pkg debug/elf, const R_ARM_THM_ALU_ABS_G3 = 135 +pkg debug/elf, const R_ARM_THM_ALU_ABS_G3 R_ARM +pkg debug/elf, const R_ARM_THM_ALU_PREL_11_0 = 53 +pkg debug/elf, const R_ARM_THM_ALU_PREL_11_0 R_ARM +pkg debug/elf, const R_ARM_THM_GOT_BREL12 = 131 +pkg debug/elf, const R_ARM_THM_GOT_BREL12 R_ARM +pkg debug/elf, const R_ARM_THM_JUMP11 = 102 +pkg debug/elf, const R_ARM_THM_JUMP11 R_ARM +pkg debug/elf, const R_ARM_THM_JUMP19 = 51 +pkg debug/elf, const R_ARM_THM_JUMP19 R_ARM +pkg debug/elf, const R_ARM_THM_JUMP24 = 30 +pkg debug/elf, const R_ARM_THM_JUMP24 R_ARM +pkg debug/elf, const R_ARM_THM_JUMP6 = 52 +pkg debug/elf, const R_ARM_THM_JUMP6 R_ARM +pkg debug/elf, const R_ARM_THM_JUMP8 = 103 +pkg debug/elf, const R_ARM_THM_JUMP8 R_ARM +pkg debug/elf, const R_ARM_THM_MOVT_ABS = 48 +pkg debug/elf, const R_ARM_THM_MOVT_ABS R_ARM +pkg debug/elf, const 
R_ARM_THM_MOVT_BREL = 88 +pkg debug/elf, const R_ARM_THM_MOVT_BREL R_ARM +pkg debug/elf, const R_ARM_THM_MOVT_PREL = 50 +pkg debug/elf, const R_ARM_THM_MOVT_PREL R_ARM +pkg debug/elf, const R_ARM_THM_MOVW_ABS_NC = 47 +pkg debug/elf, const R_ARM_THM_MOVW_ABS_NC R_ARM +pkg debug/elf, const R_ARM_THM_MOVW_BREL = 89 +pkg debug/elf, const R_ARM_THM_MOVW_BREL R_ARM +pkg debug/elf, const R_ARM_THM_MOVW_BREL_NC = 87 +pkg debug/elf, const R_ARM_THM_MOVW_BREL_NC R_ARM +pkg debug/elf, const R_ARM_THM_MOVW_PREL_NC = 49 +pkg debug/elf, const R_ARM_THM_MOVW_PREL_NC R_ARM +pkg debug/elf, const R_ARM_THM_PC12 = 54 +pkg debug/elf, const R_ARM_THM_PC12 R_ARM +pkg debug/elf, const R_ARM_THM_TLS_CALL = 93 +pkg debug/elf, const R_ARM_THM_TLS_CALL R_ARM +pkg debug/elf, const R_ARM_THM_TLS_DESCSEQ16 = 129 +pkg debug/elf, const R_ARM_THM_TLS_DESCSEQ16 R_ARM +pkg debug/elf, const R_ARM_THM_TLS_DESCSEQ32 = 130 +pkg debug/elf, const R_ARM_THM_TLS_DESCSEQ32 R_ARM +pkg debug/elf, const R_ARM_TLS_CALL = 91 +pkg debug/elf, const R_ARM_TLS_CALL R_ARM +pkg debug/elf, const R_ARM_TLS_DESCSEQ = 92 +pkg debug/elf, const R_ARM_TLS_DESCSEQ R_ARM +pkg debug/elf, const R_ARM_TLS_DTPMOD32 = 17 +pkg debug/elf, const R_ARM_TLS_DTPMOD32 R_ARM +pkg debug/elf, const R_ARM_TLS_DTPOFF32 = 18 +pkg debug/elf, const R_ARM_TLS_DTPOFF32 R_ARM +pkg debug/elf, const R_ARM_TLS_GD32 = 104 +pkg debug/elf, const R_ARM_TLS_GD32 R_ARM +pkg debug/elf, const R_ARM_TLS_GOTDESC = 90 +pkg debug/elf, const R_ARM_TLS_GOTDESC R_ARM +pkg debug/elf, const R_ARM_TLS_IE12GP = 111 +pkg debug/elf, const R_ARM_TLS_IE12GP R_ARM +pkg debug/elf, const R_ARM_TLS_IE32 = 107 +pkg debug/elf, const R_ARM_TLS_IE32 R_ARM +pkg debug/elf, const R_ARM_TLS_LDM32 = 105 +pkg debug/elf, const R_ARM_TLS_LDM32 R_ARM +pkg debug/elf, const R_ARM_TLS_LDO12 = 109 +pkg debug/elf, const R_ARM_TLS_LDO12 R_ARM +pkg debug/elf, const R_ARM_TLS_LDO32 = 106 +pkg debug/elf, const R_ARM_TLS_LDO32 R_ARM +pkg debug/elf, const R_ARM_TLS_LE12 = 110 +pkg debug/elf, const 
R_ARM_TLS_LE12 R_ARM +pkg debug/elf, const R_ARM_TLS_LE32 = 108 +pkg debug/elf, const R_ARM_TLS_LE32 R_ARM +pkg debug/elf, const R_ARM_TLS_TPOFF32 = 19 +pkg debug/elf, const R_ARM_TLS_TPOFF32 R_ARM +pkg debug/elf, const R_ARM_V4BX = 40 +pkg debug/elf, const R_ARM_V4BX R_ARM +pkg debug/elf, const R_PPC64_ADDR16_HIGH = 110 +pkg debug/elf, const R_PPC64_ADDR16_HIGH R_PPC64 +pkg debug/elf, const R_PPC64_ADDR16_HIGHA = 111 +pkg debug/elf, const R_PPC64_ADDR16_HIGHA R_PPC64 +pkg debug/elf, const R_PPC64_ADDR64_LOCAL = 117 +pkg debug/elf, const R_PPC64_ADDR64_LOCAL R_PPC64 +pkg debug/elf, const R_PPC64_DTPREL16_HIGH = 114 +pkg debug/elf, const R_PPC64_DTPREL16_HIGH R_PPC64 +pkg debug/elf, const R_PPC64_DTPREL16_HIGHA = 115 +pkg debug/elf, const R_PPC64_DTPREL16_HIGHA R_PPC64 +pkg debug/elf, const R_PPC64_ENTRY = 118 +pkg debug/elf, const R_PPC64_ENTRY R_PPC64 +pkg debug/elf, const R_PPC64_IRELATIVE = 248 +pkg debug/elf, const R_PPC64_IRELATIVE R_PPC64 +pkg debug/elf, const R_PPC64_JMP_IREL = 247 +pkg debug/elf, const R_PPC64_JMP_IREL R_PPC64 +pkg debug/elf, const R_PPC64_PLT16_LO_DS = 60 +pkg debug/elf, const R_PPC64_PLT16_LO_DS R_PPC64 +pkg debug/elf, const R_PPC64_PLTGOT16 = 52 +pkg debug/elf, const R_PPC64_PLTGOT16 R_PPC64 +pkg debug/elf, const R_PPC64_PLTGOT16_DS = 65 +pkg debug/elf, const R_PPC64_PLTGOT16_DS R_PPC64 +pkg debug/elf, const R_PPC64_PLTGOT16_HA = 55 +pkg debug/elf, const R_PPC64_PLTGOT16_HA R_PPC64 +pkg debug/elf, const R_PPC64_PLTGOT16_HI = 54 +pkg debug/elf, const R_PPC64_PLTGOT16_HI R_PPC64 +pkg debug/elf, const R_PPC64_PLTGOT16_LO = 53 +pkg debug/elf, const R_PPC64_PLTGOT16_LO R_PPC64 +pkg debug/elf, const R_PPC64_PLTGOT_LO_DS = 66 +pkg debug/elf, const R_PPC64_PLTGOT_LO_DS R_PPC64 +pkg debug/elf, const R_PPC64_REL16DX_HA = 246 +pkg debug/elf, const R_PPC64_REL16DX_HA R_PPC64 +pkg debug/elf, const R_PPC64_REL24_NOTOC = 116 +pkg debug/elf, const R_PPC64_REL24_NOTOC R_PPC64 +pkg debug/elf, const R_PPC64_SECTOFF_DS = 61 +pkg debug/elf, const 
R_PPC64_SECTOFF_DS R_PPC64 +pkg debug/elf, const R_PPC64_SECTOFF_LO_DS = 61 +pkg debug/elf, const R_PPC64_SECTOFF_LO_DS R_PPC64 +pkg debug/elf, const R_PPC64_TOCSAVE = 109 +pkg debug/elf, const R_PPC64_TOCSAVE R_PPC64 +pkg debug/elf, const R_PPC64_TPREL16_HIGH = 112 +pkg debug/elf, const R_PPC64_TPREL16_HIGH R_PPC64 +pkg debug/elf, const R_PPC64_TPREL16_HIGHA = 113 +pkg debug/elf, const R_PPC64_TPREL16_HIGHA R_PPC64 +pkg debug/elf, const R_X86_64_GOT64 = 27 +pkg debug/elf, const R_X86_64_GOT64 R_X86_64 +pkg debug/elf, const R_X86_64_GOTOFF64 = 25 +pkg debug/elf, const R_X86_64_GOTOFF64 R_X86_64 +pkg debug/elf, const R_X86_64_GOTPC32 = 26 +pkg debug/elf, const R_X86_64_GOTPC32 R_X86_64 +pkg debug/elf, const R_X86_64_GOTPC32_TLSDESC = 34 +pkg debug/elf, const R_X86_64_GOTPC32_TLSDESC R_X86_64 +pkg debug/elf, const R_X86_64_GOTPC64 = 29 +pkg debug/elf, const R_X86_64_GOTPC64 R_X86_64 +pkg debug/elf, const R_X86_64_GOTPCREL64 = 28 +pkg debug/elf, const R_X86_64_GOTPCREL64 R_X86_64 +pkg debug/elf, const R_X86_64_GOTPCRELX = 41 +pkg debug/elf, const R_X86_64_GOTPCRELX R_X86_64 +pkg debug/elf, const R_X86_64_GOTPLT64 = 30 +pkg debug/elf, const R_X86_64_GOTPLT64 R_X86_64 +pkg debug/elf, const R_X86_64_IRELATIVE = 37 +pkg debug/elf, const R_X86_64_IRELATIVE R_X86_64 +pkg debug/elf, const R_X86_64_PC32_BND = 39 +pkg debug/elf, const R_X86_64_PC32_BND R_X86_64 +pkg debug/elf, const R_X86_64_PC64 = 24 +pkg debug/elf, const R_X86_64_PC64 R_X86_64 +pkg debug/elf, const R_X86_64_PLT32_BND = 40 +pkg debug/elf, const R_X86_64_PLT32_BND R_X86_64 +pkg debug/elf, const R_X86_64_PLTOFF64 = 31 +pkg debug/elf, const R_X86_64_PLTOFF64 R_X86_64 +pkg debug/elf, const R_X86_64_RELATIVE64 = 38 +pkg debug/elf, const R_X86_64_RELATIVE64 R_X86_64 +pkg debug/elf, const R_X86_64_REX_GOTPCRELX = 42 +pkg debug/elf, const R_X86_64_REX_GOTPCRELX R_X86_64 +pkg debug/elf, const R_X86_64_SIZE32 = 32 +pkg debug/elf, const R_X86_64_SIZE32 R_X86_64 +pkg debug/elf, const R_X86_64_SIZE64 = 33 +pkg debug/elf, 
const R_X86_64_SIZE64 R_X86_64 +pkg debug/elf, const R_X86_64_TLSDESC = 36 +pkg debug/elf, const R_X86_64_TLSDESC R_X86_64 +pkg debug/elf, const R_X86_64_TLSDESC_CALL = 35 +pkg debug/elf, const R_X86_64_TLSDESC_CALL R_X86_64 +pkg debug/macho, const ARM64_RELOC_ADDEND = 10 +pkg debug/macho, const ARM64_RELOC_ADDEND RelocTypeARM64 +pkg debug/macho, const ARM64_RELOC_BRANCH26 = 2 +pkg debug/macho, const ARM64_RELOC_BRANCH26 RelocTypeARM64 +pkg debug/macho, const ARM64_RELOC_GOT_LOAD_PAGE21 = 5 +pkg debug/macho, const ARM64_RELOC_GOT_LOAD_PAGE21 RelocTypeARM64 +pkg debug/macho, const ARM64_RELOC_GOT_LOAD_PAGEOFF12 = 6 +pkg debug/macho, const ARM64_RELOC_GOT_LOAD_PAGEOFF12 RelocTypeARM64 +pkg debug/macho, const ARM64_RELOC_PAGE21 = 3 +pkg debug/macho, const ARM64_RELOC_PAGE21 RelocTypeARM64 +pkg debug/macho, const ARM64_RELOC_PAGEOFF12 = 4 +pkg debug/macho, const ARM64_RELOC_PAGEOFF12 RelocTypeARM64 +pkg debug/macho, const ARM64_RELOC_POINTER_TO_GOT = 7 +pkg debug/macho, const ARM64_RELOC_POINTER_TO_GOT RelocTypeARM64 +pkg debug/macho, const ARM64_RELOC_SUBTRACTOR = 1 +pkg debug/macho, const ARM64_RELOC_SUBTRACTOR RelocTypeARM64 +pkg debug/macho, const ARM64_RELOC_TLVP_LOAD_PAGE21 = 8 +pkg debug/macho, const ARM64_RELOC_TLVP_LOAD_PAGE21 RelocTypeARM64 +pkg debug/macho, const ARM64_RELOC_TLVP_LOAD_PAGEOFF12 = 9 +pkg debug/macho, const ARM64_RELOC_TLVP_LOAD_PAGEOFF12 RelocTypeARM64 +pkg debug/macho, const ARM64_RELOC_UNSIGNED = 0 +pkg debug/macho, const ARM64_RELOC_UNSIGNED RelocTypeARM64 +pkg debug/macho, const ARM_RELOC_BR24 = 5 +pkg debug/macho, const ARM_RELOC_BR24 RelocTypeARM +pkg debug/macho, const ARM_RELOC_HALF = 8 +pkg debug/macho, const ARM_RELOC_HALF RelocTypeARM +pkg debug/macho, const ARM_RELOC_HALF_SECTDIFF = 9 +pkg debug/macho, const ARM_RELOC_HALF_SECTDIFF RelocTypeARM +pkg debug/macho, const ARM_RELOC_LOCAL_SECTDIFF = 3 +pkg debug/macho, const ARM_RELOC_LOCAL_SECTDIFF RelocTypeARM +pkg debug/macho, const ARM_RELOC_PAIR = 1 +pkg debug/macho, const 
ARM_RELOC_PAIR RelocTypeARM +pkg debug/macho, const ARM_RELOC_PB_LA_PTR = 4 +pkg debug/macho, const ARM_RELOC_PB_LA_PTR RelocTypeARM +pkg debug/macho, const ARM_RELOC_SECTDIFF = 2 +pkg debug/macho, const ARM_RELOC_SECTDIFF RelocTypeARM +pkg debug/macho, const ARM_RELOC_VANILLA = 0 +pkg debug/macho, const ARM_RELOC_VANILLA RelocTypeARM +pkg debug/macho, const ARM_THUMB_32BIT_BRANCH = 7 +pkg debug/macho, const ARM_THUMB_32BIT_BRANCH RelocTypeARM +pkg debug/macho, const ARM_THUMB_RELOC_BR22 = 6 +pkg debug/macho, const ARM_THUMB_RELOC_BR22 RelocTypeARM +pkg debug/macho, const FlagAllModsBound = 4096 +pkg debug/macho, const FlagAllModsBound uint32 +pkg debug/macho, const FlagAllowStackExecution = 131072 +pkg debug/macho, const FlagAllowStackExecution uint32 +pkg debug/macho, const FlagAppExtensionSafe = 33554432 +pkg debug/macho, const FlagAppExtensionSafe uint32 +pkg debug/macho, const FlagBindAtLoad = 8 +pkg debug/macho, const FlagBindAtLoad uint32 +pkg debug/macho, const FlagBindsToWeak = 65536 +pkg debug/macho, const FlagBindsToWeak uint32 +pkg debug/macho, const FlagCanonical = 16384 +pkg debug/macho, const FlagCanonical uint32 +pkg debug/macho, const FlagDeadStrippableDylib = 4194304 +pkg debug/macho, const FlagDeadStrippableDylib uint32 +pkg debug/macho, const FlagDyldLink = 4 +pkg debug/macho, const FlagDyldLink uint32 +pkg debug/macho, const FlagForceFlat = 256 +pkg debug/macho, const FlagForceFlat uint32 +pkg debug/macho, const FlagHasTLVDescriptors = 8388608 +pkg debug/macho, const FlagHasTLVDescriptors uint32 +pkg debug/macho, const FlagIncrLink = 2 +pkg debug/macho, const FlagIncrLink uint32 +pkg debug/macho, const FlagLazyInit = 64 +pkg debug/macho, const FlagLazyInit uint32 +pkg debug/macho, const FlagNoFixPrebinding = 1024 +pkg debug/macho, const FlagNoFixPrebinding uint32 +pkg debug/macho, const FlagNoHeapExecution = 16777216 +pkg debug/macho, const FlagNoHeapExecution uint32 +pkg debug/macho, const FlagNoMultiDefs = 512 +pkg debug/macho, const 
FlagNoMultiDefs uint32 +pkg debug/macho, const FlagNoReexportedDylibs = 1048576 +pkg debug/macho, const FlagNoReexportedDylibs uint32 +pkg debug/macho, const FlagNoUndefs = 1 +pkg debug/macho, const FlagNoUndefs uint32 +pkg debug/macho, const FlagPIE = 2097152 +pkg debug/macho, const FlagPIE uint32 +pkg debug/macho, const FlagPrebindable = 2048 +pkg debug/macho, const FlagPrebindable uint32 +pkg debug/macho, const FlagPrebound = 16 +pkg debug/macho, const FlagPrebound uint32 +pkg debug/macho, const FlagRootSafe = 262144 +pkg debug/macho, const FlagRootSafe uint32 +pkg debug/macho, const FlagSetuidSafe = 524288 +pkg debug/macho, const FlagSetuidSafe uint32 +pkg debug/macho, const FlagSplitSegs = 32 +pkg debug/macho, const FlagSplitSegs uint32 +pkg debug/macho, const FlagSubsectionsViaSymbols = 8192 +pkg debug/macho, const FlagSubsectionsViaSymbols uint32 +pkg debug/macho, const FlagTwoLevel = 128 +pkg debug/macho, const FlagTwoLevel uint32 +pkg debug/macho, const FlagWeakDefines = 32768 +pkg debug/macho, const FlagWeakDefines uint32 +pkg debug/macho, const GENERIC_RELOC_LOCAL_SECTDIFF = 4 +pkg debug/macho, const GENERIC_RELOC_LOCAL_SECTDIFF RelocTypeGeneric +pkg debug/macho, const GENERIC_RELOC_PAIR = 1 +pkg debug/macho, const GENERIC_RELOC_PAIR RelocTypeGeneric +pkg debug/macho, const GENERIC_RELOC_PB_LA_PTR = 3 +pkg debug/macho, const GENERIC_RELOC_PB_LA_PTR RelocTypeGeneric +pkg debug/macho, const GENERIC_RELOC_SECTDIFF = 2 +pkg debug/macho, const GENERIC_RELOC_SECTDIFF RelocTypeGeneric +pkg debug/macho, const GENERIC_RELOC_TLV = 5 +pkg debug/macho, const GENERIC_RELOC_TLV RelocTypeGeneric +pkg debug/macho, const GENERIC_RELOC_VANILLA = 0 +pkg debug/macho, const GENERIC_RELOC_VANILLA RelocTypeGeneric +pkg debug/macho, const LoadCmdRpath = 2147483676 +pkg debug/macho, const LoadCmdRpath LoadCmd +pkg debug/macho, const X86_64_RELOC_BRANCH = 2 +pkg debug/macho, const X86_64_RELOC_BRANCH RelocTypeX86_64 +pkg debug/macho, const X86_64_RELOC_GOT = 4 +pkg debug/macho, 
const X86_64_RELOC_GOT RelocTypeX86_64 +pkg debug/macho, const X86_64_RELOC_GOT_LOAD = 3 +pkg debug/macho, const X86_64_RELOC_GOT_LOAD RelocTypeX86_64 +pkg debug/macho, const X86_64_RELOC_SIGNED = 1 +pkg debug/macho, const X86_64_RELOC_SIGNED RelocTypeX86_64 +pkg debug/macho, const X86_64_RELOC_SIGNED_1 = 6 +pkg debug/macho, const X86_64_RELOC_SIGNED_1 RelocTypeX86_64 +pkg debug/macho, const X86_64_RELOC_SIGNED_2 = 7 +pkg debug/macho, const X86_64_RELOC_SIGNED_2 RelocTypeX86_64 +pkg debug/macho, const X86_64_RELOC_SIGNED_4 = 8 +pkg debug/macho, const X86_64_RELOC_SIGNED_4 RelocTypeX86_64 +pkg debug/macho, const X86_64_RELOC_SUBTRACTOR = 5 +pkg debug/macho, const X86_64_RELOC_SUBTRACTOR RelocTypeX86_64 +pkg debug/macho, const X86_64_RELOC_TLV = 9 +pkg debug/macho, const X86_64_RELOC_TLV RelocTypeX86_64 +pkg debug/macho, const X86_64_RELOC_UNSIGNED = 0 +pkg debug/macho, const X86_64_RELOC_UNSIGNED RelocTypeX86_64 +pkg debug/macho, method (RelocTypeARM) GoString() string +pkg debug/macho, method (RelocTypeARM) String() string +pkg debug/macho, method (RelocTypeARM64) GoString() string +pkg debug/macho, method (RelocTypeARM64) String() string +pkg debug/macho, method (RelocTypeGeneric) GoString() string +pkg debug/macho, method (RelocTypeGeneric) String() string +pkg debug/macho, method (RelocTypeX86_64) GoString() string +pkg debug/macho, method (RelocTypeX86_64) String() string +pkg debug/macho, method (Rpath) Raw() []uint8 +pkg debug/macho, method (Type) GoString() string +pkg debug/macho, method (Type) String() string +pkg debug/macho, type Reloc struct +pkg debug/macho, type Reloc struct, Addr uint32 +pkg debug/macho, type Reloc struct, Extern bool +pkg debug/macho, type Reloc struct, Len uint8 +pkg debug/macho, type Reloc struct, Pcrel bool +pkg debug/macho, type Reloc struct, Scattered bool +pkg debug/macho, type Reloc struct, Type uint8 +pkg debug/macho, type Reloc struct, Value uint32 +pkg debug/macho, type RelocTypeARM int +pkg debug/macho, type 
RelocTypeARM64 int +pkg debug/macho, type RelocTypeGeneric int +pkg debug/macho, type RelocTypeX86_64 int +pkg debug/macho, type Rpath struct +pkg debug/macho, type Rpath struct, Path string +pkg debug/macho, type Rpath struct, embedded LoadBytes +pkg debug/macho, type RpathCmd struct +pkg debug/macho, type RpathCmd struct, Cmd LoadCmd +pkg debug/macho, type RpathCmd struct, Len uint32 +pkg debug/macho, type RpathCmd struct, Path uint32 +pkg debug/macho, type Section struct, Relocs []Reloc +pkg encoding/csv, type ParseError struct, StartLine int +pkg encoding/hex, func NewDecoder(io.Reader) io.Reader +pkg encoding/hex, func NewEncoder(io.Writer) io.Writer +pkg encoding/json, method (*Decoder) DisallowUnknownFields() +pkg encoding/xml, func NewTokenDecoder(TokenReader) *Decoder +pkg encoding/xml, type TokenReader interface { Token } +pkg encoding/xml, type TokenReader interface, Token() (Token, error) +pkg flag, method (*FlagSet) ErrorHandling() ErrorHandling +pkg flag, method (*FlagSet) Name() string +pkg flag, method (*FlagSet) Output() io.Writer +pkg math, func Erfcinv(float64) float64 +pkg math, func Erfinv(float64) float64 +pkg math, func Round(float64) float64 +pkg math, func RoundToEven(float64) float64 +pkg math/big, const MaxBase = 62 +pkg math/big, method (*Float) Sqrt(*Float) *Float +pkg math/big, method (*Int) CmpAbs(*Int) int +pkg math/rand, func Shuffle(int, func(int, int)) +pkg math/rand, method (*Rand) Shuffle(int, func(int, int)) +pkg net, method (*TCPListener) SyscallConn() (syscall.RawConn, error) +pkg net, method (*UnixListener) SyscallConn() (syscall.RawConn, error) +pkg net/smtp, method (*Client) Noop() error +pkg os, func IsTimeout(error) bool +pkg os, method (*File) SetDeadline(time.Time) error +pkg os, method (*File) SetReadDeadline(time.Time) error +pkg os, method (*File) SetWriteDeadline(time.Time) error +pkg os, method (*PathError) Timeout() bool +pkg os, method (*SyscallError) Timeout() bool +pkg os, var ErrNoDeadline error +pkg strings, 
method (*Builder) Grow(int) +pkg strings, method (*Builder) Len() int +pkg strings, method (*Builder) ReadFrom(io.Reader) (int64, error) +pkg strings, method (*Builder) Reset() +pkg strings, method (*Builder) String() string +pkg strings, method (*Builder) Write([]uint8) (int, error) +pkg strings, method (*Builder) WriteByte(uint8) error +pkg strings, method (*Builder) WriteRune(int32) (int, error) +pkg strings, method (*Builder) WriteString(string) (int, error) +pkg strings, type Builder struct +pkg syscall (freebsd-386), const SYS_UTIMENSAT = 547 +pkg syscall (freebsd-386), const SYS_UTIMENSAT ideal-int +pkg syscall (freebsd-386-cgo), const SYS_UTIMENSAT = 547 +pkg syscall (freebsd-386-cgo), const SYS_UTIMENSAT ideal-int +pkg syscall (freebsd-amd64), const SYS_UTIMENSAT = 547 +pkg syscall (freebsd-amd64), const SYS_UTIMENSAT ideal-int +pkg syscall (freebsd-amd64-cgo), const SYS_UTIMENSAT = 547 +pkg syscall (freebsd-amd64-cgo), const SYS_UTIMENSAT ideal-int +pkg syscall (freebsd-arm), const SYS_UTIMENSAT = 547 +pkg syscall (freebsd-arm), const SYS_UTIMENSAT ideal-int +pkg syscall (freebsd-arm-cgo), const SYS_UTIMENSAT = 547 +pkg syscall (freebsd-arm-cgo), const SYS_UTIMENSAT ideal-int +pkg syscall (windows-386), func CreateProcessAsUser(Token, *uint16, *uint16, *SecurityAttributes, *SecurityAttributes, bool, uint32, *uint16, *uint16, *StartupInfo, *ProcessInformation) error +pkg syscall (windows-386), type SysProcAttr struct, Token Token +pkg syscall (windows-amd64), func CreateProcessAsUser(Token, *uint16, *uint16, *SecurityAttributes, *SecurityAttributes, bool, uint32, *uint16, *uint16, *StartupInfo, *ProcessInformation) error +pkg syscall (windows-amd64), type SysProcAttr struct, Token Token +pkg text/template/parse, const NodeBreak = 20 +pkg text/template/parse, const NodeBreak NodeType +pkg text/template/parse, const NodeContinue = 21 +pkg text/template/parse, const NodeContinue NodeType +pkg text/template/parse, method (*BreakNode) Copy() Node +pkg 
text/template/parse, method (*BreakNode) Position() Pos +pkg text/template/parse, method (*BreakNode) String() string +pkg text/template/parse, method (*BreakNode) Type() NodeType +pkg text/template/parse, method (*ContinueNode) Copy() Node +pkg text/template/parse, method (*ContinueNode) Position() Pos +pkg text/template/parse, method (*ContinueNode) String() string +pkg text/template/parse, method (*ContinueNode) Type() NodeType +pkg text/template/parse, type BreakNode struct +pkg text/template/parse, type BreakNode struct, embedded NodeType +pkg text/template/parse, type BreakNode struct, embedded Pos +pkg text/template/parse, type ContinueNode struct +pkg text/template/parse, type ContinueNode struct, embedded NodeType +pkg text/template/parse, type ContinueNode struct, embedded Pos +pkg unicode, const Version = "10.0.0" +pkg unicode, var Masaram_Gondi *RangeTable +pkg unicode, var Nushu *RangeTable +pkg unicode, var Regional_Indicator *RangeTable +pkg unicode, var Soyombo *RangeTable +pkg unicode, var Zanabazar_Square *RangeTable diff --git a/doc/articles/wiki/final-noclosure.go b/doc/articles/wiki/final-noclosure.go index d72ca805b87..b4ce2557426 100644 --- a/doc/articles/wiki/final-noclosure.go +++ b/doc/articles/wiki/final-noclosure.go @@ -8,6 +8,7 @@ import ( "errors" "html/template" "io/ioutil" + "log" "net/http" "regexp" ) @@ -98,5 +99,5 @@ func main() { http.HandleFunc("/view/", viewHandler) http.HandleFunc("/edit/", editHandler) http.HandleFunc("/save/", saveHandler) - http.ListenAndServe(":8080", nil) + log.Fatal(http.ListenAndServe(":8080", nil)) } diff --git a/doc/articles/wiki/final-noerror.go b/doc/articles/wiki/final-noerror.go index 86d8da751f9..42a22da9dd8 100644 --- a/doc/articles/wiki/final-noerror.go +++ b/doc/articles/wiki/final-noerror.go @@ -7,6 +7,7 @@ package main import ( "html/template" "io/ioutil" + "log" "net/http" ) @@ -49,5 +50,5 @@ func viewHandler(w http.ResponseWriter, r *http.Request) { func main() { http.HandleFunc("/view/", 
viewHandler) http.HandleFunc("/edit/", editHandler) - http.ListenAndServe(":8080", nil) + log.Fatal(http.ListenAndServe(":8080", nil)) } diff --git a/doc/articles/wiki/final-parsetemplate.go b/doc/articles/wiki/final-parsetemplate.go index 5ff8bf60c5b..a9aa7f28943 100644 --- a/doc/articles/wiki/final-parsetemplate.go +++ b/doc/articles/wiki/final-parsetemplate.go @@ -7,6 +7,7 @@ package main import ( "html/template" "io/ioutil" + "log" "net/http" "regexp" ) @@ -87,5 +88,5 @@ func main() { http.HandleFunc("/view/", makeHandler(viewHandler)) http.HandleFunc("/edit/", makeHandler(editHandler)) http.HandleFunc("/save/", makeHandler(saveHandler)) - http.ListenAndServe(":8080", nil) + log.Fatal(http.ListenAndServe(":8080", nil)) } diff --git a/doc/articles/wiki/final-template.go b/doc/articles/wiki/final-template.go index 719157da954..7ea480e50a9 100644 --- a/doc/articles/wiki/final-template.go +++ b/doc/articles/wiki/final-template.go @@ -7,6 +7,7 @@ package main import ( "html/template" "io/ioutil" + "log" "net/http" ) @@ -61,5 +62,5 @@ func main() { http.HandleFunc("/view/", viewHandler) http.HandleFunc("/edit/", editHandler) http.HandleFunc("/save/", saveHandler) - http.ListenAndServe(":8080", nil) + log.Fatal(http.ListenAndServe(":8080", nil)) } diff --git a/doc/articles/wiki/final-test.patch b/doc/articles/wiki/final-test.patch index 499ad789b31..fd7d6253681 100644 --- a/doc/articles/wiki/final-test.patch +++ b/doc/articles/wiki/final-test.patch @@ -1,36 +1,27 @@ -*** final.go 2015-06-14 23:59:22.000000000 +0200 ---- final-test.go 2015-06-15 00:15:41.000000000 +0200 -*************** -*** 7,12 **** ---- 7,14 ---- - import ( - "html/template" - "io/ioutil" -+ "log" -+ "net" - "net/http" - "regexp" - ) -*************** -*** 85,89 **** - http.HandleFunc("/edit/", makeHandler(editHandler)) - http.HandleFunc("/save/", makeHandler(saveHandler)) - -! 
http.ListenAndServe(":8080", nil) - } ---- 87,101 ---- - http.HandleFunc("/edit/", makeHandler(editHandler)) - http.HandleFunc("/save/", makeHandler(saveHandler)) - -! l, err := net.Listen("tcp", "127.0.0.1:0") -! if err != nil { -! log.Fatal(err) -! } -! err = ioutil.WriteFile("final-test-port.txt", []byte(l.Addr().String()), 0644) -! if err != nil { -! log.Fatal(err) -! } -! s := &http.Server{} -! s.Serve(l) -! return - } +--- final.go 2017-08-31 13:19:00.422925489 -0700 ++++ final-test.go 2017-08-31 13:23:43.381391659 -0700 +@@ -8,6 +8,7 @@ + "html/template" + "io/ioutil" + "log" ++ "net" + "net/http" + "regexp" + ) +@@ -86,5 +87,15 @@ + http.HandleFunc("/edit/", makeHandler(editHandler)) + http.HandleFunc("/save/", makeHandler(saveHandler)) + +- log.Fatal(http.ListenAndServe(":8080", nil)) ++ l, err := net.Listen("tcp", "127.0.0.1:0") ++ if err != nil { ++ log.Fatal(err) ++ } ++ err = ioutil.WriteFile("final-test-port.txt", []byte(l.Addr().String()), 0644) ++ if err != nil { ++ log.Fatal(err) ++ } ++ s := &http.Server{} ++ s.Serve(l) ++ return + } diff --git a/doc/articles/wiki/final.go b/doc/articles/wiki/final.go index 139a3230108..0f6646ba879 100644 --- a/doc/articles/wiki/final.go +++ b/doc/articles/wiki/final.go @@ -7,6 +7,7 @@ package main import ( "html/template" "io/ioutil" + "log" "net/http" "regexp" ) @@ -85,5 +86,5 @@ func main() { http.HandleFunc("/edit/", makeHandler(editHandler)) http.HandleFunc("/save/", makeHandler(saveHandler)) - http.ListenAndServe(":8080", nil) + log.Fatal(http.ListenAndServe(":8080", nil)) } diff --git a/doc/articles/wiki/http-sample.go b/doc/articles/wiki/http-sample.go index ac8cc4f2d67..9bc2084c672 100644 --- a/doc/articles/wiki/http-sample.go +++ b/doc/articles/wiki/http-sample.go @@ -2,6 +2,7 @@ package main import ( "fmt" + "log" "net/http" ) @@ -11,5 +12,5 @@ func handler(w http.ResponseWriter, r *http.Request) { func main() { http.HandleFunc("/", handler) - http.ListenAndServe(":8080", nil) + 
log.Fatal(http.ListenAndServe(":8080", nil)) } diff --git a/doc/articles/wiki/index.html b/doc/articles/wiki/index.html index b6b080df960..3e0d532d7f7 100644 --- a/doc/articles/wiki/index.html +++ b/doc/articles/wiki/index.html @@ -179,7 +179,7 @@ You can compile and run the program like this:
 $ go build wiki.go
 $ ./wiki
-This is a sample page.
+This is a sample Page.
 

@@ -213,6 +213,12 @@ worry about its second parameter, nil, for now.) This function will block until the program is terminated.

+

+ListenAndServe always returns an error, since it only returns when an +unexpected error occurs. +In order to log that error we wrap the function call with log.Fatal. +

+

The function handler is of the type http.HandlerFunc. It takes an http.ResponseWriter and an http.Request as diff --git a/doc/articles/wiki/notemplate.go b/doc/articles/wiki/notemplate.go index be214d1111d..0fda7a98ce5 100644 --- a/doc/articles/wiki/notemplate.go +++ b/doc/articles/wiki/notemplate.go @@ -7,6 +7,7 @@ package main import ( "fmt" "io/ioutil" + "log" "net/http" ) @@ -52,5 +53,5 @@ func editHandler(w http.ResponseWriter, r *http.Request) { func main() { http.HandleFunc("/view/", viewHandler) http.HandleFunc("/edit/", editHandler) - http.ListenAndServe(":8080", nil) + log.Fatal(http.ListenAndServe(":8080", nil)) } diff --git a/doc/articles/wiki/part2.go b/doc/articles/wiki/part2.go index c0231693efb..30f9dcf146d 100644 --- a/doc/articles/wiki/part2.go +++ b/doc/articles/wiki/part2.go @@ -7,6 +7,7 @@ package main import ( "fmt" "io/ioutil" + "log" "net/http" ) @@ -37,5 +38,5 @@ func viewHandler(w http.ResponseWriter, r *http.Request) { func main() { http.HandleFunc("/view/", viewHandler) - http.ListenAndServe(":8080", nil) + log.Fatal(http.ListenAndServe(":8080", nil)) } diff --git a/doc/articles/wiki/part3-errorhandling.go b/doc/articles/wiki/part3-errorhandling.go index bb4ecda84bb..34b13a60864 100644 --- a/doc/articles/wiki/part3-errorhandling.go +++ b/doc/articles/wiki/part3-errorhandling.go @@ -7,6 +7,7 @@ package main import ( "html/template" "io/ioutil" + "log" "net/http" ) @@ -69,5 +70,5 @@ func main() { http.HandleFunc("/view/", viewHandler) http.HandleFunc("/edit/", editHandler) http.HandleFunc("/save/", saveHandler) - http.ListenAndServe(":8080", nil) + log.Fatal(http.ListenAndServe(":8080", nil)) } diff --git a/doc/articles/wiki/part3.go b/doc/articles/wiki/part3.go index 174f3abcd76..5e5d5056c49 100644 --- a/doc/articles/wiki/part3.go +++ b/doc/articles/wiki/part3.go @@ -7,6 +7,7 @@ package main import ( "html/template" "io/ioutil" + "log" "net/http" ) @@ -53,5 +54,5 @@ func main() { http.HandleFunc("/view/", viewHandler) 
http.HandleFunc("/edit/", editHandler) //http.HandleFunc("/save/", saveHandler) - http.ListenAndServe(":8080", nil) + log.Fatal(http.ListenAndServe(":8080", nil)) } diff --git a/doc/asm.html b/doc/asm.html index 79dc7df322f..e3e17f85f58 100644 --- a/doc/asm.html +++ b/doc/asm.html @@ -876,6 +876,12 @@ Addressing modes: +

+The value of GOMIPS environment variable (hardfloat or +softfloat) is made available to assembly code by predefining either +GOMIPS_hardfloat or GOMIPS_softfloat. +

+

Unsupported opcodes

diff --git a/doc/contrib.html b/doc/contrib.html index 0290923bcd8..2dc1e7d0e47 100644 --- a/doc/contrib.html +++ b/doc/contrib.html @@ -34,6 +34,7 @@ We encourage all Go users to subscribe to

A summary of the changes between Go releases. Notes for the major releases:

+{{if not $.GoogleCN}}

Language

+{{end}}

Tools

More

@@ -169,7 +190,7 @@ See the Articles page at the Wiki for more Go articles.

- +{{if not $.GoogleCN}}

Talks

@@ -200,7 +221,7 @@ This talk expands on the Go Concurrency Patterns talk to dive deeper into

See the Go Talks site and wiki page for more Go talks.

- +{{end}}

Non-English Documentation

diff --git a/doc/editors.html b/doc/editors.html index 264f5af920f..84fb068918b 100644 --- a/doc/editors.html +++ b/doc/editors.html @@ -22,8 +22,8 @@ editing, navigation, testing, and debugging experience.
  • vim: vim-go plugin provides Go programming language support
  • Visual Studio Code: Go extension provides support for the Go programming language
  • -
  • Gogland: Gogland is distributed either as a standalone IDE -or as a plugin for the commercial IntelliJ Platform IDEs
  • +
  • GoLand: GoLand is distributed either as a standalone IDE +or as a plugin for IntelliJ IDEA Ultimate
  • Atom: Go-Plus is an Atom package that provides enhanced Go support
  • @@ -44,7 +44,7 @@ The following feature matrix lists and compares the most significant features.
    vim
    Visual Studio Code -
    Gogland +
    GoLand
    Atom @@ -159,7 +159,7 @@ The following feature matrix lists and compares the most significant features. Auto generate tests for packages, files and identifiers No Yes - No + Yes No diff --git a/doc/editors/gogland.png b/doc/editors/goland.png similarity index 100% rename from doc/editors/gogland.png rename to doc/editors/goland.png diff --git a/doc/effective_go.html b/doc/effective_go.html index bc70b0c8e3f..61de824fcd2 100644 --- a/doc/effective_go.html +++ b/doc/effective_go.html @@ -1431,9 +1431,7 @@ func Append(slice, data []byte) []byte { slice = newSlice } slice = slice[0:l+len(data)] - for i, c := range data { - slice[l+i] = c - } + copy(slice[l:], data) return slice } @@ -1521,7 +1519,7 @@ for i := range picture {

    Maps are a convenient and powerful built-in data structure that associate values of one type (the key) with values of another type -(the element or value) +(the element or value). The key can be of any type for which the equality operator is defined, such as integers, floating point and complex numbers, @@ -2792,7 +2790,7 @@ job := &Job{command, log.New(os.Stderr, "Job: ", log.Ldate)}

    If we need to refer to an embedded field directly, the type name of the field, ignoring the package qualifier, serves as a field name, as it did -in the Read method of our ReaderWriter struct. +in the Read method of our ReadWriter struct. Here, if we needed to access the *log.Logger of a Job variable job, we would write job.Logger, diff --git a/doc/gccgo_install.html b/doc/gccgo_install.html index 4f6a911541f..d4eac12f11d 100644 --- a/doc/gccgo_install.html +++ b/doc/gccgo_install.html @@ -59,10 +59,17 @@ should not be visible to Go programs.

    -The GCC 7 releases are expected to include a complete implementation -of the Go 1.8 user libraries. As with earlier releases, the Go 1.8 -runtime is not fully merged, but that should not be visible to Go -programs. +The GCC 7 releases include a complete implementation of the Go 1.8.1 +user libraries. As with earlier releases, the Go 1.8 runtime is not +fully merged, but that should not be visible to Go programs. +

    + +

    +The GCC 8 releases are expected to include a complete implementation +of the Go 1.10 release, depending on release timing. The Go 1.10 +runtime has now been fully merged into the GCC development sources, +and concurrent garbage collection is expected to be fully supported in +GCC 8.

    Source code

    diff --git a/doc/go1.10.html b/doc/go1.10.html new file mode 100644 index 00000000000..cdc0feee373 --- /dev/null +++ b/doc/go1.10.html @@ -0,0 +1,1240 @@ + + + + + + +

    DRAFT RELEASE NOTES - Introduction to Go 1.10

    + +

    + Go 1.10 is not yet released. These are work-in-progress + release notes. Go 1.10 is expected to be released in February 2018. +

    + +

    +The latest Go release, version 1.10, arrives six months after go1.9. +Most of its changes are in the implementation of the toolchain, runtime, and libraries. +As always, the release maintains the Go 1 promise of compatibility. +We expect almost all Go programs to continue to compile and run as before. +

    + +

    +OVERVIEW HERE +

    + +

    Changes to the language

    + +

    +There are no substantive changes to the language. +

    + +

    +A corner case involving shifts by untyped constants has been clarified, +and as a result the compilers have been updated to allow the index expression +x[1.0 << s] where s is an untyped constant; +the go/types package already did. +

    + +

    +The grammar for method expressions has been updated to relax the +syntax to allow any type expression as a receiver; +this matches what the compilers were already implementing. +For example, struct{io.Reader}.Read is a valid, if unusual, +method expression that the compilers already accepted and is +now permitted by the language grammar. +

    + +

    Ports

    + +

    +There are no new supported operating systems or processor architectures in this release. +Most of the work has focused on strengthening the support for existing ports, +in particular new instructions in the assembler +and improvements to the code generated by the compilers. +

    + +

    Tools

    + +

    Default GOROOT & GOTMPDIR

    + +

    +TODO: default GOROOT changes in cmd/go +TODO: computed GOROOT change +

    + +

    +By default, the go tool creates its temporary files +in the system temporary directory (for example, $TMPDIR on Unix). +If the new environment variable $GOTMPDIR is set, +the go tool will create its temporary files and directories in that directory instead. +

    + +

    Build & Install

    + +

    +The go build command now detects out-of-date packages +purely based on the content of source files, specified build flags, and metadata stored in the compiled packages. +Modification times are no longer consulted or relevant. +The old advice to add -a to force a rebuild in cases where +the modification times were misleading for one reason or another +(for example, changes in build flags) is no longer necessary: +builds now always detect when packages must be rebuilt. +(If you observe otherwise, please file a bug.) +

    + +

    +The go build -asmflags, -gcflags, -gccgoflags, and -ldflags options +now apply by default only to the packages listed directly on the command line. +For example, go build -gcflags=-m mypkg +passes the compiler the -m flag when building mypkg +but not its dependencies. +The new, more general form -asmflags=pattern=flags (and similarly for the others) +applies the flags only to the packages matching the pattern. +For example: go install -ldflags=cmd/gofmt=-X=main.version=1.2.3 cmd/... +installs all the commands matching cmd/... but only applies the -X option +to the linker flags for cmd/gofmt. +For more details, see go help build. +

    + +

    +The go build command now maintains a cache of +recently built packages, separate from the installed packages in $GOROOT/pkg or $GOPATH/pkg. +The effect of the cache should be to speed builds that do not explicitly install packages +or when switching between different copies of source code (for example, when changing +back and forth between different branches in a version control system). +The old advice to add the -i flag for speed, as in go build -i +or go test -i, +is no longer necessary: builds run just as fast without -i. +For more details, see go help cache. +

    + +

    +The go install command now installs only the +packages and commands listed directly on the command line. +For example, go install cmd/gofmt +installs the gofmt program but not any of the packages on which it depends. +The new build cache makes future commands still run as quickly as if the +dependencies had been installed. +To force the installation of dependencies, use the new +go install -i flag. +Installing dependencies should not be necessary in general, +and the very concept of installed packages may disappear in a future release. +

    + +

    +Many details of the go build implementation have changed to support these improvements. +One new requirement implied by these changes is that +binary-only packages must now declare accurate import blocks in their +stub source code, so that those imports can be made available when +linking a program using the binary-only package. +For more details, see go help filetype. +

    + +

    Test

    + +

    +The go test command now caches test results: +if the test executable and command line match a previous run +and the files and environment variables consulted by that run +have not changed either, go test will print +the previous test output, replacing the elapsed time with the string “(cached).” +Test caching applies only to successful test results; +only to go test +commands with an explicit list of packages; and +only to command lines using a subset of the +-cpu, -list, -parallel, +-run, -short, and -v test flags. +The idiomatic way to bypass test caching is to use -count=1. +

    + +

    +The go test command now automatically runs +go vet on the package being tested, +to identify significant problems before running the test. +Any such problems are treated like build errors and prevent execution of the test. +Only a high-confidence subset of the available go vet +checks are enabled for this automatic check. +To disable the running of go vet, use +go test -vet=off. +

    + +

    +The go test -coverpkg flag now +interprets its argument as a comma-separated list of patterns to match against +the dependencies of each test, not as a list of packages to load anew. +For example, go test -coverpkg=all +is now a meaningful way to run a test with coverage enabled for the test package +and all its dependencies. +Also, the go test -coverprofile option is now +supported when running multiple tests. +

    + +

    +In case of failure due to timeout, tests are now more likely to write their profiles before exiting. +

    + +

    +The go test command now always +merges the standard output and standard error from a given test binary execution +and writes both to go test's standard output. +In past releases, go test only applied this +merging most of the time. +

    + +

    +The go test -v output +now includes PAUSE and CONT status update +lines to make clearer when parallel tests pause and continue. +

    + +

    +Finally, the new go test -json flag +filters test output through the new command +go tool test2json +to produce a machine-readable JSON-formatted description of test execution. +This should allow the creation of rich presentations of test execution +in IDEs and other tools. +

    + + +

    +For more details about all these changes, +see go help test +and the test2json documentation. +

    + +

    Cgo

    + +

    +Cgo now implements a C typedef like “typedef X Y;” using a Go type alias, +so that Go code may use the types C.X and C.Y interchangeably. +It also now supports the use of niladic function-like macros. +Also, the documentation has been updated to clarify that +Go structs and Go arrays are not supported in the type signatures of cgo-exported functions. +

    + +

    +During toolchain bootstrap, the environment variables CC and CC_FOR_TARGET specify +the default C compiler that the resulting toolchain will use for host and target builds, respectively. +However, if the toolchain will be used with multiple targets, it may be necessary to specify a different C compiler for each +(for example, a different compiler for darwin/arm64 versus linux/ppc64le). +The new set of environment variables CC_FOR_goos_goarch +allows specifying a different default C compiler for each target. +Note that these variables only apply during toolchain bootstrap, +to set the defaults used by the resulting toolchain. +Later go build commands refer to the CC environment +variable or else the built-in default. +For more details, see the cgo documentation. +

    + +

    Doc

    + +

    +The go doc tool now adds functions returning slices of T or *T +to the display of type T, similar to the existing behavior for functions returning single T or *T results. +For example: +

    + +
    +$ go doc mail.Address
    +package mail // import "net/mail"
    +
    +type Address struct {
    +	Name    string 
    +	Address string
    +}
    +    Address represents a single mail address.
    +
    +func ParseAddress(address string) (*Address, error)
    +func ParseAddressList(list string) ([]*Address, error)
    +func (a *Address) String() string
    +$
    +
    + +

    +Previously, ParseAddressList was only shown in the package overview (go doc mail). +

    + +

    Fix

    + +

    +The go fix tool now replaces imports of "golang.org/x/net/context" +with "context". +(Forwarding aliases in the former make it completely equivalent to the latter when using Go 1.9 or later.) +

    + +

    Get

    + +

    +The go get command now supports Fossil source code repositories. +

    + +

    Pprof

    + +

    +The blocking and mutex profiles produced by the runtime/pprof package +now include symbol information, so they can be viewed +in go tool pprof +without the binary that produced the profile. +(All other profile types were changed to include symbol information in Go 1.9.) +

    + +

    +The go tool pprof profile visualizer has been updated to +the latest version from github.com/google/pprof. +

    + +

    Vet

    + +

    +The go vet command now always has access to +complete, up-to-date type information when checking packages, even for packages using cgo or vendored imports. +The reports should be more accurate as a result. +Note that only go vet has access to this information; +the more low-level go tool vet does not +and should be avoided except when working on vet itself. +(As of Go 1.9, go vet provides access to all the same flags as +go tool vet.) +

    + +

    Diagnostics

    + +

    +This release includes a new overview of available Go program diagnostic tools. +

    + +

    Gofmt

    + +

    +A few minor details of the default formatting of Go source code have changed. +First, some complex three-index slice expressions previously formatted like +x[i+1 : j:k] and now +format with more consistent spacing: x[i+1 : j : k]. +Second, single-method interface literals written on a single line, +which are sometimes used in type assertions, +are no longer split onto multiple lines. +Third, blank lines following an opening brace are now always elided. +

    + +

    +Note that these kinds of minor updates to gofmt are expected from time to time. +In general, we recommend against building systems that check that source code +matches the output of a specific version of gofmt. +For example, a continuous integration test that fails if any code already checked into +a repository is not “properly formatted” is inherently fragile and not recommended. +

    + +

    +If multiple programs must agree about which version of gofmt is used to format a source file, +we recommend that they do this by arranging to invoke the same gofmt binary. +For example, in the Go open source repository, we arrange for goimports and +our Git pre-commit hook to agree about source code formatting by having both +invoke the gofmt binary found in the current path. +TODO: Make goimports actually do that. #22695. +As another example, inside Google we arrange that source code presubmit +checks run a gofmt binary maintained at a fixed path in a shared, distributed file system; +that on engineering workstations /usr/bin/gofmt +is a symbolic link to that same path; +and that all editor integrations used for Google development +explicitly invoke /usr/bin/gofmt. +TODO: TMI? +

    + +

    Compiler Toolchain

    + +

    +The compiler includes many improvements to the performance of generated code, +spread fairly evenly across the supported architectures. +

    + +

    +TODO: What to say about DWARF work, if anything? +Global constants (CL 61019), variable decomposition (CL 50878), variable liveness and location lists (CL 41770), more? +What is enabled by default? +

    + +

    +TODO: What to say about FMA, if anything? +The spec change was mentioned in Go 1.9 but I am not sure whether any new architectures turned it on in Go 1.10. +

    + +

    +The various build modes +have been ported to more systems. +Specifically, c-shared now works on linux/ppc64le, windows/386, and windows/amd64; +pie now works on darwin/amd64 and also forces the use of external linking on all systems; +and plugin now works on linux/ppc64le. +

    + +

    +The linux/ppc64le port now requires the use of external linking +with any programs that use cgo, even uses by the standard library. +

    + +

    Assembler

    + +

    +For the ARM 32-bit port, the assembler now supports the instructions +BFC, +BFI, +BFX, +BFXU, +FMULAD, +FMULAF, +FMULSD, +FMULSF, +FNMULAD, +FNMULAF, +FNMULSD, +FNMULSF, +MULAD, +MULAF, +MULSD, +MULSF, +NMULAD, +NMULAF, +NMULD, +NMULF, +NMULSD, +NMULSF, +XTAB, +XTABU, +XTAH, +and +XTAHU. +

    + +

    +For the ARM 64-bit port, the assembler now supports the +VADD, +VADDP, +VADDV, +VAND, +VCMEQ, +VDUP, +VEOR, +VLD1, +VMOV, +VMOVI, +VMOVS, +VORR, +VREV32, +and +VST1 +instructions. +

    + +

    +For the PowerPC 64-bit port, the assembler now supports the POWER9 instructions +ADDEX, +CMPEQB, +COPY, +DARN, +LDMX, +MADDHD, +MADDHDU, +MADDLD, +MFVSRLD, +MTVSRDD, +MTVSRWS, +PASTECC, +VCMPNEZB, +VCMPNEZBCC, +and +VMSUMUDM. +

    + +

    +For the S390X port, the assembler now supports the +TMHH, +TMHL, +TMLH, +and +TMLL +instructions. +

    + +

    +For the X86 64-bit port, the assembler now supports 359 new instructions +and is believed to be complete up to and including the Intel AVX-256 extensions. +The assembler also no longer implements MOVL $0, AX +as an XORL instruction, +to avoid clearing the condition flags unexpectedly. +

    + +

    Gccgo

    + +

    +TODO: Words about GCC 8 and Go 1.10. +

    + +

    Runtime

    + +

    +TODO: Don't start new threads from locked threads or threads that Go did not create. LockOSThread/UnlockOSThread now nest. LockOSThread + return kills the thread +

    + +

    +Stack traces no longer include implicit wrapper functions (previously marked <autogenerated>), +unless a fault or panic happens in the wrapper itself. +

    + +

    +There is no longer a limit on the GOMAXPROCS setting. +(In Go 1.9 the limit was 1024.) +

    + +

    Performance

    + +

    +As always, the changes are so general and varied that precise +statements about performance are difficult to make. Most programs +should run a bit faster, due to speedups in the garbage collector, +better generated code, and optimizations in the core library. +

    + +

    Garbage Collector

    + +

    +TODO: Anything? +

    + +

    Core library

    + +

    +All of the changes to the standard library are minor. +The changes in bytes +and net/url are the most likely to require updating of existing programs. +

    + +

    Minor changes to the library

    + +

    +As always, there are various minor changes and updates to the library, +made with the Go 1 promise of compatibility +in mind. +

    + +
    archive/tar
    +
    +

    +In general, the handling of special header formats is significantly improved and expanded. +

    +

    +FileInfoHeader has always +recorded the Unix UID and GID numbers from its os.FileInfo argument +(specifically, from the system-dependent information returned by the FileInfo's Sys method) +in the returned Header. +Now it also records the user and group names corresponding to those IDs, +as well as the major and minor device numbers for device files. +

    +

    +Errors created by the package now begin with a consistent “tar:” prefix. +(Previously they almost all began with a consistent “archive/tar:” prefix.) +TODO: Why are we changing these? (#22740) +

    +

    +The new Header.Format field +of type Format +controls which tar header format the Writer uses. +The default, as before, is to select the most widely-supported header type +that can encode the fields needed by the header (USTAR if possible, or else PAX if possible, or else GNU). +The Reader sets Header.Format for each header it reads. +

    +

    +Reader and the Writer now support PAX records, +using the new Header.PAXRecords field. +

    +

    +The Reader no longer insists that the file name or link name in GNU headers +be valid UTF-8. +

    +

    +When writing PAX- or GNU-format headers, the Writer now includes +the Header.AccessTime and Header.ChangeTime fields (if set). +When writing PAX-format headers, the times include sub-second precision. +

    +

    +The Writer.Flush method, +which has had no real effect since Go 1.1, is now marked deprecated. +

    +
    + +
    archive/zip
    +
    +

    +Go 1.10 adds more complete support for times and character set encodings in ZIP archives. +

    +

    +The original ZIP format used the standard MS-DOS encoding of year, month, day, hour, minute, and second into fields in two 16-bit values. +That encoding cannot represent time zones or odd seconds, so multiple extensions have been +introduced to allow richer encodings. +In Go 1.10, the Reader and Writer +now support the widely-understood Info-Zip extension that encodes the time separately in the 32-bit Unix “seconds since epoch” form. +The FileHeader's new Modified field of type time.Time +obsoletes the ModifiedTime and ModifiedDate fields, which continue to hold the MS-DOS encoding. +The ModTime and +SetModTime methods +now simply read and write the new Modified field. +The Reader and Writer now adopt the common +convention that ZIP archives storing the Unix time encoding store the local time +in the MS-DOS field, so that the time zone offset can be inferred. +TODO: These last bits are not true but probably should be (#22738) +

    +

    +The header for each file in a ZIP archive has a flag bit indicating whether +the name and comment fields are encoded as UTF-8, as opposed to a system-specific default encoding. +In Go 1.8 and earlier, the Writer never set the UTF-8 bit. +In Go 1.9, the Writer changed to set the UTF-8 bit almost always. +This broke the creation of ZIP archives containing Shift-JIS file names. +In Go 1.10, the Writer now sets the UTF-8 bit only when +both the name and the comment field are valid UTF-8 and at least one is non-ASCII. +Because non-ASCII encodings very rarely look like valid UTF-8, the new +heuristic should be correct nearly all the time. +Setting a FileHeader's new NonUTF8 field to true +disables the heuristic entirely for that file. +

    +

    +The Writer also now supports setting the end-of-central-directory record's comment field, +by setting the Writer's new Comment field +before calling the Close method. +TODO: May change (#22737). +

    +
    + +
    bufio
    +
    +

    +The new Reader.Size +and Writer.Size +methods report the Reader or Writer's underlying buffer size. +

    +
    + +
    bytes
    +
    +

    +The +Fields, +FieldsFunc, +Split, +and +SplitAfter +each already returned slices pointing into the same underlying array as its input. +Go 1.10 changes each of the returned subslices to have capacity equal to its length, +so that appending to a subslice will not overwrite adjacent data in the original input. +

    +
    + +
    crypto/cipher
    +
    +

    +NewOFB now panics if given +an initialization vector of incorrect length, like the other constructors in the +package always have. +(Previously it returned a nil Stream implementation.) +

    +
    + +
    crypto/tls
    +
    +

    +The TLS server now advertises support for SHA-512 signatures when using TLS 1.2. +The server already supported the signatures, but some clients would not select +them unless explicitly advertised. +

    +
    + +
    crypto/x509
    +
    +

    +Leaf certificate validation now enforces the name constraints for all +names contained in the certificate, not just the one name that a client has asked about. +Extended key usage restrictions are similarly now checked all at once. +As a result, after a certificate has been validated, now it can be trusted in its entirety. +It is no longer necessary to revalidate the certificate for each additional name +or key usage. +TODO: Link to docs that may not exist yet. +

    + +

    +Parsed certificates also now report URI names and IP, email, and URI constraints, using the new +Certificate fields +URIs, PermittedIPRanges, ExcludedIPRanges, +PermittedEmailAddresses, ExcludedEmailAddresses, +PermittedURIDomains, and ExcludedURIDomains. +

    + +

    +The new MarshalPKCS8PrivateKey +function converts a private key to PKCS#8 encoded form. +

    +
    + +
    crypto/x509/pkix
    +
    +

    +Name now implements a +String method that +formats the X.509 distinguished name in the standard RFC 2253 format. +

    +
    + +
    database/sql/driver
    +
    +

    +Drivers that want to construct a sql.DB for +their clients can now implement the Connector interface +and call the new sql.OpenDB function, +instead of needing to encode all configuration into a string +passed to sql.Open. +

    +

    +Drivers that implement ExecerContext +no longer need to implement Execer; +similarly, drivers that implement QueryerContext +no longer need to implement Queryer. +Previously, even if the context-based interfaces were implemented they were ignored +unless the non-context-based interfaces were also implemented. +

    +

    +To allow drivers to better isolate different clients using a cached driver connection in succession, +if a Conn implements the new +SessionResetter interface, +database/sql will now call ResetSession before +reusing the Conn for a new client. +

    +
    + +
    debug/elf
    +
    +

    +This release adds 348 new relocation constants divided between the relocation types +R_386, +R_AARCH64, +R_ARM, +R_PPC64, +and +R_X86_64. +

    +
    + +
    debug/macho
    +
    +

    +Go 1.10 adds support for reading relocations from Mach-O sections, +using the Section struct's new Relocs field +and the new Reloc, +RelocTypeARM, +RelocTypeARM64, +RelocTypeGeneric, +and +RelocTypeX86_64 +types and associated constants. +

    +

    +Go 1.10 also adds support for the LC_RPATH load command, +represented by the types +RpathCmd and +Rpath, +and new named constants +for the various flag bits found in headers. +

    +
    + +
    encoding/asn1
    +
    +

    +Marshal now correctly encodes +strings containing asterisks as type UTF8String instead of PrintableString, +unless the string is in a struct field with a tag forcing the use of PrintableString. +Marshal also now respects struct tags containing application directives. +

    +

    +Unmarshal now respects +struct field tags using the explicit and tag +directives. +

    +
    + +
    encoding/csv
    +
    +

    +Reader now disallows the use of +nonsensical Comma and Comment settings, +such as NUL, carriage return, newline, invalid runes, and the Unicode replacement character, +or setting Comma and Comment equal to each other. +

    +

    +In the case of a syntax error in a CSV record that spans multiple input lines, Reader +now reports the line on which the record started in the ParseError's new StartLine field. +

    +

    +Reader also no longer strips carriage return characters +appearing before newline characters in multiline quoted strings. +TODO: Maybe not (#22746). +

    +
    + +
    encoding/hex
    +
    +

    +The new functions +NewEncoder +and +NewDecoder +provide streaming conversions to and from hexadecimal, +analogous to equivalent functions already in +encoding/base32 +and +encoding/base64. +

    + +

    +When the functions +Decode +and +DecodeString +encounter malformed input, +they each now return the number of bytes already converted +along with the error. +Previously they always returned a count of 0 with any error. +

    +
    + +
    encoding/json
    +
    +

    +The Decoder +adds a new method +DisallowUnknownFields +that causes it to report inputs with unknown JSON fields as a decoding error. +(The default behavior has always been to discard unknown fields.) +

    +
    + +
    encoding/xml
    +
    +

    +The new function +NewTokenDecoder +is like +NewDecoder +but creates a decoder reading from a TokenReader +instead of an XML-formatted byte stream. +This is meant to enable the construction of XML stream transformers in client libraries. +

    +
    + +
    flag
    +
    +

    +The default +Usage function now prints +its first line of output to +CommandLine.Output() +instead of assuming os.Stderr, +so that the usage message is properly redirected for +clients using CommandLine.SetOutput. +

    +

    +PrintDefaults now +adds appropriate indentation after newlines in flag usage strings, +so that multi-line usage strings display nicely. +

    +

    +FlagSet adds new methods +ErrorHandling, +Name, +and +Output, +to retrieve the settings passed to +NewFlagSet +and +FlagSet.SetOutput. +

    +
    + +
    go/doc
    +
    +

    +To support the doc change described above, +functions returning slices of T, *T, **T, and so on +are now reported in T's Type's Funcs list, +instead of in the Package's Funcs list. +

    +
    + +
    go/importer
    +
    +

    +The For function now accepts a non-nil lookup argument. +

    +
    + +
    go/printer
    +
    +

    +The changes to the default formatting of Go source code +discussed in the gofmt section above +are implemented in the go/printer package +and also affect the output of the higher-level go/format package. +

    +
    + +
    hash
    +
    +

    +Implementations of the Hash interface are now +encouraged to implement encoding.BinaryMarshaler +and encoding.BinaryUnmarshaler +to allow saving and recreating their internal state, +and all implementations in the standard library +(hash/crc32, crypto/sha256, and so on) +now implement those interfaces. +

    +
    + +
    html/template
    +
    +

    +The new actions {{"{{break}}"}} and {{"{{continue}}"}} +break out of the innermost {{"{{range"}} ...}} loop, +like the corresponding Go statements. +

    +

    +TODO: something about the AddParseTree problem (#21844). +

    +
    + +
    math/big
    +
    +

    +Int now supports conversions to and from bases 2 through 62 +in its SetString and Text methods. +(Previously it only allowed bases 2 through 36.) +The value of the constant MaxBase has been updated. +

    +

    +Int adds a new +CmpAbs method +that is like Cmp but +compares only the absolute values (not the signs) of its arguments. +

    +

    +Float adds a new +Sqrt method to +compute square roots. +

    +
    + +
    math/rand
    +
    +

    +The new function and corresponding +Rand.Shuffle method +shuffle an input sequence. +

    +

    +The existing function and corresponding +Rand.Perm method +have been updated to use a more efficient algorithm, with the result +that the specific permutations they return have changed. +TODO: Remove? (#22744) +

    +
    + +
    math
    +
    +

    +The new functions +Round +and +RoundToEven +round their arguments to the nearest integer; +Round rounds a half-integer to its larger integer neighbor (away from zero) +while RoundToEven rounds a half-integer to its even integer neighbor. +

    + +

    +The new functions +Erfinv +and +Erfcinv +compute the inverse error function and the +inverse complementary error function. +

    +
    + +
    mime/multipart
    +
    +

    +Reader +now accepts parts with empty filename attributes. +

    +
    + +
    mime
    +
    +

    +ParseMediaType now discards +invalid attribute values; previously it returned those values as empty strings. +

    +
    + +
    net
    +
    +

    +The Conn and +Listener implementations +in this package now guarantee that when Close returns, +the underlying file descriptor has been closed. +(In earlier releases, if the Close stopped pending I/O +in other goroutines, the closing of the file descriptor could happen in one of those +goroutines shortly after Close returned.) +

    + +

    +TCPListener and +UnixListener +now implement +syscall.Conn, +to allow setting options on the underlying file descriptor +using syscall.RawConn.Control. +

    + +

    +The Conn implementations returned by Pipe +now support setting read and write deadlines. +

    + +

    +The IPConn.ReadMsgIP, +IPConn.WriteMsgIP, +UDPConn.ReadMsgUDP, +and +UDPConn.WriteMsgUDP +methods are now implemented on Windows. +

    +
    + +
    net/http
    +
    +

    +On the client side, an HTTP proxy (most commonly configured by +ProxyFromEnvironment) +can now be specified as an https:// URL, +meaning that the client connects to the proxy over HTTPS before issuing a standard, proxied HTTP request. +(Previously, HTTP proxy URLs were required to begin with http:// or socks5://.) +

    +

    +On the server side, FileServer and its single-file equivalent ServeFile +now apply If-Range checks to HEAD requests. +FileServer also now reports directory read failures to the Server's ErrorLog. +

    +

    +Redirect now sets the Content-Type header before writing its HTTP response. +

    +
    + +
    net/mail
    +
    +

    +ParseAddress and +ParseAddressList +now support a variety of obsolete address formats. +

    +
    + +
    net/smtp
    +
    +

    +The Client adds a new +Noop method, +to test whether the server is still responding. +It also now defends against possible SMTP injection in the inputs +to the Hello +and Verify methods. +

    +
    + +
    net/textproto
    +
    +

    +ReadMIMEHeader +now discards continuation (indented) header lines that appear before the first actual (unindented) header line. +

    +
    + +
    net/url
    +
    +

    +ResolveReference +now preserves multiple leading slashes in the target URL. +Previously it rewrote multiple leading slashes to a single slash, +which resulted in the http.Client +following certain redirects incorrectly. +

    +

    +For example, this code's output has changed: +

    +
    +base, _ := url.Parse("http://host//path//to/page1")
    +target, _ := url.Parse("page2")
    +fmt.Println(base.ResolveReference(target))
    +
    +

    +Note the doubled slashes around path. +In Go 1.9 and earlier, the resolved URL was http://host/path//to/page2: +the doubled slash before path was incorrectly rewritten +to a single slash, while the doubled slash after path was +correctly preserved. +Go 1.10 preserves both doubled slashes, resolving to http://host//path//to/page2 +as required by RFC 3986. +

    + +

    This change may break existing buggy programs that unintentionally +construct a base URL with a leading doubled slash in the path and inadvertently +depend on ResolveReference to correct that mistake. +For example, this can happen if code adds a host prefix +like http://host/ to a path like /my/api, +resulting in a URL with a doubled slash: http://host//my/api. +

    +
    + +
    os
    +
    +

    +File adds new methods +SetDeadline, +SetReadDeadline, +and +SetWriteDeadline +that allow setting I/O deadlines when the +underlying file descriptor supports non-blocking I/O operations. +The definition of these methods matches those in net.Conn. +

    + +

    +Also matching net.Conn, +File's +Close method +now guarantees that when Close returns, +the underlying file descriptor has been closed. +(In earlier releases, like for net.Conn's, +if the Close stopped pending I/O +in other goroutines, the closing of the file descriptor could happen in one of those +goroutines shortly after Close returned.) +

    + +

    +On BSD, macOS, and Solaris systems, +Chtimes +now supports setting file times with nanosecond precision +(assuming the underlying file system can represent them). +

    +
    + +
    reflect
    +
    +

    +The Copy function now allows copying +from a string into a byte array or byte slice, to match the +built-in copy function. +

    +
    + +
    runtime/pprof
    +
    +

    +As noted above, the blocking and mutex profiles +now include symbol information so that they can be viewed without needing +the binary that generated them. +

    +
    + +
    strconv
    +
    +

    +ParseUint now returns +the maximum magnitude integer of the appropriate size +with any ErrRange error, as it was already documented to do. +Previously it returned 0 with ErrRange errors. +

    +
    + +
    strings
    +
    +

    +A new type +Builder is a replacement for +bytes.Buffer for the use case of +accumulating text into a string result. +The Builder's API is a restricted subset of bytes.Buffer's +that allows it to safely avoid making a duplicate copy of the data +during the String method. +

    +
    + +
    syscall
    +
    +

    +On Windows, +the new SysProcAttr field Token, +of type Token allows the creation of a process that +runs as another user during StartProcess +(and therefore also during os.StartProcess and +exec.Cmd.Start). +The new function CreateProcessAsUser +gives access to the underlying system call. +

    + +

    +On BSD, macOS, and Solaris systems, UtimesNano +is now implemented. +

    +
    + +
    text/template
    +
    +

    +The new actions {{"{{break}}"}} and {{"{{continue}}"}} +break out of the innermost {{"{{range"}} ...}} loop, +like the corresponding Go statements. +

    +
    + +
    time
    +
    +

    +LoadLocation now uses the directory +or uncompressed zip file named by the $ZONEINFO +environment variable before looking in the default system-specific list of +known installation locations or in $GOROOT/lib/time/zoneinfo.zip. +

    +

    +TODO: Maybe CL 68890. +

    +
    + +
    unicode
    +
    +

    +The unicode package and associated +support throughout the system has been upgraded from version 9.0 to +Unicode 10.0, +which adds 8,518 new characters, including four new scripts, one new property, +a Bitcoin currency symbol, and 56 new emoji. +

    +
    diff --git a/doc/go1.9.html b/doc/go1.9.html index 222e0e9ba0f..fa50ae78059 100644 --- a/doc/go1.9.html +++ b/doc/go1.9.html @@ -719,6 +719,11 @@ version of gccgo. header when matching handlers. The host is matched unmodified for CONNECT requests. +
  • + The new Server.ServeTLS method wraps + Server.Serve with added TLS support. +
  • +
  • Server.WriteTimeout now applies to HTTP/2 connections and is enforced per-stream. diff --git a/doc/go_faq.html b/doc/go_faq.html index f8322efcd32..62349fe5f7d 100644 --- a/doc/go_faq.html +++ b/doc/go_faq.html @@ -1476,6 +1476,53 @@ For more detail on this topic see the talk entitled, Concurrency is not Parallelism. +

    +Why is there no goroutine ID?

    + +

    +Goroutines do not have names; they are just anonymous workers. +They expose no unique identifier, name, or data structure to the programmer. +Some people are surprised by this, expecting the go +statement to return some item that can be used to access and control +the goroutine later. +

    + +

    +The fundamental reason goroutines are anonymous is so that +the full Go language is available when programming concurrent code. +By contrast, the usage patterns that develop when threads and goroutines are +named can restrict what a library using them can do. +

    + +

    +Here is an illustration of the difficulties. +Once one names a goroutine and constructs a model around +it, it becomes special, and one is tempted to associate all computation +with that goroutine, ignoring the possibility +of using multiple, possibly shared goroutines for the processing. +If the net/http package associated per-request +state with a goroutine, +clients would be unable to use more goroutines +when serving a request. +

    + +

    +Moreover, experience with libraries such as those for graphics systems +that require all processing to occur on the "main thread" +has shown how awkward and limiting the approach can be when +deployed in a concurrent language. +The very existence of a special thread or goroutine forces +the programmer to distort the program to avoid crashes +and other problems caused by inadvertently operating +on the wrong thread. +

    + +

    +For those cases where a particular goroutine is truly special, +the language provides features such as channels that can be +used in flexible ways to interact with it. +

    +

    Functions and Methods

    diff --git a/doc/go_spec.html b/doc/go_spec.html index 6642869d0c5..ebf1cefffea 100644 --- a/doc/go_spec.html +++ b/doc/go_spec.html @@ -1,6 +1,6 @@ @@ -16,8 +16,7 @@ Go is a general-purpose language designed with systems programming in mind. It is strongly typed and garbage-collected and has explicit support for concurrent programming. Programs are constructed from packages, whose properties allow efficient management of -dependencies. The existing implementations use a traditional -compile/link model to generate executable binaries. +dependencies.

    @@ -577,11 +576,7 @@ or conversion, or implicitly when used in a assignment or as an operand in an expression. It is an error if the constant value -cannot be represented as a value of the respective type. -For instance, 3.0 can be given any integer or any -floating-point type, while 2147483648.0 (equal to 1<<31) -can be given the types float32, float64, or uint32 but -not int32 or string. +cannot be represented as a value of the respective type.

    @@ -765,7 +760,8 @@ using a receiver of that type.

    A boolean type represents the set of Boolean truth values denoted by the predeclared constants true -and false. The predeclared boolean type is bool. +and false. The predeclared boolean type is bool; +it is a defined type.

    Numeric types

    @@ -812,8 +808,9 @@ uintptr an unsigned integer large enough to store the uninterpreted bits of a p

    -To avoid portability issues all numeric types are distinct except -byte, which is an alias for uint8, and +To avoid portability issues all numeric types are defined +types and thus distinct except +byte, which is an alias for uint8, and rune, which is an alias for int32. Conversions are required when different numeric types are mixed in an expression @@ -829,7 +826,8 @@ A string type represents the set of string values. A string value is a (possibly empty) sequence of bytes. Strings are immutable: once created, it is impossible to change the contents of a string. -The predeclared string type is string. +The predeclared string type is string; +it is a defined type.

    @@ -861,7 +859,8 @@ ElementType = Type .

    The length is part of the array's type; it must evaluate to a -non-negative constant representable by a value +non-negative constant +representable by a value of type int. The length of array a can be discovered using the built-in function len. @@ -1514,7 +1513,7 @@ are different because B0 is different from []string.

    A value x is assignable to a variable of type T -("x is assignable to T") in any of these cases: +("x is assignable to T") if one of the following conditions applies:

    +

    Representability

    + +

    +A constant x is representable +by a value of type T if one of the following conditions applies: +

    + + + +
    +x                   T           x is representable by a value of T because
    +
    +'a'                 byte        97 is in the set of byte values
    +97                  rune        rune is an alias for int32, and 97 is in the set of 32-bit integers
    +"foo"               string      "foo" is in the set of string values
    +1024                int16       1024 is in the set of 16-bit integers
    +42.0                byte        42 is in the set of unsigned 8-bit integers
    +1e10                uint64      10000000000 is in the set of unsigned 64-bit integers
    +2.718281828459045   float32     2.718281828459045 rounds to 2.7182817 which is in the set of float32 values
    +-1e-1000            float64     -1e-1000 rounds to IEEE -0.0 which is further simplified to 0.0
    +0i                  int         0 is an integer value
    +(42 + 0i)           float32     42.0 (with zero imaginary part) is in the set of float32 values
    +
    + +
    +x                   T           x is not representable by a value of T because
    +
    +0                   bool        0 is not in the set of boolean values
    +'a'                 string      'a' is a rune, it is not in the set of string values
    +1024                byte        1024 is not in the set of unsigned 8-bit integers
    +-1                  uint16      -1 is not in the set of unsigned 16-bit integers
    +1.1                 int         1.1 is not an integer value
    +42i                 float32     (0 + 42i) is not in the set of float32 values
    +1e1000              float64     1e1000 overflows to IEEE +Inf after rounding
    +
    + +

    Blocks

    @@ -1781,7 +1836,7 @@ const u, v float32 = 0, 3 // u = 0.0, v = 3.0

    Within a parenthesized const declaration list the -expression list may be omitted from any but the first declaration. +expression list may be omitted from any but the first ConstSpec. Such an empty list is equivalent to the textual substitution of the first preceding non-empty expression list and its type if any. Omitting the list of expressions is therefore equivalent to @@ -1810,52 +1865,51 @@ const (

    Within a constant declaration, the predeclared identifier iota represents successive untyped integer -constants. It is reset to 0 whenever the reserved word const -appears in the source and increments after each ConstSpec. +constants. Its value is the index of the respective ConstSpec +in that constant declaration, starting at zero. It can be used to construct a set of related constants:

    -const ( // iota is reset to 0
    +const (
     	c0 = iota  // c0 == 0
     	c1 = iota  // c1 == 1
     	c2 = iota  // c2 == 2
     )
     
    -const ( // iota is reset to 0
    -	a = 1 << iota  // a == 1
    -	b = 1 << iota  // b == 2
    -	c = 3          // c == 3  (iota is not used but still incremented)
    -	d = 1 << iota  // d == 8
    +const (
    +	a = 1 << iota  // a == 1  (iota == 0)
    +	b = 1 << iota  // b == 2  (iota == 1)
    +	c = 3          // c == 3  (iota == 2, unused)
    +	d = 1 << iota  // d == 8  (iota == 3)
     )
     
    -const ( // iota is reset to 0
    +const (
     	u         = iota * 42  // u == 0     (untyped integer constant)
     	v float64 = iota * 42  // v == 42.0  (float64 constant)
     	w         = iota * 42  // w == 84    (untyped integer constant)
     )
     
    -const x = iota  // x == 0  (iota has been reset)
    -const y = iota  // y == 0  (iota has been reset)
    +const x = iota  // x == 0
    +const y = iota  // y == 0
     

    -Within an ExpressionList, the value of each iota is the same because -it is only incremented after each ConstSpec: +By definition, multiple uses of iota in the same ConstSpec all have the same value:

     const (
    -	bit0, mask0 = 1 << iota, 1<<iota - 1  // bit0 == 1, mask0 == 0
    -	bit1, mask1                           // bit1 == 2, mask1 == 1
    -	_, _                                  // skips iota == 2
    -	bit3, mask3                           // bit3 == 8, mask3 == 7
    +	bit0, mask0 = 1 << iota, 1<<iota - 1  // bit0 == 1, mask0 == 0  (iota == 0)
    +	bit1, mask1                           // bit1 == 2, mask1 == 1  (iota == 1)
    +	_, _                                  //                        (iota == 2, unused)
    +	bit3, mask3                           // bit3 == 8, mask3 == 7  (iota == 3)
     )
     

    -This last example exploits the implicit repetition of the -last non-empty expression list. +This last example exploits the implicit repetition +of the last non-empty expression list.

    @@ -1946,7 +2000,7 @@ func (m *Mutex) Unlock() { /* Unlock implementation */ } // NewMutex has the same composition as Mutex but its method set is empty. type NewMutex Mutex -// The method set of the base type of PtrMutex remains unchanged, +// The method set of PtrMutex's underlying type *Mutex remains unchanged, // but the method set of PtrMutex is empty. type PtrMutex *Mutex @@ -2224,7 +2278,6 @@ non-blank identifier denoting a constant, variable, or function, -a method expression yielding a function, or a parenthesized expression.

    @@ -2234,7 +2287,7 @@ operand only on the left-hand side of an assignment.

    -Operand     = Literal | OperandName | MethodExpr | "(" Expression ")" .
    +Operand     = Literal | OperandName | "(" Expression ")" .
     Literal     = BasicLit | CompositeLit | FunctionLit .
     BasicLit    = int_lit | float_lit | imaginary_lit | rune_lit | string_lit .
     OperandName = identifier | QualifiedIdent.
    @@ -2348,7 +2401,8 @@ For array and slice literals the following rules apply:
     	    its position in the array.
     	
  • An element with a key uses the key as its index. The - key must be a non-negative constant representable by + key must be a non-negative constant + representable by a value of type int; and if it is typed it must be of integer type.
  • @@ -2499,6 +2553,7 @@ Primary expressions are the operands for unary and binary expressions. PrimaryExpr = Operand | Conversion | + MethodExpr | PrimaryExpr Selector | PrimaryExpr Index | PrimaryExpr Slice | @@ -2685,7 +2740,7 @@ argument that is the receiver of the method.
     MethodExpr    = ReceiverType "." MethodName .
    -ReceiverType  = TypeName | "(" "*" TypeName ")" | "(" ReceiverType ")" .
    +ReceiverType  = Type .
     

    @@ -2921,11 +2976,12 @@ The following rules apply: If a is not a map:

    @@ -3075,7 +3131,8 @@ For arrays or strings, the indices are in range if 0 <= low <= high <= len(a), otherwise they are out of range. For slices, the upper index bound is the slice capacity cap(a) rather than the length. -A constant index must be non-negative and representable by a value of type +A constant index must be non-negative and +representable by a value of type int; for arrays or constant strings, constant indices must also be in range. If both indices are constant, they must satisfy low <= high. If the indices are out of range at run time, a run-time panic occurs. @@ -3091,8 +3148,8 @@ and the result of the slice operation is a slice with the same element type as t

    If the sliced operand of a valid slice expression is a nil slice, the result -is a nil slice. Otherwise, the result shares its underlying array with the -operand. +is a nil slice. Otherwise, if the result is a slice, it shares its underlying +array with the operand.

    Full slice expressions

    @@ -3135,7 +3192,8 @@ If the sliced operand is an array, it must be addre

    The indices are in range if 0 <= low <= high <= max <= cap(a), otherwise they are out of range. -A constant index must be non-negative and representable by a value of type +A constant index must be non-negative and +representable by a value of type int; for arrays, constant indices must also be in range. If multiple indices are constant, the constants that are present must be in range relative to each other. @@ -3384,7 +3442,8 @@ to the type of the other operand.

    The right operand in a shift expression must have unsigned integer type -or be an untyped constant representable by a value of type uint. +or be an untyped constant representable by a +value of type uint. If the left operand of a non-constant shift expression is an untyped constant, it is first converted to the type it would assume if the shift expression were replaced by its left operand alone. @@ -3392,18 +3451,20 @@ replaced by its left operand alone.

     var s uint = 33
    -var i = 1<<s           // 1 has type int
    -var j int32 = 1<<s     // 1 has type int32; j == 0
    -var k = uint64(1<<s)   // 1 has type uint64; k == 1<<33
    -var m int = 1.0<<s     // 1.0 has type int; m == 0 if ints are 32bits in size
    -var n = 1.0<<s == j    // 1.0 has type int32; n == true
    -var o = 1<<s == 2<<s   // 1 and 2 have type int; o == true if ints are 32bits in size
    -var p = 1<<s == 1<<33  // illegal if ints are 32bits in size: 1 has type int, but 1<<33 overflows int
    -var u = 1.0<<s         // illegal: 1.0 has type float64, cannot shift
    -var u1 = 1.0<<s != 0   // illegal: 1.0 has type float64, cannot shift
    -var u2 = 1<<s != 1.0   // illegal: 1 has type float64, cannot shift
    -var v float32 = 1<<s   // illegal: 1 has type float32, cannot shift
    -var w int64 = 1.0<<33  // 1.0<<33 is a constant shift expression
    +var i = 1<<s                  // 1 has type int
    +var j int32 = 1<<s            // 1 has type int32; j == 0
    +var k = uint64(1<<s)          // 1 has type uint64; k == 1<<33
    +var m int = 1.0<<s            // 1.0 has type int; m == 0 if ints are 32bits in size
    +var n = 1.0<<s == j           // 1.0 has type int32; n == true
    +var o = 1<<s == 2<<s          // 1 and 2 have type int; o == true if ints are 32bits in size
    +var p = 1<<s == 1<<33         // illegal if ints are 32bits in size: 1 has type int, but 1<<33 overflows int
    +var u = 1.0<<s                // illegal: 1.0 has type float64, cannot shift
    +var u1 = 1.0<<s != 0          // illegal: 1.0 has type float64, cannot shift
    +var u2 = 1<<s != 1.0          // illegal: 1 has type float64, cannot shift
    +var v float32 = 1<<s          // illegal: 1 has type float32, cannot shift
    +var w int64 = 1.0<<33         // 1.0<<33 is a constant shift expression
    +var x = a[1.0<<s]             // 1.0 has type int; x == a[0] if ints are 32bits in size
    +var a = make([]byte, 1.0<<s)  // 1.0 has type int; len(a) == 0 if ints are 32bits in size
     
    @@ -3877,30 +3938,14 @@ func() int(x) // x is converted to func() int (unambiguous)

    A constant value x can be converted to -type T in any of these cases: +type T if x is representable +by a value of T. +As a special case, an integer constant x can be converted to a +string type using the +same rule +as for non-constant x.

    - -

    Converting a constant yields a typed constant as result.

    @@ -4187,7 +4232,8 @@ The divisor of a constant division or remainder operation must not be zero:

    -The values of typed constants must always be accurately representable as values +The values of typed constants must always be accurately +representable by values of the constant type. The following constant expressions are illegal:

    @@ -4820,8 +4866,9 @@ in the TypeSwitchGuard.

    -The type in a case may be nil; -that case is used when the expression in the TypeSwitchGuard +Instead of a type, a case may use the predeclared identifier +nil; +that case is selected when the expression in the TypeSwitchGuard is a nil interface value. There may be at most one nil case.

    @@ -4979,12 +5026,10 @@ the range clause is equivalent to the same clause without that identifier.

    -The range expression is evaluated once before beginning the loop, -with one exception: if the range expression is an array or a pointer to an array -and at most one iteration variable is present, only the range expression's -length is evaluated; if that length is constant, -by definition -the range expression itself will not be evaluated. +The range expression x is evaluated once before beginning the loop, +with one exception: if at most one iteration variable is present and +len(x) is constant, +the range expression is not evaluated.

    @@ -5680,9 +5725,10 @@ make(T, n) channel buffered channel of type T, buffer size n

    -The size arguments n and m must be of integer type or untyped. -A constant size argument must be non-negative and -representable by a value of type int. +Each of the size arguments n and m must be of integer type +or an untyped constant. +A constant size argument must be non-negative and representable +by a value of type int; if it is an untyped constant it is given type int. If both n and m are provided and are constant, then n must be no larger than m. If n is negative or larger than m at run time, @@ -6157,7 +6203,7 @@ of make, and no explicit initialization is provided, the variable or value is given a default value. Each element of such a variable or value is set to the zero value for its type: false for booleans, -0 for integers, 0.0 for floats, "" +0 for numeric types, "" for strings, and nil for pointers, functions, interfaces, slices, channels, and maps. This initialization is done recursively, so for instance each element of an array of structs will have its fields zeroed if no value is specified. @@ -6409,7 +6455,8 @@ type Error interface {

    Package unsafe

    -The built-in package unsafe, known to the compiler, +The built-in package unsafe, known to the compiler +and accessible through the import path "unsafe", provides facilities for low-level programming including operations that violate the type system. A package using unsafe must be vetted manually for type safety and may not be portable. diff --git a/doc/help.html b/doc/help.html index 057d75290ee..f668196871d 100644 --- a/doc/help.html +++ b/doc/help.html @@ -1,6 +1,7 @@

    @@ -9,6 +10,7 @@ +{{if not $.GoogleCN}}

    Go Nuts Mailing List

    Get help from Go users, and share your work on the official mailing list. @@ -31,10 +33,12 @@ forum for Go programmers.

    Go IRC Channel

    Get live support at #go-nuts on irc.freenode.net, the official Go IRC channel.

    +{{end}}

    Frequently Asked Questions (FAQ)

    Answers to common questions about Go.

    +{{if not $.GoogleCN}}

    Stay informed

    Go Announcements Mailing List

    @@ -64,6 +68,7 @@ for Go news and discussion. The Go Time podcast is a panel of Go experts and special guests discussing the Go programming language, the community, and everything in between.

    +{{end}}

    Community resources

    @@ -73,11 +78,13 @@ Each month in places around the world, groups of Go programmers ("gophers") meet to talk about Go. Find a chapter near you.

    +{{if not $.GoogleCN}}

    Go Playground

    A place to write, run, and share Go code.

    Go Wiki

    A wiki maintained by the Go community.

    +{{end}}

    Code of Conduct

    diff --git a/doc/install-source.html b/doc/install-source.html index d120f7d6f2b..17b6ed3ea12 100644 --- a/doc/install-source.html +++ b/doc/install-source.html @@ -143,12 +143,13 @@ packaged Go distribution.

    To build a bootstrap tool chain from source, use either the git branch release-branch.go1.4 or -go1.4-bootstrap-20170531.tar.gz, +go1.4-bootstrap-20171003.tar.gz, which contains the Go 1.4 source code plus accumulated fixes to keep the tools running on newer operating systems. (Go 1.4 was the last distribution in which the tool chain was written in C.) After unpacking the Go 1.4 source, cd to -the src subdirectory and run make.bash (or, +the src subdirectory, set CGO_ENABLED=0 in +the environment, and run make.bash (or, on Windows, make.bat).

    @@ -471,8 +472,9 @@ Choices for $GOARCH are amd64 (64-bit x86, the most mature port), 386 (32-bit x86), arm (32-bit ARM), arm64 (64-bit ARM), ppc64le (PowerPC 64-bit, little-endian), ppc64 (PowerPC 64-bit, big-endian), -mips64le (MIPS 64-bit, little-endian), and mips64 (MIPS 64-bit, big-endian). -mipsle (MIPS 32-bit, little-endian), and mips (MIPS 32-bit, big-endian). +mips64le (MIPS 64-bit, little-endian), mips64 (MIPS 64-bit, big-endian), +mipsle (MIPS 32-bit, little-endian), mips (MIPS 32-bit, big-endian), and +s390x (IBM System z 64-bit, big-endian). The valid combinations of $GOOS and $GOARCH are: @@ -536,6 +538,9 @@ The valid combinations of $GOOS and $GOARCH are: + + + diff --git a/doc/install.html b/doc/install.html index 7f32f68cd32..abf7fa6daed 100644 --- a/doc/install.html +++ b/doc/install.html @@ -16,7 +16,7 @@

    Official binary -distributions are available for the FreeBSD (release 8-STABLE and above), +distributions are available for the FreeBSD (release 10-STABLE and above), Linux, Mac OS X (10.8 and above), and Windows operating systems and the 32-bit (386) and 64-bit (amd64) x86 processor architectures. @@ -47,7 +47,7 @@ If your OS or architecture is not on the list, you may be able to

    - + diff --git a/doc/progs/run.go b/doc/progs/run.go index 8479a66b675..06ea130d999 100644 --- a/doc/progs/run.go +++ b/doc/progs/run.go @@ -219,12 +219,5 @@ func fixcgo() { // cgo1 and cgo2 don't run on netbsd, srandom has a different signature skipTest("cgo1") skipTest("cgo2") - // cgo3 and cgo4 don't run on netbsd, since cgo cannot handle stdout correctly, see issue #10715. - skipTest("cgo3") - skipTest("cgo4") - case "openbsd", "solaris": - // cgo3 and cgo4 don't run on openbsd and solaris, since cgo cannot handle stdout correctly, see issue #10715. - skipTest("cgo3") - skipTest("cgo4") } } diff --git a/misc/cgo/errors/errors_test.go b/misc/cgo/errors/errors_test.go new file mode 100644 index 00000000000..118187f23b8 --- /dev/null +++ b/misc/cgo/errors/errors_test.go @@ -0,0 +1,161 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errorstest + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" + "testing" +) + +func path(file string) string { + return filepath.Join("src", file) +} + +func check(t *testing.T, file string) { + t.Run(file, func(t *testing.T) { + t.Parallel() + + contents, err := ioutil.ReadFile(path(file)) + if err != nil { + t.Fatal(err) + } + var errors []*regexp.Regexp + for i, line := range bytes.Split(contents, []byte("\n")) { + if bytes.HasSuffix(line, []byte("ERROR HERE")) { + re := regexp.MustCompile(regexp.QuoteMeta(fmt.Sprintf("%s:%d:", file, i+1))) + errors = append(errors, re) + continue + } + + frags := bytes.SplitAfterN(line, []byte("ERROR HERE: "), 2) + if len(frags) == 1 { + continue + } + re, err := regexp.Compile(string(frags[1])) + if err != nil { + t.Errorf("Invalid regexp after `ERROR HERE: `: %#q", frags[1]) + continue + } + errors = append(errors, re) + } + if len(errors) == 0 { + t.Fatalf("cannot find ERROR HERE") + } + expect(t, 
file, errors) + }) +} + +func expect(t *testing.T, file string, errors []*regexp.Regexp) { + dir, err := ioutil.TempDir("", filepath.Base(t.Name())) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + dst := filepath.Join(dir, strings.TrimSuffix(file, ".go")) + cmd := exec.Command("go", "build", "-gcflags=-L", "-o="+dst, path(file)) // TODO(gri) no need for -gcflags=-L if go tool is adjusted + out, err := cmd.CombinedOutput() + if err == nil { + t.Errorf("expected cgo to fail but it succeeded") + } + + lines := bytes.Split(out, []byte("\n")) + for _, re := range errors { + found := false + for _, line := range lines { + if re.Match(line) { + t.Logf("found match for %#q: %q", re, line) + found = true + break + } + } + if !found { + t.Errorf("expected error output to contain %#q", re) + } + } + + if t.Failed() { + t.Logf("actual output:\n%s", out) + } +} + +func sizeofLongDouble(t *testing.T) int { + cmd := exec.Command("go", "run", path("long_double_size.go")) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("%#q: %v:\n%s", strings.Join(cmd.Args, " "), err, out) + } + + i, err := strconv.Atoi(strings.TrimSpace(string(out))) + if err != nil { + t.Fatalf("long_double_size.go printed invalid size: %s", out) + } + return i +} + +func TestReportsTypeErrors(t *testing.T) { + for _, file := range []string{ + "err1.go", + "err2.go", + "err3.go", + "issue7757.go", + "issue8442.go", + "issue11097a.go", + "issue11097b.go", + "issue13129.go", + "issue13423.go", + "issue13467.go", + "issue13635.go", + "issue13830.go", + "issue16116.go", + "issue16591.go", + "issue18452.go", + "issue18889.go", + } { + check(t, file) + } + + if sizeofLongDouble(t) > 8 { + check(t, "err4.go") + } +} + +func TestToleratesOptimizationFlag(t *testing.T) { + for _, cflags := range []string{ + "", + "-O", + } { + cflags := cflags + t.Run(cflags, func(t *testing.T) { + t.Parallel() + + cmd := exec.Command("go", "build", path("issue14669.go")) + cmd.Env = append(os.Environ(), 
"CGO_CFLAGS="+cflags) + out, err := cmd.CombinedOutput() + if err != nil { + t.Errorf("%#q: %v:\n%s", strings.Join(cmd.Args, " "), err, out) + } + }) + } +} + +func TestMallocCrashesOnNil(t *testing.T) { + t.Parallel() + + cmd := exec.Command("go", "run", path("malloc.go")) + out, err := cmd.CombinedOutput() + if err == nil { + t.Logf("%#q:\n%s", strings.Join(cmd.Args, " "), out) + t.Fatalf("succeeded unexpectedly") + } +} diff --git a/misc/cgo/errors/issue13635.go b/misc/cgo/errors/issue13635.go deleted file mode 100644 index 0ce2b1e83a1..00000000000 --- a/misc/cgo/errors/issue13635.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// issue 13635: used to output error about C.unsignedchar. -// This test tests all such types. - -package pkg - -import "C" - -func main() { - var ( - _ C.uchar = "uc" // ERROR HERE - _ C.schar = "sc" // ERROR HERE - _ C.ushort = "us" // ERROR HERE - _ C.uint = "ui" // ERROR HERE - _ C.ulong = "ul" // ERROR HERE - _ C.longlong = "ll" // ERROR HERE - _ C.ulonglong = "ull" // ERROR HERE - _ C.complexfloat = "cf" // ERROR HERE - _ C.complexdouble = "cd" // ERROR HERE - ) -} diff --git a/misc/cgo/errors/ptr.go b/misc/cgo/errors/ptr_test.go similarity index 79% rename from misc/cgo/errors/ptr.go rename to misc/cgo/errors/ptr_test.go index 3e117666bff..d295a5849db 100644 --- a/misc/cgo/errors/ptr.go +++ b/misc/cgo/errors/ptr_test.go @@ -4,20 +4,18 @@ // Tests that cgo detects invalid pointer passing at runtime. -package main +package errorstest import ( "bufio" "bytes" "fmt" - "io" "io/ioutil" "os" "os/exec" "path/filepath" - "runtime" "strings" - "sync" + "testing" ) // ptrTest is the tests without the boilerplate. @@ -344,7 +342,7 @@ var ptrTests = []ptrTest{ fail: false, }, { - // Issue #21306. + // Test preemption while entering a cgo call. Issue #21306. 
name: "preempt-during-call", c: `void f() {}`, imports: []string{"runtime", "sync"}, @@ -353,219 +351,145 @@ var ptrTests = []ptrTest{ }, } -func main() { - os.Exit(doTests()) +func TestPointerChecks(t *testing.T) { + for _, pt := range ptrTests { + pt := pt + t.Run(pt.name, func(t *testing.T) { + testOne(t, pt) + }) + } } -func doTests() int { - gopath, err := ioutil.TempDir("", "cgoerrors") +func testOne(t *testing.T, pt ptrTest) { + t.Parallel() + + gopath, err := ioutil.TempDir("", filepath.Base(t.Name())) if err != nil { - fmt.Fprintln(os.Stderr, err) - return 2 + t.Fatal(err) } defer os.RemoveAll(gopath) - if err := os.MkdirAll(filepath.Join(gopath, "src"), 0777); err != nil { - fmt.Fprintln(os.Stderr, err) - return 2 + src := filepath.Join(gopath, "src") + if err := os.Mkdir(src, 0777); err != nil { + t.Fatal(err) } - workers := runtime.NumCPU() + 1 - - var wg sync.WaitGroup - c := make(chan int) - errs := make(chan int) - for i := 0; i < workers; i++ { - wg.Add(1) - go func() { - worker(gopath, c, errs) - wg.Done() - }() - } - - for i := range ptrTests { - c <- i - } - close(c) - - go func() { - wg.Wait() - close(errs) - }() - - tot := 0 - for e := range errs { - tot += e - } - return tot -} - -func worker(gopath string, c, errs chan int) { - e := 0 - for i := range c { - if !doOne(gopath, i) { - e++ - } - } - if e > 0 { - errs <- e - } -} - -func doOne(gopath string, i int) bool { - t := &ptrTests[i] - - dir := filepath.Join(gopath, "src", fmt.Sprintf("dir%d", i)) - if err := os.Mkdir(dir, 0777); err != nil { - fmt.Fprintln(os.Stderr, err) - return false - } - - name := filepath.Join(dir, fmt.Sprintf("t%d.go", i)) + name := filepath.Join(src, fmt.Sprintf("%s.go", filepath.Base(t.Name()))) f, err := os.Create(name) if err != nil { - fmt.Fprintln(os.Stderr, err) - return false + t.Fatal(err) } b := bufio.NewWriter(f) fmt.Fprintln(b, `package main`) fmt.Fprintln(b) fmt.Fprintln(b, `/*`) - fmt.Fprintln(b, t.c) + fmt.Fprintln(b, pt.c) fmt.Fprintln(b, `*/`) 
fmt.Fprintln(b, `import "C"`) fmt.Fprintln(b) - for _, imp := range t.imports { + for _, imp := range pt.imports { fmt.Fprintln(b, `import "`+imp+`"`) } - if len(t.imports) > 0 { + if len(pt.imports) > 0 { fmt.Fprintln(b) } - if len(t.support) > 0 { - fmt.Fprintln(b, t.support) + if len(pt.support) > 0 { + fmt.Fprintln(b, pt.support) fmt.Fprintln(b) } fmt.Fprintln(b, `func main() {`) - fmt.Fprintln(b, t.body) + fmt.Fprintln(b, pt.body) fmt.Fprintln(b, `}`) if err := b.Flush(); err != nil { - fmt.Fprintf(os.Stderr, "flushing %s: %v\n", name, err) - return false + t.Fatalf("flushing %s: %v", name, err) } if err := f.Close(); err != nil { - fmt.Fprintf(os.Stderr, "closing %s: %v\n", name, err) - return false + t.Fatalf("closing %s: %v", name, err) } - for _, e := range t.extra { - if err := ioutil.WriteFile(filepath.Join(dir, e.name), []byte(e.contents), 0644); err != nil { - fmt.Fprintf(os.Stderr, "writing %s: %v\n", e.name, err) - return false + for _, e := range pt.extra { + if err := ioutil.WriteFile(filepath.Join(src, e.name), []byte(e.contents), 0644); err != nil { + t.Fatalf("writing %s: %v", e.name, err) } } - ok := true + args := func(cmd *exec.Cmd) string { + return strings.Join(cmd.Args, " ") + } cmd := exec.Command("go", "build") - cmd.Dir = dir + cmd.Dir = src cmd.Env = addEnv("GOPATH", gopath) buf, err := cmd.CombinedOutput() if err != nil { - fmt.Fprintf(os.Stderr, "test %s failed to build: %v\n%s", t.name, err, buf) - return false + t.Logf("%#q:\n%s", args(cmd), buf) + t.Fatalf("failed to build: %v", err) } - exe := filepath.Join(dir, filepath.Base(dir)) + exe := filepath.Join(src, filepath.Base(src)) cmd = exec.Command(exe) - cmd.Dir = dir + cmd.Dir = src - if t.expensive { + if pt.expensive { cmd.Env = cgocheckEnv("1") buf, err := cmd.CombinedOutput() if err != nil { - var errbuf bytes.Buffer - if t.fail { - fmt.Fprintf(&errbuf, "test %s marked expensive but failed when not expensive: %v\n", t.name, err) + t.Logf("%#q:\n%s", args(cmd), buf) + if 
pt.fail { + t.Fatalf("test marked expensive, but failed when not expensive: %v", err) } else { - fmt.Fprintf(&errbuf, "test %s failed unexpectedly with GODEBUG=cgocheck=1: %v\n", t.name, err) + t.Errorf("failed unexpectedly with GODEBUG=cgocheck=1: %v", err) } - reportTestOutput(&errbuf, t.name, buf) - os.Stderr.Write(errbuf.Bytes()) - ok = false } cmd = exec.Command(exe) - cmd.Dir = dir + cmd.Dir = src } - if t.expensive { + if pt.expensive { cmd.Env = cgocheckEnv("2") } buf, err = cmd.CombinedOutput() - - if t.fail { + if pt.fail { if err == nil { - var errbuf bytes.Buffer - fmt.Fprintf(&errbuf, "test %s did not fail as expected\n", t.name) - reportTestOutput(&errbuf, t.name, buf) - os.Stderr.Write(errbuf.Bytes()) - ok = false + t.Logf("%#q:\n%s", args(cmd), buf) + t.Fatalf("did not fail as expected") } else if !bytes.Contains(buf, []byte("Go pointer")) { - var errbuf bytes.Buffer - fmt.Fprintf(&errbuf, "test %s output does not contain expected error (failed with %v)\n", t.name, err) - reportTestOutput(&errbuf, t.name, buf) - os.Stderr.Write(errbuf.Bytes()) - ok = false + t.Logf("%#q:\n%s", args(cmd), buf) + t.Fatalf("did not print expected error (failed with %v)", err) } } else { if err != nil { - var errbuf bytes.Buffer - fmt.Fprintf(&errbuf, "test %s failed unexpectedly: %v\n", t.name, err) - reportTestOutput(&errbuf, t.name, buf) - os.Stderr.Write(errbuf.Bytes()) - ok = false + t.Logf("%#q:\n%s", args(cmd), buf) + t.Fatalf("failed unexpectedly: %v", err) } - if !t.expensive && ok { + if !pt.expensive { // Make sure it passes with the expensive checks. 
cmd := exec.Command(exe) - cmd.Dir = dir + cmd.Dir = src cmd.Env = cgocheckEnv("2") buf, err := cmd.CombinedOutput() if err != nil { - var errbuf bytes.Buffer - fmt.Fprintf(&errbuf, "test %s failed unexpectedly with expensive checks: %v\n", t.name, err) - reportTestOutput(&errbuf, t.name, buf) - os.Stderr.Write(errbuf.Bytes()) - ok = false + t.Logf("%#q:\n%s", args(cmd), buf) + t.Fatalf("failed unexpectedly with expensive checks: %v", err) } } } - if t.fail && ok { + if pt.fail { cmd = exec.Command(exe) - cmd.Dir = dir + cmd.Dir = src cmd.Env = cgocheckEnv("0") buf, err := cmd.CombinedOutput() if err != nil { - var errbuf bytes.Buffer - fmt.Fprintf(&errbuf, "test %s failed unexpectedly with GODEBUG=cgocheck=0: %v\n", t.name, err) - reportTestOutput(&errbuf, t.name, buf) - os.Stderr.Write(errbuf.Bytes()) - ok = false + t.Logf("%#q:\n%s", args(cmd), buf) + t.Fatalf("failed unexpectedly with GODEBUG=cgocheck=0: %v", err) } } - - return ok -} - -func reportTestOutput(w io.Writer, name string, buf []byte) { - fmt.Fprintf(w, "=== test %s output ===\n", name) - fmt.Fprintf(w, "%s", buf) - fmt.Fprintf(w, "=== end of test %s output ===\n", name) } func cgocheckEnv(val string) []string { diff --git a/misc/cgo/errors/err1.go b/misc/cgo/errors/src/err1.go similarity index 100% rename from misc/cgo/errors/err1.go rename to misc/cgo/errors/src/err1.go diff --git a/misc/cgo/errors/err2.go b/misc/cgo/errors/src/err2.go similarity index 100% rename from misc/cgo/errors/err2.go rename to misc/cgo/errors/src/err2.go diff --git a/misc/cgo/errors/err3.go b/misc/cgo/errors/src/err3.go similarity index 100% rename from misc/cgo/errors/err3.go rename to misc/cgo/errors/src/err3.go diff --git a/misc/cgo/errors/src/err4.go b/misc/cgo/errors/src/err4.go new file mode 100644 index 00000000000..8e5f78e987b --- /dev/null +++ b/misc/cgo/errors/src/err4.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +long double x = 0; +*/ +import "C" + +func main() { + _ = C.x // ERROR HERE + _ = C.x +} diff --git a/misc/cgo/errors/issue11097a.go b/misc/cgo/errors/src/issue11097a.go similarity index 100% rename from misc/cgo/errors/issue11097a.go rename to misc/cgo/errors/src/issue11097a.go diff --git a/misc/cgo/errors/issue11097b.go b/misc/cgo/errors/src/issue11097b.go similarity index 100% rename from misc/cgo/errors/issue11097b.go rename to misc/cgo/errors/src/issue11097b.go diff --git a/misc/cgo/errors/issue13129.go b/misc/cgo/errors/src/issue13129.go similarity index 88% rename from misc/cgo/errors/issue13129.go rename to misc/cgo/errors/src/issue13129.go index f7ad7a7e149..057bce4b829 100644 --- a/misc/cgo/errors/issue13129.go +++ b/misc/cgo/errors/src/issue13129.go @@ -10,5 +10,5 @@ import "C" func main() { var x C.ushort - x = int(0) // ERROR HERE + x = int(0) // ERROR HERE: C\.ushort } diff --git a/misc/cgo/errors/issue13423.go b/misc/cgo/errors/src/issue13423.go similarity index 100% rename from misc/cgo/errors/issue13423.go rename to misc/cgo/errors/src/issue13423.go diff --git a/misc/cgo/errors/src/issue13467.go b/misc/cgo/errors/src/issue13467.go new file mode 100644 index 00000000000..e061880ddab --- /dev/null +++ b/misc/cgo/errors/src/issue13467.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +/* +static int transform(int x) { return x; } +*/ +import "C" + +func F() { + var x rune = '✈' + var _ rune = C.transform(x) // ERROR HERE: C\.int +} diff --git a/misc/cgo/errors/src/issue13635.go b/misc/cgo/errors/src/issue13635.go new file mode 100644 index 00000000000..3f38f5df4b5 --- /dev/null +++ b/misc/cgo/errors/src/issue13635.go @@ -0,0 +1,24 @@ +// Copyright 2015 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// issue 13635: used to output error about C.unsignedchar. +// This test tests all such types. + +package pkg + +import "C" + +func main() { + var ( + _ C.uchar = "uc" // ERROR HERE: C\.uchar + _ C.schar = "sc" // ERROR HERE: C\.schar + _ C.ushort = "us" // ERROR HERE: C\.ushort + _ C.uint = "ui" // ERROR HERE: C\.uint + _ C.ulong = "ul" // ERROR HERE: C\.ulong + _ C.longlong = "ll" // ERROR HERE: C\.longlong + _ C.ulonglong = "ull" // ERROR HERE: C\.ulonglong + _ C.complexfloat = "cf" // ERROR HERE: C\.complexfloat + _ C.complexdouble = "cd" // ERROR HERE: C\.complexdouble + ) +} diff --git a/misc/cgo/errors/issue13830.go b/misc/cgo/errors/src/issue13830.go similarity index 100% rename from misc/cgo/errors/issue13830.go rename to misc/cgo/errors/src/issue13830.go diff --git a/misc/cgo/errors/issue14669.go b/misc/cgo/errors/src/issue14669.go similarity index 100% rename from misc/cgo/errors/issue14669.go rename to misc/cgo/errors/src/issue14669.go diff --git a/misc/cgo/errors/issue16116.go b/misc/cgo/errors/src/issue16116.go similarity index 100% rename from misc/cgo/errors/issue16116.go rename to misc/cgo/errors/src/issue16116.go diff --git a/misc/cgo/errors/issue16591.go b/misc/cgo/errors/src/issue16591.go similarity index 100% rename from misc/cgo/errors/issue16591.go rename to misc/cgo/errors/src/issue16591.go diff --git a/misc/cgo/errors/issue18452.go b/misc/cgo/errors/src/issue18452.go similarity index 75% rename from misc/cgo/errors/issue18452.go rename to misc/cgo/errors/src/issue18452.go index 36ef7f54e12..0386d768927 100644 --- a/misc/cgo/errors/issue18452.go +++ b/misc/cgo/errors/src/issue18452.go @@ -13,6 +13,6 @@ import ( func a() { fmt.Println("Hello, world!") - C.function_that_does_not_exist() // line 16 - C.pi // line 17 + C.function_that_does_not_exist() // ERROR HERE + C.pi // ERROR HERE } diff --git 
a/misc/cgo/errors/issue18889.go b/misc/cgo/errors/src/issue18889.go similarity index 100% rename from misc/cgo/errors/issue18889.go rename to misc/cgo/errors/src/issue18889.go diff --git a/misc/cgo/errors/issue7757.go b/misc/cgo/errors/src/issue7757.go similarity index 100% rename from misc/cgo/errors/issue7757.go rename to misc/cgo/errors/src/issue7757.go diff --git a/misc/cgo/errors/issue8442.go b/misc/cgo/errors/src/issue8442.go similarity index 100% rename from misc/cgo/errors/issue8442.go rename to misc/cgo/errors/src/issue8442.go diff --git a/misc/cgo/errors/src/long_double_size.go b/misc/cgo/errors/src/long_double_size.go new file mode 100644 index 00000000000..8b797f886ae --- /dev/null +++ b/misc/cgo/errors/src/long_double_size.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +/* +const int sizeofLongDouble = sizeof(long double); +*/ +import "C" + +import "fmt" + +func main() { + fmt.Println(C.sizeofLongDouble) +} diff --git a/misc/cgo/errors/malloc.go b/misc/cgo/errors/src/malloc.go similarity index 100% rename from misc/cgo/errors/malloc.go rename to misc/cgo/errors/src/malloc.go diff --git a/misc/cgo/errors/test.bash b/misc/cgo/errors/test.bash deleted file mode 100755 index ed0b0946925..00000000000 --- a/misc/cgo/errors/test.bash +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2013 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. 
- -check() { - file=$1 - line=$(grep -n 'ERROR HERE' $file | sed 's/:.*//') - if [ "$line" = "" ]; then - echo 1>&2 misc/cgo/errors/test.bash: BUG: cannot find ERROR HERE in $file - exit 1 - fi - expect $file $file:$line: -} - -expect() { - file=$1 - shift - if go build -gcflags=-C $file >errs 2>&1; then - echo 1>&2 misc/cgo/errors/test.bash: BUG: expected cgo to fail on $file but it succeeded - exit 1 - fi - if ! test -s errs; then - echo 1>&2 misc/cgo/errors/test.bash: BUG: expected error output for $file but saw none - exit 1 - fi - for error; do - if ! fgrep $error errs >/dev/null 2>&1; then - echo 1>&2 misc/cgo/errors/test.bash: BUG: expected error output for $file to contain \"$error\" but saw: - cat 1>&2 errs - exit 1 - fi - done -} - -check err1.go -check err2.go -check err3.go -check issue7757.go -check issue8442.go -check issue11097a.go -check issue11097b.go -expect issue13129.go C.ushort -check issue13423.go -expect issue13635.go C.uchar C.schar C.ushort C.uint C.ulong C.longlong C.ulonglong C.complexfloat C.complexdouble -check issue13830.go -check issue16116.go -check issue16591.go -check issue18889.go -expect issue18452.go issue18452.go:16 issue18452.go:17 - -if ! go build issue14669.go; then - exit 1 -fi -if ! CGO_CFLAGS="-O" go build issue14669.go; then - exit 1 -fi - -if ! go run ptr.go; then - exit 1 -fi - -# The malloc.go test should crash. -rm -f malloc.out -if go run malloc.go >malloc.out 2>&1; then - echo '`go run malloc.go` succeeded unexpectedly' - cat malloc.out - rm -f malloc.out - exit 1 -fi -rm -f malloc.out - -rm -rf errs _obj -exit 0 diff --git a/misc/cgo/life/main.go b/misc/cgo/life/main.go index aa2f6d116b3..45376fd05a9 100644 --- a/misc/cgo/life/main.go +++ b/misc/cgo/life/main.go @@ -1,4 +1,4 @@ -// cmpout +// cmpout -tags=use_go_run // Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style @@ -11,9 +11,10 @@ package main import ( - "." "flag" "fmt" + + "." 
) const MAXDIM = 100 diff --git a/misc/cgo/stdio/chain.go b/misc/cgo/stdio/chain.go index 03cddb76888..0fa813cab70 100644 --- a/misc/cgo/stdio/chain.go +++ b/misc/cgo/stdio/chain.go @@ -1,4 +1,4 @@ -// cmpout +// cmpout -tags=use_go_run // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style diff --git a/misc/cgo/stdio/fib.go b/misc/cgo/stdio/fib.go index 61a1b83728c..56e32552ee6 100644 --- a/misc/cgo/stdio/fib.go +++ b/misc/cgo/stdio/fib.go @@ -1,4 +1,4 @@ -// cmpout +// cmpout -tags=use_go_run // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style diff --git a/misc/cgo/stdio/hello.go b/misc/cgo/stdio/hello.go index 47179ba4827..63bff4c617a 100644 --- a/misc/cgo/stdio/hello.go +++ b/misc/cgo/stdio/hello.go @@ -1,4 +1,4 @@ -// cmpout +// cmpout -tags=use_go_run // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style diff --git a/misc/cgo/test/cgo_test.go b/misc/cgo/test/cgo_test.go index 9485e25bf47..67abfff2c03 100644 --- a/misc/cgo/test/cgo_test.go +++ b/misc/cgo/test/cgo_test.go @@ -80,6 +80,11 @@ func Test20369(t *testing.T) { test20369(t) } func Test18720(t *testing.T) { test18720(t) } func Test20266(t *testing.T) { test20266(t) } func Test20129(t *testing.T) { test20129(t) } +func Test20910(t *testing.T) { test20910(t) } func Test21708(t *testing.T) { test21708(t) } +func Test21809(t *testing.T) { test21809(t) } +func Test6907(t *testing.T) { test6907(t) } +func Test6907Go(t *testing.T) { test6907Go(t) } +func Test21897(t *testing.T) { test21897(t) } func BenchmarkCgoCall(b *testing.B) { benchCgoCall(b) } diff --git a/misc/cgo/test/issue18720.go b/misc/cgo/test/issue18720.go index a93304498e0..3d64003be74 100644 --- a/misc/cgo/test/issue18720.go +++ b/misc/cgo/test/issue18720.go @@ -12,13 +12,39 @@ package cgotest struct foo { char c; }; #define SIZE_OF(x) sizeof(x) #define SIZE_OF_FOO SIZE_OF(struct 
foo) +#define VAR1 VAR +#define VAR var +int var = 5; + +#define ADDR &var + +#define CALL fn() +int fn(void) { + return ++var; +} */ import "C" import "testing" func test18720(t *testing.T) { - if C.HELLO_WORLD != "hello\000world" { - t.Fatalf(`expected "hello\000world", but got %q`, C.HELLO_WORLD) + if got, want := C.HELLO_WORLD, "hello\000world"; got != want { + t.Errorf("C.HELLO_WORLD == %q, expected %q", got, want) + } + + if got, want := C.VAR1, C.int(5); got != want { + t.Errorf("C.VAR1 == %v, expected %v", got, want) + } + + if got, want := *C.ADDR, C.int(5); got != want { + t.Errorf("*C.ADDR == %v, expected %v", got, want) + } + + if got, want := C.CALL, C.int(6); got != want { + t.Errorf("C.CALL == %v, expected %v", got, want) + } + + if got, want := C.CALL, C.int(7); got != want { + t.Errorf("C.CALL == %v, expected %v", got, want) } // Issue 20125. diff --git a/misc/cgo/test/issue19832.go b/misc/cgo/test/issue19832.go new file mode 100644 index 00000000000..44587770af4 --- /dev/null +++ b/misc/cgo/test/issue19832.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Issue 19832. Functions taking a pointer typedef were being expanded and triggering a compiler error. + +package cgotest + +// typedef struct { int i; } *PS; +// void T19832(PS p) {} +import "C" +import "testing" + +func test19832(t *testing.T) { + C.T19832(nil) +} diff --git a/misc/cgo/test/issue20910.c b/misc/cgo/test/issue20910.c new file mode 100644 index 00000000000..e8d623fc983 --- /dev/null +++ b/misc/cgo/test/issue20910.c @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include +#include +#include +#include "_cgo_export.h" + +/* Test calling a Go function with multiple return values. 
*/ + +void +callMulti(void) +{ + struct multi_return result = multi(); + assert(strcmp(result.r0, "multi") == 0); + assert(result.r1 == 0); + free(result.r0); +} diff --git a/misc/cgo/test/issue20910.go b/misc/cgo/test/issue20910.go new file mode 100644 index 00000000000..69d7d9249ac --- /dev/null +++ b/misc/cgo/test/issue20910.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +//void callMulti(void); +import "C" + +import "testing" + +//export multi +func multi() (*C.char, C.int) { + return C.CString("multi"), 0 +} + +func test20910(t *testing.T) { + C.callMulti() +} diff --git a/misc/cgo/test/issue21809.go b/misc/cgo/test/issue21809.go new file mode 100644 index 00000000000..a3a6b88897e --- /dev/null +++ b/misc/cgo/test/issue21809.go @@ -0,0 +1,45 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +// Issue 21809. Compile C `typedef` to go type aliases. 
+ +// typedef long MySigned_t; +// /* tests alias-to-alias */ +// typedef MySigned_t MySigned2_t; +// +// long takes_long(long x) { return x * x; } +// MySigned_t takes_typedef(MySigned_t x) { return x * x; } +import "C" + +import "testing" + +func test21809(t *testing.T) { + longVar := C.long(3) + typedefVar := C.MySigned_t(4) + typedefTypedefVar := C.MySigned2_t(5) + + // all three should be considered identical to `long` + if ret := C.takes_long(longVar); ret != 9 { + t.Errorf("got %v but expected %v", ret, 9) + } + if ret := C.takes_long(typedefVar); ret != 16 { + t.Errorf("got %v but expected %v", ret, 16) + } + if ret := C.takes_long(typedefTypedefVar); ret != 25 { + t.Errorf("got %v but expected %v", ret, 25) + } + + // They should also be identical to the typedef'd type + if ret := C.takes_typedef(longVar); ret != 9 { + t.Errorf("got %v but expected %v", ret, 9) + } + if ret := C.takes_typedef(typedefVar); ret != 16 { + t.Errorf("got %v but expected %v", ret, 16) + } + if ret := C.takes_typedef(typedefTypedefVar); ret != 25 { + t.Errorf("got %v but expected %v", ret, 25) + } +} diff --git a/misc/cgo/test/issue21897.go b/misc/cgo/test/issue21897.go new file mode 100644 index 00000000000..d13246bd84a --- /dev/null +++ b/misc/cgo/test/issue21897.go @@ -0,0 +1,56 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,cgo,!internal + +package cgotest + +/* +#cgo LDFLAGS: -framework CoreFoundation +#include +*/ +import "C" +import ( + "runtime/debug" + "testing" + "unsafe" +) + +func test21897(t *testing.T) { + // Please write barrier, kick in soon. + defer debug.SetGCPercent(debug.SetGCPercent(1)) + + for i := 0; i < 10000; i++ { + testCFNumberRef() + testCFDateRef() + testCFBooleanRef() + // Allocate some memory, so eventually the write barrier is enabled + // and it will see writes of bad pointers in the test* functions below. 
+ byteSliceSink = make([]byte, 1024) + } +} + +var byteSliceSink []byte + +func testCFNumberRef() { + var v int64 = 0 + xCFNumberRef = C.CFNumberCreate(C.kCFAllocatorSystemDefault, C.kCFNumberSInt64Type, unsafe.Pointer(&v)) + //fmt.Printf("CFNumberRef: %x\n", uintptr(unsafe.Pointer(xCFNumberRef))) +} + +var xCFNumberRef C.CFNumberRef + +func testCFDateRef() { + xCFDateRef = C.CFDateCreate(C.kCFAllocatorSystemDefault, 0) // 0 value is 1 Jan 2001 00:00:00 GMT + //fmt.Printf("CFDateRef: %x\n", uintptr(unsafe.Pointer(xCFDateRef))) +} + +var xCFDateRef C.CFDateRef + +func testCFBooleanRef() { + xCFBooleanRef = C.kCFBooleanFalse + //fmt.Printf("CFBooleanRef: %x\n", uintptr(unsafe.Pointer(xCFBooleanRef))) +} + +var xCFBooleanRef C.CFBooleanRef diff --git a/misc/cgo/test/issue21897b.go b/misc/cgo/test/issue21897b.go new file mode 100644 index 00000000000..08b5f4d808e --- /dev/null +++ b/misc/cgo/test/issue21897b.go @@ -0,0 +1,13 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin !cgo internal + +package cgotest + +import "testing" + +func test21897(t *testing.T) { + t.Skip("test runs only on darwin+cgo") +} diff --git a/misc/cgo/test/issue22958.go b/misc/cgo/test/issue22958.go new file mode 100644 index 00000000000..a5f058fdae1 --- /dev/null +++ b/misc/cgo/test/issue22958.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +// Test handling of bitfields. 
+ +/* +typedef struct { + unsigned long long f8 : 8; + unsigned long long f16 : 16; + unsigned long long f24 : 24; + unsigned long long f32 : 32; + unsigned long long f40 : 40; + unsigned long long f48 : 48; + unsigned long long f56 : 56; + unsigned long long f64 : 64; +} issue22958Type; +*/ +import "C" + +// Nothing to run, just make sure this compiles. +var Vissue22958 C.issue22958Type diff --git a/misc/cgo/test/issue6907.go b/misc/cgo/test/issue6907.go new file mode 100644 index 00000000000..00495ab8e2e --- /dev/null +++ b/misc/cgo/test/issue6907.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +/* +#include +#include + +char* Issue6907CopyString(_GoString_ s) { + size_t n; + const char *p; + char *r; + + n = _GoStringLen(s); + p = _GoStringPtr(s); + r = malloc(n + 1); + memmove(r, p, n); + r[n] = '\0'; + return r; +} +*/ +import "C" + +import "testing" + +func test6907(t *testing.T) { + want := "yarn" + if got := C.GoString(C.Issue6907CopyString(want)); got != want { + t.Errorf("C.GoString(C.Issue6907CopyString(%q)) == %q, want %q", want, got, want) + } +} diff --git a/misc/cgo/test/issue6907export.go b/misc/cgo/test/issue6907export.go new file mode 100644 index 00000000000..d41899e1a62 --- /dev/null +++ b/misc/cgo/test/issue6907export.go @@ -0,0 +1,30 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cgotest + +/* +extern int CheckIssue6907C(_GoString_); +*/ +import "C" + +import ( + "testing" +) + +const CString = "C string" + +//export CheckIssue6907Go +func CheckIssue6907Go(s string) C.int { + if s == CString { + return 1 + } + return 0 +} + +func test6907Go(t *testing.T) { + if got := C.CheckIssue6907C(CString); got != 1 { + t.Errorf("C.CheckIssue6907C() == %d, want %d", got, 1) + } +} diff --git a/misc/cgo/test/issue6907export_c.c b/misc/cgo/test/issue6907export_c.c new file mode 100644 index 00000000000..9b1a4fc630b --- /dev/null +++ b/misc/cgo/test/issue6907export_c.c @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include + +#include "_cgo_export.h" + +int CheckIssue6907C(_GoString_ s) { + return CheckIssue6907Go(s); +} diff --git a/misc/cgo/test/issue7978.go b/misc/cgo/test/issue7978.go index 7fb62e807ba..b057e3eacb2 100644 --- a/misc/cgo/test/issue7978.go +++ b/misc/cgo/test/issue7978.go @@ -44,8 +44,8 @@ static void issue7978c(uint32_t *sync) { import "C" import ( - "os" "runtime" + "runtime/debug" "strings" "sync/atomic" "testing" @@ -114,12 +114,7 @@ func test7978(t *testing.T) { if C.HAS_SYNC_FETCH_AND_ADD == 0 { t.Skip("clang required for __sync_fetch_and_add support on darwin/arm") } - if runtime.GOOS == "android" || runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") { - t.Skip("GOTRACEBACK is not passed on to the exec wrapper") - } - if os.Getenv("GOTRACEBACK") != "2" { - t.Fatalf("GOTRACEBACK must be 2") - } + debug.SetTraceback("2") issue7978sync = 0 go issue7978go() // test in c code, before callback diff --git a/misc/cgo/testcarchive/carchive_test.go b/misc/cgo/testcarchive/carchive_test.go index 74897c7f6f1..7ba5faabeb3 100644 --- a/misc/cgo/testcarchive/carchive_test.go +++ b/misc/cgo/testcarchive/carchive_test.go @@ -6,6 +6,7 @@ package carchive_test import ( 
"bufio" + "bytes" "debug/elf" "fmt" "io/ioutil" @@ -134,8 +135,10 @@ func cmdToRun(name string) []string { } func testInstall(t *testing.T, exe, libgoa, libgoh string, buildcmd ...string) { + t.Helper() cmd := exec.Command(buildcmd[0], buildcmd[1:]...) cmd.Env = gopathEnv + t.Log(buildcmd) if out, err := cmd.CombinedOutput(); err != nil { t.Logf("%s", out) t.Fatal(err) @@ -171,7 +174,7 @@ func TestInstall(t *testing.T) { testInstall(t, "./testp1"+exeSuffix, filepath.Join("pkg", libgodir, "libgo.a"), filepath.Join("pkg", libgodir, "libgo.h"), - "go", "install", "-buildmode=c-archive", "libgo") + "go", "install", "-i", "-buildmode=c-archive", "libgo") // Test building libgo other than installing it. // Header files are now present. @@ -488,7 +491,7 @@ func TestPIE(t *testing.T) { os.RemoveAll("pkg") }() - cmd := exec.Command("go", "install", "-buildmode=c-archive", "libgo") + cmd := exec.Command("go", "install", "-i", "-buildmode=c-archive", "libgo") cmd.Env = gopathEnv if out, err := cmd.CombinedOutput(); err != nil { t.Logf("%s", out) @@ -549,6 +552,8 @@ func TestSIGPROF(t *testing.T) { switch GOOS { case "windows", "plan9": t.Skipf("skipping SIGPROF test on %s", GOOS) + case "darwin": + t.Skipf("skipping SIGPROF test on %s; see https://golang.org/issue/19320", GOOS) } t.Parallel() @@ -605,9 +610,26 @@ func TestCompileWithoutShared(t *testing.T) { } exe := "./testnoshared" + exeSuffix - ccArgs := append(cc, "-o", exe, "main5.c", "libgo2.a") + + // In some cases, -no-pie is needed here, but not accepted everywhere. First try + // if -no-pie is accepted. See #22126. 
+ ccArgs := append(cc, "-o", exe, "-no-pie", "main5.c", "libgo2.a") t.Log(ccArgs) out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput() + + // If -no-pie unrecognized, try -nopie if this is possibly clang + if err != nil && bytes.Contains(out, []byte("unknown")) && !strings.Contains(cc[0], "gcc") { + ccArgs = append(cc, "-o", exe, "-nopie", "main5.c", "libgo2.a") + t.Log(ccArgs) + out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput() + } + + // Don't use either -no-pie or -nopie + if err != nil && bytes.Contains(out, []byte("unrecognized")) { + ccArgs := append(cc, "-o", exe, "main5.c", "libgo2.a") + t.Log(ccArgs) + out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput() + } t.Logf("%s", out) if err != nil { t.Fatal(err) diff --git a/misc/cgo/testcshared/cshared_test.go b/misc/cgo/testcshared/cshared_test.go new file mode 100644 index 00000000000..49be0923966 --- /dev/null +++ b/misc/cgo/testcshared/cshared_test.go @@ -0,0 +1,479 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cshared_test + +import ( + "debug/elf" + "fmt" + "log" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "sync" + "testing" + "unicode" +) + +// C compiler with args (from $(go env CC) $(go env GOGCCFLAGS)). +var cc []string + +// An environment with GOPATH=$(pwd). +var gopathEnv []string + +// ".exe" on Windows. +var exeSuffix string + +var GOOS, GOARCH, GOROOT string +var installdir, androiddir string +var libSuffix, libgoname string + +func TestMain(m *testing.M) { + GOOS = goEnv("GOOS") + GOARCH = goEnv("GOARCH") + GOROOT = goEnv("GOROOT") + + if _, err := os.Stat(GOROOT); os.IsNotExist(err) { + log.Fatalf("Unable able to find GOROOT at '%s'", GOROOT) + } + + // Directory where cgo headers and outputs will be installed. + // The installation directory format varies depending on the platform. 
+ installdir = path.Join("pkg", fmt.Sprintf("%s_%s_testcshared", GOOS, GOARCH)) + switch GOOS { + case "darwin": + libSuffix = "dylib" + case "windows": + libSuffix = "dll" + default: + libSuffix = "so" + installdir = path.Join("pkg", fmt.Sprintf("%s_%s_testcshared_shared", GOOS, GOARCH)) + } + + androiddir = fmt.Sprintf("/data/local/tmp/testcshared-%d", os.Getpid()) + if GOOS == "android" { + cmd := exec.Command("adb", "shell", "mkdir", "-p", androiddir) + out, err := cmd.CombinedOutput() + if err != nil { + log.Fatalf("setupAndroid failed: %v\n%s\n", err, out) + } + } + + libgoname = "libgo." + libSuffix + + cc = []string{goEnv("CC")} + + out := goEnv("GOGCCFLAGS") + quote := '\000' + start := 0 + lastSpace := true + backslash := false + s := string(out) + for i, c := range s { + if quote == '\000' && unicode.IsSpace(c) { + if !lastSpace { + cc = append(cc, s[start:i]) + lastSpace = true + } + } else { + if lastSpace { + start = i + lastSpace = false + } + if quote == '\000' && !backslash && (c == '"' || c == '\'') { + quote = c + backslash = false + } else if !backslash && quote == c { + quote = '\000' + } else if (quote == '\000' || quote == '"') && !backslash && c == '\\' { + backslash = true + } else { + backslash = false + } + } + } + if !lastSpace { + cc = append(cc, s[start:]) + } + + switch GOOS { + case "darwin": + // For Darwin/ARM. + // TODO(crawshaw): can we do better? + cc = append(cc, []string{"-framework", "CoreFoundation", "-framework", "Foundation"}...) 
+ case "android": + cc = append(cc, "-pie", "-fuse-ld=gold") + } + libgodir := GOOS + "_" + GOARCH + switch GOOS { + case "darwin": + if GOARCH == "arm" || GOARCH == "arm64" { + libgodir += "_shared" + } + case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": + libgodir += "_shared" + } + cc = append(cc, "-I", filepath.Join("pkg", libgodir)) + + // Build an environment with GOPATH=$(pwd) + dir, err := os.Getwd() + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + gopathEnv = append(os.Environ(), "GOPATH="+dir) + + if GOOS == "windows" { + exeSuffix = ".exe" + } + + st := m.Run() + + os.Remove(libgoname) + os.RemoveAll("pkg") + cleanupHeaders() + cleanupAndroid() + + os.Exit(st) +} + +func goEnv(key string) string { + out, err := exec.Command("go", "env", key).Output() + if err != nil { + fmt.Fprintf(os.Stderr, "go env %s failed:\n%s", key, err) + fmt.Fprintf(os.Stderr, "%s", err.(*exec.ExitError).Stderr) + os.Exit(2) + } + return strings.TrimSpace(string(out)) +} + +func cmdToRun(name string) string { + return "./" + name + exeSuffix +} + +func adbPush(t *testing.T, filename string) { + if GOOS != "android" { + return + } + args := []string{"adb", "push", filename, fmt.Sprintf("%s/%s", androiddir, filename)} + cmd := exec.Command(args[0], args[1:]...) + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("adb command failed: %v\n%s\n", err, out) + } +} + +func adbRun(t *testing.T, env []string, adbargs ...string) string { + if GOOS != "android" { + t.Fatalf("trying to run adb command when operating system is not android.") + } + args := []string{"adb", "shell"} + // Propagate LD_LIBRARY_PATH to the adb shell invocation. + for _, e := range env { + if strings.Index(e, "LD_LIBRARY_PATH=") != -1 { + adbargs = append([]string{e}, adbargs...) + break + } + } + shellcmd := fmt.Sprintf("cd %s; %s", androiddir, strings.Join(adbargs, " ")) + args = append(args, shellcmd) + cmd := exec.Command(args[0], args[1:]...) 
+ out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("adb command failed: %v\n%s\n", err, out) + } + return strings.Replace(string(out), "\r", "", -1) +} + +func run(t *testing.T, env []string, args ...string) string { + t.Helper() + cmd := exec.Command(args[0], args[1:]...) + cmd.Env = env + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("command failed: %v\n%v\n%s\n", args, err, out) + } else { + t.Logf("run: %v", args) + } + return string(out) +} + +func runExe(t *testing.T, env []string, args ...string) string { + t.Helper() + if GOOS == "android" { + return adbRun(t, env, args...) + } + return run(t, env, args...) +} + +func runCC(t *testing.T, args ...string) string { + t.Helper() + // This function is run in parallel, so append to a copy of cc + // rather than cc itself. + return run(t, nil, append(append([]string(nil), cc...), args...)...) +} + +func createHeaders() error { + args := []string{"go", "install", "-i", "-buildmode=c-shared", + "-installsuffix", "testcshared", "libgo"} + cmd := exec.Command(args[0], args[1:]...) + cmd.Env = gopathEnv + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("command failed: %v\n%v\n%s\n", args, err, out) + } + + args = []string{"go", "build", "-buildmode=c-shared", + "-installsuffix", "testcshared", + "-o", libgoname, + filepath.Join("src", "libgo", "libgo.go")} + cmd = exec.Command(args[0], args[1:]...) + cmd.Env = gopathEnv + out, err = cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("command failed: %v\n%v\n%s\n", args, err, out) + } + + if GOOS == "android" { + args = []string{"adb", "push", libgoname, fmt.Sprintf("%s/%s", androiddir, libgoname)} + cmd = exec.Command(args[0], args[1:]...) 
+ out, err = cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("adb command failed: %v\n%s\n", err, out) + } + } + + return nil +} + +var ( + headersOnce sync.Once + headersErr error +) + +func createHeadersOnce(t *testing.T) { + headersOnce.Do(func() { + headersErr = createHeaders() + }) + if headersErr != nil { + t.Fatal(headersErr) + } +} + +func cleanupHeaders() { + os.Remove("libgo.h") +} + +func cleanupAndroid() { + if GOOS != "android" { + return + } + cmd := exec.Command("adb", "shell", "rm", "-rf", androiddir) + out, err := cmd.CombinedOutput() + if err != nil { + log.Fatalf("cleanupAndroid failed: %v\n%s\n", err, out) + } +} + +// test0: exported symbols in shared lib are accessible. +func TestExportedSymbols(t *testing.T) { + t.Parallel() + + cmd := "testp0" + bin := cmdToRun(cmd) + + createHeadersOnce(t) + + runCC(t, "-I", installdir, "-o", cmd, "main0.c", libgoname) + adbPush(t, cmd) + + defer os.Remove(bin) + + out := runExe(t, append(gopathEnv, "LD_LIBRARY_PATH=."), bin) + if strings.TrimSpace(out) != "PASS" { + t.Error(out) + } +} + +// test1: shared library can be dynamically loaded and exported symbols are accessible. +func TestExportedSymbolsWithDynamicLoad(t *testing.T) { + t.Parallel() + + if GOOS == "windows" { + t.Logf("Skipping on %s", GOOS) + return + } + + cmd := "testp1" + bin := cmdToRun(cmd) + + createHeadersOnce(t) + + runCC(t, "-o", cmd, "main1.c", "-ldl") + adbPush(t, cmd) + + defer os.Remove(bin) + + out := runExe(t, nil, bin, "./"+libgoname) + if strings.TrimSpace(out) != "PASS" { + t.Error(out) + } +} + +// test2: tests libgo2 which does not export any functions. +func TestUnexportedSymbols(t *testing.T) { + t.Parallel() + + if GOOS == "windows" { + t.Logf("Skipping on %s", GOOS) + return + } + + cmd := "testp2" + bin := cmdToRun(cmd) + libname := "libgo2." 
+ libSuffix + + run(t, + gopathEnv, + "go", "build", + "-buildmode=c-shared", + "-installsuffix", "testcshared", + "-o", libname, "libgo2", + ) + adbPush(t, libname) + + linkFlags := "-Wl,--no-as-needed" + if GOOS == "darwin" { + linkFlags = "" + } + + runCC(t, "-o", cmd, "main2.c", linkFlags, libname) + adbPush(t, cmd) + + defer os.Remove(libname) + defer os.Remove(bin) + + out := runExe(t, append(gopathEnv, "LD_LIBRARY_PATH=."), bin) + + if strings.TrimSpace(out) != "PASS" { + t.Error(out) + } +} + +// test3: tests main.main is exported on android. +func TestMainExportedOnAndroid(t *testing.T) { + t.Parallel() + + switch GOOS { + case "android": + break + default: + t.Logf("Skipping on %s", GOOS) + return + } + + cmd := "testp3" + bin := cmdToRun(cmd) + + createHeadersOnce(t) + + runCC(t, "-o", cmd, "main3.c", "-ldl") + adbPush(t, cmd) + + defer os.Remove(bin) + + out := runExe(t, nil, bin, "./"+libgoname) + if strings.TrimSpace(out) != "PASS" { + t.Error(out) + } +} + +func testSignalHandlers(t *testing.T, pkgname, cfile, cmd string) { + libname := pkgname + "." 
+ libSuffix + run(t, + gopathEnv, + "go", "build", + "-buildmode=c-shared", + "-installsuffix", "testcshared", + "-o", libname, pkgname, + ) + adbPush(t, libname) + runCC(t, "-pthread", "-o", cmd, cfile, "-ldl") + adbPush(t, cmd) + + bin := cmdToRun(cmd) + + defer os.Remove(libname) + defer os.Remove(bin) + defer os.Remove(pkgname + ".h") + + out := runExe(t, nil, bin, "./"+libname) + if strings.TrimSpace(out) != "PASS" { + t.Error(run(t, nil, bin, libname, "verbose")) + } +} + +// test4: test signal handlers +func TestSignalHandlers(t *testing.T) { + t.Parallel() + if GOOS == "windows" { + t.Logf("Skipping on %s", GOOS) + return + } + testSignalHandlers(t, "libgo4", "main4.c", "testp4") +} + +// test5: test signal handlers with os/signal.Notify +func TestSignalHandlersWithNotify(t *testing.T) { + t.Parallel() + if GOOS == "windows" { + t.Logf("Skipping on %s", GOOS) + return + } + testSignalHandlers(t, "libgo5", "main5.c", "testp5") +} + +func TestPIE(t *testing.T) { + t.Parallel() + + switch GOOS { + case "linux", "android": + break + default: + t.Logf("Skipping on %s", GOOS) + return + } + + createHeadersOnce(t) + + f, err := elf.Open(libgoname) + if err != nil { + t.Fatalf("elf.Open failed: %v", err) + } + defer f.Close() + + ds := f.SectionByType(elf.SHT_DYNAMIC) + if ds == nil { + t.Fatalf("no SHT_DYNAMIC section") + } + d, err := ds.Data() + if err != nil { + t.Fatalf("can't read SHT_DYNAMIC contents: %v", err) + } + for len(d) > 0 { + var tag elf.DynTag + switch f.Class { + case elf.ELFCLASS32: + tag = elf.DynTag(f.ByteOrder.Uint32(d[:4])) + d = d[8:] + case elf.ELFCLASS64: + tag = elf.DynTag(f.ByteOrder.Uint64(d[:8])) + d = d[16:] + } + if tag == elf.DT_TEXTREL { + t.Fatalf("%s has DT_TEXTREL flag", libgoname) + } + } +} diff --git a/misc/cgo/testcshared/src/p/p.go b/misc/cgo/testcshared/src/p/p.go index fb4b5ca8d1a..0f02cf3ce6c 100644 --- a/misc/cgo/testcshared/src/p/p.go +++ b/misc/cgo/testcshared/src/p/p.go @@ -8,5 +8,6 @@ import "C" //export FromPkg 
func FromPkg() int32 { return 1024 } + //export Divu func Divu(a, b uint32) uint32 { return a / b } diff --git a/misc/cgo/testcshared/test.bash b/misc/cgo/testcshared/test.bash deleted file mode 100755 index 315a0d40367..00000000000 --- a/misc/cgo/testcshared/test.bash +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2015 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# For testing Android, this script requires adb to push and run compiled -# binaries on a target device. - -set -e - -if [ ! -f src/libgo/libgo.go ]; then - cwd=$(pwd) - echo "misc/cgo/testcshared/test.bash is running in $cwd" 1>&2 - exit 1 -fi - -goos=$(go env GOOS) -goarch=$(go env GOARCH) -goroot=$(go env GOROOT) -if [ ! -d "$goroot" ]; then - echo 'misc/cgo/testcshared/test.bash cannot find GOROOT' 1>&2 - echo '$GOROOT:' "$GOROOT" 1>&2 - echo 'go env GOROOT:' "$goroot" 1>&2 - exit 1 -fi - -# Directory where cgo headers and outputs will be installed. -# The installation directory format varies depending on the platform. -installdir=pkg/${goos}_${goarch}_testcshared_shared -if [ "${goos}" = "darwin" ]; then - installdir=pkg/${goos}_${goarch}_testcshared -fi - -# Temporary directory on the android device. 
-androidpath=/data/local/tmp/testcshared-$$ - -function cleanup() { - rm -f libgo.$libext libgo2.$libext libgo4.$libext libgo5.$libext - rm -f libgo.h libgo4.h libgo5.h - rm -f testp testp2 testp3 testp4 testp5 - rm -rf pkg "${goroot}/${installdir}" - - if [ "$goos" = "android" ]; then - adb shell rm -rf "$androidpath" - fi -} -trap cleanup EXIT - -if [ "$goos" = "android" ]; then - adb shell mkdir -p "$androidpath" -fi - -function run() { - case "$goos" in - "android") - local args=$@ - output=$(adb shell "cd ${androidpath}; $@") - output=$(echo $output|tr -d '\r') - case $output in - *PASS) echo "PASS";; - *) echo "$output";; - esac - ;; - *) - echo $(env $@) - ;; - esac -} - -function binpush() { - bin=${1} - if [ "$goos" = "android" ]; then - adb push "$bin" "${androidpath}/${bin}" 2>/dev/null - fi -} - -rm -rf pkg - -suffix="-installsuffix testcshared" - -libext="so" -if [ "$goos" = "darwin" ]; then - libext="dylib" -fi - -# Create the header files. -GOPATH=$(pwd) go install -buildmode=c-shared $suffix libgo - -GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo.$libext src/libgo/libgo.go -binpush libgo.$libext - -if [ "$goos" = "linux" ] || [ "$goos" = "android" ] ; then - if readelf -d libgo.$libext | grep TEXTREL >/dev/null; then - echo "libgo.$libext has TEXTREL set" - exit 1 - fi -fi - -GOGCCFLAGS=$(go env GOGCCFLAGS) -if [ "$goos" = "android" ]; then - GOGCCFLAGS="${GOGCCFLAGS} -pie -fuse-ld=gold" -fi - -status=0 - -# test0: exported symbols in shared lib are accessible. -# TODO(iant): using _shared here shouldn't really be necessary. -$(go env CC) ${GOGCCFLAGS} -I ${installdir} -o testp main0.c ./libgo.$libext -binpush testp - -output=$(run LD_LIBRARY_PATH=. ./testp) -if [ "$output" != "PASS" ]; then - echo "FAIL test0 got ${output}" - status=1 -fi - -# test1: shared library can be dynamically loaded and exported symbols are accessible. 
-$(go env CC) ${GOGCCFLAGS} -o testp main1.c -ldl -binpush testp -output=$(run ./testp ./libgo.$libext) -if [ "$output" != "PASS" ]; then - echo "FAIL test1 got ${output}" - status=1 -fi - -# test2: tests libgo2 which does not export any functions. -GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo2.$libext libgo2 -binpush libgo2.$libext -linkflags="-Wl,--no-as-needed" -if [ "$goos" = "darwin" ]; then - linkflags="" -fi -$(go env CC) ${GOGCCFLAGS} -o testp2 main2.c $linkflags libgo2.$libext -binpush testp2 -output=$(run LD_LIBRARY_PATH=. ./testp2) -if [ "$output" != "PASS" ]; then - echo "FAIL test2 got ${output}" - status=1 -fi - -# test3: tests main.main is exported on android. -if [ "$goos" = "android" ]; then - $(go env CC) ${GOGCCFLAGS} -o testp3 main3.c -ldl - binpush testp3 - output=$(run ./testp ./libgo.so) - if [ "$output" != "PASS" ]; then - echo "FAIL test3 got ${output}" - status=1 - fi -fi - -# test4: tests signal handlers -GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo4.$libext libgo4 -binpush libgo4.$libext -$(go env CC) ${GOGCCFLAGS} -pthread -o testp4 main4.c -ldl -binpush testp4 -output=$(run ./testp4 ./libgo4.$libext 2>&1) -if test "$output" != "PASS"; then - echo "FAIL test4 got ${output}" - if test "$goos" != "android"; then - echo "re-running test4 in verbose mode" - ./testp4 ./libgo4.$libext verbose - fi - status=1 -fi - -# test5: tests signal handlers with os/signal.Notify -GOPATH=$(pwd) go build -buildmode=c-shared $suffix -o libgo5.$libext libgo5 -binpush libgo5.$libext -$(go env CC) ${GOGCCFLAGS} -pthread -o testp5 main5.c -ldl -binpush testp5 -output=$(run ./testp5 ./libgo5.$libext 2>&1) -if test "$output" != "PASS"; then - echo "FAIL test5 got ${output}" - if test "$goos" != "android"; then - echo "re-running test5 in verbose mode" - ./testp5 ./libgo5.$libext verbose - fi - status=1 -fi - -if test "$libext" = "dylib"; then - # make sure dylibs are well-formed - if ! 
otool -l libgo*.dylib >/dev/null; then - status=1 - fi -fi - -if test $status = 0; then - echo "ok" -fi - -exit $status diff --git a/misc/cgo/testplugin/src/host/host.go b/misc/cgo/testplugin/src/host/host.go index 898f44efa15..0ca17da3def 100644 --- a/misc/cgo/testplugin/src/host/host.go +++ b/misc/cgo/testplugin/src/host/host.go @@ -126,14 +126,24 @@ func main() { log.Fatalf(`plugin1.F()=%d, want 17`, gotf) } - // plugin2 has no exported symbols, only an init function. - if _, err := plugin.Open("plugin2.so"); err != nil { + p2, err := plugin.Open("plugin2.so") + if err != nil { log.Fatalf("plugin.Open failed: %v", err) } + // Check that plugin2's init function was called, and + // that it modifies the same global variable as the host. if got, want := common.X, 2; got != want { log.Fatalf("after loading plugin2, common.X=%d, want %d", got, want) } + _, err = plugin.Open("plugin2-dup.so") + if err == nil { + log.Fatal(`plugin.Open("plugin2-dup.so"): duplicate open should have failed`) + } + if s := err.Error(); !strings.Contains(s, "already loaded") { + log.Fatal(`plugin.Open("plugin2.so"): error does not mention "already loaded"`) + } + _, err = plugin.Open("plugin-mismatch.so") if err == nil { log.Fatal(`plugin.Open("plugin-mismatch.so"): should have failed`) @@ -142,6 +152,24 @@ func main() { log.Fatalf(`plugin.Open("plugin-mismatch.so"): error does not mention "different version": %v`, s) } + _, err = plugin.Open("plugin2-dup.so") + if err == nil { + log.Fatal(`plugin.Open("plugin2-dup.so"): duplicate open after bad plugin should have failed`) + } + _, err = plugin.Open("plugin2.so") + if err != nil { + log.Fatalf(`plugin.Open("plugin2.so"): second open with same name failed: %v`, err) + } + + // Test that unexported types with the same names in + // different plugins do not interfere with each other. + // + // See Issue #21386. 
+ UnexportedNameReuse, _ := p.Lookup("UnexportedNameReuse") + UnexportedNameReuse.(func())() + UnexportedNameReuse, _ = p2.Lookup("UnexportedNameReuse") + UnexportedNameReuse.(func())() + testUnnamed() fmt.Println("PASS") diff --git a/misc/cgo/testplugin/src/issue18584/main.go b/misc/cgo/testplugin/src/issue18584/main.go new file mode 100644 index 00000000000..c280fd46203 --- /dev/null +++ b/misc/cgo/testplugin/src/issue18584/main.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "plugin" + +func main() { + p, err := plugin.Open("plugin.so") + if err != nil { + panic(err) + } + + sym, err := p.Lookup("G") + if err != nil { + panic(err) + } + g := sym.(func() bool) + if !g() { + panic("expected types to match, Issue #18584") + } +} diff --git a/misc/cgo/testplugin/src/issue18584/plugin.go b/misc/cgo/testplugin/src/issue18584/plugin.go new file mode 100644 index 00000000000..be0868d3752 --- /dev/null +++ b/misc/cgo/testplugin/src/issue18584/plugin.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "reflect" + +type C struct { +} + +func F(c *C) *C { + return nil +} + +func G() bool { + var c *C + return reflect.TypeOf(F).Out(0) == reflect.TypeOf(c) +} diff --git a/misc/cgo/testplugin/src/issue19418/main.go b/misc/cgo/testplugin/src/issue19418/main.go new file mode 100644 index 00000000000..2ec9f9aaaa2 --- /dev/null +++ b/misc/cgo/testplugin/src/issue19418/main.go @@ -0,0 +1,29 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "os" + "plugin" +) + +func main() { + p, err := plugin.Open("plugin.so") + if err != nil { + panic(err) + } + + val, err := p.Lookup("Val") + if err != nil { + panic(err) + } + got := *val.(*string) + const want = "linkstr" + if got != want { + fmt.Fprintf(os.Stderr, "issue19418 value is %q, want %q\n", got, want) + os.Exit(2) + } +} diff --git a/src/internal/cpu/cpu_ppc64.go b/misc/cgo/testplugin/src/issue19418/plugin.go similarity index 80% rename from src/internal/cpu/cpu_ppc64.go rename to misc/cgo/testplugin/src/issue19418/plugin.go index 5b151508479..fe93b161431 100644 --- a/src/internal/cpu/cpu_ppc64.go +++ b/misc/cgo/testplugin/src/issue19418/plugin.go @@ -2,6 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package cpu +package main -const CacheLineSize = 128 +var Val = "val-unset" diff --git a/misc/cgo/testplugin/src/issue19529/plugin.go b/misc/cgo/testplugin/src/issue19529/plugin.go new file mode 100644 index 00000000000..ad2df6cc7c7 --- /dev/null +++ b/misc/cgo/testplugin/src/issue19529/plugin.go @@ -0,0 +1,15 @@ +package main + +import ( + "reflect" +) + +type Foo struct { + Bar string `json:"Bar@baz,omitempty"` +} + +func F() { + println(reflect.TypeOf(Foo{}).Field(0).Tag) +} + +func main() {} diff --git a/misc/cgo/testplugin/src/issue22175/main.go b/misc/cgo/testplugin/src/issue22175/main.go new file mode 100644 index 00000000000..9be9bab9dc3 --- /dev/null +++ b/misc/cgo/testplugin/src/issue22175/main.go @@ -0,0 +1,28 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "os" + "plugin" +) + +func main() { + p2, err := plugin.Open("issue22175_plugin1.so") + if err != nil { + panic(err) + } + f, err := p2.Lookup("F") + if err != nil { + panic(err) + } + got := f.(func() int)() + const want = 971 + if got != want { + fmt.Fprintf(os.Stderr, "issue22175: F()=%d, want %d", got, want) + os.Exit(1) + } +} diff --git a/misc/cgo/testplugin/src/issue22175/plugin1.go b/misc/cgo/testplugin/src/issue22175/plugin1.go new file mode 100644 index 00000000000..5ae6cb631e7 --- /dev/null +++ b/misc/cgo/testplugin/src/issue22175/plugin1.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "plugin" + +func F() int { + p2, err := plugin.Open("issue22175_plugin2.so") + if err != nil { + panic(err) + } + g, err := p2.Lookup("G") + if err != nil { + panic(err) + } + return g.(func() int)() +} + +func main() {} diff --git a/misc/cgo/testplugin/src/issue22175/plugin2.go b/misc/cgo/testplugin/src/issue22175/plugin2.go new file mode 100644 index 00000000000..f387a192e67 --- /dev/null +++ b/misc/cgo/testplugin/src/issue22175/plugin2.go @@ -0,0 +1,9 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func G() int { return 971 } + +func main() {} diff --git a/misc/cgo/testplugin/src/issue22295.pkg/main.go b/misc/cgo/testplugin/src/issue22295.pkg/main.go new file mode 100644 index 00000000000..6cb186e1003 --- /dev/null +++ b/misc/cgo/testplugin/src/issue22295.pkg/main.go @@ -0,0 +1,28 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package main + +import ( + "log" + "plugin" +) + +func main() { + p, err := plugin.Open("issue.22295.so") + if err != nil { + log.Fatal(err) + } + f, err := p.Lookup("F") + if err != nil { + log.Fatal(err) + } + const want = 2503 + got := f.(func() int)() + if got != want { + log.Fatalf("got %d, want %d", got, want) + } +} diff --git a/misc/cgo/testplugin/src/issue22295.pkg/plugin.go b/misc/cgo/testplugin/src/issue22295.pkg/plugin.go new file mode 100644 index 00000000000..46b08a405bc --- /dev/null +++ b/misc/cgo/testplugin/src/issue22295.pkg/plugin.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +var f *int + +func init() { + f = new(int) + *f = 2503 +} + +func F() int { return *f } + +func main() {} diff --git a/misc/cgo/testplugin/src/plugin1/plugin1.go b/misc/cgo/testplugin/src/plugin1/plugin1.go index edcef2c77e9..0a9fa2f2c1f 100644 --- a/misc/cgo/testplugin/src/plugin1/plugin1.go +++ b/misc/cgo/testplugin/src/plugin1/plugin1.go @@ -7,7 +7,10 @@ package main // // No C code required. import "C" -import "common" +import ( + "common" + "reflect" +) func F() int { _ = make([]byte, 1<<21) // trigger stack unwind, Issue #18190. 
@@ -33,6 +36,21 @@ func init() { call(g) } +type sameNameReusedInPlugins struct { + X string +} + +type sameNameHolder struct { + F *sameNameReusedInPlugins +} + +func UnexportedNameReuse() { + h := sameNameHolder{} + v := reflect.ValueOf(&h).Elem().Field(0) + newval := reflect.New(v.Type().Elem()) + v.Set(newval) +} + func main() { panic("plugin1.main called") } diff --git a/misc/cgo/testplugin/src/plugin2/plugin2.go b/misc/cgo/testplugin/src/plugin2/plugin2.go index 9c507fc3658..a67f2de27a7 100644 --- a/misc/cgo/testplugin/src/plugin2/plugin2.go +++ b/misc/cgo/testplugin/src/plugin2/plugin2.go @@ -13,6 +13,7 @@ import "C" import ( "common" + "reflect" "strings" ) @@ -22,6 +23,21 @@ func init() { common.X = 2 } +type sameNameReusedInPlugins struct { + X string +} + +type sameNameHolder struct { + F *sameNameReusedInPlugins +} + +func UnexportedNameReuse() { + h := sameNameHolder{} + v := reflect.ValueOf(&h).Elem().Field(0) + newval := reflect.New(v.Type().Elem()) + v.Set(newval) +} + func main() { panic("plugin1.main called") } diff --git a/misc/cgo/testplugin/test.bash b/misc/cgo/testplugin/test.bash index 69df5bd2bfa..5ef87625f1a 100755 --- a/misc/cgo/testplugin/test.bash +++ b/misc/cgo/testplugin/test.bash @@ -14,39 +14,77 @@ fi goos=$(go env GOOS) goarch=$(go env GOARCH) +echo SKIP: golang.org/issue/22571. 
+exit 0 + function cleanup() { - rm -f plugin*.so unnamed*.so iface*.so - rm -rf host pkg sub iface issue18676 issue19534 + rm -f plugin*.so unnamed*.so iface*.so issue* + rm -rf host pkg sub iface } trap cleanup EXIT rm -rf pkg sub mkdir sub -GOPATH=$(pwd) go build -buildmode=plugin plugin1 -GOPATH=$(pwd) go build -buildmode=plugin plugin2 -GOPATH=$(pwd)/altpath go build -buildmode=plugin plugin-mismatch -GOPATH=$(pwd) go build -buildmode=plugin -o=sub/plugin1.so sub/plugin1 -GOPATH=$(pwd) go build -buildmode=plugin unnamed1.go -GOPATH=$(pwd) go build -buildmode=plugin unnamed2.go -GOPATH=$(pwd) go build host +GOPATH=$(pwd) go build -i -gcflags "$GO_GCFLAGS" -buildmode=plugin plugin1 +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin plugin2 +cp plugin2.so plugin2-dup.so +GOPATH=$(pwd)/altpath go build -gcflags "$GO_GCFLAGS" -buildmode=plugin plugin-mismatch +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o=sub/plugin1.so sub/plugin1 +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o=unnamed1.so unnamed1/main.go +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o=unnamed2.so unnamed2/main.go +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" host LD_LIBRARY_PATH=$(pwd) ./host # Test that types and itabs get properly uniqified. -GOPATH=$(pwd) go build -buildmode=plugin iface_a -GOPATH=$(pwd) go build -buildmode=plugin iface_b -GOPATH=$(pwd) go build iface +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin iface_a +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin iface_b +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" iface LD_LIBRARY_PATH=$(pwd) ./iface +function _timeout() ( + set -e + $2 & + p=$! + (sleep $1; kill $p 2>/dev/null) & + p2=$! + wait $p 2>/dev/null + kill -0 $p2 2>/dev/null +) + # Test for issue 18676 - make sure we don't add the same itab twice. # The buggy code hangs forever, so use a timeout to check for that. 
-GOPATH=$(pwd) go build -buildmode=plugin -o plugin.so src/issue18676/plugin.go -GOPATH=$(pwd) go build -o issue18676 src/issue18676/main.go -timeout 10s ./issue18676 +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o plugin.so src/issue18676/plugin.go +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue18676 src/issue18676/main.go +_timeout 10s ./issue18676 # Test for issue 19534 - that we can load a plugin built in a path with non-alpha # characters -GOPATH=$(pwd) go build -buildmode=plugin -ldflags='-pluginpath=issue.19534' -o plugin.so src/issue19534/plugin.go -GOPATH=$(pwd) go build -o issue19534 src/issue19534/main.go +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -ldflags='-pluginpath=issue.19534' -o plugin.so src/issue19534/plugin.go +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue19534 src/issue19534/main.go ./issue19534 + +# Test for issue 18584 +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o plugin.so src/issue18584/plugin.go +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue18584 src/issue18584/main.go +./issue18584 + +# Test for issue 19418 +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin "-ldflags=-X main.Val=linkstr" -o plugin.so src/issue19418/plugin.go +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue19418 src/issue19418/main.go +./issue19418 + +# Test for issue 19529 +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o plugin.so src/issue19529/plugin.go + +# Test for issue 22175 +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o issue22175_plugin1.so src/issue22175/plugin1.go +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o issue22175_plugin2.so src/issue22175/plugin2.go +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -o issue22175 src/issue22175/main.go +./issue22175 + +# Test for issue 22295 +GOPATH=$(pwd) go build -gcflags "$GO_GCFLAGS" -buildmode=plugin -o issue.22295.so issue22295.pkg +GOPATH=$(pwd) go 
build -gcflags "$GO_GCFLAGS" -o issue22295 src/issue22295.pkg/main.go +./issue22295 diff --git a/misc/cgo/testplugin/unnamed1.go b/misc/cgo/testplugin/unnamed1/main.go similarity index 100% rename from misc/cgo/testplugin/unnamed1.go rename to misc/cgo/testplugin/unnamed1/main.go diff --git a/misc/cgo/testplugin/unnamed2.go b/misc/cgo/testplugin/unnamed2/main.go similarity index 100% rename from misc/cgo/testplugin/unnamed2.go rename to misc/cgo/testplugin/unnamed2/main.go diff --git a/misc/cgo/testsanitizers/cc_test.go b/misc/cgo/testsanitizers/cc_test.go new file mode 100644 index 00000000000..cacb0d93df7 --- /dev/null +++ b/misc/cgo/testsanitizers/cc_test.go @@ -0,0 +1,441 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// sanitizers_test checks the use of Go with sanitizers like msan, asan, etc. +// See https://github.com/google/sanitizers. +package sanitizers_test + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "syscall" + "testing" + "unicode" +) + +var overcommit struct { + sync.Once + value int + err error +} + +// requireOvercommit skips t if the kernel does not allow overcommit. +func requireOvercommit(t *testing.T) { + t.Helper() + + overcommit.Once.Do(func() { + var out []byte + out, overcommit.err = ioutil.ReadFile("/proc/sys/vm/overcommit_memory") + if overcommit.err != nil { + return + } + overcommit.value, overcommit.err = strconv.Atoi(string(bytes.TrimSpace(out))) + }) + + if overcommit.err != nil { + t.Skipf("couldn't determine vm.overcommit_memory (%v); assuming no overcommit", overcommit.err) + } + if overcommit.value == 2 { + t.Skip("vm.overcommit_memory=2") + } +} + +var env struct { + sync.Once + m map[string]string + err error +} + +// goEnv returns the output of $(go env) as a map. 
+func goEnv(key string) (string, error) { + env.Once.Do(func() { + var out []byte + out, env.err = exec.Command("go", "env", "-json").Output() + if env.err != nil { + return + } + + env.m = make(map[string]string) + env.err = json.Unmarshal(out, &env.m) + }) + if env.err != nil { + return "", env.err + } + + v, ok := env.m[key] + if !ok { + return "", fmt.Errorf("`go env`: no entry for %v", key) + } + return v, nil +} + +// replaceEnv sets the key environment variable to value in cmd. +func replaceEnv(cmd *exec.Cmd, key, value string) { + if cmd.Env == nil { + cmd.Env = os.Environ() + } + cmd.Env = append(cmd.Env, key+"="+value) +} + +// mustRun executes t and fails cmd with a well-formatted message if it fails. +func mustRun(t *testing.T, cmd *exec.Cmd) { + t.Helper() + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("%#q exited with %v\n%s", strings.Join(cmd.Args, " "), err, out) + } +} + +// cc returns a cmd that executes `$(go env CC) $(go env GOGCCFLAGS) $args`. +func cc(args ...string) (*exec.Cmd, error) { + CC, err := goEnv("CC") + if err != nil { + return nil, err + } + + GOGCCFLAGS, err := goEnv("GOGCCFLAGS") + if err != nil { + return nil, err + } + + // Split GOGCCFLAGS, respecting quoting. + // + // TODO(bcmills): This code also appears in + // misc/cgo/testcarchive/carchive_test.go, and perhaps ought to go in + // src/cmd/dist/test.go as well. Figure out where to put it so that it can be + // shared. 
+ var flags []string + quote := '\000' + start := 0 + lastSpace := true + backslash := false + for i, c := range GOGCCFLAGS { + if quote == '\000' && unicode.IsSpace(c) { + if !lastSpace { + flags = append(flags, GOGCCFLAGS[start:i]) + lastSpace = true + } + } else { + if lastSpace { + start = i + lastSpace = false + } + if quote == '\000' && !backslash && (c == '"' || c == '\'') { + quote = c + backslash = false + } else if !backslash && quote == c { + quote = '\000' + } else if (quote == '\000' || quote == '"') && !backslash && c == '\\' { + backslash = true + } else { + backslash = false + } + } + } + if !lastSpace { + flags = append(flags, GOGCCFLAGS[start:]) + } + + cmd := exec.Command(CC, flags...) + cmd.Args = append(cmd.Args, args...) + return cmd, nil +} + +type version struct { + name string + major, minor int +} + +var compiler struct { + sync.Once + version + err error +} + +// compilerVersion detects the version of $(go env CC). +// +// It returns a non-nil error if the compiler matches a known version schema but +// the version could not be parsed, or if $(go env CC) could not be determined. +func compilerVersion() (version, error) { + compiler.Once.Do(func() { + compiler.err = func() error { + compiler.name = "unknown" + + cmd, err := cc("--version") + if err != nil { + return err + } + out, err := cmd.Output() + if err != nil { + // Compiler does not support "--version" flag: not Clang or GCC. + return nil + } + + var match [][]byte + if bytes.HasPrefix(out, []byte("gcc")) { + compiler.name = "gcc" + + cmd, err := cc("-dumpversion") + if err != nil { + return err + } + out, err := cmd.Output() + if err != nil { + // gcc, but does not support gcc's "-dumpversion" flag?! 
+ return err + } + gccRE := regexp.MustCompile(`(\d+)\.(\d+)`) + match = gccRE.FindSubmatch(out) + } else { + clangRE := regexp.MustCompile(`clang version (\d+)\.(\d+)`) + if match = clangRE.FindSubmatch(out); len(match) > 0 { + compiler.name = "clang" + } + } + + if len(match) < 3 { + return nil // "unknown" + } + if compiler.major, err = strconv.Atoi(string(match[1])); err != nil { + return err + } + if compiler.minor, err = strconv.Atoi(string(match[2])); err != nil { + return err + } + return nil + }() + }) + return compiler.version, compiler.err +} + +type compilerCheck struct { + once sync.Once + err error + skip bool // If true, skip with err instead of failing with it. +} + +type config struct { + sanitizer string + + cFlags, ldFlags, goFlags []string + + sanitizerCheck, runtimeCheck compilerCheck +} + +var configs struct { + sync.Mutex + m map[string]*config +} + +// configure returns the configuration for the given sanitizer. +func configure(sanitizer string) *config { + configs.Lock() + defer configs.Unlock() + if c, ok := configs.m[sanitizer]; ok { + return c + } + + c := &config{ + sanitizer: sanitizer, + cFlags: []string{"-fsanitize=" + sanitizer}, + ldFlags: []string{"-fsanitize=" + sanitizer}, + } + + if testing.Verbose() { + c.goFlags = append(c.goFlags, "-x") + } + + switch sanitizer { + case "memory": + c.goFlags = append(c.goFlags, "-msan") + + case "thread": + c.goFlags = append(c.goFlags, "--installsuffix=tsan") + compiler, _ := compilerVersion() + if compiler.name == "gcc" { + c.cFlags = append(c.cFlags, "-fPIC") + c.ldFlags = append(c.ldFlags, "-fPIC", "-static-libtsan") + } + + default: + panic(fmt.Sprintf("unrecognized sanitizer: %q", sanitizer)) + } + + if configs.m == nil { + configs.m = make(map[string]*config) + } + configs.m[sanitizer] = c + return c +} + +// goCmd returns a Cmd that executes "go $subcommand $args" with appropriate +// additional flags and environment. 
+func (c *config) goCmd(subcommand string, args ...string) *exec.Cmd { + cmd := exec.Command("go", subcommand) + cmd.Args = append(cmd.Args, c.goFlags...) + cmd.Args = append(cmd.Args, args...) + replaceEnv(cmd, "CGO_CFLAGS", strings.Join(c.cFlags, " ")) + replaceEnv(cmd, "CGO_LDFLAGS", strings.Join(c.ldFlags, " ")) + return cmd +} + +// skipIfCSanitizerBroken skips t if the C compiler does not produce working +// binaries as configured. +func (c *config) skipIfCSanitizerBroken(t *testing.T) { + check := &c.sanitizerCheck + check.once.Do(func() { + check.skip, check.err = c.checkCSanitizer() + }) + if check.err != nil { + t.Helper() + if check.skip { + t.Skip(check.err) + } + t.Fatal(check.err) + } +} + +var cMain = []byte(` +int main() { + return 0; +} +`) + +func (c *config) checkCSanitizer() (skip bool, err error) { + dir, err := ioutil.TempDir("", c.sanitizer) + if err != nil { + return false, fmt.Errorf("failed to create temp directory: %v", err) + } + defer os.RemoveAll(dir) + + src := filepath.Join(dir, "return0.c") + if err := ioutil.WriteFile(src, cMain, 0600); err != nil { + return false, fmt.Errorf("failed to write C source file: %v", err) + } + + dst := filepath.Join(dir, "return0") + cmd, err := cc(c.cFlags...) + if err != nil { + return false, err + } + cmd.Args = append(cmd.Args, c.ldFlags...) 
+ cmd.Args = append(cmd.Args, "-o", dst, src) + out, err := cmd.CombinedOutput() + if err != nil { + if bytes.Contains(out, []byte("-fsanitize")) && + (bytes.Contains(out, []byte("unrecognized")) || + bytes.Contains(out, []byte("unsupported"))) { + return true, errors.New(string(out)) + } + return true, fmt.Errorf("%#q failed: %v\n%s", strings.Join(cmd.Args, " "), err, out) + } + + if out, err := exec.Command(dst).CombinedOutput(); err != nil { + if os.IsNotExist(err) { + return true, fmt.Errorf("%#q failed to produce executable: %v", strings.Join(cmd.Args, " "), err) + } + snippet := bytes.SplitN(out, []byte{'\n'}, 2)[0] + return true, fmt.Errorf("%#q generated broken executable: %v\n%s", strings.Join(cmd.Args, " "), err, snippet) + } + + return false, nil +} + +// skipIfRuntimeIncompatible skips t if the Go runtime is suspected not to work +// with cgo as configured. +func (c *config) skipIfRuntimeIncompatible(t *testing.T) { + check := &c.runtimeCheck + check.once.Do(func() { + check.skip, check.err = c.checkRuntime() + }) + if check.err != nil { + t.Helper() + if check.skip { + t.Skip(check.err) + } + t.Fatal(check.err) + } +} + +func (c *config) checkRuntime() (skip bool, err error) { + if c.sanitizer != "thread" { + return false, nil + } + + // libcgo.h sets CGO_TSAN if it detects TSAN support in the C compiler. + // Dump the preprocessor defines to check that that works. + // (Sometimes it doesn't: see https://golang.org/issue/15983.) + cmd, err := cc(c.cFlags...) + if err != nil { + return false, err + } + cmd.Args = append(cmd.Args, "-dM", "-E", "../../../src/runtime/cgo/libcgo.h") + out, err := cmd.CombinedOutput() + if err != nil { + return false, fmt.Errorf("%#q exited with %v\n%s", strings.Join(cmd.Args, " "), err, out) + } + if !bytes.Contains(out, []byte("#define CGO_TSAN")) { + return true, fmt.Errorf("%#q did not define CGO_TSAN") + } + return false, nil +} + +// srcPath returns the path to the given file relative to this test's source tree. 
+func srcPath(path string) string { + return filepath.Join("src", path) +} + +// A tempDir manages a temporary directory within a test. +type tempDir struct { + base string +} + +func (d *tempDir) RemoveAll(t *testing.T) { + t.Helper() + if d.base == "" { + return + } + if err := os.RemoveAll(d.base); err != nil { + t.Fatal("Failed to remove temp dir: %v", err) + } +} + +func (d *tempDir) Join(name string) string { + return filepath.Join(d.base, name) +} + +func newTempDir(t *testing.T) *tempDir { + t.Helper() + dir, err := ioutil.TempDir("", filepath.Dir(t.Name())) + if err != nil { + t.Fatalf("Failed to create temp dir: %v", err) + } + return &tempDir{base: dir} +} + +// hangProneCmd returns an exec.Cmd for a command that is likely to hang. +// +// If one of these tests hangs, the caller is likely to kill the test process +// using SIGINT, which will be sent to all of the processes in the test's group. +// Unfortunately, TSAN in particular is prone to dropping signals, so the SIGINT +// may terminate the test binary but leave the subprocess running. hangProneCmd +// configures subprocess to receive SIGKILL instead to ensure that it won't +// leak. +func hangProneCmd(name string, arg ...string) *exec.Cmd { + cmd := exec.Command(name, arg...) + cmd.SysProcAttr = &syscall.SysProcAttr{ + Pdeathsig: syscall.SIGKILL, + } + return cmd +} diff --git a/misc/cgo/testsanitizers/cshared_test.go b/misc/cgo/testsanitizers/cshared_test.go new file mode 100644 index 00000000000..56063ea6201 --- /dev/null +++ b/misc/cgo/testsanitizers/cshared_test.go @@ -0,0 +1,74 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package sanitizers_test + +import ( + "fmt" + "io/ioutil" + "strings" + "testing" +) + +func TestShared(t *testing.T) { + t.Parallel() + requireOvercommit(t) + + GOOS, err := goEnv("GOOS") + if err != nil { + t.Fatal(err) + } + libExt := "so" + if GOOS == "darwin" { + libExt = "dylib" + } + + cases := []struct { + src string + sanitizer string + }{ + { + src: "msan_shared.go", + sanitizer: "memory", + }, + { + src: "tsan_shared.go", + sanitizer: "thread", + }, + } + + for _, tc := range cases { + tc := tc + name := strings.TrimSuffix(tc.src, ".go") + t.Run(name, func(t *testing.T) { + t.Parallel() + config := configure(tc.sanitizer) + config.skipIfCSanitizerBroken(t) + + dir := newTempDir(t) + defer dir.RemoveAll(t) + + lib := dir.Join(fmt.Sprintf("lib%s.%s", name, libExt)) + mustRun(t, config.goCmd("build", "-buildmode=c-shared", "-o", lib, srcPath(tc.src))) + + cSrc := dir.Join("main.c") + if err := ioutil.WriteFile(cSrc, cMain, 0600); err != nil { + t.Fatalf("failed to write C source file: %v", err) + } + + dstBin := dir.Join(name) + cmd, err := cc(config.cFlags...) + if err != nil { + t.Fatal(err) + } + cmd.Args = append(cmd.Args, config.ldFlags...) + cmd.Args = append(cmd.Args, "-o", dstBin, cSrc, lib) + mustRun(t, cmd) + + cmd = hangProneCmd(dstBin) + replaceEnv(cmd, "LD_LIBRARY_PATH", ".") + mustRun(t, cmd) + }) + } +} diff --git a/misc/cgo/testsanitizers/msan_test.go b/misc/cgo/testsanitizers/msan_test.go new file mode 100644 index 00000000000..af5afa9ee48 --- /dev/null +++ b/misc/cgo/testsanitizers/msan_test.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package sanitizers_test + +import ( + "strings" + "testing" +) + +func TestMSAN(t *testing.T) { + t.Parallel() + requireOvercommit(t) + config := configure("memory") + config.skipIfCSanitizerBroken(t) + + mustRun(t, config.goCmd("build", "std")) + + cases := []struct { + src string + wantErr bool + }{ + {src: "msan.go"}, + {src: "msan2.go"}, + {src: "msan2_cmsan.go"}, + {src: "msan3.go"}, + {src: "msan4.go"}, + {src: "msan5.go"}, + {src: "msan_fail.go", wantErr: true}, + } + for _, tc := range cases { + tc := tc + name := strings.TrimSuffix(tc.src, ".go") + t.Run(name, func(t *testing.T) { + t.Parallel() + + dir := newTempDir(t) + defer dir.RemoveAll(t) + + outPath := dir.Join(name) + mustRun(t, config.goCmd("build", "-o", outPath, srcPath(tc.src))) + + cmd := hangProneCmd(outPath) + if tc.wantErr { + out, err := cmd.CombinedOutput() + if err != nil { + return + } + t.Fatalf("%#q exited without error; want MSAN failure\n%s", strings.Join(cmd.Args, " "), out) + } + mustRun(t, cmd) + }) + } +} diff --git a/misc/cgo/testsanitizers/msan.go b/misc/cgo/testsanitizers/src/msan.go similarity index 100% rename from misc/cgo/testsanitizers/msan.go rename to misc/cgo/testsanitizers/src/msan.go diff --git a/misc/cgo/testsanitizers/msan2.go b/misc/cgo/testsanitizers/src/msan2.go similarity index 100% rename from misc/cgo/testsanitizers/msan2.go rename to misc/cgo/testsanitizers/src/msan2.go diff --git a/misc/cgo/testsanitizers/src/msan2_cmsan.go b/misc/cgo/testsanitizers/src/msan2_cmsan.go new file mode 100644 index 00000000000..8fdaea90c97 --- /dev/null +++ b/misc/cgo/testsanitizers/src/msan2_cmsan.go @@ -0,0 +1,38 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +/* +#cgo LDFLAGS: -fsanitize=memory +#cgo CPPFLAGS: -fsanitize=memory + +#include +#include +#include + +void f(int32_t *p, int n) { + int32_t * volatile q = (int32_t *)malloc(sizeof(int32_t) * n); + memcpy(p, q, n * sizeof(*p)); + free(q); +} + +void g(int32_t *p, int n) { + if (p[4] != 1) { + abort(); + } +} +*/ +import "C" + +import ( + "unsafe" +) + +func main() { + a := make([]int32, 10) + C.f((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a))) + a[4] = 1 + C.g((*C.int32_t)(unsafe.Pointer(&a[0])), C.int(len(a))) +} diff --git a/misc/cgo/testsanitizers/msan3.go b/misc/cgo/testsanitizers/src/msan3.go similarity index 100% rename from misc/cgo/testsanitizers/msan3.go rename to misc/cgo/testsanitizers/src/msan3.go diff --git a/misc/cgo/testsanitizers/msan4.go b/misc/cgo/testsanitizers/src/msan4.go similarity index 100% rename from misc/cgo/testsanitizers/msan4.go rename to misc/cgo/testsanitizers/src/msan4.go diff --git a/misc/cgo/testsanitizers/msan5.go b/misc/cgo/testsanitizers/src/msan5.go similarity index 100% rename from misc/cgo/testsanitizers/msan5.go rename to misc/cgo/testsanitizers/src/msan5.go diff --git a/misc/cgo/testsanitizers/msan_fail.go b/misc/cgo/testsanitizers/src/msan_fail.go similarity index 100% rename from misc/cgo/testsanitizers/msan_fail.go rename to misc/cgo/testsanitizers/src/msan_fail.go diff --git a/misc/cgo/testsanitizers/msan_shared.go b/misc/cgo/testsanitizers/src/msan_shared.go similarity index 100% rename from misc/cgo/testsanitizers/msan_shared.go rename to misc/cgo/testsanitizers/src/msan_shared.go diff --git a/misc/cgo/testsanitizers/tsan.go b/misc/cgo/testsanitizers/src/tsan.go similarity index 100% rename from misc/cgo/testsanitizers/tsan.go rename to misc/cgo/testsanitizers/src/tsan.go diff --git a/misc/cgo/testsanitizers/tsan10.go b/misc/cgo/testsanitizers/src/tsan10.go similarity index 100% rename from misc/cgo/testsanitizers/tsan10.go rename to misc/cgo/testsanitizers/src/tsan10.go diff --git 
a/misc/cgo/testsanitizers/tsan11.go b/misc/cgo/testsanitizers/src/tsan11.go similarity index 100% rename from misc/cgo/testsanitizers/tsan11.go rename to misc/cgo/testsanitizers/src/tsan11.go diff --git a/misc/cgo/testsanitizers/tsan12.go b/misc/cgo/testsanitizers/src/tsan12.go similarity index 100% rename from misc/cgo/testsanitizers/tsan12.go rename to misc/cgo/testsanitizers/src/tsan12.go diff --git a/misc/cgo/testsanitizers/tsan2.go b/misc/cgo/testsanitizers/src/tsan2.go similarity index 100% rename from misc/cgo/testsanitizers/tsan2.go rename to misc/cgo/testsanitizers/src/tsan2.go diff --git a/misc/cgo/testsanitizers/tsan3.go b/misc/cgo/testsanitizers/src/tsan3.go similarity index 100% rename from misc/cgo/testsanitizers/tsan3.go rename to misc/cgo/testsanitizers/src/tsan3.go diff --git a/misc/cgo/testsanitizers/tsan4.go b/misc/cgo/testsanitizers/src/tsan4.go similarity index 100% rename from misc/cgo/testsanitizers/tsan4.go rename to misc/cgo/testsanitizers/src/tsan4.go diff --git a/misc/cgo/testsanitizers/tsan5.go b/misc/cgo/testsanitizers/src/tsan5.go similarity index 100% rename from misc/cgo/testsanitizers/tsan5.go rename to misc/cgo/testsanitizers/src/tsan5.go diff --git a/misc/cgo/testsanitizers/tsan6.go b/misc/cgo/testsanitizers/src/tsan6.go similarity index 100% rename from misc/cgo/testsanitizers/tsan6.go rename to misc/cgo/testsanitizers/src/tsan6.go diff --git a/misc/cgo/testsanitizers/tsan7.go b/misc/cgo/testsanitizers/src/tsan7.go similarity index 100% rename from misc/cgo/testsanitizers/tsan7.go rename to misc/cgo/testsanitizers/src/tsan7.go diff --git a/misc/cgo/testsanitizers/tsan8.go b/misc/cgo/testsanitizers/src/tsan8.go similarity index 100% rename from misc/cgo/testsanitizers/tsan8.go rename to misc/cgo/testsanitizers/src/tsan8.go diff --git a/misc/cgo/testsanitizers/tsan9.go b/misc/cgo/testsanitizers/src/tsan9.go similarity index 100% rename from misc/cgo/testsanitizers/tsan9.go rename to misc/cgo/testsanitizers/src/tsan9.go diff --git 
a/misc/cgo/testsanitizers/tsan_shared.go b/misc/cgo/testsanitizers/src/tsan_shared.go similarity index 100% rename from misc/cgo/testsanitizers/tsan_shared.go rename to misc/cgo/testsanitizers/src/tsan_shared.go diff --git a/misc/cgo/testsanitizers/test.bash b/misc/cgo/testsanitizers/test.bash deleted file mode 100755 index 9f80af6c507..00000000000 --- a/misc/cgo/testsanitizers/test.bash +++ /dev/null @@ -1,233 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2015 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# This directory is intended to test the use of Go with sanitizers -# like msan, asan, etc. See https://github.com/google/sanitizers . - -set -e - -# The sanitizers were originally developed with clang, so prefer it. -CC=cc -if test -x "$(type -p clang)"; then - CC=clang -fi -export CC - -if [ "$(sysctl -n vm.overcommit_memory)" = 2 ]; then - echo "skipping msan/tsan tests: vm.overcommit_memory=2" >&2 - exit 0 -fi - -msan=yes - -TMPDIR=${TMPDIR:-/tmp} -echo 'int main() { return 0; }' > ${TMPDIR}/testsanitizers$$.c -if $CC -fsanitize=memory -o ${TMPDIR}/testsanitizers$$ ${TMPDIR}/testsanitizers$$.c 2>&1 | grep "unrecognized" >& /dev/null; then - echo "skipping msan tests: $CC -fsanitize=memory not supported" - msan=no -elif ! test -x ${TMPDIR}/testsanitizers$$; then - echo "skipping msan tests: $CC -fsanitize-memory did not generate an executable" - msan=no -elif ! ${TMPDIR}/testsanitizers$$ >/dev/null 2>&1; then - echo "skipping msan tests: $CC -fsanitize-memory generates broken executable" - msan=no -fi -rm -f ${TMPDIR}/testsanitizers$$.* - -tsan=yes - -# The memory and thread sanitizers in versions of clang before 3.6 -# don't work with Go. 
-if test "$msan" = "yes" && $CC --version | grep clang >& /dev/null; then - ver=$($CC --version | sed -e 's/.* version \([0-9.-]*\).*/\1/') - major=$(echo $ver | sed -e 's/\([0-9]*\).*/\1/') - minor=$(echo $ver | sed -e 's/[0-9]*\.\([0-9]*\).*/\1/') - if test "$major" -lt 3 || test "$major" -eq 3 -a "$minor" -lt 6; then - echo "skipping msan/tsan tests: clang version $major.$minor (older than 3.6)" - msan=no - tsan=no - fi - - # Clang before 3.8 does not work with Linux at or after 4.1. - # golang.org/issue/12898. - if test "$msan" = "yes" -a "$major" -lt 3 || test "$major" -eq 3 -a "$minor" -lt 8; then - if test "$(uname)" = Linux; then - linuxver=$(uname -r) - linuxmajor=$(echo $linuxver | sed -e 's/\([0-9]*\).*/\1/') - linuxminor=$(echo $linuxver | sed -e 's/[0-9]*\.\([0-9]*\).*/\1/') - if test "$linuxmajor" -gt 4 || test "$linuxmajor" -eq 4 -a "$linuxminor" -ge 1; then - echo "skipping msan/tsan tests: clang version $major.$minor (older than 3.8) incompatible with linux version $linuxmajor.$linuxminor (4.1 or newer)" - msan=no - tsan=no - fi - fi - fi -fi - -status=0 - -testmsanshared() { - goos=$(go env GOOS) - suffix="-installsuffix testsanitizers" - libext="so" - if [ "$goos" = "darwin" ]; then - libext="dylib" - fi - go build -msan -buildmode=c-shared $suffix -o ${TMPDIR}/libmsanshared.$libext msan_shared.go - - echo 'int main() { return 0; }' > ${TMPDIR}/testmsanshared.c - $CC $(go env GOGCCFLAGS) -fsanitize=memory -o ${TMPDIR}/testmsanshared ${TMPDIR}/testmsanshared.c ${TMPDIR}/libmsanshared.$libext - - if ! LD_LIBRARY_PATH=. ${TMPDIR}/testmsanshared; then - echo "FAIL: msan_shared" - status=1 - fi - rm -f ${TMPDIR}/{testmsanshared,testmsanshared.c,libmsanshared.$libext} -} - -if test "$msan" = "yes"; then - if ! go build -msan std; then - echo "FAIL: build -msan std" - status=1 - fi - - if ! go run -msan msan.go; then - echo "FAIL: msan" - status=1 - fi - - if ! 
CGO_LDFLAGS="-fsanitize=memory" CGO_CPPFLAGS="-fsanitize=memory" go run -msan -a msan2.go; then - echo "FAIL: msan2 with -fsanitize=memory" - status=1 - fi - - if ! go run -msan -a msan2.go; then - echo "FAIL: msan2" - status=1 - fi - - if ! go run -msan msan3.go; then - echo "FAIL: msan3" - status=1 - fi - - if ! go run -msan msan4.go; then - echo "FAIL: msan4" - status=1 - fi - - if ! go run -msan msan5.go; then - echo "FAIL: msan5" - status=1 - fi - - if go run -msan msan_fail.go 2>/dev/null; then - echo "FAIL: msan_fail" - status=1 - fi - - testmsanshared -fi - -testtsanshared() { - goos=$(go env GOOS) - suffix="-installsuffix tsan" - libext="so" - if [ "$goos" = "darwin" ]; then - libext="dylib" - fi - go build -buildmode=c-shared $suffix -o ${TMPDIR}/libtsanshared.$libext tsan_shared.go - - echo 'int main() { return 0; }' > ${TMPDIR}/testtsanshared.c - $CC $(go env GOGCCFLAGS) -fsanitize=thread -o ${TMPDIR}/testtsanshared ${TMPDIR}/testtsanshared.c ${TMPDIR}/libtsanshared.$libext - - if ! LD_LIBRARY_PATH=. ${TMPDIR}/testtsanshared; then - echo "FAIL: tsan_shared" - status=1 - fi - rm -f ${TMPDIR}/{testtsanshared,testtsanshared.c,libtsanshared.$libext} -} - -if test "$tsan" = "yes"; then - echo 'int main() { return 0; }' > ${TMPDIR}/testsanitizers$$.c - ok=yes - if ! $CC -fsanitize=thread ${TMPDIR}/testsanitizers$$.c -o ${TMPDIR}/testsanitizers$$ &> ${TMPDIR}/testsanitizers$$.err; then - ok=no - fi - if grep "unrecognized" ${TMPDIR}/testsanitizers$$.err >& /dev/null; then - echo "skipping tsan tests: -fsanitize=thread not supported" - tsan=no - elif test "$ok" != "yes"; then - cat ${TMPDIR}/testsanitizers$$.err - echo "skipping tsan tests: -fsanitizer=thread build failed" - tsan=no - elif ! ${TMPDIR}/testsanitizers$$ 2>&1; then - echo "skipping tsan tests: running tsan program failed" - tsan=no - fi - rm -f ${TMPDIR}/testsanitizers$$* -fi - -# Run a TSAN test. 
-# $1 test name -# $2 environment variables -# $3 go run args -testtsan() { - err=${TMPDIR}/tsanerr$$.out - if ! env $2 go run $3 $1 2>$err; then - cat $err - echo "FAIL: $1" - status=1 - elif grep -i warning $err >/dev/null 2>&1; then - cat $err - echo "FAIL: $1" - status=1 - fi - rm -f $err -} - -if test "$tsan" = "yes"; then - testtsan tsan.go - testtsan tsan2.go - testtsan tsan3.go - testtsan tsan4.go - testtsan tsan8.go - testtsan tsan9.go - - # These tests are only reliable using clang or GCC version 7 or later. - # Otherwise runtime/cgo/libcgo.h can't tell whether TSAN is in use. - ok=false - clang=false - if ${CC} --version | grep clang >/dev/null 2>&1; then - ok=true - clang=true - else - ver=$($CC -dumpversion) - major=$(echo $ver | sed -e 's/\([0-9]*\).*/\1/') - if test "$major" -lt 7; then - echo "skipping remaining TSAN tests: GCC version $major (older than 7)" - else - ok=true - fi - fi - - if test "$ok" = "true"; then - # These tests require rebuilding os/user with -fsanitize=thread. - testtsan tsan5.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan" - testtsan tsan6.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan" - testtsan tsan7.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan" - - # The remaining tests reportedly hang when built with GCC; issue #21196. 
- if test "$clang" = "true"; then - testtsan tsan10.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan" - testtsan tsan11.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan" - testtsan tsan12.go "CGO_CFLAGS=-fsanitize=thread CGO_LDFLAGS=-fsanitize=thread" "-installsuffix=tsan" - fi - - testtsanshared - fi -fi - -exit $status diff --git a/misc/cgo/testsanitizers/tsan_test.go b/misc/cgo/testsanitizers/tsan_test.go new file mode 100644 index 00000000000..ec4e0033fb4 --- /dev/null +++ b/misc/cgo/testsanitizers/tsan_test.go @@ -0,0 +1,56 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sanitizers_test + +import ( + "strings" + "testing" +) + +func TestTSAN(t *testing.T) { + t.Parallel() + requireOvercommit(t) + config := configure("thread") + config.skipIfCSanitizerBroken(t) + + mustRun(t, config.goCmd("build", "std")) + + cases := []struct { + src string + needsRuntime bool + }{ + {src: "tsan.go"}, + {src: "tsan2.go"}, + {src: "tsan3.go"}, + {src: "tsan4.go"}, + {src: "tsan5.go", needsRuntime: true}, + {src: "tsan6.go", needsRuntime: true}, + {src: "tsan7.go", needsRuntime: true}, + {src: "tsan8.go"}, + {src: "tsan9.go"}, + {src: "tsan10.go", needsRuntime: true}, + {src: "tsan11.go", needsRuntime: true}, + {src: "tsan12.go", needsRuntime: true}, + } + for _, tc := range cases { + tc := tc + name := strings.TrimSuffix(tc.src, ".go") + t.Run(name, func(t *testing.T) { + t.Parallel() + + dir := newTempDir(t) + defer dir.RemoveAll(t) + + outPath := dir.Join(name) + mustRun(t, config.goCmd("build", "-o", outPath, srcPath(tc.src))) + + cmd := hangProneCmd(outPath) + if tc.needsRuntime { + config.skipIfRuntimeIncompatible(t) + } + mustRun(t, cmd) + }) + } +} diff --git a/misc/cgo/testshared/shared_test.go b/misc/cgo/testshared/shared_test.go index 9e682a2fb59..f1e8f0605b6 100644 --- 
a/misc/cgo/testshared/shared_test.go +++ b/misc/cgo/testshared/shared_test.go @@ -10,6 +10,7 @@ import ( "debug/elf" "encoding/binary" "errors" + "flag" "fmt" "go/build" "io" @@ -46,7 +47,7 @@ func run(t *testing.T, msg string, args ...string) { func goCmd(t *testing.T, args ...string) { newargs := []string{args[0], "-installsuffix=" + suffix} if testing.Verbose() { - newargs = append(newargs, "-v") + newargs = append(newargs, "-x") } newargs = append(newargs, args[1:]...) c := exec.Command("go", newargs...) @@ -57,6 +58,7 @@ func goCmd(t *testing.T, args ...string) { c.Stdout = os.Stdout c.Stderr = os.Stderr err = c.Run() + output = []byte("(output above)") } else { output, err = c.CombinedOutput() } @@ -161,6 +163,8 @@ func testMain(m *testing.M) (int, error) { } func TestMain(m *testing.M) { + flag.Parse() + // Some of the tests install binaries into a custom GOPATH. // That won't work if GOBIN is set. os.Unsetenv("GOBIN") @@ -461,13 +465,13 @@ func TestGopathShlib(t *testing.T) { // that is not mapped into memory. func testPkgListNote(t *testing.T, f *elf.File, note *note) { if note.section.Flags != 0 { - t.Errorf("package list section has flags %v", note.section.Flags) + t.Errorf("package list section has flags %v, want 0", note.section.Flags) } if isOffsetLoaded(f, note.section.Offset) { t.Errorf("package list section contained in PT_LOAD segment") } if note.desc != "depBase\n" { - t.Errorf("incorrect package list %q", note.desc) + t.Errorf("incorrect package list %q, want %q", note.desc, "depBase\n") } } @@ -476,7 +480,7 @@ func testPkgListNote(t *testing.T, f *elf.File, note *note) { // bytes into it. 
func testABIHashNote(t *testing.T, f *elf.File, note *note) { if note.section.Flags != elf.SHF_ALLOC { - t.Errorf("abi hash section has flags %v", note.section.Flags) + t.Errorf("abi hash section has flags %v, want SHF_ALLOC", note.section.Flags) } if !isOffsetLoaded(f, note.section.Offset) { t.Errorf("abihash section not contained in PT_LOAD segment") @@ -497,13 +501,13 @@ func testABIHashNote(t *testing.T, f *elf.File, note *note) { return } if elf.ST_BIND(hashbytes.Info) != elf.STB_LOCAL { - t.Errorf("%s has incorrect binding %v", hashbytes.Name, elf.ST_BIND(hashbytes.Info)) + t.Errorf("%s has incorrect binding %v, want STB_LOCAL", hashbytes.Name, elf.ST_BIND(hashbytes.Info)) } if f.Sections[hashbytes.Section] != note.section { - t.Errorf("%s has incorrect section %v", hashbytes.Name, f.Sections[hashbytes.Section].Name) + t.Errorf("%s has incorrect section %v, want %s", hashbytes.Name, f.Sections[hashbytes.Section].Name, note.section.Name) } if hashbytes.Value-note.section.Addr != 16 { - t.Errorf("%s has incorrect offset into section %d", hashbytes.Name, hashbytes.Value-note.section.Addr) + t.Errorf("%s has incorrect offset into section %d, want 16", hashbytes.Name, hashbytes.Value-note.section.Addr) } } @@ -511,14 +515,14 @@ func testABIHashNote(t *testing.T, f *elf.File, note *note) { // was linked against in an unmapped section. func testDepsNote(t *testing.T, f *elf.File, note *note) { if note.section.Flags != 0 { - t.Errorf("package list section has flags %v", note.section.Flags) + t.Errorf("package list section has flags %v, want 0", note.section.Flags) } if isOffsetLoaded(f, note.section.Offset) { t.Errorf("package list section contained in PT_LOAD segment") } // libdepBase.so just links against the lib containing the runtime. 
if note.desc != soname { - t.Errorf("incorrect dependency list %q", note.desc) + t.Errorf("incorrect dependency list %q, want %q", note.desc, soname) } } @@ -556,7 +560,7 @@ func TestNotes(t *testing.T) { abiHashNoteFound = true case 3: // ELF_NOTE_GODEPS_TAG if depsNoteFound { - t.Error("multiple abi hash notes") + t.Error("multiple depedency list notes") } testDepsNote(t, f, note) depsNoteFound = true @@ -594,6 +598,7 @@ func TestThreeGopathShlibs(t *testing.T) { // If gccgo is not available or not new enough call t.Skip. Otherwise, // return a build.Context that is set up for gccgo. func prepGccgo(t *testing.T) build.Context { + t.Skip("golang.org/issue/22472") gccgoName := os.Getenv("GCCGO") if gccgoName == "" { gccgoName = "gccgo" @@ -643,6 +648,8 @@ func TestGoPathShlibGccgo(t *testing.T) { // library with gccgo, another GOPATH package that depends on the first and an // executable that links the second library. func TestTwoGopathShlibsGccgo(t *testing.T) { + t.Skip("golang.org/issue/22224") + gccgoContext := prepGccgo(t) libgoRE := regexp.MustCompile("libgo.so.[0-9]+") @@ -696,18 +703,55 @@ func resetFileStamps() { reset(gorootInstallDir) } -// touch makes path newer than the "old" time stamp used by resetFileStamps. -func touch(path string) { +// touch changes path and returns a function that changes it back. +// It also sets the time of the file, so that we can see if it is rewritten. +func touch(t *testing.T, path string) (cleanup func()) { + data, err := ioutil.ReadFile(path) + if err != nil { + t.Fatal(err) + } + old := make([]byte, len(data)) + copy(old, data) + if bytes.HasPrefix(data, []byte("!\n")) { + // Change last digit of build ID. + // (Content ID in the new content-based build IDs.) 
+ const marker = `build id "` + i := bytes.Index(data, []byte(marker)) + if i < 0 { + t.Fatal("cannot find build id in archive") + } + j := bytes.IndexByte(data[i+len(marker):], '"') + if j < 0 { + t.Fatal("cannot find build id in archive") + } + i += len(marker) + j - 1 + if data[i] == 'a' { + data[i] = 'b' + } else { + data[i] = 'a' + } + } else { + // assume it's a text file + data = append(data, '\n') + } + if err := ioutil.WriteFile(path, data, 0666); err != nil { + t.Fatal(err) + } if err := os.Chtimes(path, nearlyNew, nearlyNew); err != nil { - log.Fatalf("os.Chtimes failed: %v", err) + t.Fatal(err) + } + return func() { + if err := ioutil.WriteFile(path, old, 0666); err != nil { + t.Fatal(err) + } } } // isNew returns if the path is newer than the time stamp used by touch. -func isNew(path string) bool { +func isNew(t *testing.T, path string) bool { fi, err := os.Stat(path) if err != nil { - log.Fatalf("os.Stat failed: %v", err) + t.Fatal(err) } return fi.ModTime().After(stampTime) } @@ -715,14 +759,16 @@ func isNew(path string) bool { // Fail unless path has been rebuilt (i.e. is newer than the time stamp used by // isNew) func AssertRebuilt(t *testing.T, msg, path string) { - if !isNew(path) { + t.Helper() + if !isNew(t, path) { t.Errorf("%s was not rebuilt (%s)", msg, path) } } // Fail if path has been rebuilt (i.e. is newer than the time stamp used by isNew) func AssertNotRebuilt(t *testing.T, msg, path string) { - if isNew(path) { + t.Helper() + if isNew(t, path) { t.Errorf("%s was rebuilt (%s)", msg, path) } } @@ -732,41 +778,55 @@ func TestRebuilding(t *testing.T) { goCmd(t, "install", "-linkshared", "exe") // If the source is newer than both the .a file and the .so, both are rebuilt. 
- resetFileStamps() - touch("src/depBase/dep.go") - goCmd(t, "install", "-linkshared", "exe") - AssertRebuilt(t, "new source", filepath.Join(gopathInstallDir, "depBase.a")) - AssertRebuilt(t, "new source", filepath.Join(gopathInstallDir, "libdepBase.so")) + t.Run("newsource", func(t *testing.T) { + resetFileStamps() + cleanup := touch(t, "src/depBase/dep.go") + defer func() { + cleanup() + goCmd(t, "install", "-linkshared", "exe") + }() + goCmd(t, "install", "-linkshared", "exe") + AssertRebuilt(t, "new source", filepath.Join(gopathInstallDir, "depBase.a")) + AssertRebuilt(t, "new source", filepath.Join(gopathInstallDir, "libdepBase.so")) + }) // If the .a file is newer than the .so, the .so is rebuilt (but not the .a) - resetFileStamps() - touch(filepath.Join(gopathInstallDir, "depBase.a")) - goCmd(t, "install", "-linkshared", "exe") - AssertNotRebuilt(t, "new .a file", filepath.Join(gopathInstallDir, "depBase.a")) - AssertRebuilt(t, "new .a file", filepath.Join(gopathInstallDir, "libdepBase.so")) + t.Run("newarchive", func(t *testing.T) { + resetFileStamps() + goCmd(t, "list", "-linkshared", "-f={{.ImportPath}} {{.Stale}} {{.StaleReason}} {{.Target}}", "depBase") + AssertNotRebuilt(t, "new .a file before build", filepath.Join(gopathInstallDir, "depBase.a")) + cleanup := touch(t, filepath.Join(gopathInstallDir, "depBase.a")) + defer func() { + cleanup() + goCmd(t, "install", "-v", "-linkshared", "exe") + }() + goCmd(t, "install", "-v", "-linkshared", "exe") + AssertNotRebuilt(t, "new .a file", filepath.Join(gopathInstallDir, "depBase.a")) + AssertRebuilt(t, "new .a file", filepath.Join(gopathInstallDir, "libdepBase.so")) + }) } -func appendFile(path, content string) { +func appendFile(t *testing.T, path, content string) { f, err := os.OpenFile(path, os.O_WRONLY|os.O_APPEND, 0660) if err != nil { - log.Fatalf("os.OpenFile failed: %v", err) + t.Fatalf("os.OpenFile failed: %v", err) } defer func() { err := f.Close() if err != nil { - log.Fatalf("f.Close failed: %v", 
err) + t.Fatalf("f.Close failed: %v", err) } }() _, err = f.WriteString(content) if err != nil { - log.Fatalf("f.WriteString failed: %v", err) + t.Fatalf("f.WriteString failed: %v", err) } } -func writeFile(path, content string) { +func writeFile(t *testing.T, path, content string) { err := ioutil.WriteFile(path, []byte(content), 0644) if err != nil { - log.Fatalf("ioutil.WriteFile failed: %v", err) + t.Fatalf("ioutil.WriteFile failed: %v", err) } } @@ -780,7 +840,7 @@ func TestABIChecking(t *testing.T) { // some senses but suffices for the narrow definition of ABI compatibility the // toolchain uses today. resetFileStamps() - appendFile("src/depBase/dep.go", "func ABIBreak() {}\n") + appendFile(t, "src/depBase/dep.go", "func ABIBreak() {}\n") goCmd(t, "install", "-buildmode=shared", "-linkshared", "depBase") c := exec.Command("./bin/exe") output, err := c.CombinedOutput() @@ -811,7 +871,7 @@ func TestABIChecking(t *testing.T) { // function) and rebuild libdepBase.so, exe still works, even if new function // is in a file by itself. resetFileStamps() - writeFile("src/depBase/dep2.go", "package depBase\nfunc noABIBreak() {}\n") + writeFile(t, "src/depBase/dep2.go", "package depBase\nfunc noABIBreak() {}\n") goCmd(t, "install", "-buildmode=shared", "-linkshared", "depBase") run(t, "after non-ABI breaking change", "./bin/exe") } @@ -838,3 +898,12 @@ func TestInterface(t *testing.T) { goCmd(t, "install", "-linkshared", "iface") run(t, "running type/itab uniqueness tester", "./bin/iface") } + +// Access a global variable from a library. 
+func TestGlobal(t *testing.T) { + goCmd(t, "install", "-buildmode=shared", "-linkshared", "globallib") + goCmd(t, "install", "-linkshared", "global") + run(t, "global executable", "./bin/global") + AssertIsLinkedTo(t, "./bin/global", soname) + AssertHasRPath(t, "./bin/global", gorootInstallDir) +} diff --git a/misc/cgo/testshared/src/depBase/dep.go b/misc/cgo/testshared/src/depBase/dep.go index 9f86710db01..569c210aa14 100644 --- a/misc/cgo/testshared/src/depBase/dep.go +++ b/misc/cgo/testshared/src/depBase/dep.go @@ -22,7 +22,7 @@ type Dep struct { func (d *Dep) Method() int { // This code below causes various go.itab.* symbols to be generated in // the shared library. Similar code in ../exe/exe.go results in - // exercising https://github.com/golang/go/issues/17594 + // exercising https://golang.org/issues/17594 reflect.TypeOf(os.Stdout).Elem() return 10 } diff --git a/misc/cgo/testshared/src/division/division.go b/misc/cgo/testshared/src/division/division.go index a0b11a55e22..bb5fc984602 100644 --- a/misc/cgo/testshared/src/division/division.go +++ b/misc/cgo/testshared/src/division/division.go @@ -14,4 +14,4 @@ func main() { if a != 8 { panic("FAIL") } -} \ No newline at end of file +} diff --git a/misc/cgo/testshared/src/exe/exe.go b/misc/cgo/testshared/src/exe/exe.go index 84302a811f0..bd864d88ad8 100644 --- a/misc/cgo/testshared/src/exe/exe.go +++ b/misc/cgo/testshared/src/exe/exe.go @@ -25,7 +25,7 @@ func main() { defer depBase.ImplementedInAsm() // This code below causes various go.itab.* symbols to be generated in // the executable. 
Similar code in ../depBase/dep.go results in - // exercising https://github.com/golang/go/issues/17594 + // exercising https://golang.org/issues/17594 reflect.TypeOf(os.Stdout).Elem() runtime.GC() depBase.V = depBase.F() + 1 diff --git a/misc/cgo/testshared/src/global/main.go b/misc/cgo/testshared/src/global/main.go new file mode 100644 index 00000000000..94e7f247dee --- /dev/null +++ b/misc/cgo/testshared/src/global/main.go @@ -0,0 +1,71 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "globallib" +) + +//go:noinline +func testLoop() { + for i, s := range globallib.Data { + if s != int64(i) { + panic("testLoop: mismatch") + } + } +} + +//go:noinline +func ptrData() *[1<<20 + 10]int64 { + return &globallib.Data +} + +//go:noinline +func testMediumOffset() { + for i, s := range globallib.Data[1<<16-2:] { + if s != int64(i)+1<<16-2 { + panic("testMediumOffset: index mismatch") + } + } + + x := globallib.Data[1<<16-1] + if x != 1<<16-1 { + panic("testMediumOffset: direct mismatch") + } + + y := &globallib.Data[1<<16-3] + if y != &ptrData()[1<<16-3] { + panic("testMediumOffset: address mismatch") + } +} + +//go:noinline +func testLargeOffset() { + for i, s := range globallib.Data[1<<20:] { + if s != int64(i)+1<<20 { + panic("testLargeOffset: index mismatch") + } + } + + x := globallib.Data[1<<20+1] + if x != 1<<20+1 { + panic("testLargeOffset: direct mismatch") + } + + y := &globallib.Data[1<<20+2] + if y != &ptrData()[1<<20+2] { + panic("testLargeOffset: address mismatch") + } +} + +func main() { + testLoop() + + // SSA rules commonly merge offsets into addresses. These + // tests access global data in different ways to try + // and exercise different SSA rules. 
+ testMediumOffset() + testLargeOffset() +} diff --git a/misc/cgo/testshared/src/globallib/global.go b/misc/cgo/testshared/src/globallib/global.go new file mode 100644 index 00000000000..b4372a2e9e2 --- /dev/null +++ b/misc/cgo/testshared/src/globallib/global.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package globallib + +// Data is large enough to that offsets into it do not fit into +// 16-bit or 20-bit immediates. Ideally we'd also try and overrun +// 32-bit immediates, but that requires the test machine to have +// too much memory. +var Data [1<<20 + 10]int64 + +func init() { + for i := range Data { + Data[i] = int64(i) + } +} diff --git a/misc/ios/go_darwin_arm_exec.go b/misc/ios/go_darwin_arm_exec.go index e84e513f933..56dbb009a18 100644 --- a/misc/ios/go_darwin_arm_exec.go +++ b/misc/ios/go_darwin_arm_exec.go @@ -49,6 +49,7 @@ var ( appID string teamID string bundleID string + deviceID string ) // lock is a file lock to serialize iOS runs. It is global to avoid the @@ -77,6 +78,9 @@ func main() { // https://developer.apple.com/membercenter/index.action#accountSummary as Team ID. teamID = getenv("GOIOS_TEAM_ID") + // Device IDs as listed with ios-deploy -c. + deviceID = os.Getenv("GOIOS_DEVICE_ID") + parts := strings.SplitN(appID, ".", 2) // For compatibility with the old builders, use a fallback bundle ID bundleID = "golang.gotest" @@ -96,7 +100,7 @@ func main() { // // The lock file is never deleted, to avoid concurrent locks on distinct // files with the same path. 
- lockName := filepath.Join(os.TempDir(), "go_darwin_arm_exec.lock") + lockName := filepath.Join(os.TempDir(), "go_darwin_arm_exec-"+deviceID+".lock") lock, err = os.OpenFile(lockName, os.O_CREATE|os.O_RDONLY, 0666) if err != nil { log.Fatal(err) @@ -228,6 +232,16 @@ func run(bin string, args []string) (err error) { os.Stdout.Write(b) }() + cond := func(out *buf) bool { + i0 := s.out.LastIndex([]byte("(lldb)")) + i1 := s.out.LastIndex([]byte("fruitstrap")) + i2 := s.out.LastIndex([]byte(" connect")) + return i0 > 0 && i1 > 0 && i2 > 0 + } + if err := s.wait("lldb start", cond, 15*time.Second); err != nil { + panic(waitPanic{err}) + } + // Script LLDB. Oh dear. s.do(`process handle SIGHUP --stop false --pass true --notify false`) s.do(`process handle SIGPIPE --stop false --pass true --notify false`) @@ -294,7 +308,7 @@ func newSession(appdir string, args []string, opts options) (*lldbSession, error if err != nil { return nil, err } - s.cmd = exec.Command( + cmdArgs := []string{ // lldb tries to be clever with terminals. // So we wrap it in script(1) and be clever // right back at it. @@ -307,9 +321,13 @@ func newSession(appdir string, args []string, opts options) (*lldbSession, error "-u", "-r", "-n", - `--args=`+strings.Join(args, " ")+``, + `--args=` + strings.Join(args, " ") + ``, "--bundle", appdir, - ) + } + if deviceID != "" { + cmdArgs = append(cmdArgs, "--id", deviceID) + } + s.cmd = exec.Command(cmdArgs[0], cmdArgs[1:]...) 
if debug { log.Println(strings.Join(s.cmd.Args, " ")) } @@ -340,15 +358,6 @@ func newSession(appdir string, args []string, opts options) (*lldbSession, error s.exited <- s.cmd.Wait() }() - cond := func(out *buf) bool { - i0 := s.out.LastIndex([]byte("(lldb)")) - i1 := s.out.LastIndex([]byte("fruitstrap")) - i2 := s.out.LastIndex([]byte(" connect")) - return i0 > 0 && i1 > 0 && i2 > 0 - } - if err := s.wait("lldb start", cond, 15*time.Second); err != nil { - panic(waitPanic{err}) - } return s, nil } @@ -377,6 +386,9 @@ func (s *lldbSession) wait(reason string, cond func(out *buf) bool, extraTimeout } return fmt.Errorf("test timeout (%s)", reason) case <-doTimedout: + if p := s.cmd.Process; p != nil { + p.Kill() + } return fmt.Errorf("command timeout (%s for %v)", reason, doTimeout) case err := <-s.exited: return fmt.Errorf("exited (%s: %v)", reason, err) diff --git a/misc/nacl/testzip.proto b/misc/nacl/testzip.proto index 8bf25400cbc..f15a2ab2246 100644 --- a/misc/nacl/testzip.proto +++ b/misc/nacl/testzip.proto @@ -22,6 +22,9 @@ go src=.. internal syntax parser.go + cover + testdata + + doc main.go pkg.go @@ -31,6 +34,9 @@ go src=.. internal objfile objfile.go + buildid + testdata + + gofmt gofmt.go gofmt_test.go @@ -64,6 +70,10 @@ go src=.. armasm testdata + + arm64 + arm64asm + testdata + + x86 x86asm testdata diff --git a/misc/swig/stdio/file.go b/misc/swig/stdio/file.go new file mode 100644 index 00000000000..a582f776f6c --- /dev/null +++ b/misc/swig/stdio/file.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is here just to cause problems. +// file.swig turns into a file also named file.go. +// Make sure cmd/go keeps them separate +// when both are passed to cgo. 
+ +package file + +//int F(void) { return 1; } +import "C" + +func F() int { return int(C.F()) } diff --git a/misc/swig/stdio/file_test.go b/misc/swig/stdio/file_test.go index b1a520e6bc9..aea92aafd55 100644 --- a/misc/swig/stdio/file_test.go +++ b/misc/swig/stdio/file_test.go @@ -20,3 +20,9 @@ func TestRead(t *testing.T) { t.Error("fclose failed") } } + +func TestF(t *testing.T) { + if x := F(); x != 1 { + t.Fatalf("x = %d, want 1", x) + } +} diff --git a/misc/trace/trace_viewer_full.html b/misc/trace/trace_viewer_full.html new file mode 100644 index 00000000000..f0d2e60b185 --- /dev/null +++ b/misc/trace/trace_viewer_full.html @@ -0,0 +1,9525 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/misc/trace/trace_viewer_lean.html b/misc/trace/trace_viewer_lean.html deleted file mode 100644 index 076cb4359a4..00000000000 --- a/misc/trace/trace_viewer_lean.html +++ /dev/null @@ -1,7758 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/src/archive/tar/common.go b/src/archive/tar/common.go index d49c5c3fd9e..4a2c173bf3a 100644 --- a/src/archive/tar/common.go 
+++ b/src/archive/tar/common.go @@ -3,20 +3,22 @@ // license that can be found in the LICENSE file. // Package tar implements access to tar archives. -// It aims to cover most of the variations, including those produced -// by GNU and BSD tars. // -// References: -// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5 -// http://www.gnu.org/software/tar/manual/html_node/Standard.html -// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html +// Tape archives (tar) are a file format for storing a sequence of files that +// can be read and written in a streaming manner. +// This package aims to cover most variations of the format, +// including those produced by GNU and BSD tar tools. package tar import ( "errors" "fmt" + "math" "os" "path" + "reflect" + "strconv" + "strings" "time" ) @@ -24,42 +26,500 @@ import ( // architectures. If a large value is encountered when decoding, the result // stored in Header will be the truncated version. -// Header type flags. -const ( - TypeReg = '0' // regular file - TypeRegA = '\x00' // regular file - TypeLink = '1' // hard link - TypeSymlink = '2' // symbolic link - TypeChar = '3' // character device node - TypeBlock = '4' // block device node - TypeDir = '5' // directory - TypeFifo = '6' // fifo node - TypeCont = '7' // reserved - TypeXHeader = 'x' // extended header - TypeXGlobalHeader = 'g' // global extended header - TypeGNULongName = 'L' // Next file has a long name - TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name - TypeGNUSparse = 'S' // sparse file +var ( + ErrHeader = errors.New("archive/tar: invalid tar header") + ErrWriteTooLong = errors.New("archive/tar: write too long") + ErrFieldTooLong = errors.New("archive/tar: header field too long") + ErrWriteAfterClose = errors.New("archive/tar: write after close") + errMissData = errors.New("archive/tar: sparse file references non-existent data") + errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data") + 
errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole") ) +type headerError []string + +func (he headerError) Error() string { + const prefix = "archive/tar: cannot encode header" + var ss []string + for _, s := range he { + if s != "" { + ss = append(ss, s) + } + } + if len(ss) == 0 { + return prefix + } + return fmt.Sprintf("%s: %v", prefix, strings.Join(ss, "; and ")) +} + +// Type flags for Header.Typeflag. +const ( + // Type '0' indicates a regular file. + TypeReg = '0' + TypeRegA = '\x00' // For legacy support; use TypeReg instead + + // Type '1' to '6' are header-only flags and may not have a data body. + TypeLink = '1' // Hard link + TypeSymlink = '2' // Symbolic link + TypeChar = '3' // Character device node + TypeBlock = '4' // Block device node + TypeDir = '5' // Directory + TypeFifo = '6' // FIFO node + + // Type '7' is reserved. + TypeCont = '7' + + // Type 'x' is used by the PAX format to store key-value records that + // are only relevant to the next file. + // This package transparently handles these types. + TypeXHeader = 'x' + + // Type 'g' is used by the PAX format to store key-value records that + // are relevant to all subsequent files. + // This package only supports parsing and composing such headers, + // but does not currently support persisting the global state across files. + TypeXGlobalHeader = 'g' + + // Type 'S' indicates a sparse file in the GNU format. + TypeGNUSparse = 'S' + + // Types 'L' and 'K' are used by the GNU format for a meta file + // used to store the path or link name for the next file. + // This package transparently handles these types. + TypeGNULongName = 'L' + TypeGNULongLink = 'K' +) + +// Keywords for PAX extended header records. 
+const ( + paxNone = "" // Indicates that no PAX key is suitable + paxPath = "path" + paxLinkpath = "linkpath" + paxSize = "size" + paxUid = "uid" + paxGid = "gid" + paxUname = "uname" + paxGname = "gname" + paxMtime = "mtime" + paxAtime = "atime" + paxCtime = "ctime" // Removed from later revision of PAX spec, but was valid + paxCharset = "charset" // Currently unused + paxComment = "comment" // Currently unused + + paxSchilyXattr = "SCHILY.xattr." + + // Keywords for GNU sparse files in a PAX extended header. + paxGNUSparse = "GNU.sparse." + paxGNUSparseNumBlocks = "GNU.sparse.numblocks" + paxGNUSparseOffset = "GNU.sparse.offset" + paxGNUSparseNumBytes = "GNU.sparse.numbytes" + paxGNUSparseMap = "GNU.sparse.map" + paxGNUSparseName = "GNU.sparse.name" + paxGNUSparseMajor = "GNU.sparse.major" + paxGNUSparseMinor = "GNU.sparse.minor" + paxGNUSparseSize = "GNU.sparse.size" + paxGNUSparseRealSize = "GNU.sparse.realsize" +) + +// basicKeys is a set of the PAX keys for which we have built-in support. +// This does not contain "charset" or "comment", which are both PAX-specific, +// so adding them as first-class features of Header is unlikely. +// Users can use the PAXRecords field to set it themselves. +var basicKeys = map[string]bool{ + paxPath: true, paxLinkpath: true, paxSize: true, paxUid: true, paxGid: true, + paxUname: true, paxGname: true, paxMtime: true, paxAtime: true, paxCtime: true, +} + // A Header represents a single header in a tar archive. // Some fields may not be populated. +// +// For forward compatibility, users that retrieve a Header from Reader.Next, +// mutate it in some ways, and then pass it back to Writer.WriteHeader +// should do so by creating a new Header and copying the fields +// that they are interested in preserving. 
type Header struct { - Name string // name of header file entry - Mode int64 // permission and mode bits - Uid int // user id of owner - Gid int // group id of owner - Size int64 // length in bytes - ModTime time.Time // modified time - Typeflag byte // type of header entry - Linkname string // target name of link - Uname string // user name of owner - Gname string // group name of owner - Devmajor int64 // major number of character or block device - Devminor int64 // minor number of character or block device - AccessTime time.Time // access time - ChangeTime time.Time // status change time - Xattrs map[string]string + Typeflag byte // Type of header entry (should be TypeReg for most files) + + Name string // Name of file entry + Linkname string // Target name of link (valid for TypeLink or TypeSymlink) + + Size int64 // Logical file size in bytes + Mode int64 // Permission and mode bits + Uid int // User ID of owner + Gid int // Group ID of owner + Uname string // User name of owner + Gname string // Group name of owner + + // If the Format is unspecified, then Writer.WriteHeader rounds ModTime + // to the nearest second and ignores the AccessTime and ChangeTime fields. + // + // To use AccessTime or ChangeTime, specify the Format as PAX or GNU. + // To use sub-second resolution, specify the Format as PAX. + ModTime time.Time // Modification time + AccessTime time.Time // Access time (requires either PAX or GNU support) + ChangeTime time.Time // Change time (requires either PAX or GNU support) + + Devmajor int64 // Major device number (valid for TypeChar or TypeBlock) + Devminor int64 // Minor device number (valid for TypeChar or TypeBlock) + + // Xattrs stores extended attributes as PAX records under the + // "SCHILY.xattr." namespace. 
+ // + // The following are semantically equivalent: + // h.Xattrs[key] = value + // h.PAXRecords["SCHILY.xattr."+key] = value + // + // When Writer.WriteHeader is called, the contents of Xattrs will take + // precedence over those in PAXRecords. + // + // Deprecated: Use PAXRecords instead. + Xattrs map[string]string + + // PAXRecords is a map of PAX extended header records. + // + // User-defined records should have keys of the following form: + // VENDOR.keyword + // Where VENDOR is some namespace in all uppercase, and keyword may + // not contain the '=' character (e.g., "GOLANG.pkg.version"). + // The key and value should be non-empty UTF-8 strings. + // + // When Writer.WriteHeader is called, PAX records derived from the + // the other fields in Header take precedence over PAXRecords. + PAXRecords map[string]string + + // Format specifies the format of the tar header. + // + // This is set by Reader.Next as a best-effort guess at the format. + // Since the Reader liberally reads some non-compliant files, + // it is possible for this to be FormatUnknown. + // + // If the format is unspecified when Writer.WriteHeader is called, + // then it uses the first format (in the order of USTAR, PAX, GNU) + // capable of encoding this Header (see Format). + Format Format +} + +// sparseEntry represents a Length-sized fragment at Offset in the file. +type sparseEntry struct{ Offset, Length int64 } + +func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length } + +// A sparse file can be represented as either a sparseDatas or a sparseHoles. +// As long as the total size is known, they are equivalent and one can be +// converted to the other form and back. The various tar formats with sparse +// file support represent sparse files in the sparseDatas form. That is, they +// specify the fragments in the file that has data, and treat everything else as +// having zero bytes. As such, the encoding and decoding logic in this package +// deals with sparseDatas. 
+// +// However, the external API uses sparseHoles instead of sparseDatas because the +// zero value of sparseHoles logically represents a normal file (i.e., there are +// no holes in it). On the other hand, the zero value of sparseDatas implies +// that the file has no data in it, which is rather odd. +// +// As an example, if the underlying raw file contains the 10-byte data: +// var compactFile = "abcdefgh" +// +// And the sparse map has the following entries: +// var spd sparseDatas = []sparseEntry{ +// {Offset: 2, Length: 5}, // Data fragment for 2..6 +// {Offset: 18, Length: 3}, // Data fragment for 18..20 +// } +// var sph sparseHoles = []sparseEntry{ +// {Offset: 0, Length: 2}, // Hole fragment for 0..1 +// {Offset: 7, Length: 11}, // Hole fragment for 7..17 +// {Offset: 21, Length: 4}, // Hole fragment for 21..24 +// } +// +// Then the content of the resulting sparse file with a Header.Size of 25 is: +// var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4 +type ( + sparseDatas []sparseEntry + sparseHoles []sparseEntry +) + +// validateSparseEntries reports whether sp is a valid sparse map. +// It does not matter whether sp represents data fragments or hole fragments. +func validateSparseEntries(sp []sparseEntry, size int64) bool { + // Validate all sparse entries. These are the same checks as performed by + // the BSD tar utility. 
+ if size < 0 { + return false + } + var pre sparseEntry + for _, cur := range sp { + switch { + case cur.Offset < 0 || cur.Length < 0: + return false // Negative values are never okay + case cur.Offset > math.MaxInt64-cur.Length: + return false // Integer overflow with large length + case cur.endOffset() > size: + return false // Region extends beyond the actual size + case pre.endOffset() > cur.Offset: + return false // Regions cannot overlap and must be in order + } + pre = cur + } + return true +} + +// alignSparseEntries mutates src and returns dst where each fragment's +// starting offset is aligned up to the nearest block edge, and each +// ending offset is aligned down to the nearest block edge. +// +// Even though the Go tar Reader and the BSD tar utility can handle entries +// with arbitrary offsets and lengths, the GNU tar utility can only handle +// offsets and lengths that are multiples of blockSize. +func alignSparseEntries(src []sparseEntry, size int64) []sparseEntry { + dst := src[:0] + for _, s := range src { + pos, end := s.Offset, s.endOffset() + pos += blockPadding(+pos) // Round-up to nearest blockSize + if end != size { + end -= blockPadding(-end) // Round-down to nearest blockSize + } + if pos < end { + dst = append(dst, sparseEntry{Offset: pos, Length: end - pos}) + } + } + return dst +} + +// invertSparseEntries converts a sparse map from one form to the other. +// If the input is sparseHoles, then it will output sparseDatas and vice-versa. +// The input must have been already validated. 
+// +// This function mutates src and returns a normalized map where: +// * adjacent fragments are coalesced together +// * only the last fragment may be empty +// * the endOffset of the last fragment is the total size +func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry { + dst := src[:0] + var pre sparseEntry + for _, cur := range src { + if cur.Length == 0 { + continue // Skip empty fragments + } + pre.Length = cur.Offset - pre.Offset + if pre.Length > 0 { + dst = append(dst, pre) // Only add non-empty fragments + } + pre.Offset = cur.endOffset() + } + pre.Length = size - pre.Offset // Possibly the only empty fragment + return append(dst, pre) +} + +// fileState tracks the number of logical (includes sparse holes) and physical +// (actual in tar archive) bytes remaining for the current file. +// +// Invariant: LogicalRemaining >= PhysicalRemaining +type fileState interface { + LogicalRemaining() int64 + PhysicalRemaining() int64 +} + +// allowedFormats determines which formats can be used. +// The value returned is the logical OR of multiple possible formats. +// If the value is FormatUnknown, then the input Header cannot be encoded +// and an error is returned explaining why. +// +// As a by-product of checking the fields, this function returns paxHdrs, which +// contain all fields that could not be directly encoded. +// A value receiver ensures that this method does not mutate the source Header. +func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err error) { + format = FormatUSTAR | FormatPAX | FormatGNU + paxHdrs = make(map[string]string) + + var whyNoUSTAR, whyNoPAX, whyNoGNU string + var preferPAX bool // Prefer PAX over USTAR + verifyString := func(s string, size int, name, paxKey string) { + // NUL-terminator is optional for path and linkpath. + // Technically, it is required for uname and gname, + // but neither GNU nor BSD tar checks for it. 
+ tooLong := len(s) > size + allowLongGNU := paxKey == paxPath || paxKey == paxLinkpath + if hasNUL(s) || (tooLong && !allowLongGNU) { + whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%q", name, s) + format.mustNotBe(FormatGNU) + } + if !isASCII(s) || tooLong { + canSplitUSTAR := paxKey == paxPath + if _, _, ok := splitUSTARPath(s); !canSplitUSTAR || !ok { + whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%q", name, s) + format.mustNotBe(FormatUSTAR) + } + if paxKey == paxNone { + whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%q", name, s) + format.mustNotBe(FormatPAX) + } else { + paxHdrs[paxKey] = s + } + } + if v, ok := h.PAXRecords[paxKey]; ok && v == s { + paxHdrs[paxKey] = v + } + } + verifyNumeric := func(n int64, size int, name, paxKey string) { + if !fitsInBase256(size, n) { + whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%d", name, n) + format.mustNotBe(FormatGNU) + } + if !fitsInOctal(size, n) { + whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%d", name, n) + format.mustNotBe(FormatUSTAR) + if paxKey == paxNone { + whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%d", name, n) + format.mustNotBe(FormatPAX) + } else { + paxHdrs[paxKey] = strconv.FormatInt(n, 10) + } + } + if v, ok := h.PAXRecords[paxKey]; ok && v == strconv.FormatInt(n, 10) { + paxHdrs[paxKey] = v + } + } + verifyTime := func(ts time.Time, size int, name, paxKey string) { + if ts.IsZero() { + return // Always okay + } + if !fitsInBase256(size, ts.Unix()) { + whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%v", name, ts) + format.mustNotBe(FormatGNU) + } + isMtime := paxKey == paxMtime + fitsOctal := fitsInOctal(size, ts.Unix()) + if (isMtime && !fitsOctal) || !isMtime { + whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%v", name, ts) + format.mustNotBe(FormatUSTAR) + } + needsNano := ts.Nanosecond() != 0 + if !isMtime || !fitsOctal || needsNano { + preferPAX = true // USTAR may truncate sub-second measurements + if paxKey == paxNone { + whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%v", name, ts) 
+ format.mustNotBe(FormatPAX) + } else { + paxHdrs[paxKey] = formatPAXTime(ts) + } + } + if v, ok := h.PAXRecords[paxKey]; ok && v == formatPAXTime(ts) { + paxHdrs[paxKey] = v + } + } + + // Check basic fields. + var blk block + v7 := blk.V7() + ustar := blk.USTAR() + gnu := blk.GNU() + verifyString(h.Name, len(v7.Name()), "Name", paxPath) + verifyString(h.Linkname, len(v7.LinkName()), "Linkname", paxLinkpath) + verifyString(h.Uname, len(ustar.UserName()), "Uname", paxUname) + verifyString(h.Gname, len(ustar.GroupName()), "Gname", paxGname) + verifyNumeric(h.Mode, len(v7.Mode()), "Mode", paxNone) + verifyNumeric(int64(h.Uid), len(v7.UID()), "Uid", paxUid) + verifyNumeric(int64(h.Gid), len(v7.GID()), "Gid", paxGid) + verifyNumeric(h.Size, len(v7.Size()), "Size", paxSize) + verifyNumeric(h.Devmajor, len(ustar.DevMajor()), "Devmajor", paxNone) + verifyNumeric(h.Devminor, len(ustar.DevMinor()), "Devminor", paxNone) + verifyTime(h.ModTime, len(v7.ModTime()), "ModTime", paxMtime) + verifyTime(h.AccessTime, len(gnu.AccessTime()), "AccessTime", paxAtime) + verifyTime(h.ChangeTime, len(gnu.ChangeTime()), "ChangeTime", paxCtime) + + // Check for header-only types. + var whyOnlyPAX, whyOnlyGNU string + switch h.Typeflag { + case TypeReg, TypeChar, TypeBlock, TypeFifo, TypeGNUSparse: + // Exclude TypeLink and TypeSymlink, since they may reference directories. 
+ if strings.HasSuffix(h.Name, "/") { + return FormatUnknown, nil, headerError{"filename may not have trailing slash"} + } + case TypeXHeader, TypeGNULongName, TypeGNULongLink: + return FormatUnknown, nil, headerError{"cannot manually encode TypeXHeader, TypeGNULongName, or TypeGNULongLink headers"} + case TypeXGlobalHeader: + h2 := Header{Name: h.Name, Typeflag: h.Typeflag, Xattrs: h.Xattrs, PAXRecords: h.PAXRecords, Format: h.Format} + if !reflect.DeepEqual(h, h2) { + return FormatUnknown, nil, headerError{"only PAXRecords should be set for TypeXGlobalHeader"} + } + whyOnlyPAX = "only PAX supports TypeXGlobalHeader" + format.mayOnlyBe(FormatPAX) + } + if !isHeaderOnlyType(h.Typeflag) && h.Size < 0 { + return FormatUnknown, nil, headerError{"negative size on header-only type"} + } + + // Check PAX records. + if len(h.Xattrs) > 0 { + for k, v := range h.Xattrs { + paxHdrs[paxSchilyXattr+k] = v + } + whyOnlyPAX = "only PAX supports Xattrs" + format.mayOnlyBe(FormatPAX) + } + if len(h.PAXRecords) > 0 { + for k, v := range h.PAXRecords { + switch _, exists := paxHdrs[k]; { + case exists: + continue // Do not overwrite existing records + case h.Typeflag == TypeXGlobalHeader: + paxHdrs[k] = v // Copy all records + case !basicKeys[k] && !strings.HasPrefix(k, paxGNUSparse): + paxHdrs[k] = v // Ignore local records that may conflict + } + } + whyOnlyPAX = "only PAX supports PAXRecords" + format.mayOnlyBe(FormatPAX) + } + for k, v := range paxHdrs { + if !validPAXRecord(k, v) { + return FormatUnknown, nil, headerError{fmt.Sprintf("invalid PAX record: %q", k+" = "+v)} + } + } + + // TODO(dsnet): Re-enable this when adding sparse support. + // See https://golang.org/issue/22735 + /* + // Check sparse files. 
+ if len(h.SparseHoles) > 0 || h.Typeflag == TypeGNUSparse { + if isHeaderOnlyType(h.Typeflag) { + return FormatUnknown, nil, headerError{"header-only type cannot be sparse"} + } + if !validateSparseEntries(h.SparseHoles, h.Size) { + return FormatUnknown, nil, headerError{"invalid sparse holes"} + } + if h.Typeflag == TypeGNUSparse { + whyOnlyGNU = "only GNU supports TypeGNUSparse" + format.mayOnlyBe(FormatGNU) + } else { + whyNoGNU = "GNU supports sparse files only with TypeGNUSparse" + format.mustNotBe(FormatGNU) + } + whyNoUSTAR = "USTAR does not support sparse files" + format.mustNotBe(FormatUSTAR) + } + */ + + // Check desired format. + if wantFormat := h.Format; wantFormat != FormatUnknown { + if wantFormat.has(FormatPAX) && !preferPAX { + wantFormat.mayBe(FormatUSTAR) // PAX implies USTAR allowed too + } + format.mayOnlyBe(wantFormat) // Set union of formats allowed and format wanted + } + if format == FormatUnknown { + switch h.Format { + case FormatUSTAR: + err = headerError{"Format specifies USTAR", whyNoUSTAR, whyOnlyPAX, whyOnlyGNU} + case FormatPAX: + err = headerError{"Format specifies PAX", whyNoPAX, whyOnlyGNU} + case FormatGNU: + err = headerError{"Format specifies GNU", whyNoGNU, whyOnlyPAX} + default: + err = headerError{whyNoUSTAR, whyNoPAX, whyNoGNU, whyOnlyPAX, whyOnlyGNU} + } + } + return format, paxHdrs, err } // FileInfo returns an os.FileInfo for the Header. @@ -92,63 +552,43 @@ func (fi headerFileInfo) Mode() (mode os.FileMode) { // Set setuid, setgid and sticky bits. if fi.h.Mode&c_ISUID != 0 { - // setuid mode |= os.ModeSetuid } if fi.h.Mode&c_ISGID != 0 { - // setgid mode |= os.ModeSetgid } if fi.h.Mode&c_ISVTX != 0 { - // sticky mode |= os.ModeSticky } - // Set file mode bits. - // clear perm, setuid, setgid and sticky bits. - m := os.FileMode(fi.h.Mode) &^ 07777 - if m == c_ISDIR { - // directory + // Set file mode bits; clear perm, setuid, setgid, and sticky bits. 
+ switch m := os.FileMode(fi.h.Mode) &^ 07777; m { + case c_ISDIR: mode |= os.ModeDir - } - if m == c_ISFIFO { - // named pipe (FIFO) + case c_ISFIFO: mode |= os.ModeNamedPipe - } - if m == c_ISLNK { - // symbolic link + case c_ISLNK: mode |= os.ModeSymlink - } - if m == c_ISBLK { - // device file + case c_ISBLK: mode |= os.ModeDevice - } - if m == c_ISCHR { - // Unix character device + case c_ISCHR: mode |= os.ModeDevice mode |= os.ModeCharDevice - } - if m == c_ISSOCK { - // Unix domain socket + case c_ISSOCK: mode |= os.ModeSocket } switch fi.h.Typeflag { case TypeSymlink: - // symbolic link mode |= os.ModeSymlink case TypeChar: - // character device node mode |= os.ModeDevice mode |= os.ModeCharDevice case TypeBlock: - // block device node mode |= os.ModeDevice case TypeDir: - // directory mode |= os.ModeDir case TypeFifo: - // fifo node mode |= os.ModeNamedPipe } @@ -176,33 +616,16 @@ const ( c_ISSOCK = 0140000 // Socket ) -// Keywords for the PAX Extended Header -const ( - paxAtime = "atime" - paxCharset = "charset" - paxComment = "comment" - paxCtime = "ctime" // please note that ctime is not a valid pax header. - paxGid = "gid" - paxGname = "gname" - paxLinkpath = "linkpath" - paxMtime = "mtime" - paxPath = "path" - paxSize = "size" - paxUid = "uid" - paxUname = "uname" - paxXattr = "SCHILY.xattr." - paxNone = "" -) - // FileInfoHeader creates a partially-populated Header from fi. // If fi describes a symlink, FileInfoHeader records link as the link target. // If fi describes a directory, a slash is appended to the name. -// Because os.FileInfo's Name method returns only the base name of -// the file it describes, it may be necessary to modify the Name field -// of the returned header to provide the full path name of the file. +// +// Since os.FileInfo's Name method only returns the base name of +// the file it describes, it may be necessary to modify Header.Name +// to provide the full path name of the file. 
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { if fi == nil { - return nil, errors.New("tar: FileInfo is nil") + return nil, errors.New("archive/tar: FileInfo is nil") } fm := fi.Mode() h := &Header{ @@ -265,6 +688,12 @@ func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { h.Size = 0 h.Linkname = sys.Linkname } + if sys.PAXRecords != nil { + h.PAXRecords = make(map[string]string) + for k, v := range sys.PAXRecords { + h.PAXRecords[k] = v + } + } } if sysStat != nil { return h, sysStat(fi, h) @@ -282,3 +711,10 @@ func isHeaderOnlyType(flag byte) bool { return false } } + +func min(a, b int64) int64 { + if a < b { + return a + } + return b +} diff --git a/src/archive/tar/example_test.go b/src/archive/tar/example_test.go index 5f0ce2f4029..a2474b959f6 100644 --- a/src/archive/tar/example_test.go +++ b/src/archive/tar/example_test.go @@ -13,14 +13,10 @@ import ( "os" ) -func Example() { - // Create a buffer to write our archive to. - buf := new(bytes.Buffer) - - // Create a new tar archive. - tw := tar.NewWriter(buf) - - // Add some files to the archive. +func Example_minimal() { + // Create and add some files to the archive. + var buf bytes.Buffer + tw := tar.NewWriter(&buf) var files = []struct { Name, Body string }{ @@ -35,34 +31,29 @@ func Example() { Size: int64(len(file.Body)), } if err := tw.WriteHeader(hdr); err != nil { - log.Fatalln(err) + log.Fatal(err) } if _, err := tw.Write([]byte(file.Body)); err != nil { - log.Fatalln(err) + log.Fatal(err) } } - // Make sure to check the error on Close. if err := tw.Close(); err != nil { - log.Fatalln(err) + log.Fatal(err) } - // Open the tar archive for reading. - r := bytes.NewReader(buf.Bytes()) - tr := tar.NewReader(r) - - // Iterate through the files in the archive. + // Open and iterate through the files in the archive. 
+ tr := tar.NewReader(&buf) for { hdr, err := tr.Next() if err == io.EOF { - // end of tar archive - break + break // End of archive } if err != nil { - log.Fatalln(err) + log.Fatal(err) } fmt.Printf("Contents of %s:\n", hdr.Name) if _, err := io.Copy(os.Stdout, tr); err != nil { - log.Fatalln(err) + log.Fatal(err) } fmt.Println() } diff --git a/src/archive/tar/format.go b/src/archive/tar/format.go index c2c9910d002..6e29698a14a 100644 --- a/src/archive/tar/format.go +++ b/src/archive/tar/format.go @@ -4,38 +4,133 @@ package tar +import "strings" + +// Format represents the tar archive format. +// +// The original tar format was introduced in Unix V7. +// Since then, there have been multiple competing formats attempting to +// standardize or extend the V7 format to overcome its limitations. +// The most common formats are the USTAR, PAX, and GNU formats, +// each with their own advantages and limitations. +// +// The following table captures the capabilities of each format: +// +// | USTAR | PAX | GNU +// ------------------+--------+-----------+---------- +// Name | 256B | unlimited | unlimited +// Linkname | 100B | unlimited | unlimited +// Size | uint33 | unlimited | uint89 +// Mode | uint21 | uint21 | uint57 +// Uid/Gid | uint21 | unlimited | uint57 +// Uname/Gname | 32B | unlimited | 32B +// ModTime | uint33 | unlimited | int89 +// AccessTime | n/a | unlimited | int89 +// ChangeTime | n/a | unlimited | int89 +// Devmajor/Devminor | uint21 | uint21 | uint57 +// ------------------+--------+-----------+---------- +// string encoding | ASCII | UTF-8 | binary +// sub-second times | no | yes | no +// sparse files | no | yes | yes +// +// The table's upper portion shows the Header fields, where each format reports +// the maximum number of bytes allowed for each string field and +// the integer type used to store each numeric field +// (where timestamps are stored as the number of seconds since the Unix epoch). 
+// +// The table's lower portion shows specialized features of each format, +// such as supported string encodings, support for sub-second timestamps, +// or support for sparse files. +// +// The Writer currently provides no support for sparse files. +type Format int + // Constants to identify various tar formats. const ( - // The format is unknown. - formatUnknown = (1 << iota) / 2 // Sequence of 0, 1, 2, 4, 8, etc... + // Deliberately hide the meaning of constants from public API. + _ Format = (1 << iota) / 4 // Sequence of 0, 0, 1, 2, 4, 8, etc... + + // FormatUnknown indicates that the format is unknown. + FormatUnknown // The format of the original Unix V7 tar tool prior to standardization. formatV7 - // The old and new GNU formats, which are incompatible with USTAR. - // This does cover the old GNU sparse extension. - // This does not cover the GNU sparse extensions using PAX headers, - // versions 0.0, 0.1, and 1.0; these fall under the PAX format. - formatGNU + // FormatUSTAR represents the USTAR header format defined in POSIX.1-1988. + // + // While this format is compatible with most tar readers, + // the format has several limitations making it unsuitable for some usages. + // Most notably, it cannot support sparse files, files larger than 8GiB, + // filenames larger than 256 characters, and non-ASCII filenames. + // + // Reference: + // http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06 + FormatUSTAR + + // FormatPAX represents the PAX header format defined in POSIX.1-2001. + // + // PAX extends USTAR by writing a special file with Typeflag TypeXHeader + // preceding the original header. This file contains a set of key-value + // records, which are used to overcome USTAR's shortcomings, in addition to + // providing the ability to have sub-second resolution for timestamps. 
+ // + // Some newer formats add their own extensions to PAX by defining their + // own keys and assigning certain semantic meaning to the associated values. + // For example, sparse file support in PAX is implemented using keys + // defined by the GNU manual (e.g., "GNU.sparse.map"). + // + // Reference: + // http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html + FormatPAX + + // FormatGNU represents the GNU header format. + // + // The GNU header format is older than the USTAR and PAX standards and + // is not compatible with them. The GNU format supports + // arbitrary file sizes, filenames of arbitrary encoding and length, + // sparse files, and other features. + // + // It is recommended that PAX be chosen over GNU unless the target + // application can only parse GNU formatted archives. + // + // Reference: + // http://www.gnu.org/software/tar/manual/html_node/Standard.html + FormatGNU // Schily's tar format, which is incompatible with USTAR. // This does not cover STAR extensions to the PAX format; these fall under // the PAX format. formatSTAR - // USTAR is the former standardization of tar defined in POSIX.1-1988. - // This is incompatible with the GNU and STAR formats. - formatUSTAR - - // PAX is the latest standardization of tar defined in POSIX.1-2001. - // This is an extension of USTAR and is "backwards compatible" with it. - // - // Some newer formats add their own extensions to PAX, such as GNU sparse - // files and SCHILY extended attributes. Since they are backwards compatible - // with PAX, they will be labelled as "PAX". 
- formatPAX + formatMax ) +func (f Format) has(f2 Format) bool { return f&f2 != 0 } +func (f *Format) mayBe(f2 Format) { *f |= f2 } +func (f *Format) mayOnlyBe(f2 Format) { *f &= f2 } +func (f *Format) mustNotBe(f2 Format) { *f &^= f2 } + +var formatNames = map[Format]string{ + formatV7: "V7", FormatUSTAR: "USTAR", FormatPAX: "PAX", FormatGNU: "GNU", formatSTAR: "STAR", +} + +func (f Format) String() string { + var ss []string + for f2 := Format(1); f2 < formatMax; f2 <<= 1 { + if f.has(f2) { + ss = append(ss, formatNames[f2]) + } + } + switch len(ss) { + case 0: + return "" + case 1: + return ss[0] + default: + return "(" + strings.Join(ss, " | ") + ")" + } +} + // Magics used to identify various formats. const ( magicGNU, versionGNU = "ustar ", " \x00" @@ -50,6 +145,12 @@ const ( prefixSize = 155 // Max length of the prefix field in USTAR format ) +// blockPadding computes the number of bytes needed to pad offset up to the +// nearest block edge where 0 <= n < blockSize. +func blockPadding(offset int64) (n int64) { + return -offset & (blockSize - 1) +} + var zeroBlock block type block [blockSize]byte @@ -63,14 +164,14 @@ func (b *block) Sparse() sparseArray { return (sparseArray)(b[:]) } // GetFormat checks that the block is a valid tar header based on the checksum. // It then attempts to guess the specific format based on magic values. -// If the checksum fails, then formatUnknown is returned. -func (b *block) GetFormat() (format int) { +// If the checksum fails, then FormatUnknown is returned. +func (b *block) GetFormat() Format { // Verify checksum. var p parser value := p.parseOctal(b.V7().Chksum()) chksum1, chksum2 := b.ComputeChecksum() if p.err != nil || (value != chksum1 && value != chksum2) { - return formatUnknown + return FormatUnknown } // Guess the magic values. 
@@ -81,9 +182,9 @@ func (b *block) GetFormat() (format int) { case magic == magicUSTAR && trailer == trailerSTAR: return formatSTAR case magic == magicUSTAR: - return formatUSTAR + return FormatUSTAR | FormatPAX case magic == magicGNU && version == versionGNU: - return formatGNU + return FormatGNU default: return formatV7 } @@ -91,19 +192,19 @@ func (b *block) GetFormat() (format int) { // SetFormat writes the magic values necessary for specified format // and then updates the checksum accordingly. -func (b *block) SetFormat(format int) { +func (b *block) SetFormat(format Format) { // Set the magic values. - switch format { - case formatV7: + switch { + case format.has(formatV7): // Do nothing. - case formatGNU: + case format.has(FormatGNU): copy(b.GNU().Magic(), magicGNU) copy(b.GNU().Version(), versionGNU) - case formatSTAR: + case format.has(formatSTAR): copy(b.STAR().Magic(), magicUSTAR) copy(b.STAR().Version(), versionUSTAR) copy(b.STAR().Trailer(), trailerSTAR) - case formatUSTAR, formatPAX: + case format.has(FormatUSTAR | FormatPAX): copy(b.USTAR().Magic(), magicUSTAR) copy(b.USTAR().Version(), versionUSTAR) default: @@ -128,12 +229,17 @@ func (b *block) ComputeChecksum() (unsigned, signed int64) { if 148 <= i && i < 156 { c = ' ' // Treat the checksum field itself as all spaces. } - unsigned += int64(uint8(c)) + unsigned += int64(c) signed += int64(int8(c)) } return unsigned, signed } +// Reset clears the block with all zeros. 
+func (b *block) Reset() { + *b = block{} +} + type headerV7 [blockSize]byte func (h *headerV7) Name() []byte { return h[000:][:100] } @@ -187,11 +293,11 @@ func (h *headerUSTAR) Prefix() []byte { return h[345:][:155] } type sparseArray []byte -func (s sparseArray) Entry(i int) sparseNode { return (sparseNode)(s[i*24:]) } +func (s sparseArray) Entry(i int) sparseElem { return (sparseElem)(s[i*24:]) } func (s sparseArray) IsExtended() []byte { return s[24*s.MaxEntries():][:1] } func (s sparseArray) MaxEntries() int { return len(s) / 24 } -type sparseNode []byte +type sparseElem []byte -func (s sparseNode) Offset() []byte { return s[00:][:12] } -func (s sparseNode) NumBytes() []byte { return s[12:][:12] } +func (s sparseElem) Offset() []byte { return s[00:][:12] } +func (s sparseElem) Length() []byte { return s[12:][:12] } diff --git a/src/archive/tar/reader.go b/src/archive/tar/reader.go index 9abe888218f..f4eeb557be9 100644 --- a/src/archive/tar/reader.go +++ b/src/archive/tar/reader.go @@ -4,33 +4,23 @@ package tar -// TODO(dsymonds): -// - pax extensions - import ( "bytes" - "errors" "io" "io/ioutil" - "math" "strconv" "strings" "time" ) -var ( - ErrHeader = errors.New("archive/tar: invalid tar header") -) - -// A Reader provides sequential access to the contents of a tar archive. -// A tar archive consists of a sequence of files. -// The Next method advances to the next file in the archive (including the first), -// and then it can be treated as an io.Reader to access the file's data. +// Reader provides sequential access to the contents of a tar archive. +// Reader.Next advances to the next file in the archive (including the first), +// and then Reader can be treated as an io.Reader to access the file's data. 
type Reader struct { r io.Reader - pad int64 // amount of padding (ignored) after current file entry - curr numBytesReader // reader for current file entry - blk block // buffer to use as temporary local storage + pad int64 // Amount of padding (ignored) after current file entry + curr fileReader // Reader for current file entry + blk block // Buffer to use as temporary local storage // err is a persistent error. // It is only the responsibility of every exported method of Reader to @@ -38,68 +28,21 @@ type Reader struct { err error } -// A numBytesReader is an io.Reader with a numBytes method, returning the number -// of bytes remaining in the underlying encoded data. -type numBytesReader interface { +type fileReader interface { io.Reader - numBytes() int64 -} + fileState -// A regFileReader is a numBytesReader for reading file data from a tar archive. -type regFileReader struct { - r io.Reader // underlying reader - nb int64 // number of unread bytes for current file entry + WriteTo(io.Writer) (int64, error) } -// A sparseFileReader is a numBytesReader for reading sparse file data from a -// tar archive. -type sparseFileReader struct { - rfr numBytesReader // Reads the sparse-encoded file data - sp []sparseEntry // The sparse map for the file - pos int64 // Keeps track of file position - total int64 // Total size of the file -} - -// A sparseEntry holds a single entry in a sparse file's sparse map. -// -// Sparse files are represented using a series of sparseEntrys. -// Despite the name, a sparseEntry represents an actual data fragment that -// references data found in the underlying archive stream. All regions not -// covered by a sparseEntry are logically filled with zeros. 
-// -// For example, if the underlying raw file contains the 10-byte data: -// var compactData = "abcdefgh" -// -// And the sparse map has the following entries: -// var sp = []sparseEntry{ -// {offset: 2, numBytes: 5} // Data fragment for [2..7] -// {offset: 18, numBytes: 3} // Data fragment for [18..21] -// } -// -// Then the content of the resulting sparse file with a "real" size of 25 is: -// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4 -type sparseEntry struct { - offset int64 // Starting position of the fragment - numBytes int64 // Length of the fragment -} - -// Keywords for GNU sparse files in a PAX extended header -const ( - paxGNUSparseNumBlocks = "GNU.sparse.numblocks" - paxGNUSparseOffset = "GNU.sparse.offset" - paxGNUSparseNumBytes = "GNU.sparse.numbytes" - paxGNUSparseMap = "GNU.sparse.map" - paxGNUSparseName = "GNU.sparse.name" - paxGNUSparseMajor = "GNU.sparse.major" - paxGNUSparseMinor = "GNU.sparse.minor" - paxGNUSparseSize = "GNU.sparse.size" - paxGNUSparseRealSize = "GNU.sparse.realsize" -) - // NewReader creates a new Reader reading from r. -func NewReader(r io.Reader) *Reader { return &Reader{r: r} } +func NewReader(r io.Reader) *Reader { + return &Reader{r: r, curr: ®FileReader{r, 0}} +} // Next advances to the next entry in the tar archive. +// The Header.Size determines how many bytes can be read for the next file. +// Any remaining data in the current file is automatically discarded. // // io.EOF is returned at the end of the input. func (tr *Reader) Next() (*Header, error) { @@ -112,18 +55,26 @@ func (tr *Reader) Next() (*Header, error) { } func (tr *Reader) next() (*Header, error) { - var extHdrs map[string]string + var paxHdrs map[string]string + var gnuLongName, gnuLongLink string // Externally, Next iterates through the tar archive as if it is a series of // files. Internally, the tar format often uses fake "files" to add meta // data that describes the next file. 
These meta data "files" should not // normally be visible to the outside. As such, this loop iterates through // one or more "header files" until it finds a "normal file". + format := FormatUSTAR | FormatPAX | FormatGNU loop: for { - if err := tr.skipUnread(); err != nil { + // Discard the remainder of the file and any padding. + if err := discard(tr.r, tr.curr.PhysicalRemaining()); err != nil { return nil, err } + if _, err := tryReadFull(tr.r, tr.blk[:tr.pad]); err != nil { + return nil, err + } + tr.pad = 0 + hdr, rawHdr, err := tr.readHeader() if err != nil { return nil, err @@ -131,43 +82,58 @@ loop: if err := tr.handleRegularFile(hdr); err != nil { return nil, err } + format.mayOnlyBe(hdr.Format) // Check for PAX/GNU special headers and files. switch hdr.Typeflag { - case TypeXHeader: - extHdrs, err = parsePAX(tr) + case TypeXHeader, TypeXGlobalHeader: + format.mayOnlyBe(FormatPAX) + paxHdrs, err = parsePAX(tr) if err != nil { return nil, err } + if hdr.Typeflag == TypeXGlobalHeader { + mergePAX(hdr, paxHdrs) + return &Header{ + Name: hdr.Name, + Typeflag: hdr.Typeflag, + Xattrs: hdr.Xattrs, + PAXRecords: hdr.PAXRecords, + Format: format, + }, nil + } continue loop // This is a meta header affecting the next header case TypeGNULongName, TypeGNULongLink: + format.mayOnlyBe(FormatGNU) realname, err := ioutil.ReadAll(tr) if err != nil { return nil, err } - // Convert GNU extensions to use PAX headers. - if extHdrs == nil { - extHdrs = make(map[string]string) - } var p parser switch hdr.Typeflag { case TypeGNULongName: - extHdrs[paxPath] = p.parseString(realname) + gnuLongName = p.parseString(realname) case TypeGNULongLink: - extHdrs[paxLinkpath] = p.parseString(realname) - } - if p.err != nil { - return nil, p.err + gnuLongLink = p.parseString(realname) } continue loop // This is a meta header affecting the next header default: // The old GNU sparse format is handled here since it is technically // just a regular file with additional attributes. 
- if err := mergePAX(hdr, extHdrs); err != nil { + if err := mergePAX(hdr, paxHdrs); err != nil { return nil, err } + if gnuLongName != "" { + hdr.Name = gnuLongName + } + if gnuLongLink != "" { + hdr.Linkname = gnuLongLink + } + if hdr.Typeflag == TypeRegA && strings.HasSuffix(hdr.Name, "/") { + hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories + } // The extended headers may have updated the size. // Thus, setup the regFileReader again after merging PAX headers. @@ -177,9 +143,15 @@ loop: // Sparse formats rely on being able to read from the logical data // section; there must be a preceding call to handleRegularFile. - if err := tr.handleSparseFile(hdr, rawHdr, extHdrs); err != nil { + if err := tr.handleSparseFile(hdr, rawHdr); err != nil { return nil, err } + + // Set the final guess at the format. + if format.has(FormatUSTAR) && format.has(FormatPAX) { + format.mayOnlyBe(FormatUSTAR) + } + hdr.Format = format return hdr, nil // This is a file, so stop } } @@ -197,105 +169,86 @@ func (tr *Reader) handleRegularFile(hdr *Header) error { return ErrHeader } - tr.pad = -nb & (blockSize - 1) // blockSize is a power of two + tr.pad = blockPadding(nb) tr.curr = ®FileReader{r: tr.r, nb: nb} return nil } // handleSparseFile checks if the current file is a sparse format of any type // and sets the curr reader appropriately. -func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block, extHdrs map[string]string) error { - var sp []sparseEntry +func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error { + var spd sparseDatas var err error if hdr.Typeflag == TypeGNUSparse { - sp, err = tr.readOldGNUSparseMap(hdr, rawHdr) - if err != nil { - return err - } + spd, err = tr.readOldGNUSparseMap(hdr, rawHdr) } else { - sp, err = tr.checkForGNUSparsePAXHeaders(hdr, extHdrs) - if err != nil { - return err - } + spd, err = tr.readGNUSparsePAXHeaders(hdr) } // If sp is non-nil, then this is a sparse file. 
- // Note that it is possible for len(sp) to be zero. - if sp != nil { - tr.curr, err = newSparseFileReader(tr.curr, sp, hdr.Size) + // Note that it is possible for len(sp) == 0. + if err == nil && spd != nil { + if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) { + return ErrHeader + } + sph := invertSparseEntries(spd, hdr.Size) + tr.curr = &sparseFileReader{tr.curr, sph, 0} } return err } -// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then -// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to -// be treated as a regular file. -func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) { - var sparseFormat string - - // Check for sparse format indicators - major, majorOk := headers[paxGNUSparseMajor] - minor, minorOk := headers[paxGNUSparseMinor] - sparseName, sparseNameOk := headers[paxGNUSparseName] - _, sparseMapOk := headers[paxGNUSparseMap] - sparseSize, sparseSizeOk := headers[paxGNUSparseSize] - sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize] - - // Identify which, if any, sparse format applies from which PAX headers are set - if majorOk && minorOk { - sparseFormat = major + "." + minor - } else if sparseNameOk && sparseMapOk { - sparseFormat = "0.1" - } else if sparseSizeOk { - sparseFormat = "0.0" - } else { - // Not a PAX format GNU sparse file. - return nil, nil +// readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. +// If they are found, then this function reads the sparse map and returns it. +// This assumes that 0.0 headers have already been converted to 0.1 headers +// by the the PAX header parsing logic. +func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) { + // Identify the version of GNU headers. 
+ var is1x0 bool + major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor] + switch { + case major == "0" && (minor == "0" || minor == "1"): + is1x0 = false + case major == "1" && minor == "0": + is1x0 = true + case major != "" || minor != "": + return nil, nil // Unknown GNU sparse PAX version + case hdr.PAXRecords[paxGNUSparseMap] != "": + is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess + default: + return nil, nil // Not a PAX format GNU sparse file. } + hdr.Format.mayOnlyBe(FormatPAX) - // Check for unknown sparse format - if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" { - return nil, nil + // Update hdr from GNU sparse PAX headers. + if name := hdr.PAXRecords[paxGNUSparseName]; name != "" { + hdr.Name = name } - - // Update hdr from GNU sparse PAX headers - if sparseNameOk { - hdr.Name = sparseName + size := hdr.PAXRecords[paxGNUSparseSize] + if size == "" { + size = hdr.PAXRecords[paxGNUSparseRealSize] } - if sparseSizeOk { - realSize, err := strconv.ParseInt(sparseSize, 10, 64) + if size != "" { + n, err := strconv.ParseInt(size, 10, 64) if err != nil { return nil, ErrHeader } - hdr.Size = realSize - } else if sparseRealSizeOk { - realSize, err := strconv.ParseInt(sparseRealSize, 10, 64) - if err != nil { - return nil, ErrHeader - } - hdr.Size = realSize + hdr.Size = n } - // Set up the sparse map, according to the particular sparse format in use - var sp []sparseEntry - var err error - switch sparseFormat { - case "0.0", "0.1": - sp, err = readGNUSparseMap0x1(headers) - case "1.0": - sp, err = readGNUSparseMap1x0(tr.curr) + // Read the sparse map according to the appropriate format. + if is1x0 { + return readGNUSparseMap1x0(tr.curr) } - return sp, err + return readGNUSparseMap0x1(hdr.PAXRecords) } -// mergePAX merges well known headers according to PAX standard. 
-// In general headers with the same name as those found -// in the header struct overwrite those found in the header -// struct with higher precision or longer values. Esp. useful -// for name and linkname fields. -func mergePAX(hdr *Header, headers map[string]string) (err error) { - var id64 int64 - for k, v := range headers { +// mergePAX merges paxHdrs into hdr for all relevant fields of Header. +func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) { + for k, v := range paxHdrs { + if v == "" { + continue // Keep the original USTAR value + } + var id64 int64 switch k { case paxPath: hdr.Name = v @@ -320,17 +273,18 @@ func mergePAX(hdr *Header, headers map[string]string) (err error) { case paxSize: hdr.Size, err = strconv.ParseInt(v, 10, 64) default: - if strings.HasPrefix(k, paxXattr) { + if strings.HasPrefix(k, paxSchilyXattr) { if hdr.Xattrs == nil { hdr.Xattrs = make(map[string]string) } - hdr.Xattrs[k[len(paxXattr):]] = v + hdr.Xattrs[k[len(paxSchilyXattr):]] = v } } if err != nil { return ErrHeader } } + hdr.PAXRecords = paxHdrs return nil } @@ -348,7 +302,7 @@ func parsePAX(r io.Reader) (map[string]string, error) { // headers since 0.0 headers were not PAX compliant. var sparseMap []string - extHdrs := make(map[string]string) + paxHdrs := make(map[string]string) for len(sbuf) > 0 { key, value, residual, err := parsePAXRecord(sbuf) if err != nil { @@ -366,58 +320,13 @@ func parsePAX(r io.Reader) (map[string]string, error) { } sparseMap = append(sparseMap, value) default: - // According to PAX specification, a value is stored only if it is - // non-empty. Otherwise, the key is deleted. 
- if len(value) > 0 { - extHdrs[key] = value - } else { - delete(extHdrs, key) - } + paxHdrs[key] = value } } if len(sparseMap) > 0 { - extHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",") + paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",") } - return extHdrs, nil -} - -// skipUnread skips any unread bytes in the existing file entry, as well as any -// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is -// encountered in the data portion; it is okay to hit io.EOF in the padding. -// -// Note that this function still works properly even when sparse files are being -// used since numBytes returns the bytes remaining in the underlying io.Reader. -func (tr *Reader) skipUnread() error { - dataSkip := tr.numBytes() // Number of data bytes to skip - totalSkip := dataSkip + tr.pad // Total number of bytes to skip - tr.curr, tr.pad = nil, 0 - - // If possible, Seek to the last byte before the end of the data section. - // Do this because Seek is often lazy about reporting errors; this will mask - // the fact that the tar stream may be truncated. We can rely on the - // io.CopyN done shortly afterwards to trigger any IO errors. - var seekSkipped int64 // Number of bytes skipped via Seek - if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 { - // Not all io.Seeker can actually Seek. For example, os.Stdin implements - // io.Seeker, but calling Seek always returns an error and performs - // no action. Thus, we try an innocent seek to the current position - // to see if Seek is really supported. - pos1, err := sr.Seek(0, io.SeekCurrent) - if err == nil { - // Seek seems supported, so perform the real Seek. 
- pos2, err := sr.Seek(dataSkip-1, io.SeekCurrent) - if err != nil { - return err - } - seekSkipped = pos2 - pos1 - } - } - - copySkipped, err := io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped) - if err == io.EOF && seekSkipped+copySkipped < dataSkip { - err = io.ErrUnexpectedEOF - } - return err + return paxHdrs, nil } // readHeader reads the next block header and assumes that the underlying reader @@ -445,7 +354,7 @@ func (tr *Reader) readHeader() (*Header, *block, error) { // Verify the header matches a known format. format := tr.blk.GetFormat() - if format == formatUnknown { + if format == FormatUnknown { return nil, nil, ErrHeader } @@ -454,59 +363,86 @@ func (tr *Reader) readHeader() (*Header, *block, error) { // Unpack the V7 header. v7 := tr.blk.V7() + hdr.Typeflag = v7.TypeFlag()[0] hdr.Name = p.parseString(v7.Name()) + hdr.Linkname = p.parseString(v7.LinkName()) + hdr.Size = p.parseNumeric(v7.Size()) hdr.Mode = p.parseNumeric(v7.Mode()) hdr.Uid = int(p.parseNumeric(v7.UID())) hdr.Gid = int(p.parseNumeric(v7.GID())) - hdr.Size = p.parseNumeric(v7.Size()) hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0) - hdr.Typeflag = v7.TypeFlag()[0] - hdr.Linkname = p.parseString(v7.LinkName()) - - // The atime and ctime fields are often left unused. Some versions of Go - // had a bug in the tar.Writer where it would output an invalid tar file - // in certain rare situations because the logic incorrectly believed that - // the old GNU format had a prefix field. This is wrong and leads to - // an outputted file that actually mangles the atime and ctime fields. - // - // In order to continue reading tar files created by a buggy writer, we - // try to parse the atime and ctime fields, but just return the zero value - // of time.Time when we cannot parse them. 
- // - // See https://golang.org/issues/12594 - tryParseTime := func(b []byte) time.Time { - var p parser - n := p.parseNumeric(b) - if b[0] != 0x00 && p.err == nil { - return time.Unix(n, 0) - } - return time.Time{} - } // Unpack format specific fields. if format > formatV7 { ustar := tr.blk.USTAR() hdr.Uname = p.parseString(ustar.UserName()) hdr.Gname = p.parseString(ustar.GroupName()) - if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock { - hdr.Devmajor = p.parseNumeric(ustar.DevMajor()) - hdr.Devminor = p.parseNumeric(ustar.DevMinor()) - } + hdr.Devmajor = p.parseNumeric(ustar.DevMajor()) + hdr.Devminor = p.parseNumeric(ustar.DevMinor()) var prefix string - switch format { - case formatUSTAR: + switch { + case format.has(FormatUSTAR | FormatPAX): + hdr.Format = format ustar := tr.blk.USTAR() prefix = p.parseString(ustar.Prefix()) - case formatSTAR: + + // For Format detection, check if block is properly formatted since + // the parser is more liberal than what USTAR actually permits. + notASCII := func(r rune) bool { return r >= 0x80 } + if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 { + hdr.Format = FormatUnknown // Non-ASCII characters in block. 
+ } + nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 } + if !(nul(v7.Size()) && nul(v7.Mode()) && nul(v7.UID()) && nul(v7.GID()) && + nul(v7.ModTime()) && nul(ustar.DevMajor()) && nul(ustar.DevMinor())) { + hdr.Format = FormatUnknown // Numeric fields must end in NUL + } + case format.has(formatSTAR): star := tr.blk.STAR() prefix = p.parseString(star.Prefix()) hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0) hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0) - case formatGNU: + case format.has(FormatGNU): + hdr.Format = format + var p2 parser gnu := tr.blk.GNU() - hdr.AccessTime = tryParseTime(gnu.AccessTime()) - hdr.ChangeTime = tryParseTime(gnu.ChangeTime()) + if b := gnu.AccessTime(); b[0] != 0 { + hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0) + } + if b := gnu.ChangeTime(); b[0] != 0 { + hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0) + } + + // Prior to Go1.8, the Writer had a bug where it would output + // an invalid tar file in certain rare situations because the logic + // incorrectly believed that the old GNU format had a prefix field. + // This is wrong and leads to an output file that mangles the + // atime and ctime fields, which are often left unused. + // + // In order to continue reading tar files created by former, buggy + // versions of Go, we skeptically parse the atime and ctime fields. + // If we are unable to parse them and the prefix field looks like + // an ASCII string, then we fallback on the pre-Go1.8 behavior + // of treating these fields as the USTAR prefix field. + // + // Note that this will not use the fallback logic for all possible + // files generated by a pre-Go1.8 toolchain. If the generated file + // happened to have a prefix field that parses as valid + // atime and ctime fields (e.g., when they are valid octal strings), + // then it is impossible to distinguish between an valid GNU file + // and an invalid pre-Go1.8 file. 
+ // + // See https://golang.org/issues/12594 + // See https://golang.org/issues/21005 + if p2.err != nil { + hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{} + ustar := tr.blk.USTAR() + if s := p.parseString(ustar.Prefix()); isASCII(s) { + prefix = s + } + hdr.Format = FormatUnknown // Buggy file is not GNU + } } if len(prefix) > 0 { hdr.Name = prefix + "/" + hdr.Name @@ -523,21 +459,22 @@ func (tr *Reader) readHeader() (*Header, *block, error) { // The Header.Size does not reflect the size of any extended headers used. // Thus, this function will read from the raw io.Reader to fetch extra headers. // This method mutates blk in the process. -func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) ([]sparseEntry, error) { +func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) { // Make sure that the input format is GNU. // Unfortunately, the STAR format also has a sparse header format that uses // the same type flag but has a completely different layout. - if blk.GetFormat() != formatGNU { + if blk.GetFormat() != FormatGNU { return nil, ErrHeader } + hdr.Format.mayOnlyBe(FormatGNU) var p parser hdr.Size = p.parseNumeric(blk.GNU().RealSize()) if p.err != nil { return nil, p.err } - var s sparseArray = blk.GNU().Sparse() - var sp = make([]sparseEntry, 0, s.MaxEntries()) + s := blk.GNU().Sparse() + spd := make(sparseDatas, 0, s.MaxEntries()) for { for i := 0; i < s.MaxEntries(); i++ { // This termination condition is identical to GNU and BSD tar. 
@@ -545,25 +482,22 @@ func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) ([]sparseEntry, e break // Don't return, need to process extended headers (even if empty) } offset := p.parseNumeric(s.Entry(i).Offset()) - numBytes := p.parseNumeric(s.Entry(i).NumBytes()) + length := p.parseNumeric(s.Entry(i).Length()) if p.err != nil { return nil, p.err } - sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + spd = append(spd, sparseEntry{Offset: offset, Length: length}) } if s.IsExtended()[0] > 0 { // There are more entries. Read an extension header and parse its entries. - if _, err := io.ReadFull(tr.r, blk[:]); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } + if _, err := mustReadFull(tr.r, blk[:]); err != nil { return nil, err } s = blk.Sparse() continue } - return sp, nil // Done + return spd, nil // Done } } @@ -571,28 +505,27 @@ func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) ([]sparseEntry, e // version 1.0. The format of the sparse map consists of a series of // newline-terminated numeric fields. The first field is the number of entries // and is always present. Following this are the entries, consisting of two -// fields (offset, numBytes). This function must stop reading at the end +// fields (offset, length). This function must stop reading at the end // boundary of the block containing the last newline. // // Note that the GNU manual says that numeric values should be encoded in octal // format. However, the GNU tar utility itself outputs these values in decimal. // As such, this library treats values as being encoded in decimal. 
-func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { - var cntNewline int64 - var buf bytes.Buffer - var blk = make([]byte, blockSize) +func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { + var ( + cntNewline int64 + buf bytes.Buffer + blk block + ) - // feedTokens copies data in numBlock chunks from r into buf until there are + // feedTokens copies data in blocks from r into buf until there are // at least cnt newlines in buf. It will not read more blocks than needed. - var feedTokens = func(cnt int64) error { - for cntNewline < cnt { - if _, err := io.ReadFull(r, blk); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } + feedTokens := func(n int64) error { + for cntNewline < n { + if _, err := mustReadFull(r, blk[:]); err != nil { return err } - buf.Write(blk) + buf.Write(blk[:]) for _, c := range blk { if c == '\n' { cntNewline++ @@ -604,10 +537,10 @@ func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { // nextToken gets the next token delimited by a newline. This assumes that // at least one newline exists in the buffer. - var nextToken = func() string { + nextToken := func() string { cntNewline-- tok, _ := buf.ReadString('\n') - return tok[:len(tok)-1] // Cut off newline + return strings.TrimRight(tok, "\n") } // Parse for the number of entries. 
@@ -626,80 +559,67 @@ func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { if err := feedTokens(2 * numEntries); err != nil { return nil, err } - sp := make([]sparseEntry, 0, numEntries) + spd := make(sparseDatas, 0, numEntries) for i := int64(0); i < numEntries; i++ { - offset, err := strconv.ParseInt(nextToken(), 10, 64) - if err != nil { + offset, err1 := strconv.ParseInt(nextToken(), 10, 64) + length, err2 := strconv.ParseInt(nextToken(), 10, 64) + if err1 != nil || err2 != nil { return nil, ErrHeader } - numBytes, err := strconv.ParseInt(nextToken(), 10, 64) - if err != nil { - return nil, ErrHeader - } - sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + spd = append(spd, sparseEntry{Offset: offset, Length: length}) } - return sp, nil + return spd, nil } // readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format // version 0.1. The sparse map is stored in the PAX headers. -func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) { +func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) { // Get number of entries. // Use integer overflow resistant math to check this. - numEntriesStr := extHdrs[paxGNUSparseNumBlocks] + numEntriesStr := paxHdrs[paxGNUSparseNumBlocks] numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { return nil, ErrHeader } // There should be two numbers in sparseMap for each entry. - sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",") + sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",") + if len(sparseMap) == 1 && sparseMap[0] == "" { + sparseMap = sparseMap[:0] + } if int64(len(sparseMap)) != 2*numEntries { return nil, ErrHeader } // Loop through the entries in the sparse map. // numEntries is trusted now. 
- sp := make([]sparseEntry, 0, numEntries) - for i := int64(0); i < numEntries; i++ { - offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64) - if err != nil { + spd := make(sparseDatas, 0, numEntries) + for len(sparseMap) >= 2 { + offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64) + length, err2 := strconv.ParseInt(sparseMap[1], 10, 64) + if err1 != nil || err2 != nil { return nil, ErrHeader } - numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64) - if err != nil { - return nil, ErrHeader - } - sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + spd = append(spd, sparseEntry{Offset: offset, Length: length}) + sparseMap = sparseMap[2:] } - return sp, nil + return spd, nil } -// numBytes returns the number of bytes left to read in the current file's entry -// in the tar archive, or 0 if there is no current file. -func (tr *Reader) numBytes() int64 { - if tr.curr == nil { - // No current file, so no bytes - return 0 - } - return tr.curr.numBytes() -} - -// Read reads from the current entry in the tar archive. -// It returns 0, io.EOF when it reaches the end of that entry, -// until Next is called to advance to the next entry. +// Read reads from the current file in the tar archive. +// It returns (0, io.EOF) when it reaches the end of that file, +// until Next is called to advance to the next file. // -// Calling Read on special types like TypeLink, TypeSymLink, TypeChar, -// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what +// If the current file is sparse, then the regions marked as a hole +// are read back as NUL-bytes. +// +// Calling Read on special types like TypeLink, TypeSymlink, TypeChar, +// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what // the Header.Size claims. 
func (tr *Reader) Read(b []byte) (int, error) { if tr.err != nil { return 0, tr.err } - if tr.curr == nil { - return 0, io.EOF - } - n, err := tr.curr.Read(b) if err != nil && err != io.EOF { tr.err = err @@ -707,116 +627,229 @@ func (tr *Reader) Read(b []byte) (int, error) { return n, err } -func (rfr *regFileReader) Read(b []byte) (n int, err error) { - if rfr.nb == 0 { - // file consumed - return 0, io.EOF +// writeTo writes the content of the current file to w. +// The bytes written matches the number of remaining bytes in the current file. +// +// If the current file is sparse and w is an io.WriteSeeker, +// then writeTo uses Seek to skip past holes defined in Header.SparseHoles, +// assuming that skipped regions are filled with NULs. +// This always writes the last byte to ensure w is the right size. +// +// TODO(dsnet): Re-export this when adding sparse file support. +// See https://golang.org/issue/22735 +func (tr *Reader) writeTo(w io.Writer) (int64, error) { + if tr.err != nil { + return 0, tr.err } - if int64(len(b)) > rfr.nb { - b = b[0:rfr.nb] - } - n, err = rfr.r.Read(b) - rfr.nb -= int64(n) - - if err == io.EOF && rfr.nb > 0 { - err = io.ErrUnexpectedEOF - } - return -} - -// numBytes returns the number of bytes left to read in the file's data in the tar archive. -func (rfr *regFileReader) numBytes() int64 { - return rfr.nb -} - -// newSparseFileReader creates a new sparseFileReader, but validates all of the -// sparse entries before doing so. -func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) { - if total < 0 { - return nil, ErrHeader // Total size cannot be negative - } - - // Validate all sparse entries. These are the same checks as performed by - // the BSD tar utility. 
- for i, s := range sp { - switch { - case s.offset < 0 || s.numBytes < 0: - return nil, ErrHeader // Negative values are never okay - case s.offset > math.MaxInt64-s.numBytes: - return nil, ErrHeader // Integer overflow with large length - case s.offset+s.numBytes > total: - return nil, ErrHeader // Region extends beyond the "real" size - case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset: - return nil, ErrHeader // Regions can't overlap and must be in order - } - } - return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil -} - -// readHole reads a sparse hole ending at endOffset. -func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int { - n64 := endOffset - sfr.pos - if n64 > int64(len(b)) { - n64 = int64(len(b)) - } - n := int(n64) - for i := 0; i < n; i++ { - b[i] = 0 - } - sfr.pos += n64 - return n -} - -// Read reads the sparse file data in expanded form. -func (sfr *sparseFileReader) Read(b []byte) (n int, err error) { - // Skip past all empty fragments. - for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 { - sfr.sp = sfr.sp[1:] - } - - // If there are no more fragments, then it is possible that there - // is one last sparse hole. - if len(sfr.sp) == 0 { - // This behavior matches the BSD tar utility. - // However, GNU tar stops returning data even if sfr.total is unmet. - if sfr.pos < sfr.total { - return sfr.readHole(b, sfr.total), nil - } - return 0, io.EOF - } - - // In front of a data fragment, so read a hole. - if sfr.pos < sfr.sp[0].offset { - return sfr.readHole(b, sfr.sp[0].offset), nil - } - - // In a data fragment, so read from it. - // This math is overflow free since we verify that offset and numBytes can - // be safely added when creating the sparseFileReader. 
- endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment - bytesLeft := endPos - sfr.pos // Bytes left in fragment - if int64(len(b)) > bytesLeft { - b = b[:bytesLeft] - } - - n, err = sfr.rfr.Read(b) - sfr.pos += int64(n) - if err == io.EOF { - if sfr.pos < endPos { - err = io.ErrUnexpectedEOF // There was supposed to be more data - } else if sfr.pos < sfr.total { - err = nil // There is still an implicit sparse hole at the end - } - } - - if sfr.pos == endPos { - sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it + n, err := tr.curr.WriteTo(w) + if err != nil { + tr.err = err } return n, err } -// numBytes returns the number of bytes left to read in the sparse file's -// sparse-encoded data in the tar archive. -func (sfr *sparseFileReader) numBytes() int64 { - return sfr.rfr.numBytes() +// regFileReader is a fileReader for reading data from a regular file entry. +type regFileReader struct { + r io.Reader // Underlying Reader + nb int64 // Number of remaining bytes to read +} + +func (fr *regFileReader) Read(b []byte) (n int, err error) { + if int64(len(b)) > fr.nb { + b = b[:fr.nb] + } + if len(b) > 0 { + n, err = fr.r.Read(b) + fr.nb -= int64(n) + } + switch { + case err == io.EOF && fr.nb > 0: + return n, io.ErrUnexpectedEOF + case err == nil && fr.nb == 0: + return n, io.EOF + default: + return n, err + } +} + +func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) { + return io.Copy(w, struct{ io.Reader }{fr}) +} + +func (fr regFileReader) LogicalRemaining() int64 { + return fr.nb +} + +func (fr regFileReader) PhysicalRemaining() int64 { + return fr.nb +} + +// sparseFileReader is a fileReader for reading data from a sparse file entry. 
+type sparseFileReader struct { + fr fileReader // Underlying fileReader + sp sparseHoles // Normalized list of sparse holes + pos int64 // Current position in sparse file +} + +func (sr *sparseFileReader) Read(b []byte) (n int, err error) { + finished := int64(len(b)) >= sr.LogicalRemaining() + if finished { + b = b[:sr.LogicalRemaining()] + } + + b0 := b + endPos := sr.pos + int64(len(b)) + for endPos > sr.pos && err == nil { + var nf int // Bytes read in fragment + holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset() + if sr.pos < holeStart { // In a data fragment + bf := b[:min(int64(len(b)), holeStart-sr.pos)] + nf, err = tryReadFull(sr.fr, bf) + } else { // In a hole fragment + bf := b[:min(int64(len(b)), holeEnd-sr.pos)] + nf, err = tryReadFull(zeroReader{}, bf) + } + b = b[nf:] + sr.pos += int64(nf) + if sr.pos >= holeEnd && len(sr.sp) > 1 { + sr.sp = sr.sp[1:] // Ensure last fragment always remains + } + } + + n = len(b0) - len(b) + switch { + case err == io.EOF: + return n, errMissData // Less data in dense file than sparse file + case err != nil: + return n, err + case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0: + return n, errUnrefData // More data in dense file than sparse file + case finished: + return n, io.EOF + default: + return n, nil + } +} + +func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) { + ws, ok := w.(io.WriteSeeker) + if ok { + if _, err := ws.Seek(0, io.SeekCurrent); err != nil { + ok = false // Not all io.Seeker can really seek + } + } + if !ok { + return io.Copy(w, struct{ io.Reader }{sr}) + } + + var writeLastByte bool + pos0 := sr.pos + for sr.LogicalRemaining() > 0 && !writeLastByte && err == nil { + var nf int64 // Size of fragment + holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset() + if sr.pos < holeStart { // In a data fragment + nf = holeStart - sr.pos + nf, err = io.CopyN(ws, sr.fr, nf) + } else { // In a hole fragment + nf = holeEnd - sr.pos + if sr.PhysicalRemaining() == 0 { 
+ writeLastByte = true + nf-- + } + _, err = ws.Seek(nf, io.SeekCurrent) + } + sr.pos += nf + if sr.pos >= holeEnd && len(sr.sp) > 1 { + sr.sp = sr.sp[1:] // Ensure last fragment always remains + } + } + + // If the last fragment is a hole, then seek to 1-byte before EOF, and + // write a single byte to ensure the file is the right size. + if writeLastByte && err == nil { + _, err = ws.Write([]byte{0}) + sr.pos++ + } + + n = sr.pos - pos0 + switch { + case err == io.EOF: + return n, errMissData // Less data in dense file than sparse file + case err != nil: + return n, err + case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0: + return n, errUnrefData // More data in dense file than sparse file + default: + return n, nil + } +} + +func (sr sparseFileReader) LogicalRemaining() int64 { + return sr.sp[len(sr.sp)-1].endOffset() - sr.pos +} +func (sr sparseFileReader) PhysicalRemaining() int64 { + return sr.fr.PhysicalRemaining() +} + +type zeroReader struct{} + +func (zeroReader) Read(b []byte) (int, error) { + for i := range b { + b[i] = 0 + } + return len(b), nil +} + +// mustReadFull is like io.ReadFull except it returns +// io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read. +func mustReadFull(r io.Reader, b []byte) (int, error) { + n, err := tryReadFull(r, b) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return n, err +} + +// tryReadFull is like io.ReadFull except it returns +// io.EOF when it is hit before len(b) bytes are read. +func tryReadFull(r io.Reader, b []byte) (n int, err error) { + for len(b) > n && err == nil { + var nn int + nn, err = r.Read(b[n:]) + n += nn + } + if len(b) == n && err == io.EOF { + err = nil + } + return n, err +} + +// discard skips n bytes in r, reporting an error if unable to do so. +func discard(r io.Reader, n int64) error { + // If possible, Seek to the last byte before the end of the data section. 
+ // Do this because Seek is often lazy about reporting errors; this will mask + // the fact that the stream may be truncated. We can rely on the + // io.CopyN done shortly afterwards to trigger any IO errors. + var seekSkipped int64 // Number of bytes skipped via Seek + if sr, ok := r.(io.Seeker); ok && n > 1 { + // Not all io.Seeker can actually Seek. For example, os.Stdin implements + // io.Seeker, but calling Seek always returns an error and performs + // no action. Thus, we try an innocent seek to the current position + // to see if Seek is really supported. + pos1, err := sr.Seek(0, io.SeekCurrent) + if pos1 >= 0 && err == nil { + // Seek seems supported, so perform the real Seek. + pos2, err := sr.Seek(n-1, io.SeekCurrent) + if pos2 < 0 || err != nil { + return err + } + seekSkipped = pos2 - pos1 + } + } + + copySkipped, err := io.CopyN(ioutil.Discard, r, n-seekSkipped) + if err == io.EOF && seekSkipped+copySkipped < n { + err = io.ErrUnexpectedEOF + } + return err } diff --git a/src/archive/tar/reader_test.go b/src/archive/tar/reader_test.go index 338686836b6..a6832d33b1b 100644 --- a/src/archive/tar/reader_test.go +++ b/src/archive/tar/reader_test.go @@ -7,12 +7,15 @@ package tar import ( "bytes" "crypto/md5" + "errors" "fmt" "io" "io/ioutil" "math" "os" + "path" "reflect" + "strconv" "strings" "testing" "time" @@ -36,6 +39,7 @@ func TestReader(t *testing.T) { Typeflag: '0', Uname: "dsymonds", Gname: "eng", + Format: FormatGNU, }, { Name: "small2.txt", Mode: 0640, @@ -46,6 +50,7 @@ func TestReader(t *testing.T) { Typeflag: '0', Uname: "dsymonds", Gname: "eng", + Format: FormatGNU, }}, chksums: []string{ "e38b27eaccb4391bdec553a7f3ae6b2f", @@ -66,6 +71,7 @@ func TestReader(t *testing.T) { Gname: "david", Devmajor: 0, Devminor: 0, + Format: FormatGNU, }, { Name: "sparse-posix-0.0", Mode: 420, @@ -79,6 +85,12 @@ func TestReader(t *testing.T) { Gname: "david", Devmajor: 0, Devminor: 0, + PAXRecords: map[string]string{ + "GNU.sparse.size": "200", + 
"GNU.sparse.numblocks": "95", + "GNU.sparse.map": "1,1,3,1,5,1,7,1,9,1,11,1,13,1,15,1,17,1,19,1,21,1,23,1,25,1,27,1,29,1,31,1,33,1,35,1,37,1,39,1,41,1,43,1,45,1,47,1,49,1,51,1,53,1,55,1,57,1,59,1,61,1,63,1,65,1,67,1,69,1,71,1,73,1,75,1,77,1,79,1,81,1,83,1,85,1,87,1,89,1,91,1,93,1,95,1,97,1,99,1,101,1,103,1,105,1,107,1,109,1,111,1,113,1,115,1,117,1,119,1,121,1,123,1,125,1,127,1,129,1,131,1,133,1,135,1,137,1,139,1,141,1,143,1,145,1,147,1,149,1,151,1,153,1,155,1,157,1,159,1,161,1,163,1,165,1,167,1,169,1,171,1,173,1,175,1,177,1,179,1,181,1,183,1,185,1,187,1,189,1", + }, + Format: FormatPAX, }, { Name: "sparse-posix-0.1", Mode: 420, @@ -92,6 +104,13 @@ func TestReader(t *testing.T) { Gname: "david", Devmajor: 0, Devminor: 0, + PAXRecords: map[string]string{ + "GNU.sparse.size": "200", + "GNU.sparse.numblocks": "95", + "GNU.sparse.map": "1,1,3,1,5,1,7,1,9,1,11,1,13,1,15,1,17,1,19,1,21,1,23,1,25,1,27,1,29,1,31,1,33,1,35,1,37,1,39,1,41,1,43,1,45,1,47,1,49,1,51,1,53,1,55,1,57,1,59,1,61,1,63,1,65,1,67,1,69,1,71,1,73,1,75,1,77,1,79,1,81,1,83,1,85,1,87,1,89,1,91,1,93,1,95,1,97,1,99,1,101,1,103,1,105,1,107,1,109,1,111,1,113,1,115,1,117,1,119,1,121,1,123,1,125,1,127,1,129,1,131,1,133,1,135,1,137,1,139,1,141,1,143,1,145,1,147,1,149,1,151,1,153,1,155,1,157,1,159,1,161,1,163,1,165,1,167,1,169,1,171,1,173,1,175,1,177,1,179,1,181,1,183,1,185,1,187,1,189,1", + "GNU.sparse.name": "sparse-posix-0.1", + }, + Format: FormatPAX, }, { Name: "sparse-posix-1.0", Mode: 420, @@ -105,6 +124,13 @@ func TestReader(t *testing.T) { Gname: "david", Devmajor: 0, Devminor: 0, + PAXRecords: map[string]string{ + "GNU.sparse.major": "1", + "GNU.sparse.minor": "0", + "GNU.sparse.realsize": "200", + "GNU.sparse.name": "sparse-posix-1.0", + }, + Format: FormatPAX, }, { Name: "end", Mode: 420, @@ -118,6 +144,7 @@ func TestReader(t *testing.T) { Gname: "david", Devmajor: 0, Devminor: 0, + Format: FormatGNU, }}, chksums: []string{ "6f53234398c2449fe67c1812d993012f", @@ -186,6 +213,13 @@ func TestReader(t 
*testing.T) { ChangeTime: time.Unix(1350244992, 23960108), AccessTime: time.Unix(1350244992, 23960108), Typeflag: TypeReg, + PAXRecords: map[string]string{ + "path": "a/123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100", + "mtime": "1350244992.023960108", + "atime": "1350244992.023960108", + "ctime": "1350244992.023960108", + }, + Format: FormatPAX, }, { Name: "a/b", Mode: 0777, @@ -199,6 +233,13 @@ func TestReader(t *testing.T) { AccessTime: time.Unix(1350266320, 910238425), Typeflag: TypeSymlink, Linkname: "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100", + PAXRecords: map[string]string{ + "linkpath": "123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100", + "mtime": "1350266320.910238425", + "atime": "1350266320.910238425", + "ctime": "1350266320.910238425", + }, + Format: FormatPAX, }}, }, { file: "testdata/pax-bad-hdr-file.tar", @@ -218,10 +259,63 @@ func TestReader(t *testing.T) { Typeflag: '0', Uname: "joetsai", Gname: "eng", + PAXRecords: map[string]string{ + "size": "000000000000000000000999", + }, + Format: FormatPAX, }}, chksums: []string{ "0afb597b283fe61b5d4879669a350556", }, + }, { + file: "testdata/pax-records.tar", + headers: []*Header{{ + Typeflag: TypeReg, + Name: "file", + Uname: strings.Repeat("long", 10), + ModTime: time.Unix(0, 0), + PAXRecords: map[string]string{ + "GOLANG.pkg": "tar", + "comment": "Hello, 世界", + "uname": strings.Repeat("long", 10), + }, + Format: FormatPAX, + }}, + }, { + file: "testdata/pax-global-records.tar", + headers: []*Header{{ + Typeflag: TypeXGlobalHeader, + Name: "global1", + PAXRecords: 
map[string]string{"path": "global1", "mtime": "1500000000.0"}, + Format: FormatPAX, + }, { + Typeflag: TypeReg, + Name: "file1", + ModTime: time.Unix(0, 0), + Format: FormatUSTAR, + }, { + Typeflag: TypeReg, + Name: "file2", + PAXRecords: map[string]string{"path": "file2"}, + ModTime: time.Unix(0, 0), + Format: FormatPAX, + }, { + Typeflag: TypeXGlobalHeader, + Name: "GlobalHead.0.0", + PAXRecords: map[string]string{"path": ""}, + Format: FormatPAX, + }, { + Typeflag: TypeReg, + Name: "file3", + ModTime: time.Unix(0, 0), + Format: FormatUSTAR, + }, { + Typeflag: TypeReg, + Name: "file4", + ModTime: time.Unix(1400000000, 0), + PAXRecords: map[string]string{"mtime": "1400000000"}, + Format: FormatPAX, + }}, }, { file: "testdata/nil-uid.tar", // golang.org/issue/5290 headers: []*Header{{ @@ -237,6 +331,7 @@ func TestReader(t *testing.T) { Gname: "eyefi", Devmajor: 0, Devminor: 0, + Format: FormatGNU, }}, }, { file: "testdata/xattrs.tar", @@ -258,6 +353,15 @@ func TestReader(t *testing.T) { // Interestingly, selinux encodes the terminating null inside the xattr "security.selinux": "unconfined_u:object_r:default_t:s0\x00", }, + PAXRecords: map[string]string{ + "mtime": "1386065770.44825232", + "atime": "1389782991.41987522", + "ctime": "1389782956.794414986", + "SCHILY.xattr.user.key": "value", + "SCHILY.xattr.user.key2": "value2", + "SCHILY.xattr.security.selinux": "unconfined_u:object_r:default_t:s0\x00", + }, + Format: FormatPAX, }, { Name: "small2.txt", Mode: 0644, @@ -273,6 +377,13 @@ func TestReader(t *testing.T) { Xattrs: map[string]string{ "security.selinux": "unconfined_u:object_r:default_t:s0\x00", }, + PAXRecords: map[string]string{ + "mtime": "1386065770.449252304", + "atime": "1389782991.41987522", + "ctime": "1386065770.449252304", + "SCHILY.xattr.security.selinux": "unconfined_u:object_r:default_t:s0\x00", + }, + Format: FormatPAX, }}, }, { // Matches the behavior of GNU, BSD, and STAR tar utilities. 
@@ -282,6 +393,7 @@ func TestReader(t *testing.T) { Linkname: "GNU4/GNU4/long-linkpath-name", ModTime: time.Unix(0, 0), Typeflag: '2', + Format: FormatGNU, }}, }, { // GNU tar file with atime and ctime fields set. @@ -300,6 +412,7 @@ func TestReader(t *testing.T) { Gname: "dsnet", AccessTime: time.Unix(1441974501, 0), ChangeTime: time.Unix(1441973436, 0), + Format: FormatGNU, }, { Name: "test2/foo", Mode: 33188, @@ -312,6 +425,7 @@ func TestReader(t *testing.T) { Gname: "dsnet", AccessTime: time.Unix(1441974501, 0), ChangeTime: time.Unix(1441973436, 0), + Format: FormatGNU, }, { Name: "test2/sparse", Mode: 33188, @@ -324,6 +438,7 @@ func TestReader(t *testing.T) { Gname: "dsnet", AccessTime: time.Unix(1441991948, 0), ChangeTime: time.Unix(1441973436, 0), + Format: FormatGNU, }}, }, { // Matches the behavior of GNU and BSD tar utilities. @@ -333,7 +448,75 @@ func TestReader(t *testing.T) { Linkname: "PAX4/PAX4/long-linkpath-name", ModTime: time.Unix(0, 0), Typeflag: '2', + PAXRecords: map[string]string{ + "linkpath": "PAX4/PAX4/long-linkpath-name", + }, + Format: FormatPAX, }}, + }, { + // Both BSD and GNU tar truncate long names at first NUL even + // if there is data following that NUL character. + // This is reasonable as GNU long names are C-strings. + file: "testdata/gnu-long-nul.tar", + headers: []*Header{{ + Name: "0123456789", + Mode: 0644, + Uid: 1000, + Gid: 1000, + ModTime: time.Unix(1486082191, 0), + Typeflag: '0', + Uname: "rawr", + Gname: "dsnet", + Format: FormatGNU, + }}, + }, { + // This archive was generated by Writer but is readable by both + // GNU and BSD tar utilities. + // The archive generated by GNU is nearly byte-for-byte identical + // to the Go version except the Go version sets a negative Devminor + // just to force the GNU format. 
+ file: "testdata/gnu-utf8.tar", + headers: []*Header{{ + Name: "☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹", + Mode: 0644, + Uid: 1000, Gid: 1000, + ModTime: time.Unix(0, 0), + Typeflag: '0', + Uname: "☺", + Gname: "⚹", + Format: FormatGNU, + }}, + }, { + // This archive was generated by Writer but is readable by both + // GNU and BSD tar utilities. + // The archive generated by GNU is nearly byte-for-byte identical + // to the Go version except the Go version sets a negative Devminor + // just to force the GNU format. + file: "testdata/gnu-not-utf8.tar", + headers: []*Header{{ + Name: "hi\x80\x81\x82\x83bye", + Mode: 0644, + Uid: 1000, + Gid: 1000, + ModTime: time.Unix(0, 0), + Typeflag: '0', + Uname: "rawr", + Gname: "dsnet", + Format: FormatGNU, + }}, + }, { + // BSD tar v3.1.2 and GNU tar v1.27.1 both rejects PAX records + // with NULs in the key. + file: "testdata/pax-nul-xattrs.tar", + err: ErrHeader, + }, { + // BSD tar v3.1.2 rejects a PAX path with NUL in the value, while + // GNU tar v1.27.1 simply truncates at first NUL. + // We emulate the behavior of BSD since it is strange doing NUL + // truncations since PAX records are length-prefix strings instead + // of NUL-terminated C-strings. + file: "testdata/pax-nul-path.tar", + err: ErrHeader, }, { file: "testdata/neg-size.tar", err: ErrHeader, @@ -346,483 +529,214 @@ func TestReader(t *testing.T) { }, { file: "testdata/issue12435.tar", err: ErrHeader, + }, { + // Ensure that we can read back the original Header as written with + // a buggy pre-Go1.8 tar.Writer. + file: "testdata/invalid-go17.tar", + headers: []*Header{{ + Name: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/foo", + Uid: 010000000, + ModTime: time.Unix(0, 0), + }}, + }, { + // USTAR archive with a regular entry with non-zero device numbers. 
+ file: "testdata/ustar-file-devs.tar", + headers: []*Header{{ + Name: "file", + Mode: 0644, + Typeflag: '0', + ModTime: time.Unix(0, 0), + Devmajor: 1, + Devminor: 1, + Format: FormatUSTAR, + }}, + }, { + // Generated by Go, works on BSD tar v3.1.2 and GNU tar v.1.27.1. + file: "testdata/gnu-nil-sparse-data.tar", + headers: []*Header{{ + Name: "sparse.db", + Typeflag: TypeGNUSparse, + Size: 1000, + ModTime: time.Unix(0, 0), + Format: FormatGNU, + }}, + }, { + // Generated by Go, works on BSD tar v3.1.2 and GNU tar v.1.27.1. + file: "testdata/gnu-nil-sparse-hole.tar", + headers: []*Header{{ + Name: "sparse.db", + Typeflag: TypeGNUSparse, + Size: 1000, + ModTime: time.Unix(0, 0), + Format: FormatGNU, + }}, + }, { + // Generated by Go, works on BSD tar v3.1.2 and GNU tar v.1.27.1. + file: "testdata/pax-nil-sparse-data.tar", + headers: []*Header{{ + Name: "sparse.db", + Typeflag: TypeReg, + Size: 1000, + ModTime: time.Unix(0, 0), + PAXRecords: map[string]string{ + "size": "1512", + "GNU.sparse.major": "1", + "GNU.sparse.minor": "0", + "GNU.sparse.realsize": "1000", + "GNU.sparse.name": "sparse.db", + }, + Format: FormatPAX, + }}, + }, { + // Generated by Go, works on BSD tar v3.1.2 and GNU tar v.1.27.1. 
+ file: "testdata/pax-nil-sparse-hole.tar", + headers: []*Header{{ + Name: "sparse.db", + Typeflag: TypeReg, + Size: 1000, + ModTime: time.Unix(0, 0), + PAXRecords: map[string]string{ + "size": "512", + "GNU.sparse.major": "1", + "GNU.sparse.minor": "0", + "GNU.sparse.realsize": "1000", + "GNU.sparse.name": "sparse.db", + }, + Format: FormatPAX, + }}, + }, { + file: "testdata/trailing-slash.tar", + headers: []*Header{{ + Typeflag: TypeDir, + Name: strings.Repeat("123456789/", 30), + ModTime: time.Unix(0, 0), + PAXRecords: map[string]string{ + "path": strings.Repeat("123456789/", 30), + }, + Format: FormatPAX, + }}, }} - for i, v := range vectors { - f, err := os.Open(v.file) - if err != nil { - t.Errorf("file %s, test %d: unexpected error: %v", v.file, i, err) - continue - } - defer f.Close() - - // Capture all headers and checksums. - var ( - tr = NewReader(f) - hdrs []*Header - chksums []string - rdbuf = make([]byte, 8) - ) - for { - var hdr *Header - hdr, err = tr.Next() + for _, v := range vectors { + t.Run(path.Base(v.file), func(t *testing.T) { + f, err := os.Open(v.file) if err != nil { - if err == io.EOF { - err = nil // Expected error + t.Fatalf("unexpected error: %v", err) + } + defer f.Close() + + // Capture all headers and checksums. 
+ var ( + tr = NewReader(f) + hdrs []*Header + chksums []string + rdbuf = make([]byte, 8) + ) + for { + var hdr *Header + hdr, err = tr.Next() + if err != nil { + if err == io.EOF { + err = nil // Expected error + } + break } - break - } - hdrs = append(hdrs, hdr) + hdrs = append(hdrs, hdr) - if v.chksums == nil { - continue + if v.chksums == nil { + continue + } + h := md5.New() + _, err = io.CopyBuffer(h, tr, rdbuf) // Effectively an incremental read + if err != nil { + break + } + chksums = append(chksums, fmt.Sprintf("%x", h.Sum(nil))) } - h := md5.New() - _, err = io.CopyBuffer(h, tr, rdbuf) // Effectively an incremental read - if err != nil { - break - } - chksums = append(chksums, fmt.Sprintf("%x", h.Sum(nil))) - } - for j, hdr := range hdrs { - if j >= len(v.headers) { - t.Errorf("file %s, test %d, entry %d: unexpected header:\ngot %+v", - v.file, i, j, *hdr) - continue + for i, hdr := range hdrs { + if i >= len(v.headers) { + t.Fatalf("entry %d: unexpected header:\ngot %+v", i, *hdr) + continue + } + if !reflect.DeepEqual(*hdr, *v.headers[i]) { + t.Fatalf("entry %d: incorrect header:\ngot %+v\nwant %+v", i, *hdr, *v.headers[i]) + } } - if !reflect.DeepEqual(*hdr, *v.headers[j]) { - t.Errorf("file %s, test %d, entry %d: incorrect header:\ngot %+v\nwant %+v", - v.file, i, j, *hdr, *v.headers[j]) + if len(hdrs) != len(v.headers) { + t.Fatalf("got %d headers, want %d headers", len(hdrs), len(v.headers)) } - } - if len(hdrs) != len(v.headers) { - t.Errorf("file %s, test %d: got %d headers, want %d headers", - v.file, i, len(hdrs), len(v.headers)) - } - for j, sum := range chksums { - if j >= len(v.chksums) { - t.Errorf("file %s, test %d, entry %d: unexpected sum: got %s", - v.file, i, j, sum) - continue + for i, sum := range chksums { + if i >= len(v.chksums) { + t.Fatalf("entry %d: unexpected sum: got %s", i, sum) + continue + } + if sum != v.chksums[i] { + t.Fatalf("entry %d: incorrect checksum: got %s, want %s", i, sum, v.chksums[i]) + } } - if sum != 
v.chksums[j] { - t.Errorf("file %s, test %d, entry %d: incorrect checksum: got %s, want %s", - v.file, i, j, sum, v.chksums[j]) - } - } - if err != v.err { - t.Errorf("file %s, test %d: unexpected error: got %v, want %v", - v.file, i, err, v.err) - } - f.Close() + if err != v.err { + t.Fatalf("unexpected error: got %v, want %v", err, v.err) + } + f.Close() + }) } } func TestPartialRead(t *testing.T) { - f, err := os.Open("testdata/gnu.tar") - if err != nil { - t.Fatalf("Unexpected error: %v", err) + type testCase struct { + cnt int // Number of bytes to read + output string // Expected value of string read } - defer f.Close() - - tr := NewReader(f) - - // Read the first four bytes; Next() should skip the last byte. - hdr, err := tr.Next() - if err != nil || hdr == nil { - t.Fatalf("Didn't get first file: %v", err) - } - buf := make([]byte, 4) - if _, err := io.ReadFull(tr, buf); err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if expected := []byte("Kilt"); !bytes.Equal(buf, expected) { - t.Errorf("Contents = %v, want %v", buf, expected) - } - - // Second file - hdr, err = tr.Next() - if err != nil || hdr == nil { - t.Fatalf("Didn't get second file: %v", err) - } - buf = make([]byte, 6) - if _, err := io.ReadFull(tr, buf); err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if expected := []byte("Google"); !bytes.Equal(buf, expected) { - t.Errorf("Contents = %v, want %v", buf, expected) - } -} - -func TestSparseFileReader(t *testing.T) { vectors := []struct { - realSize int64 // Real size of the output file - sparseMap []sparseEntry // Input sparse map - sparseData string // Input compact data - expected string // Expected output data - err error // Expected error outcome + file string + cases []testCase }{{ - realSize: 8, - sparseMap: []sparseEntry{ - {offset: 0, numBytes: 2}, - {offset: 5, numBytes: 3}, + file: "testdata/gnu.tar", + cases: []testCase{ + {4, "Kilt"}, + {6, "Google"}, }, - sparseData: "abcde", - expected: "ab\x00\x00\x00cde", }, { 
- realSize: 10, - sparseMap: []sparseEntry{ - {offset: 0, numBytes: 2}, - {offset: 5, numBytes: 3}, + file: "testdata/sparse-formats.tar", + cases: []testCase{ + {2, "\x00G"}, + {4, "\x00G\x00o"}, + {6, "\x00G\x00o\x00G"}, + {8, "\x00G\x00o\x00G\x00o"}, + {4, "end\n"}, }, - sparseData: "abcde", - expected: "ab\x00\x00\x00cde\x00\x00", - }, { - realSize: 8, - sparseMap: []sparseEntry{ - {offset: 1, numBytes: 3}, - {offset: 6, numBytes: 2}, - }, - sparseData: "abcde", - expected: "\x00abc\x00\x00de", - }, { - realSize: 8, - sparseMap: []sparseEntry{ - {offset: 1, numBytes: 3}, - {offset: 6, numBytes: 0}, - {offset: 6, numBytes: 0}, - {offset: 6, numBytes: 2}, - }, - sparseData: "abcde", - expected: "\x00abc\x00\x00de", - }, { - realSize: 10, - sparseMap: []sparseEntry{ - {offset: 1, numBytes: 3}, - {offset: 6, numBytes: 2}, - }, - sparseData: "abcde", - expected: "\x00abc\x00\x00de\x00\x00", - }, { - realSize: 10, - sparseMap: []sparseEntry{ - {offset: 1, numBytes: 3}, - {offset: 6, numBytes: 2}, - {offset: 8, numBytes: 0}, - {offset: 8, numBytes: 0}, - {offset: 8, numBytes: 0}, - {offset: 8, numBytes: 0}, - }, - sparseData: "abcde", - expected: "\x00abc\x00\x00de\x00\x00", - }, { - realSize: 2, - sparseMap: []sparseEntry{}, - sparseData: "", - expected: "\x00\x00", - }, { - realSize: -2, - sparseMap: []sparseEntry{}, - err: ErrHeader, - }, { - realSize: -10, - sparseMap: []sparseEntry{ - {offset: 1, numBytes: 3}, - {offset: 6, numBytes: 2}, - }, - sparseData: "abcde", - err: ErrHeader, - }, { - realSize: 10, - sparseMap: []sparseEntry{ - {offset: 1, numBytes: 3}, - {offset: 6, numBytes: 5}, - }, - sparseData: "abcde", - err: ErrHeader, - }, { - realSize: 35, - sparseMap: []sparseEntry{ - {offset: 1, numBytes: 3}, - {offset: 6, numBytes: 5}, - }, - sparseData: "abcde", - err: io.ErrUnexpectedEOF, - }, { - realSize: 35, - sparseMap: []sparseEntry{ - {offset: 1, numBytes: 3}, - {offset: 6, numBytes: -5}, - }, - sparseData: "abcde", - err: ErrHeader, - }, { - realSize: 
35, - sparseMap: []sparseEntry{ - {offset: math.MaxInt64, numBytes: 3}, - {offset: 6, numBytes: -5}, - }, - sparseData: "abcde", - err: ErrHeader, - }, { - realSize: 10, - sparseMap: []sparseEntry{ - {offset: 1, numBytes: 3}, - {offset: 2, numBytes: 2}, - }, - sparseData: "abcde", - err: ErrHeader, }} - for i, v := range vectors { - r := bytes.NewReader([]byte(v.sparseData)) - rfr := ®FileReader{r: r, nb: int64(len(v.sparseData))} + for _, v := range vectors { + t.Run(path.Base(v.file), func(t *testing.T) { + f, err := os.Open(v.file) + if err != nil { + t.Fatalf("Open() error: %v", err) + } + defer f.Close() - var ( - sfr *sparseFileReader - err error - buf []byte - ) + tr := NewReader(f) + for i, tc := range v.cases { + hdr, err := tr.Next() + if err != nil || hdr == nil { + t.Fatalf("entry %d, Next(): got %v, want %v", i, err, nil) + } + buf := make([]byte, tc.cnt) + if _, err := io.ReadFull(tr, buf); err != nil { + t.Fatalf("entry %d, ReadFull(): got %v, want %v", i, err, nil) + } + if string(buf) != tc.output { + t.Fatalf("entry %d, ReadFull(): got %q, want %q", i, string(buf), tc.output) + } + } - sfr, err = newSparseFileReader(rfr, v.sparseMap, v.realSize) - if err != nil { - goto fail - } - if sfr.numBytes() != int64(len(v.sparseData)) { - t.Errorf("test %d, numBytes() before reading: got %d, want %d", i, sfr.numBytes(), len(v.sparseData)) - } - buf, err = ioutil.ReadAll(sfr) - if err != nil { - goto fail - } - if string(buf) != v.expected { - t.Errorf("test %d, ReadAll(): got %q, want %q", i, string(buf), v.expected) - } - if sfr.numBytes() != 0 { - t.Errorf("test %d, numBytes() after reading: got %d, want %d", i, sfr.numBytes(), 0) - } - - fail: - if err != v.err { - t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err) - } - } -} - -func TestReadOldGNUSparseMap(t *testing.T) { - const ( - t00 = "00000000000\x0000000000000\x00" - t11 = "00000000001\x0000000000001\x00" - t12 = "00000000001\x0000000000002\x00" - t21 = 
"00000000002\x0000000000001\x00" - ) - - mkBlk := func(size, sp0, sp1, sp2, sp3, ext string, format int) *block { - var blk block - copy(blk.GNU().RealSize(), size) - copy(blk.GNU().Sparse().Entry(0), sp0) - copy(blk.GNU().Sparse().Entry(1), sp1) - copy(blk.GNU().Sparse().Entry(2), sp2) - copy(blk.GNU().Sparse().Entry(3), sp3) - copy(blk.GNU().Sparse().IsExtended(), ext) - if format != formatUnknown { - blk.SetFormat(format) - } - return &blk - } - - vectors := []struct { - data string // Input data - rawHdr *block // Input raw header - want []sparseEntry // Expected sparse entries to be outputted - err error // Expected error to be returned - }{ - {"", mkBlk("", "", "", "", "", "", formatUnknown), nil, ErrHeader}, - {"", mkBlk("1234", "fewa", "", "", "", "", formatGNU), nil, ErrHeader}, - {"", mkBlk("0031", "", "", "", "", "", formatGNU), nil, nil}, - {"", mkBlk("1234", t00, t11, "", "", "", formatGNU), - []sparseEntry{{0, 0}, {1, 1}}, nil}, - {"", mkBlk("1234", t11, t12, t21, t11, "", formatGNU), - []sparseEntry{{1, 1}, {1, 2}, {2, 1}, {1, 1}}, nil}, - {"", mkBlk("1234", t11, t12, t21, t11, "\x80", formatGNU), - []sparseEntry{}, io.ErrUnexpectedEOF}, - {t11 + t11, - mkBlk("1234", t11, t12, t21, t11, "\x80", formatGNU), - []sparseEntry{}, io.ErrUnexpectedEOF}, - {t11 + t21 + strings.Repeat("\x00", 512), - mkBlk("1234", t11, t12, t21, t11, "\x80", formatGNU), - []sparseEntry{{1, 1}, {1, 2}, {2, 1}, {1, 1}, {1, 1}, {2, 1}}, nil}, - } - - for i, v := range vectors { - tr := Reader{r: strings.NewReader(v.data)} - hdr := new(Header) - got, err := tr.readOldGNUSparseMap(hdr, v.rawHdr) - if !reflect.DeepEqual(got, v.want) && !(len(got) == 0 && len(v.want) == 0) { - t.Errorf("test %d, readOldGNUSparseMap(...): got %v, want %v", i, got, v.want) - } - if err != v.err { - t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err) - } - } -} - -func TestReadGNUSparseMap0x1(t *testing.T) { - const ( - maxUint = ^uint(0) - maxInt = int(maxUint >> 1) - ) - var ( - 
big1 = fmt.Sprintf("%d", int64(maxInt)) - big2 = fmt.Sprintf("%d", (int64(maxInt)/2)+1) - big3 = fmt.Sprintf("%d", (int64(maxInt) / 3)) - ) - - vectors := []struct { - extHdrs map[string]string // Input data - sparseMap []sparseEntry // Expected sparse entries to be outputted - err error // Expected errors that may be raised - }{{ - extHdrs: map[string]string{paxGNUSparseNumBlocks: "-4"}, - err: ErrHeader, - }, { - extHdrs: map[string]string{paxGNUSparseNumBlocks: "fee "}, - err: ErrHeader, - }, { - extHdrs: map[string]string{ - paxGNUSparseNumBlocks: big1, - paxGNUSparseMap: "0,5,10,5,20,5,30,5", - }, - err: ErrHeader, - }, { - extHdrs: map[string]string{ - paxGNUSparseNumBlocks: big2, - paxGNUSparseMap: "0,5,10,5,20,5,30,5", - }, - err: ErrHeader, - }, { - extHdrs: map[string]string{ - paxGNUSparseNumBlocks: big3, - paxGNUSparseMap: "0,5,10,5,20,5,30,5", - }, - err: ErrHeader, - }, { - extHdrs: map[string]string{ - paxGNUSparseNumBlocks: "4", - paxGNUSparseMap: "0.5,5,10,5,20,5,30,5", - }, - err: ErrHeader, - }, { - extHdrs: map[string]string{ - paxGNUSparseNumBlocks: "4", - paxGNUSparseMap: "0,5.5,10,5,20,5,30,5", - }, - err: ErrHeader, - }, { - extHdrs: map[string]string{ - paxGNUSparseNumBlocks: "4", - paxGNUSparseMap: "0,fewafewa.5,fewafw,5,20,5,30,5", - }, - err: ErrHeader, - }, { - extHdrs: map[string]string{ - paxGNUSparseNumBlocks: "4", - paxGNUSparseMap: "0,5,10,5,20,5,30,5", - }, - sparseMap: []sparseEntry{{0, 5}, {10, 5}, {20, 5}, {30, 5}}, - }} - - for i, v := range vectors { - sp, err := readGNUSparseMap0x1(v.extHdrs) - if !reflect.DeepEqual(sp, v.sparseMap) && !(len(sp) == 0 && len(v.sparseMap) == 0) { - t.Errorf("test %d, readGNUSparseMap0x1(...): got %v, want %v", i, sp, v.sparseMap) - } - if err != v.err { - t.Errorf("test %d, unexpected error: got %v, want %v", i, err, v.err) - } - } -} - -func TestReadGNUSparseMap1x0(t *testing.T) { - sp := []sparseEntry{{1, 2}, {3, 4}} - for i := 0; i < 98; i++ { - sp = append(sp, sparseEntry{54321, 12345}) - 
} - - vectors := []struct { - input string // Input data - sparseMap []sparseEntry // Expected sparse entries to be outputted - cnt int // Expected number of bytes read - err error // Expected errors that may be raised - }{{ - input: "", - cnt: 0, - err: io.ErrUnexpectedEOF, - }, { - input: "ab", - cnt: 2, - err: io.ErrUnexpectedEOF, - }, { - input: strings.Repeat("\x00", 512), - cnt: 512, - err: io.ErrUnexpectedEOF, - }, { - input: strings.Repeat("\x00", 511) + "\n", - cnt: 512, - err: ErrHeader, - }, { - input: strings.Repeat("\n", 512), - cnt: 512, - err: ErrHeader, - }, { - input: "0\n" + strings.Repeat("\x00", 510) + strings.Repeat("a", 512), - sparseMap: []sparseEntry{}, - cnt: 512, - }, { - input: strings.Repeat("0", 512) + "0\n" + strings.Repeat("\x00", 510), - sparseMap: []sparseEntry{}, - cnt: 1024, - }, { - input: strings.Repeat("0", 1024) + "1\n2\n3\n" + strings.Repeat("\x00", 506), - sparseMap: []sparseEntry{{2, 3}}, - cnt: 1536, - }, { - input: strings.Repeat("0", 1024) + "1\n2\n\n" + strings.Repeat("\x00", 509), - cnt: 1536, - err: ErrHeader, - }, { - input: strings.Repeat("0", 1024) + "1\n2\n" + strings.Repeat("\x00", 508), - cnt: 1536, - err: io.ErrUnexpectedEOF, - }, { - input: "-1\n2\n\n" + strings.Repeat("\x00", 506), - cnt: 512, - err: ErrHeader, - }, { - input: "1\nk\n2\n" + strings.Repeat("\x00", 506), - cnt: 512, - err: ErrHeader, - }, { - input: "100\n1\n2\n3\n4\n" + strings.Repeat("54321\n0000000000000012345\n", 98) + strings.Repeat("\x00", 512), - cnt: 2560, - sparseMap: sp, - }} - - for i, v := range vectors { - r := strings.NewReader(v.input) - sp, err := readGNUSparseMap1x0(r) - if !reflect.DeepEqual(sp, v.sparseMap) && !(len(sp) == 0 && len(v.sparseMap) == 0) { - t.Errorf("test %d, readGNUSparseMap1x0(...): got %v, want %v", i, sp, v.sparseMap) - } - if numBytes := len(v.input) - r.Len(); numBytes != v.cnt { - t.Errorf("test %d, bytes read: got %v, want %v", i, numBytes, v.cnt) - } - if err != v.err { - t.Errorf("test %d, unexpected 
error: got %v, want %v", i, err, v.err) - } + if _, err := tr.Next(); err != io.EOF { + t.Fatalf("Next(): got %v, want EOF", err) + } + }) } } @@ -950,17 +864,17 @@ func TestReadTruncation(t *testing.T) { } cnt++ if s2 == "manual" { - if _, err = io.Copy(ioutil.Discard, tr); err != nil { + if _, err = tr.writeTo(ioutil.Discard); err != nil { break } } } if err != v.err { - t.Errorf("test %d, NewReader(%s(...)) with %s discard: got %v, want %v", + t.Errorf("test %d, NewReader(%s) with %s discard: got %v, want %v", i, s1, s2, err, v.err) } if cnt != v.cnt { - t.Errorf("test %d, NewReader(%s(...)) with %s discard: got %d headers, want %d headers", + t.Errorf("test %d, NewReader(%s) with %s discard: got %d headers, want %d headers", i, s1, s2, cnt, v.cnt) } } @@ -1025,12 +939,18 @@ func TestMergePAX(t *testing.T) { Name: "a/b/c", Uid: 1000, ModTime: time.Unix(1350244992, 23960108), + PAXRecords: map[string]string{ + "path": "a/b/c", + "uid": "1000", + "mtime": "1350244992.023960108", + }, }, ok: true, }, { in: map[string]string{ "gid": "gtgergergersagersgers", }, + ok: false, }, { in: map[string]string{ "missing": "missing", @@ -1038,6 +958,10 @@ func TestMergePAX(t *testing.T) { }, want: &Header{ Xattrs: map[string]string{"key": "value"}, + PAXRecords: map[string]string{ + "missing": "missing", + "SCHILY.xattr.key": "value", + }, }, ok: true, }} @@ -1070,7 +994,7 @@ func TestParsePAX(t *testing.T) { {"13 key1=haha\n13 key2=nana\n13 key3=kaka\n", map[string]string{"key1": "haha", "key2": "nana", "key3": "kaka"}, true}, {"13 key1=val1\n13 key2=val2\n8 key1=\n", - map[string]string{"key2": "val2"}, true}, + map[string]string{"key1": "", "key2": "val2"}, true}, {"22 GNU.sparse.size=10\n26 GNU.sparse.numblocks=2\n" + "23 GNU.sparse.offset=1\n25 GNU.sparse.numbytes=2\n" + "23 GNU.sparse.offset=3\n25 GNU.sparse.numbytes=4\n", @@ -1087,10 +1011,600 @@ func TestParsePAX(t *testing.T) { r := strings.NewReader(v.in) got, err := parsePAX(r) if !reflect.DeepEqual(got, v.want) && 
!(len(got) == 0 && len(v.want) == 0) { - t.Errorf("test %d, parsePAX(...):\ngot %v\nwant %v", i, got, v.want) + t.Errorf("test %d, parsePAX():\ngot %v\nwant %v", i, got, v.want) } if ok := err == nil; ok != v.ok { - t.Errorf("test %d, parsePAX(...): got %v, want %v", i, ok, v.ok) + t.Errorf("test %d, parsePAX(): got %v, want %v", i, ok, v.ok) + } + } +} + +func TestReadOldGNUSparseMap(t *testing.T) { + populateSparseMap := func(sa sparseArray, sps []string) []string { + for i := 0; len(sps) > 0 && i < sa.MaxEntries(); i++ { + copy(sa.Entry(i), sps[0]) + sps = sps[1:] + } + if len(sps) > 0 { + copy(sa.IsExtended(), "\x80") + } + return sps + } + + makeInput := func(format Format, size string, sps ...string) (out []byte) { + // Write the initial GNU header. + var blk block + gnu := blk.GNU() + sparse := gnu.Sparse() + copy(gnu.RealSize(), size) + sps = populateSparseMap(sparse, sps) + if format != FormatUnknown { + blk.SetFormat(format) + } + out = append(out, blk[:]...) + + // Write extended sparse blocks. + for len(sps) > 0 { + var blk block + sps = populateSparseMap(blk.Sparse(), sps) + out = append(out, blk[:]...) 
+ } + return out + } + + makeSparseStrings := func(sp []sparseEntry) (out []string) { + var f formatter + for _, s := range sp { + var b [24]byte + f.formatNumeric(b[:12], s.Offset) + f.formatNumeric(b[12:], s.Length) + out = append(out, string(b[:])) + } + return out + } + + vectors := []struct { + input []byte + wantMap sparseDatas + wantSize int64 + wantErr error + }{{ + input: makeInput(FormatUnknown, ""), + wantErr: ErrHeader, + }, { + input: makeInput(FormatGNU, "1234", "fewa"), + wantSize: 01234, + wantErr: ErrHeader, + }, { + input: makeInput(FormatGNU, "0031"), + wantSize: 031, + }, { + input: makeInput(FormatGNU, "80"), + wantErr: ErrHeader, + }, { + input: makeInput(FormatGNU, "1234", + makeSparseStrings(sparseDatas{{0, 0}, {1, 1}})...), + wantMap: sparseDatas{{0, 0}, {1, 1}}, + wantSize: 01234, + }, { + input: makeInput(FormatGNU, "1234", + append(makeSparseStrings(sparseDatas{{0, 0}, {1, 1}}), []string{"", "blah"}...)...), + wantMap: sparseDatas{{0, 0}, {1, 1}}, + wantSize: 01234, + }, { + input: makeInput(FormatGNU, "3333", + makeSparseStrings(sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}})...), + wantMap: sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}}, + wantSize: 03333, + }, { + input: makeInput(FormatGNU, "", + append(append( + makeSparseStrings(sparseDatas{{0, 1}, {2, 1}}), + []string{"", ""}...), + makeSparseStrings(sparseDatas{{4, 1}, {6, 1}})...)...), + wantMap: sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}}, + }, { + input: makeInput(FormatGNU, "", + makeSparseStrings(sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}, {8, 1}, {10, 1}})...)[:blockSize], + wantErr: io.ErrUnexpectedEOF, + }, { + input: makeInput(FormatGNU, "", + makeSparseStrings(sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}, {8, 1}, {10, 1}})...)[:3*blockSize/2], + wantErr: io.ErrUnexpectedEOF, + }, { + input: makeInput(FormatGNU, "", + makeSparseStrings(sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}, {8, 1}, {10, 1}})...), + wantMap: sparseDatas{{0, 1}, {2, 1}, {4, 1}, {6, 1}, {8, 1}, {10, 1}}, + }, 
{ + input: makeInput(FormatGNU, "", + makeSparseStrings(sparseDatas{{10 << 30, 512}, {20 << 30, 512}})...), + wantMap: sparseDatas{{10 << 30, 512}, {20 << 30, 512}}, + }} + + for i, v := range vectors { + var blk block + var hdr Header + v.input = v.input[copy(blk[:], v.input):] + tr := Reader{r: bytes.NewReader(v.input)} + got, err := tr.readOldGNUSparseMap(&hdr, &blk) + if !equalSparseEntries(got, v.wantMap) { + t.Errorf("test %d, readOldGNUSparseMap(): got %v, want %v", i, got, v.wantMap) + } + if err != v.wantErr { + t.Errorf("test %d, readOldGNUSparseMap() = %v, want %v", i, err, v.wantErr) + } + if hdr.Size != v.wantSize { + t.Errorf("test %d, Header.Size = %d, want %d", i, hdr.Size, v.wantSize) + } + } +} + +func TestReadGNUSparsePAXHeaders(t *testing.T) { + padInput := func(s string) string { + return s + string(zeroBlock[:blockPadding(int64(len(s)))]) + } + + vectors := []struct { + inputData string + inputHdrs map[string]string + wantMap sparseDatas + wantSize int64 + wantName string + wantErr error + }{{ + inputHdrs: nil, + wantErr: nil, + }, { + inputHdrs: map[string]string{ + paxGNUSparseNumBlocks: strconv.FormatInt(math.MaxInt64, 10), + paxGNUSparseMap: "0,1,2,3", + }, + wantErr: ErrHeader, + }, { + inputHdrs: map[string]string{ + paxGNUSparseNumBlocks: "4\x00", + paxGNUSparseMap: "0,1,2,3", + }, + wantErr: ErrHeader, + }, { + inputHdrs: map[string]string{ + paxGNUSparseNumBlocks: "4", + paxGNUSparseMap: "0,1,2,3", + }, + wantErr: ErrHeader, + }, { + inputHdrs: map[string]string{ + paxGNUSparseNumBlocks: "2", + paxGNUSparseMap: "0,1,2,3", + }, + wantMap: sparseDatas{{0, 1}, {2, 3}}, + }, { + inputHdrs: map[string]string{ + paxGNUSparseNumBlocks: "2", + paxGNUSparseMap: "0, 1,2,3", + }, + wantErr: ErrHeader, + }, { + inputHdrs: map[string]string{ + paxGNUSparseNumBlocks: "2", + paxGNUSparseMap: "0,1,02,3", + paxGNUSparseRealSize: "4321", + }, + wantMap: sparseDatas{{0, 1}, {2, 3}}, + wantSize: 4321, + }, { + inputHdrs: map[string]string{ + 
paxGNUSparseNumBlocks: "2", + paxGNUSparseMap: "0,one1,2,3", + }, + wantErr: ErrHeader, + }, { + inputHdrs: map[string]string{ + paxGNUSparseMajor: "0", + paxGNUSparseMinor: "0", + paxGNUSparseNumBlocks: "2", + paxGNUSparseMap: "0,1,2,3", + paxGNUSparseSize: "1234", + paxGNUSparseRealSize: "4321", + paxGNUSparseName: "realname", + }, + wantMap: sparseDatas{{0, 1}, {2, 3}}, + wantSize: 1234, + wantName: "realname", + }, { + inputHdrs: map[string]string{ + paxGNUSparseMajor: "0", + paxGNUSparseMinor: "0", + paxGNUSparseNumBlocks: "1", + paxGNUSparseMap: "10737418240,512", + paxGNUSparseSize: "10737418240", + paxGNUSparseName: "realname", + }, + wantMap: sparseDatas{{10737418240, 512}}, + wantSize: 10737418240, + wantName: "realname", + }, { + inputHdrs: map[string]string{ + paxGNUSparseMajor: "0", + paxGNUSparseMinor: "0", + paxGNUSparseNumBlocks: "0", + paxGNUSparseMap: "", + }, + wantMap: sparseDatas{}, + }, { + inputHdrs: map[string]string{ + paxGNUSparseMajor: "0", + paxGNUSparseMinor: "1", + paxGNUSparseNumBlocks: "4", + paxGNUSparseMap: "0,5,10,5,20,5,30,5", + }, + wantMap: sparseDatas{{0, 5}, {10, 5}, {20, 5}, {30, 5}}, + }, { + inputHdrs: map[string]string{ + paxGNUSparseMajor: "1", + paxGNUSparseMinor: "0", + paxGNUSparseNumBlocks: "4", + paxGNUSparseMap: "0,5,10,5,20,5,30,5", + }, + wantErr: io.ErrUnexpectedEOF, + }, { + inputData: padInput("0\n"), + inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"}, + wantMap: sparseDatas{}, + }, { + inputData: padInput("0\n")[:blockSize-1] + "#", + inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"}, + wantMap: sparseDatas{}, + }, { + inputData: padInput("0"), + inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"}, + wantErr: io.ErrUnexpectedEOF, + }, { + inputData: padInput("ab\n"), + inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"}, + wantErr: ErrHeader, + }, { + inputData: padInput("1\n2\n3\n"), + inputHdrs: 
map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"}, + wantMap: sparseDatas{{2, 3}}, + }, { + inputData: padInput("1\n2\n"), + inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"}, + wantErr: io.ErrUnexpectedEOF, + }, { + inputData: padInput("1\n2\n\n"), + inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"}, + wantErr: ErrHeader, + }, { + inputData: string(zeroBlock[:]) + padInput("0\n"), + inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"}, + wantErr: ErrHeader, + }, { + inputData: strings.Repeat("0", blockSize) + padInput("1\n5\n1\n"), + inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"}, + wantMap: sparseDatas{{5, 1}}, + }, { + inputData: padInput(fmt.Sprintf("%d\n", int64(math.MaxInt64))), + inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"}, + wantErr: ErrHeader, + }, { + inputData: padInput(strings.Repeat("0", 300) + "1\n" + strings.Repeat("0", 1000) + "5\n" + strings.Repeat("0", 800) + "2\n"), + inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"}, + wantMap: sparseDatas{{5, 2}}, + }, { + inputData: padInput("2\n10737418240\n512\n21474836480\n512\n"), + inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"}, + wantMap: sparseDatas{{10737418240, 512}, {21474836480, 512}}, + }, { + inputData: padInput("100\n" + func() string { + var ss []string + for i := 0; i < 100; i++ { + ss = append(ss, fmt.Sprintf("%d\n%d\n", int64(i)<<30, 512)) + } + return strings.Join(ss, "") + }()), + inputHdrs: map[string]string{paxGNUSparseMajor: "1", paxGNUSparseMinor: "0"}, + wantMap: func() (spd sparseDatas) { + for i := 0; i < 100; i++ { + spd = append(spd, sparseEntry{int64(i) << 30, 512}) + } + return spd + }(), + }} + + for i, v := range vectors { + var hdr Header + hdr.PAXRecords = v.inputHdrs + r := strings.NewReader(v.inputData + "#") // Add canary byte + tr := Reader{curr: ®FileReader{r, 
int64(r.Len())}} + got, err := tr.readGNUSparsePAXHeaders(&hdr) + if !equalSparseEntries(got, v.wantMap) { + t.Errorf("test %d, readGNUSparsePAXHeaders(): got %v, want %v", i, got, v.wantMap) + } + if err != v.wantErr { + t.Errorf("test %d, readGNUSparsePAXHeaders() = %v, want %v", i, err, v.wantErr) + } + if hdr.Size != v.wantSize { + t.Errorf("test %d, Header.Size = %d, want %d", i, hdr.Size, v.wantSize) + } + if hdr.Name != v.wantName { + t.Errorf("test %d, Header.Name = %s, want %s", i, hdr.Name, v.wantName) + } + if v.wantErr == nil && r.Len() == 0 { + t.Errorf("test %d, canary byte unexpectedly consumed", i) + } + } +} + +// testNonEmptyReader wraps an io.Reader and ensures that +// Read is never called with an empty buffer. +type testNonEmptyReader struct{ io.Reader } + +func (r testNonEmptyReader) Read(b []byte) (int, error) { + if len(b) == 0 { + return 0, errors.New("unexpected empty Read call") + } + return r.Reader.Read(b) +} + +func TestFileReader(t *testing.T) { + type ( + testRead struct { // Read(cnt) == (wantStr, wantErr) + cnt int + wantStr string + wantErr error + } + testWriteTo struct { // WriteTo(testFile{ops}) == (wantCnt, wantErr) + ops fileOps + wantCnt int64 + wantErr error + } + testRemaining struct { // LogicalRemaining() == wantLCnt, PhysicalRemaining() == wantPCnt + wantLCnt int64 + wantPCnt int64 + } + testFnc interface{} // testRead | testWriteTo | testRemaining + ) + + type ( + makeReg struct { + str string + size int64 + } + makeSparse struct { + makeReg makeReg + spd sparseDatas + size int64 + } + fileMaker interface{} // makeReg | makeSparse + ) + + vectors := []struct { + maker fileMaker + tests []testFnc + }{{ + maker: makeReg{"", 0}, + tests: []testFnc{ + testRemaining{0, 0}, + testRead{0, "", io.EOF}, + testRead{1, "", io.EOF}, + testWriteTo{nil, 0, nil}, + testRemaining{0, 0}, + }, + }, { + maker: makeReg{"", 1}, + tests: []testFnc{ + testRemaining{1, 1}, + testRead{5, "", io.ErrUnexpectedEOF}, + testWriteTo{nil, 0, 
io.ErrUnexpectedEOF}, + testRemaining{1, 1}, + }, + }, { + maker: makeReg{"hello", 5}, + tests: []testFnc{ + testRemaining{5, 5}, + testRead{5, "hello", io.EOF}, + testRemaining{0, 0}, + }, + }, { + maker: makeReg{"hello, world", 50}, + tests: []testFnc{ + testRemaining{50, 50}, + testRead{7, "hello, ", nil}, + testRemaining{43, 43}, + testRead{5, "world", nil}, + testRemaining{38, 38}, + testWriteTo{nil, 0, io.ErrUnexpectedEOF}, + testRead{1, "", io.ErrUnexpectedEOF}, + testRemaining{38, 38}, + }, + }, { + maker: makeReg{"hello, world", 5}, + tests: []testFnc{ + testRemaining{5, 5}, + testRead{0, "", nil}, + testRead{4, "hell", nil}, + testRemaining{1, 1}, + testWriteTo{fileOps{"o"}, 1, nil}, + testRemaining{0, 0}, + testWriteTo{nil, 0, nil}, + testRead{0, "", io.EOF}, + }, + }, { + maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{0, 2}, {5, 3}}, 8}, + tests: []testFnc{ + testRemaining{8, 5}, + testRead{3, "ab\x00", nil}, + testRead{10, "\x00\x00cde", io.EOF}, + testRemaining{0, 0}, + }, + }, { + maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{0, 2}, {5, 3}}, 8}, + tests: []testFnc{ + testRemaining{8, 5}, + testWriteTo{fileOps{"ab", int64(3), "cde"}, 8, nil}, + testRemaining{0, 0}, + }, + }, { + maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{0, 2}, {5, 3}}, 10}, + tests: []testFnc{ + testRemaining{10, 5}, + testRead{100, "ab\x00\x00\x00cde\x00\x00", io.EOF}, + testRemaining{0, 0}, + }, + }, { + maker: makeSparse{makeReg{"abc", 5}, sparseDatas{{0, 2}, {5, 3}}, 10}, + tests: []testFnc{ + testRemaining{10, 5}, + testRead{100, "ab\x00\x00\x00c", io.ErrUnexpectedEOF}, + testRemaining{4, 2}, + }, + }, { + maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 2}}, 8}, + tests: []testFnc{ + testRemaining{8, 5}, + testRead{8, "\x00abc\x00\x00de", io.EOF}, + testRemaining{0, 0}, + }, + }, { + maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 0}, {6, 0}, {6, 2}}, 8}, + tests: []testFnc{ + testRemaining{8, 5}, + testRead{8, 
"\x00abc\x00\x00de", io.EOF}, + testRemaining{0, 0}, + }, + }, { + maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 0}, {6, 0}, {6, 2}}, 8}, + tests: []testFnc{ + testRemaining{8, 5}, + testWriteTo{fileOps{int64(1), "abc", int64(2), "de"}, 8, nil}, + testRemaining{0, 0}, + }, + }, { + maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 2}}, 10}, + tests: []testFnc{ + testRead{100, "\x00abc\x00\x00de\x00\x00", io.EOF}, + }, + }, { + maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 2}}, 10}, + tests: []testFnc{ + testWriteTo{fileOps{int64(1), "abc", int64(2), "de", int64(1), "\x00"}, 10, nil}, + }, + }, { + maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 2}, {8, 0}, {8, 0}, {8, 0}, {8, 0}}, 10}, + tests: []testFnc{ + testRead{100, "\x00abc\x00\x00de\x00\x00", io.EOF}, + }, + }, { + maker: makeSparse{makeReg{"", 0}, sparseDatas{}, 2}, + tests: []testFnc{ + testRead{100, "\x00\x00", io.EOF}, + }, + }, { + maker: makeSparse{makeReg{"", 8}, sparseDatas{{1, 3}, {6, 5}}, 15}, + tests: []testFnc{ + testRead{100, "\x00", io.ErrUnexpectedEOF}, + }, + }, { + maker: makeSparse{makeReg{"ab", 2}, sparseDatas{{1, 3}, {6, 5}}, 15}, + tests: []testFnc{ + testRead{100, "\x00ab", errMissData}, + }, + }, { + maker: makeSparse{makeReg{"ab", 8}, sparseDatas{{1, 3}, {6, 5}}, 15}, + tests: []testFnc{ + testRead{100, "\x00ab", io.ErrUnexpectedEOF}, + }, + }, { + maker: makeSparse{makeReg{"abc", 3}, sparseDatas{{1, 3}, {6, 5}}, 15}, + tests: []testFnc{ + testRead{100, "\x00abc\x00\x00", errMissData}, + }, + }, { + maker: makeSparse{makeReg{"abc", 8}, sparseDatas{{1, 3}, {6, 5}}, 15}, + tests: []testFnc{ + testRead{100, "\x00abc\x00\x00", io.ErrUnexpectedEOF}, + }, + }, { + maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 5}}, 15}, + tests: []testFnc{ + testRead{100, "\x00abc\x00\x00de", errMissData}, + }, + }, { + maker: makeSparse{makeReg{"abcde", 5}, sparseDatas{{1, 3}, {6, 5}}, 15}, + tests: []testFnc{ + 
testWriteTo{fileOps{int64(1), "abc", int64(2), "de"}, 8, errMissData}, + }, + }, { + maker: makeSparse{makeReg{"abcde", 8}, sparseDatas{{1, 3}, {6, 5}}, 15}, + tests: []testFnc{ + testRead{100, "\x00abc\x00\x00de", io.ErrUnexpectedEOF}, + }, + }, { + maker: makeSparse{makeReg{"abcdefghEXTRA", 13}, sparseDatas{{1, 3}, {6, 5}}, 15}, + tests: []testFnc{ + testRemaining{15, 13}, + testRead{100, "\x00abc\x00\x00defgh\x00\x00\x00\x00", errUnrefData}, + testWriteTo{nil, 0, errUnrefData}, + testRemaining{0, 5}, + }, + }, { + maker: makeSparse{makeReg{"abcdefghEXTRA", 13}, sparseDatas{{1, 3}, {6, 5}}, 15}, + tests: []testFnc{ + testRemaining{15, 13}, + testWriteTo{fileOps{int64(1), "abc", int64(2), "defgh", int64(4)}, 15, errUnrefData}, + testRead{100, "", errUnrefData}, + testRemaining{0, 5}, + }, + }} + + for i, v := range vectors { + var fr fileReader + switch maker := v.maker.(type) { + case makeReg: + r := testNonEmptyReader{strings.NewReader(maker.str)} + fr = ®FileReader{r, maker.size} + case makeSparse: + if !validateSparseEntries(maker.spd, maker.size) { + t.Fatalf("invalid sparse map: %v", maker.spd) + } + sph := invertSparseEntries(maker.spd, maker.size) + r := testNonEmptyReader{strings.NewReader(maker.makeReg.str)} + fr = ®FileReader{r, maker.makeReg.size} + fr = &sparseFileReader{fr, sph, 0} + default: + t.Fatalf("test %d, unknown make operation: %T", i, maker) + } + + for j, tf := range v.tests { + switch tf := tf.(type) { + case testRead: + b := make([]byte, tf.cnt) + n, err := fr.Read(b) + if got := string(b[:n]); got != tf.wantStr || err != tf.wantErr { + t.Errorf("test %d.%d, Read(%d):\ngot (%q, %v)\nwant (%q, %v)", i, j, tf.cnt, got, err, tf.wantStr, tf.wantErr) + } + case testWriteTo: + f := &testFile{ops: tf.ops} + got, err := fr.WriteTo(f) + if _, ok := err.(testError); ok { + t.Errorf("test %d.%d, WriteTo(): %v", i, j, err) + } else if got != tf.wantCnt || err != tf.wantErr { + t.Errorf("test %d.%d, WriteTo() = (%d, %v), want (%d, %v)", i, j, got, 
err, tf.wantCnt, tf.wantErr) + } + if len(f.ops) > 0 { + t.Errorf("test %d.%d, expected %d more operations", i, j, len(f.ops)) + } + case testRemaining: + if got := fr.LogicalRemaining(); got != tf.wantLCnt { + t.Errorf("test %d.%d, LogicalRemaining() = %d, want %d", i, j, got, tf.wantLCnt) + } + if got := fr.PhysicalRemaining(); got != tf.wantPCnt { + t.Errorf("test %d.%d, PhysicalRemaining() = %d, want %d", i, j, got, tf.wantPCnt) + } + default: + t.Fatalf("test %d.%d, unknown test operation: %T", i, j, tf) + } } } } diff --git a/src/archive/tar/stat_atim.go b/src/archive/tar/stat_actime1.go similarity index 100% rename from src/archive/tar/stat_atim.go rename to src/archive/tar/stat_actime1.go diff --git a/src/archive/tar/stat_atimespec.go b/src/archive/tar/stat_actime2.go similarity index 100% rename from src/archive/tar/stat_atimespec.go rename to src/archive/tar/stat_actime2.go diff --git a/src/archive/tar/stat_unix.go b/src/archive/tar/stat_unix.go index cb843db4cfd..868105f338e 100644 --- a/src/archive/tar/stat_unix.go +++ b/src/archive/tar/stat_unix.go @@ -8,6 +8,10 @@ package tar import ( "os" + "os/user" + "runtime" + "strconv" + "sync" "syscall" ) @@ -15,6 +19,10 @@ func init() { sysStat = statUnix } +// userMap and groupMap caches UID and GID lookups for performance reasons. +// The downside is that renaming uname or gname by the OS never takes effect. +var userMap, groupMap sync.Map // map[int]string + func statUnix(fi os.FileInfo, h *Header) error { sys, ok := fi.Sys().(*syscall.Stat_t) if !ok { @@ -22,11 +30,67 @@ func statUnix(fi os.FileInfo, h *Header) error { } h.Uid = int(sys.Uid) h.Gid = int(sys.Gid) - // TODO(bradfitz): populate username & group. os/user - // doesn't cache LookupId lookups, and lacks group - // lookup functions. + + // Best effort at populating Uname and Gname. + // The os/user functions may fail for any number of reasons + // (not implemented on that platform, cgo not enabled, etc). 
+ if u, ok := userMap.Load(h.Uid); ok { + h.Uname = u.(string) + } else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil { + h.Uname = u.Username + userMap.Store(h.Uid, h.Uname) + } + if g, ok := groupMap.Load(h.Gid); ok { + h.Gname = g.(string) + } else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil { + h.Gname = g.Name + groupMap.Store(h.Gid, h.Gname) + } + h.AccessTime = statAtime(sys) h.ChangeTime = statCtime(sys) - // TODO(bradfitz): major/minor device numbers? + + // Best effort at populating Devmajor and Devminor. + if h.Typeflag == TypeChar || h.Typeflag == TypeBlock { + dev := uint64(sys.Rdev) // May be int32 or uint32 + switch runtime.GOOS { + case "linux": + // Copied from golang.org/x/sys/unix/dev_linux.go. + major := uint32((dev & 0x00000000000fff00) >> 8) + major |= uint32((dev & 0xfffff00000000000) >> 32) + minor := uint32((dev & 0x00000000000000ff) >> 0) + minor |= uint32((dev & 0x00000ffffff00000) >> 12) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "darwin": + // Copied from golang.org/x/sys/unix/dev_darwin.go. + major := uint32((dev >> 24) & 0xff) + minor := uint32(dev & 0xffffff) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "dragonfly": + // Copied from golang.org/x/sys/unix/dev_dragonfly.go. + major := uint32((dev >> 8) & 0xff) + minor := uint32(dev & 0xffff00ff) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "freebsd": + // Copied from golang.org/x/sys/unix/dev_freebsd.go. + major := uint32((dev >> 8) & 0xff) + minor := uint32(dev & 0xffff00ff) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "netbsd": + // Copied from golang.org/x/sys/unix/dev_netbsd.go. + major := uint32((dev & 0x000fff00) >> 8) + minor := uint32((dev & 0x000000ff) >> 0) + minor |= uint32((dev & 0xfff00000) >> 12) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "openbsd": + // Copied from golang.org/x/sys/unix/dev_openbsd.go. 
+ major := uint32((dev & 0x0000ff00) >> 8) + minor := uint32((dev & 0x000000ff) >> 0) + minor |= uint32((dev & 0xffff0000) >> 8) + h.Devmajor, h.Devminor = int64(major), int64(minor) + default: + // TODO: Implement solaris (see https://golang.org/issue/8106) + } + } return nil } diff --git a/src/archive/tar/strconv.go b/src/archive/tar/strconv.go index bb5b51c02de..d144485a492 100644 --- a/src/archive/tar/strconv.go +++ b/src/archive/tar/strconv.go @@ -12,26 +12,34 @@ import ( "time" ) +// hasNUL reports whether the NUL character exists within s. +func hasNUL(s string) bool { + return strings.IndexByte(s, 0) >= 0 +} + +// isASCII reports whether the input is an ASCII C-style string. func isASCII(s string) bool { for _, c := range s { - if c >= 0x80 { + if c >= 0x80 || c == 0x00 { return false } } return true } +// toASCII converts the input to an ASCII C-style string. +// This a best effort conversion, so invalid characters are dropped. func toASCII(s string) string { if isASCII(s) { return s } - var buf bytes.Buffer + b := make([]byte, 0, len(s)) for _, c := range s { - if c < 0x80 { - buf.WriteByte(byte(c)) + if c < 0x80 && c != 0x00 { + b = append(b, byte(c)) } } - return buf.String() + return string(b) } type parser struct { @@ -45,23 +53,28 @@ type formatter struct { // parseString parses bytes as a NUL-terminated C-style string. // If a NUL byte is not found then the whole slice is returned as a string. func (*parser) parseString(b []byte) string { - n := 0 - for n < len(b) && b[n] != 0 { - n++ + if i := bytes.IndexByte(b, 0); i >= 0 { + return string(b[:i]) } - return string(b[0:n]) + return string(b) } -// Write s into b, terminating it with a NUL if there is room. +// formatString copies s into b, NUL-terminating if possible. 
func (f *formatter) formatString(b []byte, s string) { if len(s) > len(b) { f.err = ErrFieldTooLong - return } - ascii := toASCII(s) - copy(b, ascii) - if len(ascii) < len(b) { - b[len(ascii)] = 0 + copy(b, s) + if len(s) < len(b) { + b[len(s)] = 0 + } + + // Some buggy readers treat regular files with a trailing slash + // in the V7 path field as a directory even though the full path + // recorded elsewhere (e.g., via PAX record) contains no trailing slash. + if len(s) > len(b) && b[len(b)-1] == '/' { + n := len(strings.TrimRight(s[:len(b)], "/")) + b[n] = 0 // Replace trailing slash with NUL terminator } } @@ -73,7 +86,7 @@ func (f *formatter) formatString(b []byte, s string) { // that the first byte can only be either 0x80 or 0xff. Thus, the first byte is // equivalent to the sign bit in two's complement form. func fitsInBase256(n int, x int64) bool { - var binBits = uint(n-1) * 8 + binBits := uint(n-1) * 8 return n >= 9 || (x >= -1<= 0; i-- { b[i] = byte(x) @@ -155,6 +174,11 @@ func (p *parser) parseOctal(b []byte) int64 { } func (f *formatter) formatOctal(b []byte, x int64) { + if !fitsInOctal(len(b), x) { + x = 0 // Last resort, just write zero + f.err = ErrFieldTooLong + } + s := strconv.FormatInt(x, 8) // Add leading zeros, but leave room for a NUL. if n := len(b) - len(s) - 1; n > 0 { @@ -163,6 +187,13 @@ func (f *formatter) formatOctal(b []byte, x int64) { f.formatString(b, s) } +// fitsInOctal reports whether the integer x fits in a field n-bytes long +// using octal encoding with the appropriate NUL terminator. +func fitsInOctal(n int, x int64) bool { + octBits := uint(n-1) * 3 + return x >= 0 && (n >= 22 || x < 1< 0 && ss[0] == '-' { - return time.Unix(secs, -1*int64(nsecs)), nil // Negative correction + return time.Unix(secs, -1*nsecs), nil // Negative correction } - return time.Unix(secs, int64(nsecs)), nil + return time.Unix(secs, nsecs), nil } -// TODO(dsnet): Implement formatPAXTime. 
+// formatPAXTime converts ts into a time of the form %d.%d as described in the +// PAX specification. This function is capable of negative timestamps. +func formatPAXTime(ts time.Time) (s string) { + secs, nsecs := ts.Unix(), ts.Nanosecond() + if nsecs == 0 { + return strconv.FormatInt(secs, 10) + } + + // If seconds is negative, then perform correction. + sign := "" + if secs < 0 { + sign = "-" // Remember sign + secs = -(secs + 1) // Add a second to secs + nsecs = -(nsecs - 1E9) // Take that second away from nsecs + } + return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0") +} // parsePAXRecord parses the input PAX record string into a key-value pair. // If parsing is successful, it will slice off the currently read record and // return the remainder as r. -// -// A PAX record is of the following form: -// "%d %s=%s\n" % (size, key, value) func parsePAXRecord(s string) (k, v, r string, err error) { // The size field ends at the first space. sp := strings.IndexByte(s, ' ') @@ -232,21 +276,51 @@ func parsePAXRecord(s string) (k, v, r string, err error) { if eq == -1 { return "", "", s, ErrHeader } - return rec[:eq], rec[eq+1:], rem, nil + k, v = rec[:eq], rec[eq+1:] + + if !validPAXRecord(k, v) { + return "", "", s, ErrHeader + } + return k, v, rem, nil } // formatPAXRecord formats a single PAX record, prefixing it with the // appropriate length. -func formatPAXRecord(k, v string) string { +func formatPAXRecord(k, v string) (string, error) { + if !validPAXRecord(k, v) { + return "", ErrHeader + } + const padding = 3 // Extra padding for ' ', '=', and '\n' size := len(k) + len(v) + padding size += len(strconv.Itoa(size)) - record := fmt.Sprintf("%d %s=%s\n", size, k, v) + record := strconv.Itoa(size) + " " + k + "=" + v + "\n" // Final adjustment if adding size field increased the record size. 
if len(record) != size { size = len(record) - record = fmt.Sprintf("%d %s=%s\n", size, k, v) + record = strconv.Itoa(size) + " " + k + "=" + v + "\n" + } + return record, nil +} + +// validPAXRecord reports whether the key-value pair is valid where each +// record is formatted as: +// "%d %s=%s\n" % (size, key, value) +// +// Keys and values should be UTF-8, but the number of bad writers out there +// forces us to be a more liberal. +// Thus, we only reject all keys with NUL, and only reject NULs in values +// for the PAX version of the USTAR string fields. +// The key must not contain an '=' character. +func validPAXRecord(k, v string) bool { + if k == "" || strings.IndexByte(k, '=') >= 0 { + return false + } + switch k { + case paxPath, paxLinkpath, paxUname, paxGname: + return !hasNUL(v) + default: + return !hasNUL(k) } - return record } diff --git a/src/archive/tar/strconv_test.go b/src/archive/tar/strconv_test.go index beb70938bfd..4cc388cb0f2 100644 --- a/src/archive/tar/strconv_test.go +++ b/src/archive/tar/strconv_test.go @@ -110,6 +110,25 @@ func TestFormatNumeric(t *testing.T) { want string ok bool }{ + // Test base-8 (octal) encoded values. + {0, "0\x00", true}, + {7, "7\x00", true}, + {8, "\x80\x08", true}, + {077, "77\x00", true}, + {0100, "\x80\x00\x40", true}, + {0, "0000000\x00", true}, + {0123, "0000123\x00", true}, + {07654321, "7654321\x00", true}, + {07777777, "7777777\x00", true}, + {010000000, "\x80\x00\x00\x00\x00\x20\x00\x00", true}, + {0, "00000000000\x00", true}, + {000001234567, "00001234567\x00", true}, + {076543210321, "76543210321\x00", true}, + {012345670123, "12345670123\x00", true}, + {077777777777, "77777777777\x00", true}, + {0100000000000, "\x80\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00", true}, + {math.MaxInt64, "777777777777777777777\x00", true}, + // Test base-256 (binary) encoded values. 
{-1, "\xff", true}, {-1, "\xff\xff", true}, @@ -155,6 +174,45 @@ func TestFormatNumeric(t *testing.T) { } } +func TestFitsInOctal(t *testing.T) { + vectors := []struct { + input int64 + width int + ok bool + }{ + {-1, 1, false}, + {-1, 2, false}, + {-1, 3, false}, + {0, 1, true}, + {0 + 1, 1, false}, + {0, 2, true}, + {07, 2, true}, + {07 + 1, 2, false}, + {0, 4, true}, + {0777, 4, true}, + {0777 + 1, 4, false}, + {0, 8, true}, + {07777777, 8, true}, + {07777777 + 1, 8, false}, + {0, 12, true}, + {077777777777, 12, true}, + {077777777777 + 1, 12, false}, + {math.MaxInt64, 22, true}, + {012345670123, 12, true}, + {01564164, 12, true}, + {-012345670123, 12, false}, + {-01564164, 12, false}, + {-1564164, 30, false}, + } + + for _, v := range vectors { + ok := fitsInOctal(v.width, v.input) + if ok != v.ok { + t.Errorf("checkOctal(%d, %d): got %v, want %v", v.input, v.width, ok, v.ok) + } + } +} + func TestParsePAXTime(t *testing.T) { vectors := []struct { in string @@ -236,6 +294,51 @@ func TestParsePAXTime(t *testing.T) { } } +func TestFormatPAXTime(t *testing.T) { + vectors := []struct { + sec, nsec int64 + want string + }{ + {1350244992, 0, "1350244992"}, + {1350244992, 300000000, "1350244992.3"}, + {1350244992, 23960100, "1350244992.0239601"}, + {1350244992, 23960108, "1350244992.023960108"}, + {+1, +1E9 - 1E0, "1.999999999"}, + {+1, +1E9 - 1E3, "1.999999"}, + {+1, +1E9 - 1E6, "1.999"}, + {+1, +0E0 - 0E0, "1"}, + {+1, +1E6 - 0E0, "1.001"}, + {+1, +1E3 - 0E0, "1.000001"}, + {+1, +1E0 - 0E0, "1.000000001"}, + {0, 1E9 - 1E0, "0.999999999"}, + {0, 1E9 - 1E3, "0.999999"}, + {0, 1E9 - 1E6, "0.999"}, + {0, 0E0, "0"}, + {0, 1E6 + 0E0, "0.001"}, + {0, 1E3 + 0E0, "0.000001"}, + {0, 1E0 + 0E0, "0.000000001"}, + {-1, -1E9 + 1E0, "-1.999999999"}, + {-1, -1E9 + 1E3, "-1.999999"}, + {-1, -1E9 + 1E6, "-1.999"}, + {-1, -0E0 + 0E0, "-1"}, + {-1, -1E6 + 0E0, "-1.001"}, + {-1, -1E3 + 0E0, "-1.000001"}, + {-1, -1E0 + 0E0, "-1.000000001"}, + {-1350244992, 0, "-1350244992"}, + 
{-1350244992, -300000000, "-1350244992.3"}, + {-1350244992, -23960100, "-1350244992.0239601"}, + {-1350244992, -23960108, "-1350244992.023960108"}, + } + + for _, v := range vectors { + got := formatPAXTime(time.Unix(v.sec, v.nsec)) + if got != v.want { + t.Errorf("formatPAXTime(%ds, %dns): got %q, want %q", + v.sec, v.nsec, got, v.want) + } + } +} + func TestParsePAXRecord(t *testing.T) { medName := strings.Repeat("CD", 50) longName := strings.Repeat("AB", 100) @@ -256,7 +359,7 @@ func TestParsePAXRecord(t *testing.T) { {"18 foo=b=\nar=\n==\x00\n", "", "foo", "b=\nar=\n==\x00", true}, {"27 foo=hello9 foo=ba\nworld\n", "", "foo", "hello9 foo=ba\nworld", true}, {"27 ☺☻☹=日a本b語ç\nmeow mix", "meow mix", "☺☻☹", "日a本b語ç", true}, - {"17 \x00hello=\x00world\n", "", "\x00hello", "\x00world", true}, + {"17 \x00hello=\x00world\n", "17 \x00hello=\x00world\n", "", "", false}, {"1 k=1\n", "1 k=1\n", "", "", false}, {"6 k~1\n", "6 k~1\n", "", "", false}, {"6_k=1\n", "6_k=1\n", "", "", false}, @@ -296,21 +399,33 @@ func TestFormatPAXRecord(t *testing.T) { inKey string inVal string want string + ok bool }{ - {"k", "v", "6 k=v\n"}, - {"path", "/etc/hosts", "19 path=/etc/hosts\n"}, - {"path", longName, "210 path=" + longName + "\n"}, - {"path", medName, "110 path=" + medName + "\n"}, - {"foo", "ba", "9 foo=ba\n"}, - {"foo", "bar", "11 foo=bar\n"}, - {"foo", "b=\nar=\n==\x00", "18 foo=b=\nar=\n==\x00\n"}, - {"foo", "hello9 foo=ba\nworld", "27 foo=hello9 foo=ba\nworld\n"}, - {"☺☻☹", "日a本b語ç", "27 ☺☻☹=日a本b語ç\n"}, - {"\x00hello", "\x00world", "17 \x00hello=\x00world\n"}, + {"k", "v", "6 k=v\n", true}, + {"path", "/etc/hosts", "19 path=/etc/hosts\n", true}, + {"path", longName, "210 path=" + longName + "\n", true}, + {"path", medName, "110 path=" + medName + "\n", true}, + {"foo", "ba", "9 foo=ba\n", true}, + {"foo", "bar", "11 foo=bar\n", true}, + {"foo", "b=\nar=\n==\x00", "18 foo=b=\nar=\n==\x00\n", true}, + {"foo", "hello9 foo=ba\nworld", "27 foo=hello9 foo=ba\nworld\n", true}, + 
{"☺☻☹", "日a本b語ç", "27 ☺☻☹=日a本b語ç\n", true}, + {"xhello", "\x00world", "17 xhello=\x00world\n", true}, + {"path", "null\x00", "", false}, + {"null\x00", "value", "", false}, + {paxSchilyXattr + "key", "null\x00", "26 SCHILY.xattr.key=null\x00\n", true}, } for _, v := range vectors { - got := formatPAXRecord(v.inKey, v.inVal) + got, err := formatPAXRecord(v.inKey, v.inVal) + ok := (err == nil) + if ok != v.ok { + if v.ok { + t.Errorf("formatPAXRecord(%q, %q): got format failure, want success", v.inKey, v.inVal) + } else { + t.Errorf("formatPAXRecord(%q, %q): got format success, want failure", v.inKey, v.inVal) + } + } if got != v.want { t.Errorf("formatPAXRecord(%q, %q): got %q, want %q", v.inKey, v.inVal, got, v.want) diff --git a/src/archive/tar/tar_test.go b/src/archive/tar/tar_test.go index fb7a9dcfc47..af80d6e0c15 100644 --- a/src/archive/tar/tar_test.go +++ b/src/archive/tar/tar_test.go @@ -6,8 +6,12 @@ package tar import ( "bytes" + "errors" + "fmt" "internal/testenv" + "io" "io/ioutil" + "math" "os" "path" "path/filepath" @@ -17,6 +21,193 @@ import ( "time" ) +type testError struct{ error } + +type fileOps []interface{} // []T where T is (string | int64) + +// testFile is an io.ReadWriteSeeker where the IO operations performed +// on it must match the list of operations in ops. 
+type testFile struct { + ops fileOps + pos int64 +} + +func (f *testFile) Read(b []byte) (int, error) { + if len(b) == 0 { + return 0, nil + } + if len(f.ops) == 0 { + return 0, io.EOF + } + s, ok := f.ops[0].(string) + if !ok { + return 0, errors.New("unexpected Read operation") + } + + n := copy(b, s) + if len(s) > n { + f.ops[0] = s[n:] + } else { + f.ops = f.ops[1:] + } + f.pos += int64(len(b)) + return n, nil +} + +func (f *testFile) Write(b []byte) (int, error) { + if len(b) == 0 { + return 0, nil + } + if len(f.ops) == 0 { + return 0, errors.New("unexpected Write operation") + } + s, ok := f.ops[0].(string) + if !ok { + return 0, errors.New("unexpected Write operation") + } + + if !strings.HasPrefix(s, string(b)) { + return 0, testError{fmt.Errorf("got Write(%q), want Write(%q)", b, s)} + } + if len(s) > len(b) { + f.ops[0] = s[len(b):] + } else { + f.ops = f.ops[1:] + } + f.pos += int64(len(b)) + return len(b), nil +} + +func (f *testFile) Seek(pos int64, whence int) (int64, error) { + if pos == 0 && whence == io.SeekCurrent { + return f.pos, nil + } + if len(f.ops) == 0 { + return 0, errors.New("unexpected Seek operation") + } + s, ok := f.ops[0].(int64) + if !ok { + return 0, errors.New("unexpected Seek operation") + } + + if s != pos || whence != io.SeekCurrent { + return 0, testError{fmt.Errorf("got Seek(%d, %d), want Seek(%d, %d)", pos, whence, s, io.SeekCurrent)} + } + f.pos += s + f.ops = f.ops[1:] + return f.pos, nil +} + +func equalSparseEntries(x, y []sparseEntry) bool { + return (len(x) == 0 && len(y) == 0) || reflect.DeepEqual(x, y) +} + +func TestSparseEntries(t *testing.T) { + vectors := []struct { + in []sparseEntry + size int64 + + wantValid bool // Result of validateSparseEntries + wantAligned []sparseEntry // Result of alignSparseEntries + wantInverted []sparseEntry // Result of invertSparseEntries + }{{ + in: []sparseEntry{}, size: 0, + wantValid: true, + wantInverted: []sparseEntry{{0, 0}}, + }, { + in: []sparseEntry{}, size: 5000, + 
wantValid: true, + wantInverted: []sparseEntry{{0, 5000}}, + }, { + in: []sparseEntry{{0, 5000}}, size: 5000, + wantValid: true, + wantAligned: []sparseEntry{{0, 5000}}, + wantInverted: []sparseEntry{{5000, 0}}, + }, { + in: []sparseEntry{{1000, 4000}}, size: 5000, + wantValid: true, + wantAligned: []sparseEntry{{1024, 3976}}, + wantInverted: []sparseEntry{{0, 1000}, {5000, 0}}, + }, { + in: []sparseEntry{{0, 3000}}, size: 5000, + wantValid: true, + wantAligned: []sparseEntry{{0, 2560}}, + wantInverted: []sparseEntry{{3000, 2000}}, + }, { + in: []sparseEntry{{3000, 2000}}, size: 5000, + wantValid: true, + wantAligned: []sparseEntry{{3072, 1928}}, + wantInverted: []sparseEntry{{0, 3000}, {5000, 0}}, + }, { + in: []sparseEntry{{2000, 2000}}, size: 5000, + wantValid: true, + wantAligned: []sparseEntry{{2048, 1536}}, + wantInverted: []sparseEntry{{0, 2000}, {4000, 1000}}, + }, { + in: []sparseEntry{{0, 2000}, {8000, 2000}}, size: 10000, + wantValid: true, + wantAligned: []sparseEntry{{0, 1536}, {8192, 1808}}, + wantInverted: []sparseEntry{{2000, 6000}, {10000, 0}}, + }, { + in: []sparseEntry{{0, 2000}, {2000, 2000}, {4000, 0}, {4000, 3000}, {7000, 1000}, {8000, 0}, {8000, 2000}}, size: 10000, + wantValid: true, + wantAligned: []sparseEntry{{0, 1536}, {2048, 1536}, {4096, 2560}, {7168, 512}, {8192, 1808}}, + wantInverted: []sparseEntry{{10000, 0}}, + }, { + in: []sparseEntry{{0, 0}, {1000, 0}, {2000, 0}, {3000, 0}, {4000, 0}, {5000, 0}}, size: 5000, + wantValid: true, + wantInverted: []sparseEntry{{0, 5000}}, + }, { + in: []sparseEntry{{1, 0}}, size: 0, + wantValid: false, + }, { + in: []sparseEntry{{-1, 0}}, size: 100, + wantValid: false, + }, { + in: []sparseEntry{{0, -1}}, size: 100, + wantValid: false, + }, { + in: []sparseEntry{{0, 0}}, size: -100, + wantValid: false, + }, { + in: []sparseEntry{{math.MaxInt64, 3}, {6, -5}}, size: 35, + wantValid: false, + }, { + in: []sparseEntry{{1, 3}, {6, -5}}, size: 35, + wantValid: false, + }, { + in: 
[]sparseEntry{{math.MaxInt64, math.MaxInt64}}, size: math.MaxInt64, + wantValid: false, + }, { + in: []sparseEntry{{3, 3}}, size: 5, + wantValid: false, + }, { + in: []sparseEntry{{2, 0}, {1, 0}, {0, 0}}, size: 3, + wantValid: false, + }, { + in: []sparseEntry{{1, 3}, {2, 2}}, size: 10, + wantValid: false, + }} + + for i, v := range vectors { + gotValid := validateSparseEntries(v.in, v.size) + if gotValid != v.wantValid { + t.Errorf("test %d, validateSparseEntries() = %v, want %v", i, gotValid, v.wantValid) + } + if !v.wantValid { + continue + } + gotAligned := alignSparseEntries(append([]sparseEntry{}, v.in...), v.size) + if !equalSparseEntries(gotAligned, v.wantAligned) { + t.Errorf("test %d, alignSparseEntries():\ngot %v\nwant %v", i, gotAligned, v.wantAligned) + } + gotInverted := invertSparseEntries(append([]sparseEntry{}, v.in...), v.size) + if !equalSparseEntries(gotInverted, v.wantInverted) { + t.Errorf("test %d, inverseSparseEntries():\ngot %v\nwant %v", i, gotInverted, v.wantInverted) + } + } +} + func TestFileInfoHeader(t *testing.T) { fi, err := os.Stat("testdata/small.txt") if err != nil { @@ -109,15 +300,12 @@ func TestRoundTrip(t *testing.T) { var b bytes.Buffer tw := NewWriter(&b) hdr := &Header{ - Name: "file.txt", - Uid: 1 << 21, // too big for 8 octal digits - Size: int64(len(data)), - // AddDate to strip monotonic clock reading, - // and Round to discard sub-second precision, - // both of which are not included in the tar header - // and would otherwise break the round-trip check - // below. 
- ModTime: time.Now().AddDate(0, 0, 0).Round(1 * time.Second), + Name: "file.txt", + Uid: 1 << 21, // Too big for 8 octal digits + Size: int64(len(data)), + ModTime: time.Now().Round(time.Second), + PAXRecords: map[string]string{"uid": "2097152"}, + Format: FormatPAX, } if err := tw.WriteHeader(hdr); err != nil { t.Fatalf("tw.WriteHeader: %v", err) @@ -329,3 +517,338 @@ func TestHeaderRoundTrip(t *testing.T) { } } } + +func TestHeaderAllowedFormats(t *testing.T) { + vectors := []struct { + header *Header // Input header + paxHdrs map[string]string // Expected PAX headers that may be needed + formats Format // Expected formats that can encode the header + }{{ + header: &Header{}, + formats: FormatUSTAR | FormatPAX | FormatGNU, + }, { + header: &Header{Size: 077777777777}, + formats: FormatUSTAR | FormatPAX | FormatGNU, + }, { + header: &Header{Size: 077777777777, Format: FormatUSTAR}, + formats: FormatUSTAR, + }, { + header: &Header{Size: 077777777777, Format: FormatPAX}, + formats: FormatUSTAR | FormatPAX, + }, { + header: &Header{Size: 077777777777, Format: FormatGNU}, + formats: FormatGNU, + }, { + header: &Header{Size: 077777777777 + 1}, + paxHdrs: map[string]string{paxSize: "8589934592"}, + formats: FormatPAX | FormatGNU, + }, { + header: &Header{Size: 077777777777 + 1, Format: FormatPAX}, + paxHdrs: map[string]string{paxSize: "8589934592"}, + formats: FormatPAX, + }, { + header: &Header{Size: 077777777777 + 1, Format: FormatGNU}, + paxHdrs: map[string]string{paxSize: "8589934592"}, + formats: FormatGNU, + }, { + header: &Header{Mode: 07777777}, + formats: FormatUSTAR | FormatPAX | FormatGNU, + }, { + header: &Header{Mode: 07777777 + 1}, + formats: FormatGNU, + }, { + header: &Header{Devmajor: -123}, + formats: FormatGNU, + }, { + header: &Header{Devmajor: 1<<56 - 1}, + formats: FormatGNU, + }, { + header: &Header{Devmajor: 1 << 56}, + formats: FormatUnknown, + }, { + header: &Header{Devmajor: -1 << 56}, + formats: FormatGNU, + }, { + header: &Header{Devmajor: 
-1<<56 - 1}, + formats: FormatUnknown, + }, { + header: &Header{Name: "用戶名", Devmajor: -1 << 56}, + formats: FormatGNU, + }, { + header: &Header{Size: math.MaxInt64}, + paxHdrs: map[string]string{paxSize: "9223372036854775807"}, + formats: FormatPAX | FormatGNU, + }, { + header: &Header{Size: math.MinInt64}, + paxHdrs: map[string]string{paxSize: "-9223372036854775808"}, + formats: FormatUnknown, + }, { + header: &Header{Uname: "0123456789abcdef0123456789abcdef"}, + formats: FormatUSTAR | FormatPAX | FormatGNU, + }, { + header: &Header{Uname: "0123456789abcdef0123456789abcdefx"}, + paxHdrs: map[string]string{paxUname: "0123456789abcdef0123456789abcdefx"}, + formats: FormatPAX, + }, { + header: &Header{Name: "foobar"}, + formats: FormatUSTAR | FormatPAX | FormatGNU, + }, { + header: &Header{Name: strings.Repeat("a", nameSize)}, + formats: FormatUSTAR | FormatPAX | FormatGNU, + }, { + header: &Header{Name: strings.Repeat("a", nameSize+1)}, + paxHdrs: map[string]string{paxPath: strings.Repeat("a", nameSize+1)}, + formats: FormatPAX | FormatGNU, + }, { + header: &Header{Linkname: "用戶名"}, + paxHdrs: map[string]string{paxLinkpath: "用戶名"}, + formats: FormatPAX | FormatGNU, + }, { + header: &Header{Linkname: strings.Repeat("用戶名\x00", nameSize)}, + paxHdrs: map[string]string{paxLinkpath: strings.Repeat("用戶名\x00", nameSize)}, + formats: FormatUnknown, + }, { + header: &Header{Linkname: "\x00hello"}, + paxHdrs: map[string]string{paxLinkpath: "\x00hello"}, + formats: FormatUnknown, + }, { + header: &Header{Uid: 07777777}, + formats: FormatUSTAR | FormatPAX | FormatGNU, + }, { + header: &Header{Uid: 07777777 + 1}, + paxHdrs: map[string]string{paxUid: "2097152"}, + formats: FormatPAX | FormatGNU, + }, { + header: &Header{Xattrs: nil}, + formats: FormatUSTAR | FormatPAX | FormatGNU, + }, { + header: &Header{Xattrs: map[string]string{"foo": "bar"}}, + paxHdrs: map[string]string{paxSchilyXattr + "foo": "bar"}, + formats: FormatPAX, + }, { + header: &Header{Xattrs: 
map[string]string{"foo": "bar"}, Format: FormatGNU}, + paxHdrs: map[string]string{paxSchilyXattr + "foo": "bar"}, + formats: FormatUnknown, + }, { + header: &Header{Xattrs: map[string]string{"用戶名": "\x00hello"}}, + paxHdrs: map[string]string{paxSchilyXattr + "用戶名": "\x00hello"}, + formats: FormatPAX, + }, { + header: &Header{Xattrs: map[string]string{"foo=bar": "baz"}}, + formats: FormatUnknown, + }, { + header: &Header{Xattrs: map[string]string{"foo": ""}}, + paxHdrs: map[string]string{paxSchilyXattr + "foo": ""}, + formats: FormatPAX, + }, { + header: &Header{ModTime: time.Unix(0, 0)}, + formats: FormatUSTAR | FormatPAX | FormatGNU, + }, { + header: &Header{ModTime: time.Unix(077777777777, 0)}, + formats: FormatUSTAR | FormatPAX | FormatGNU, + }, { + header: &Header{ModTime: time.Unix(077777777777+1, 0)}, + paxHdrs: map[string]string{paxMtime: "8589934592"}, + formats: FormatPAX | FormatGNU, + }, { + header: &Header{ModTime: time.Unix(math.MaxInt64, 0)}, + paxHdrs: map[string]string{paxMtime: "9223372036854775807"}, + formats: FormatPAX | FormatGNU, + }, { + header: &Header{ModTime: time.Unix(math.MaxInt64, 0), Format: FormatUSTAR}, + paxHdrs: map[string]string{paxMtime: "9223372036854775807"}, + formats: FormatUnknown, + }, { + header: &Header{ModTime: time.Unix(-1, 0)}, + paxHdrs: map[string]string{paxMtime: "-1"}, + formats: FormatPAX | FormatGNU, + }, { + header: &Header{ModTime: time.Unix(1, 500)}, + paxHdrs: map[string]string{paxMtime: "1.0000005"}, + formats: FormatUSTAR | FormatPAX | FormatGNU, + }, { + header: &Header{ModTime: time.Unix(1, 0)}, + formats: FormatUSTAR | FormatPAX | FormatGNU, + }, { + header: &Header{ModTime: time.Unix(1, 0), Format: FormatPAX}, + formats: FormatUSTAR | FormatPAX, + }, { + header: &Header{ModTime: time.Unix(1, 500), Format: FormatUSTAR}, + paxHdrs: map[string]string{paxMtime: "1.0000005"}, + formats: FormatUSTAR, + }, { + header: &Header{ModTime: time.Unix(1, 500), Format: FormatPAX}, + paxHdrs: 
map[string]string{paxMtime: "1.0000005"}, + formats: FormatPAX, + }, { + header: &Header{ModTime: time.Unix(1, 500), Format: FormatGNU}, + paxHdrs: map[string]string{paxMtime: "1.0000005"}, + formats: FormatGNU, + }, { + header: &Header{ModTime: time.Unix(-1, 500)}, + paxHdrs: map[string]string{paxMtime: "-0.9999995"}, + formats: FormatPAX | FormatGNU, + }, { + header: &Header{ModTime: time.Unix(-1, 500), Format: FormatGNU}, + paxHdrs: map[string]string{paxMtime: "-0.9999995"}, + formats: FormatGNU, + }, { + header: &Header{AccessTime: time.Unix(0, 0)}, + paxHdrs: map[string]string{paxAtime: "0"}, + formats: FormatPAX | FormatGNU, + }, { + header: &Header{AccessTime: time.Unix(0, 0), Format: FormatUSTAR}, + paxHdrs: map[string]string{paxAtime: "0"}, + formats: FormatUnknown, + }, { + header: &Header{AccessTime: time.Unix(0, 0), Format: FormatPAX}, + paxHdrs: map[string]string{paxAtime: "0"}, + formats: FormatPAX, + }, { + header: &Header{AccessTime: time.Unix(0, 0), Format: FormatGNU}, + paxHdrs: map[string]string{paxAtime: "0"}, + formats: FormatGNU, + }, { + header: &Header{AccessTime: time.Unix(-123, 0)}, + paxHdrs: map[string]string{paxAtime: "-123"}, + formats: FormatPAX | FormatGNU, + }, { + header: &Header{AccessTime: time.Unix(-123, 0), Format: FormatPAX}, + paxHdrs: map[string]string{paxAtime: "-123"}, + formats: FormatPAX, + }, { + header: &Header{ChangeTime: time.Unix(123, 456)}, + paxHdrs: map[string]string{paxCtime: "123.000000456"}, + formats: FormatPAX | FormatGNU, + }, { + header: &Header{ChangeTime: time.Unix(123, 456), Format: FormatUSTAR}, + paxHdrs: map[string]string{paxCtime: "123.000000456"}, + formats: FormatUnknown, + }, { + header: &Header{ChangeTime: time.Unix(123, 456), Format: FormatGNU}, + paxHdrs: map[string]string{paxCtime: "123.000000456"}, + formats: FormatGNU, + }, { + header: &Header{ChangeTime: time.Unix(123, 456), Format: FormatPAX}, + paxHdrs: map[string]string{paxCtime: "123.000000456"}, + formats: FormatPAX, + }, { + header: 
&Header{Name: "foo/", Typeflag: TypeDir}, + formats: FormatUSTAR | FormatPAX | FormatGNU, + }, { + header: &Header{Name: "foo/", Typeflag: TypeReg}, + formats: FormatUnknown, + }, { + header: &Header{Name: "foo/", Typeflag: TypeSymlink}, + formats: FormatUSTAR | FormatPAX | FormatGNU, + }} + + for i, v := range vectors { + formats, paxHdrs, err := v.header.allowedFormats() + if formats != v.formats { + t.Errorf("test %d, allowedFormats(): got %v, want %v", i, formats, v.formats) + } + if formats&FormatPAX > 0 && !reflect.DeepEqual(paxHdrs, v.paxHdrs) && !(len(paxHdrs) == 0 && len(v.paxHdrs) == 0) { + t.Errorf("test %d, allowedFormats():\ngot %v\nwant %s", i, paxHdrs, v.paxHdrs) + } + if (formats != FormatUnknown) && (err != nil) { + t.Errorf("test %d, unexpected error: %v", i, err) + } + if (formats == FormatUnknown) && (err == nil) { + t.Errorf("test %d, got nil-error, want non-nil error", i) + } + } +} + +func Benchmark(b *testing.B) { + type file struct { + hdr *Header + body []byte + } + + vectors := []struct { + label string + files []file + }{{ + "USTAR", + []file{{ + &Header{Name: "bar", Mode: 0640, Size: int64(3)}, + []byte("foo"), + }, { + &Header{Name: "world", Mode: 0640, Size: int64(5)}, + []byte("hello"), + }}, + }, { + "GNU", + []file{{ + &Header{Name: "bar", Mode: 0640, Size: int64(3), Devmajor: -1}, + []byte("foo"), + }, { + &Header{Name: "world", Mode: 0640, Size: int64(5), Devmajor: -1}, + []byte("hello"), + }}, + }, { + "PAX", + []file{{ + &Header{Name: "bar", Mode: 0640, Size: int64(3), Xattrs: map[string]string{"foo": "bar"}}, + []byte("foo"), + }, { + &Header{Name: "world", Mode: 0640, Size: int64(5), Xattrs: map[string]string{"foo": "bar"}}, + []byte("hello"), + }}, + }} + + b.Run("Writer", func(b *testing.B) { + for _, v := range vectors { + b.Run(v.label, func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + // Writing to ioutil.Discard because we want to + // test purely the writer code and not bring in disk performance 
into this. + tw := NewWriter(ioutil.Discard) + for _, file := range v.files { + if err := tw.WriteHeader(file.hdr); err != nil { + b.Errorf("unexpected WriteHeader error: %v", err) + } + if _, err := tw.Write(file.body); err != nil { + b.Errorf("unexpected Write error: %v", err) + } + } + if err := tw.Close(); err != nil { + b.Errorf("unexpected Close error: %v", err) + } + } + }) + } + }) + + b.Run("Reader", func(b *testing.B) { + for _, v := range vectors { + var buf bytes.Buffer + var r bytes.Reader + + // Write the archive to a byte buffer. + tw := NewWriter(&buf) + for _, file := range v.files { + tw.WriteHeader(file.hdr) + tw.Write(file.body) + } + tw.Close() + b.Run(v.label, func(b *testing.B) { + b.ReportAllocs() + // Read from the byte buffer. + for i := 0; i < b.N; i++ { + r.Reset(buf.Bytes()) + tr := NewReader(&r) + if _, err := tr.Next(); err != nil { + b.Errorf("unexpected Next error: %v", err) + } + if _, err := io.Copy(ioutil.Discard, tr); err != nil { + b.Errorf("unexpected Copy error : %v", err) + } + } + }) + } + }) + +} diff --git a/src/archive/tar/testdata/gnu-long-nul.tar b/src/archive/tar/testdata/gnu-long-nul.tar new file mode 100644 index 00000000000..28bc812aa60 Binary files /dev/null and b/src/archive/tar/testdata/gnu-long-nul.tar differ diff --git a/src/archive/tar/testdata/gnu-nil-sparse-data.tar b/src/archive/tar/testdata/gnu-nil-sparse-data.tar new file mode 100644 index 00000000000..df1aa834538 Binary files /dev/null and b/src/archive/tar/testdata/gnu-nil-sparse-data.tar differ diff --git a/src/archive/tar/testdata/gnu-nil-sparse-hole.tar b/src/archive/tar/testdata/gnu-nil-sparse-hole.tar new file mode 100644 index 00000000000..496abfeb78a Binary files /dev/null and b/src/archive/tar/testdata/gnu-nil-sparse-hole.tar differ diff --git a/src/archive/tar/testdata/gnu-not-utf8.tar b/src/archive/tar/testdata/gnu-not-utf8.tar new file mode 100644 index 00000000000..81cec67d330 Binary files /dev/null and 
b/src/archive/tar/testdata/gnu-not-utf8.tar differ diff --git a/src/archive/tar/testdata/gnu-sparse-big.tar b/src/archive/tar/testdata/gnu-sparse-big.tar new file mode 100644 index 00000000000..1a5cfc96d92 Binary files /dev/null and b/src/archive/tar/testdata/gnu-sparse-big.tar differ diff --git a/src/archive/tar/testdata/gnu-utf8.tar b/src/archive/tar/testdata/gnu-utf8.tar new file mode 100644 index 00000000000..2c9c8079cf6 Binary files /dev/null and b/src/archive/tar/testdata/gnu-utf8.tar differ diff --git a/src/archive/tar/testdata/invalid-go17.tar b/src/archive/tar/testdata/invalid-go17.tar new file mode 100644 index 00000000000..58f2488e78f Binary files /dev/null and b/src/archive/tar/testdata/invalid-go17.tar differ diff --git a/src/archive/tar/testdata/pax-global-records.tar b/src/archive/tar/testdata/pax-global-records.tar new file mode 100644 index 00000000000..3d3d241e65c Binary files /dev/null and b/src/archive/tar/testdata/pax-global-records.tar differ diff --git a/src/archive/tar/testdata/pax-nil-sparse-data.tar b/src/archive/tar/testdata/pax-nil-sparse-data.tar new file mode 100644 index 00000000000..e59bd94117d Binary files /dev/null and b/src/archive/tar/testdata/pax-nil-sparse-data.tar differ diff --git a/src/archive/tar/testdata/ustar.issue12594.tar b/src/archive/tar/testdata/pax-nil-sparse-hole.tar similarity index 70% rename from src/archive/tar/testdata/ustar.issue12594.tar rename to src/archive/tar/testdata/pax-nil-sparse-hole.tar index 50fcd009760..b44327bdbfb 100644 Binary files a/src/archive/tar/testdata/ustar.issue12594.tar and b/src/archive/tar/testdata/pax-nil-sparse-hole.tar differ diff --git a/src/archive/tar/testdata/pax-nul-path.tar b/src/archive/tar/testdata/pax-nul-path.tar new file mode 100644 index 00000000000..c78f82b16e8 Binary files /dev/null and b/src/archive/tar/testdata/pax-nul-path.tar differ diff --git a/src/archive/tar/testdata/pax-nul-xattrs.tar b/src/archive/tar/testdata/pax-nul-xattrs.tar new file mode 100644 index 
00000000000..881f51768f9 Binary files /dev/null and b/src/archive/tar/testdata/pax-nul-xattrs.tar differ diff --git a/src/archive/tar/testdata/pax-pos-size-file.tar b/src/archive/tar/testdata/pax-pos-size-file.tar index aed9a8aa48f..ea5ccf91642 100644 Binary files a/src/archive/tar/testdata/pax-pos-size-file.tar and b/src/archive/tar/testdata/pax-pos-size-file.tar differ diff --git a/src/archive/tar/testdata/pax-records.tar b/src/archive/tar/testdata/pax-records.tar new file mode 100644 index 00000000000..276c211baa3 Binary files /dev/null and b/src/archive/tar/testdata/pax-records.tar differ diff --git a/src/archive/tar/testdata/pax-sparse-big.tar b/src/archive/tar/testdata/pax-sparse-big.tar new file mode 100644 index 00000000000..65d1f8eceb0 Binary files /dev/null and b/src/archive/tar/testdata/pax-sparse-big.tar differ diff --git a/src/archive/tar/testdata/trailing-slash.tar b/src/archive/tar/testdata/trailing-slash.tar new file mode 100644 index 00000000000..bf1b2ec426b Binary files /dev/null and b/src/archive/tar/testdata/trailing-slash.tar differ diff --git a/src/archive/tar/testdata/ustar-file-devs.tar b/src/archive/tar/testdata/ustar-file-devs.tar new file mode 100644 index 00000000000..146e25b79d8 Binary files /dev/null and b/src/archive/tar/testdata/ustar-file-devs.tar differ diff --git a/src/archive/tar/testdata/writer-big-long.tar b/src/archive/tar/testdata/writer-big-long.tar index ea9bfa88bbb..09fc5dd3dd7 100644 Binary files a/src/archive/tar/testdata/writer-big-long.tar and b/src/archive/tar/testdata/writer-big-long.tar differ diff --git a/src/archive/tar/testdata/writer-big.tar b/src/archive/tar/testdata/writer-big.tar index 753e883cebf..435dcbce6ab 100644 Binary files a/src/archive/tar/testdata/writer-big.tar and b/src/archive/tar/testdata/writer-big.tar differ diff --git a/src/archive/tar/writer.go b/src/archive/tar/writer.go index c51c243a8b8..97d23f80388 100644 --- a/src/archive/tar/writer.go +++ b/src/archive/tar/writer.go @@ -4,255 +4,391 @@ 
package tar -// TODO(dsymonds): -// - catch more errors (no first header, etc.) - import ( "bytes" - "errors" "fmt" "io" "path" "sort" - "strconv" "strings" "time" ) -var ( - ErrWriteTooLong = errors.New("archive/tar: write too long") - ErrFieldTooLong = errors.New("archive/tar: header field too long") - ErrWriteAfterClose = errors.New("archive/tar: write after close") - errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values") -) - -// A Writer provides sequential writing of a tar archive in POSIX.1 format. -// A tar archive consists of a sequence of files. -// Call WriteHeader to begin a new file, and then call Write to supply that file's data, -// writing at most hdr.Size bytes in total. +// Writer provides sequential writing of a tar archive. +// Write.WriteHeader begins a new file with the provided Header, +// and then Writer can be treated as an io.Writer to supply that file's data. type Writer struct { - w io.Writer - err error - nb int64 // number of unwritten bytes for current file entry - pad int64 // amount of padding to write after current file entry - closed bool - usedBinary bool // whether the binary numeric field extension was used - preferPax bool // use PAX header instead of binary numeric header - hdrBuff block // buffer to use in writeHeader when writing a regular header - paxHdrBuff block // buffer to use in writeHeader when writing a PAX header + w io.Writer + pad int64 // Amount of padding to write after current file entry + curr fileWriter // Writer for current file entry + hdr Header // Shallow copy of Header that is safe for mutations + blk block // Buffer to use as temporary local storage + + // err is a persistent error. + // It is only the responsibility of every exported method of Writer to + // ensure that this error is sticky. + err error } // NewWriter creates a new Writer writing to w. 
-func NewWriter(w io.Writer) *Writer { return &Writer{w: w} } +func NewWriter(w io.Writer) *Writer { + return &Writer{w: w, curr: ®FileWriter{w, 0}} +} -// Flush finishes writing the current file (optional). +type fileWriter interface { + io.Writer + fileState + + ReadFrom(io.Reader) (int64, error) +} + +// Flush finishes writing the current file's block padding. +// The current file must be fully written before Flush can be called. +// +// This is unnecessary as the next call to WriteHeader or Close +// will implicitly flush out the file's padding. func (tw *Writer) Flush() error { - if tw.nb > 0 { - tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb) - return tw.err - } - - n := tw.nb + tw.pad - for n > 0 && tw.err == nil { - nr := n - if nr > blockSize { - nr = blockSize - } - var nw int - nw, tw.err = tw.w.Write(zeroBlock[0:nr]) - n -= int64(nw) - } - tw.nb = 0 - tw.pad = 0 - return tw.err -} - -var ( - minTime = time.Unix(0, 0) - // There is room for 11 octal digits (33 bits) of mtime. - maxTime = minTime.Add((1<<33 - 1) * time.Second) -) - -// WriteHeader writes hdr and prepares to accept the file's contents. -// WriteHeader calls Flush if it is not the first header. -// Calling after a Close will return ErrWriteAfterClose. -func (tw *Writer) WriteHeader(hdr *Header) error { - return tw.writeHeader(hdr, true) -} - -// WriteHeader writes hdr and prepares to accept the file's contents. -// WriteHeader calls Flush if it is not the first header. -// Calling after a Close will return ErrWriteAfterClose. -// As this method is called internally by writePax header to allow it to -// suppress writing the pax header. 
-func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error { - if tw.closed { - return ErrWriteAfterClose - } - if tw.err == nil { - tw.Flush() - } if tw.err != nil { return tw.err } - - // a map to hold pax header records, if any are needed - paxHeaders := make(map[string]string) - - // TODO(dsnet): we might want to use PAX headers for - // subsecond time resolution, but for now let's just capture - // too long fields or non ascii characters - - // We need to select which scratch buffer to use carefully, - // since this method is called recursively to write PAX headers. - // If allowPax is true, this is the non-recursive call, and we will use hdrBuff. - // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is - // already being used by the non-recursive call, so we must use paxHdrBuff. - header := &tw.hdrBuff - if !allowPax { - header = &tw.paxHdrBuff + if nb := tw.curr.LogicalRemaining(); nb > 0 { + return fmt.Errorf("archive/tar: missed writing %d bytes", nb) } - copy(header[:], zeroBlock[:]) - - // Wrappers around formatter that automatically sets paxHeaders if the - // argument extends beyond the capacity of the input byte slice. - var f formatter - var formatString = func(b []byte, s string, paxKeyword string) { - needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s) - if needsPaxHeader { - paxHeaders[paxKeyword] = s - } - - // Write string in a best-effort manner to satisfy readers that expect - // the field to be non-empty. - s = toASCII(s) - if len(s) > len(b) { - s = s[:len(b)] - } - f.formatString(b, s) // Should never error - } - var formatNumeric = func(b []byte, x int64, paxKeyword string) { - // Try octal first. - s := strconv.FormatInt(x, 8) - if len(s) < len(b) { - f.formatOctal(b, x) - return - } - - // If it is too long for octal, and PAX is preferred, use a PAX header. 
- if paxKeyword != paxNone && tw.preferPax { - f.formatOctal(b, 0) - s := strconv.FormatInt(x, 10) - paxHeaders[paxKeyword] = s - return - } - - tw.usedBinary = true - f.formatNumeric(b, x) - } - - // Handle out of range ModTime carefully. - var modTime int64 - if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) { - modTime = hdr.ModTime.Unix() - } - - v7 := header.V7() - formatString(v7.Name(), hdr.Name, paxPath) - // TODO(dsnet): The GNU format permits the mode field to be encoded in - // base-256 format. Thus, we can use formatNumeric instead of formatOctal. - f.formatOctal(v7.Mode(), hdr.Mode) - formatNumeric(v7.UID(), int64(hdr.Uid), paxUid) - formatNumeric(v7.GID(), int64(hdr.Gid), paxGid) - formatNumeric(v7.Size(), hdr.Size, paxSize) - // TODO(dsnet): Consider using PAX for finer time granularity. - formatNumeric(v7.ModTime(), modTime, paxNone) - v7.TypeFlag()[0] = hdr.Typeflag - formatString(v7.LinkName(), hdr.Linkname, paxLinkpath) - - ustar := header.USTAR() - formatString(ustar.UserName(), hdr.Uname, paxUname) - formatString(ustar.GroupName(), hdr.Gname, paxGname) - formatNumeric(ustar.DevMajor(), hdr.Devmajor, paxNone) - formatNumeric(ustar.DevMinor(), hdr.Devminor, paxNone) - - // TODO(dsnet): The logic surrounding the prefix field is broken when trying - // to encode the header as GNU format. The challenge with the current logic - // is that we are unsure what format we are using at any given moment until - // we have processed *all* of the fields. The problem is that by the time - // all fields have been processed, some work has already been done to handle - // each field under the assumption that it is for one given format or - // another. In some situations, this causes the Writer to be confused and - // encode a prefix field when the format being used is GNU. Thus, producing - // an invalid tar file. 
- // - // As a short-term fix, we disable the logic to use the prefix field, which - // will force the badly generated GNU files to become encoded as being - // the PAX format. - // - // As an alternative fix, we could hard-code preferPax to be true. However, - // this is problematic for the following reasons: - // * The preferPax functionality is not tested at all. - // * This can result in headers that try to use both the GNU and PAX - // features at the same time, which is also wrong. - // - // The proper fix for this is to use a two-pass method: - // * The first pass simply determines what set of formats can possibly - // encode the given header. - // * The second pass actually encodes the header as that given format - // without worrying about violating the format. - // - // See the following: - // https://golang.org/issue/12594 - // https://golang.org/issue/17630 - // https://golang.org/issue/9683 - const usePrefix = false - - // try to use a ustar header when only the name is too long - _, paxPathUsed := paxHeaders[paxPath] - if usePrefix && !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed { - prefix, suffix, ok := splitUSTARPath(hdr.Name) - if ok { - // Since we can encode in USTAR format, disable PAX header. - delete(paxHeaders, paxPath) - - // Update the path fields - formatString(v7.Name(), suffix, paxNone) - formatString(ustar.Prefix(), prefix, paxNone) - } - } - - if tw.usedBinary { - header.SetFormat(formatGNU) - } else { - header.SetFormat(formatUSTAR) - } - - // Check if there were any formatting errors. - if f.err != nil { - tw.err = f.err + if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil { return tw.err } + tw.pad = 0 + return nil +} - if allowPax { - for k, v := range hdr.Xattrs { - paxHeaders[paxXattr+k] = v +// WriteHeader writes hdr and prepares to accept the file's contents. +// The Header.Size determines how many bytes can be written for the next file. +// If the current file is not fully written, then this returns an error. 
+// This implicitly flushes any padding necessary before writing the header. +func (tw *Writer) WriteHeader(hdr *Header) error { + if err := tw.Flush(); err != nil { + return err + } + tw.hdr = *hdr // Shallow copy of Header + + // Round ModTime and ignore AccessTime and ChangeTime unless + // the format is explicitly chosen. + // This ensures nominal usage of WriteHeader (without specifying the format) + // does not always result in the PAX format being chosen, which + // causes a 1KiB increase to every header. + if tw.hdr.Format == FormatUnknown { + tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second) + tw.hdr.AccessTime = time.Time{} + tw.hdr.ChangeTime = time.Time{} + } + + allowedFormats, paxHdrs, err := tw.hdr.allowedFormats() + switch { + case allowedFormats.has(FormatUSTAR): + tw.err = tw.writeUSTARHeader(&tw.hdr) + return tw.err + case allowedFormats.has(FormatPAX): + tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs) + return tw.err + case allowedFormats.has(FormatGNU): + tw.err = tw.writeGNUHeader(&tw.hdr) + return tw.err + default: + return err // Non-fatal error + } +} + +func (tw *Writer) writeUSTARHeader(hdr *Header) error { + // Check if we can use USTAR prefix/suffix splitting. + var namePrefix string + if prefix, suffix, ok := splitUSTARPath(hdr.Name); ok { + namePrefix, hdr.Name = prefix, suffix + } + + // Pack the main header. + var f formatter + blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal) + f.formatString(blk.USTAR().Prefix(), namePrefix) + blk.SetFormat(FormatUSTAR) + if f.err != nil { + return f.err // Should never happen since header is validated + } + return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag) +} + +func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error { + realName, realSize := hdr.Name, hdr.Size + + // TODO(dsnet): Re-enable this when adding sparse support. + // See https://golang.org/issue/22735 + /* + // Handle sparse files. 
+ var spd sparseDatas + var spb []byte + if len(hdr.SparseHoles) > 0 { + sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map + sph = alignSparseEntries(sph, hdr.Size) + spd = invertSparseEntries(sph, hdr.Size) + + // Format the sparse map. + hdr.Size = 0 // Replace with encoded size + spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n') + for _, s := range spd { + hdr.Size += s.Length + spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n') + spb = append(strconv.AppendInt(spb, s.Length, 10), '\n') + } + pad := blockPadding(int64(len(spb))) + spb = append(spb, zeroBlock[:pad]...) + hdr.Size += int64(len(spb)) // Accounts for encoded sparse map + + // Add and modify appropriate PAX records. + dir, file := path.Split(realName) + hdr.Name = path.Join(dir, "GNUSparseFile.0", file) + paxHdrs[paxGNUSparseMajor] = "1" + paxHdrs[paxGNUSparseMinor] = "0" + paxHdrs[paxGNUSparseName] = realName + paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10) + paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10) + delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName + } + */ + _ = realSize + + // Write PAX records to the output. + isGlobal := hdr.Typeflag == TypeXGlobalHeader + if len(paxHdrs) > 0 || isGlobal { + // Sort keys for deterministic ordering. + var keys []string + for k := range paxHdrs { + keys = append(keys, k) + } + sort.Strings(keys) + + // Write each record to a buffer. + var buf bytes.Buffer + for _, k := range keys { + rec, err := formatPAXRecord(k, paxHdrs[k]) + if err != nil { + return err + } + buf.WriteString(rec) + } + + // Write the extended header file. 
+ var name string + var flag byte + if isGlobal { + name = realName + if name == "" { + name = "GlobalHead.0.0" + } + flag = TypeXGlobalHeader + } else { + dir, file := path.Split(realName) + name = path.Join(dir, "PaxHeaders.0", file) + flag = TypeXHeader + } + data := buf.String() + if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal { + return err // Global headers return here } } - if len(paxHeaders) > 0 { - if !allowPax { - return errInvalidHeader + // Pack the main header. + var f formatter // Ignore errors since they are expected + fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) } + blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal) + blk.SetFormat(FormatPAX) + if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil { + return err + } + + // TODO(dsnet): Re-enable this when adding sparse support. + // See https://golang.org/issue/22735 + /* + // Write the sparse map and setup the sparse writer if necessary. + if len(spd) > 0 { + // Use tw.curr since the sparse map is accounted for in hdr.Size. + if _, err := tw.curr.Write(spb); err != nil { + return err + } + tw.curr = &sparseFileWriter{tw.curr, spd, 0} } - if err := tw.writePAXHeader(hdr, paxHeaders); err != nil { + */ + return nil +} + +func (tw *Writer) writeGNUHeader(hdr *Header) error { + // Use long-link files if Name or Linkname exceeds the field size. + const longName = "././@LongLink" + if len(hdr.Name) > nameSize { + data := hdr.Name + "\x00" + if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil { + return err + } + } + if len(hdr.Linkname) > nameSize { + data := hdr.Linkname + "\x00" + if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil { return err } } - tw.nb = hdr.Size - tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize - _, tw.err = tw.w.Write(header[:]) - return tw.err + // Pack the main header. 
+ var f formatter // Ignore errors since they are expected + var spd sparseDatas + var spb []byte + blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric) + if !hdr.AccessTime.IsZero() { + f.formatNumeric(blk.GNU().AccessTime(), hdr.AccessTime.Unix()) + } + if !hdr.ChangeTime.IsZero() { + f.formatNumeric(blk.GNU().ChangeTime(), hdr.ChangeTime.Unix()) + } + // TODO(dsnet): Re-enable this when adding sparse support. + // See https://golang.org/issue/22735 + /* + if hdr.Typeflag == TypeGNUSparse { + sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map + sph = alignSparseEntries(sph, hdr.Size) + spd = invertSparseEntries(sph, hdr.Size) + + // Format the sparse map. + formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas { + for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ { + f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset) + f.formatNumeric(sa.Entry(i).Length(), sp[0].Length) + sp = sp[1:] + } + if len(sp) > 0 { + sa.IsExtended()[0] = 1 + } + return sp + } + sp2 := formatSPD(spd, blk.GNU().Sparse()) + for len(sp2) > 0 { + var spHdr block + sp2 = formatSPD(sp2, spHdr.Sparse()) + spb = append(spb, spHdr[:]...) + } + + // Update size fields in the header block. + realSize := hdr.Size + hdr.Size = 0 // Encoded size; does not account for encoded sparse map + for _, s := range spd { + hdr.Size += s.Length + } + copy(blk.V7().Size(), zeroBlock[:]) // Reset field + f.formatNumeric(blk.V7().Size(), hdr.Size) + f.formatNumeric(blk.GNU().RealSize(), realSize) + } + */ + blk.SetFormat(FormatGNU) + if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil { + return err + } + + // Write the extended sparse map and setup the sparse writer if necessary. + if len(spd) > 0 { + // Use tw.w since the sparse map is not accounted for in hdr.Size. 
+ if _, err := tw.w.Write(spb); err != nil { + return err + } + tw.curr = &sparseFileWriter{tw.curr, spd, 0} + } + return nil +} + +type ( + stringFormatter func([]byte, string) + numberFormatter func([]byte, int64) +) + +// templateV7Plus fills out the V7 fields of a block using values from hdr. +// It also fills out fields (uname, gname, devmajor, devminor) that are +// shared in the USTAR, PAX, and GNU formats using the provided formatters. +// +// The block returned is only valid until the next call to +// templateV7Plus or writeRawFile. +func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block { + tw.blk.Reset() + + modTime := hdr.ModTime + if modTime.IsZero() { + modTime = time.Unix(0, 0) + } + + v7 := tw.blk.V7() + v7.TypeFlag()[0] = hdr.Typeflag + fmtStr(v7.Name(), hdr.Name) + fmtStr(v7.LinkName(), hdr.Linkname) + fmtNum(v7.Mode(), hdr.Mode) + fmtNum(v7.UID(), int64(hdr.Uid)) + fmtNum(v7.GID(), int64(hdr.Gid)) + fmtNum(v7.Size(), hdr.Size) + fmtNum(v7.ModTime(), modTime.Unix()) + + ustar := tw.blk.USTAR() + fmtStr(ustar.UserName(), hdr.Uname) + fmtStr(ustar.GroupName(), hdr.Gname) + fmtNum(ustar.DevMajor(), hdr.Devmajor) + fmtNum(ustar.DevMinor(), hdr.Devminor) + + return &tw.blk +} + +// writeRawFile writes a minimal file with the given name and flag type. +// It uses format to encode the header format and will write data as the body. +// It uses default values for all of the other fields (as BSD and GNU tar does). +func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error { + tw.blk.Reset() + + // Best effort for the filename. 
+ name = toASCII(name) + if len(name) > nameSize { + name = name[:nameSize] + } + name = strings.TrimRight(name, "/") + + var f formatter + v7 := tw.blk.V7() + v7.TypeFlag()[0] = flag + f.formatString(v7.Name(), name) + f.formatOctal(v7.Mode(), 0) + f.formatOctal(v7.UID(), 0) + f.formatOctal(v7.GID(), 0) + f.formatOctal(v7.Size(), int64(len(data))) // Must be < 8GiB + f.formatOctal(v7.ModTime(), 0) + tw.blk.SetFormat(format) + if f.err != nil { + return f.err // Only occurs if size condition is violated + } + + // Write the header and data. + if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil { + return err + } + _, err := io.WriteString(tw, data) + return err +} + +// writeRawHeader writes the value of blk, regardless of its value. +// It sets up the Writer such that it can accept a file of the given size. +// If the flag is a special header-only flag, then the size is treated as zero. +func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error { + if err := tw.Flush(); err != nil { + return err + } + if _, err := tw.w.Write(blk[:]); err != nil { + return err + } + if isHeaderOnlyType(flag) { + size = 0 + } + tw.curr = ®FileWriter{tw.w, size} + tw.pad = blockPadding(size) + return nil } // splitUSTARPath splits a path according to USTAR prefix and suffix rules. @@ -276,95 +412,233 @@ func splitUSTARPath(name string) (prefix, suffix string, ok bool) { return name[:i], name[i+1:], true } -// writePaxHeader writes an extended pax header to the -// archive. -func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error { - // Prepare extended header - ext := new(Header) - ext.Typeflag = TypeXHeader - // Setting ModTime is required for reader parsing to - // succeed, and seems harmless enough. - ext.ModTime = hdr.ModTime - // The spec asks that we namespace our pseudo files - // with the current pid. However, this results in differing outputs - // for identical inputs. As such, the constant 0 is now used instead. 
- // golang.org/issue/12358 - dir, file := path.Split(hdr.Name) - fullName := path.Join(dir, "PaxHeaders.0", file) - - ascii := toASCII(fullName) - if len(ascii) > nameSize { - ascii = ascii[:nameSize] - } - ext.Name = ascii - // Construct the body - var buf bytes.Buffer - - // Keys are sorted before writing to body to allow deterministic output. - keys := make([]string, 0, len(paxHeaders)) - for k := range paxHeaders { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k])) - } - - ext.Size = int64(len(buf.Bytes())) - if err := tw.writeHeader(ext, false); err != nil { - return err - } - if _, err := tw.Write(buf.Bytes()); err != nil { - return err - } - if err := tw.Flush(); err != nil { - return err - } - return nil -} - -// Write writes to the current entry in the tar archive. +// Write writes to the current file in the tar archive. // Write returns the error ErrWriteTooLong if more than -// hdr.Size bytes are written after WriteHeader. -func (tw *Writer) Write(b []byte) (n int, err error) { - if tw.closed { - err = ErrWriteAfterClose - return +// Header.Size bytes are written after WriteHeader. +// +// Calling Write on special types like TypeLink, TypeSymlink, TypeChar, +// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless +// of what the Header.Size claims. +func (tw *Writer) Write(b []byte) (int, error) { + if tw.err != nil { + return 0, tw.err } - overwrite := false - if int64(len(b)) > tw.nb { - b = b[0:tw.nb] - overwrite = true + n, err := tw.curr.Write(b) + if err != nil && err != ErrWriteTooLong { + tw.err = err } - n, err = tw.w.Write(b) - tw.nb -= int64(n) - if err == nil && overwrite { - err = ErrWriteTooLong - return - } - tw.err = err - return + return n, err } -// Close closes the tar archive, flushing any unwritten -// data to the underlying writer. 
-func (tw *Writer) Close() error { - if tw.err != nil || tw.closed { - return tw.err +// readFrom populates the content of the current file by reading from r. +// The bytes read must match the number of remaining bytes in the current file. +// +// If the current file is sparse and r is an io.ReadSeeker, +// then readFrom uses Seek to skip past holes defined in Header.SparseHoles, +// assuming that skipped regions are all NULs. +// This always reads the last byte to ensure r is the right size. +// +// TODO(dsnet): Re-export this when adding sparse file support. +// See https://golang.org/issue/22735 +func (tw *Writer) readFrom(r io.Reader) (int64, error) { + if tw.err != nil { + return 0, tw.err + } + n, err := tw.curr.ReadFrom(r) + if err != nil && err != ErrWriteTooLong { + tw.err = err + } + return n, err +} + +// Close closes the tar archive by flushing the padding, and writing the footer. +// If the current file (from a prior call to WriteHeader) is not fully written, +// then this returns an error. +func (tw *Writer) Close() error { + if tw.err == ErrWriteAfterClose { + return nil } - tw.Flush() - tw.closed = true if tw.err != nil { return tw.err } - // trailer: two zero blocks - for i := 0; i < 2; i++ { - _, tw.err = tw.w.Write(zeroBlock[:]) - if tw.err != nil { - break + // Trailer: two zero blocks. + err := tw.Flush() + for i := 0; i < 2 && err == nil; i++ { + _, err = tw.w.Write(zeroBlock[:]) + } + + // Ensure all future actions are invalid. + tw.err = ErrWriteAfterClose + return err // Report IO errors +} + +// regFileWriter is a fileWriter for writing data to a regular file entry. 
+type regFileWriter struct { + w io.Writer // Underlying Writer + nb int64 // Number of remaining bytes to write +} + +func (fw *regFileWriter) Write(b []byte) (n int, err error) { + overwrite := int64(len(b)) > fw.nb + if overwrite { + b = b[:fw.nb] + } + if len(b) > 0 { + n, err = fw.w.Write(b) + fw.nb -= int64(n) + } + switch { + case err != nil: + return n, err + case overwrite: + return n, ErrWriteTooLong + default: + return n, nil + } +} + +func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) { + return io.Copy(struct{ io.Writer }{fw}, r) +} + +func (fw regFileWriter) LogicalRemaining() int64 { + return fw.nb +} +func (fw regFileWriter) PhysicalRemaining() int64 { + return fw.nb +} + +// sparseFileWriter is a fileWriter for writing data to a sparse file entry. +type sparseFileWriter struct { + fw fileWriter // Underlying fileWriter + sp sparseDatas // Normalized list of data fragments + pos int64 // Current position in sparse file +} + +func (sw *sparseFileWriter) Write(b []byte) (n int, err error) { + overwrite := int64(len(b)) > sw.LogicalRemaining() + if overwrite { + b = b[:sw.LogicalRemaining()] + } + + b0 := b + endPos := sw.pos + int64(len(b)) + for endPos > sw.pos && err == nil { + var nf int // Bytes written in fragment + dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset() + if sw.pos < dataStart { // In a hole fragment + bf := b[:min(int64(len(b)), dataStart-sw.pos)] + nf, err = zeroWriter{}.Write(bf) + } else { // In a data fragment + bf := b[:min(int64(len(b)), dataEnd-sw.pos)] + nf, err = sw.fw.Write(bf) + } + b = b[nf:] + sw.pos += int64(nf) + if sw.pos >= dataEnd && len(sw.sp) > 1 { + sw.sp = sw.sp[1:] // Ensure last fragment always remains } } - return tw.err + + n = len(b0) - len(b) + switch { + case err == ErrWriteTooLong: + return n, errMissData // Not possible; implies bug in validation logic + case err != nil: + return n, err + case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0: + return n, errUnrefData // Not 
possible; implies bug in validation logic + case overwrite: + return n, ErrWriteTooLong + default: + return n, nil + } +} + +func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) { + rs, ok := r.(io.ReadSeeker) + if ok { + if _, err := rs.Seek(0, io.SeekCurrent); err != nil { + ok = false // Not all io.Seeker can really seek + } + } + if !ok { + return io.Copy(struct{ io.Writer }{sw}, r) + } + + var readLastByte bool + pos0 := sw.pos + for sw.LogicalRemaining() > 0 && !readLastByte && err == nil { + var nf int64 // Size of fragment + dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset() + if sw.pos < dataStart { // In a hole fragment + nf = dataStart - sw.pos + if sw.PhysicalRemaining() == 0 { + readLastByte = true + nf-- + } + _, err = rs.Seek(nf, io.SeekCurrent) + } else { // In a data fragment + nf = dataEnd - sw.pos + nf, err = io.CopyN(sw.fw, rs, nf) + } + sw.pos += nf + if sw.pos >= dataEnd && len(sw.sp) > 1 { + sw.sp = sw.sp[1:] // Ensure last fragment always remains + } + } + + // If the last fragment is a hole, then seek to 1-byte before EOF, and + // read a single byte to ensure the file is the right size. + if readLastByte && err == nil { + _, err = mustReadFull(rs, []byte{0}) + sw.pos++ + } + + n = sw.pos - pos0 + switch { + case err == io.EOF: + return n, io.ErrUnexpectedEOF + case err == ErrWriteTooLong: + return n, errMissData // Not possible; implies bug in validation logic + case err != nil: + return n, err + case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0: + return n, errUnrefData // Not possible; implies bug in validation logic + default: + return n, ensureEOF(rs) + } +} + +func (sw sparseFileWriter) LogicalRemaining() int64 { + return sw.sp[len(sw.sp)-1].endOffset() - sw.pos +} +func (sw sparseFileWriter) PhysicalRemaining() int64 { + return sw.fw.PhysicalRemaining() +} + +// zeroWriter may only be written with NULs, otherwise it returns errWriteHole. 
+type zeroWriter struct{} + +func (zeroWriter) Write(b []byte) (int, error) { + for i, c := range b { + if c != 0 { + return i, errWriteHole + } + } + return len(b), nil +} + +// ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so. +func ensureEOF(r io.Reader) error { + n, err := tryReadFull(r, []byte{0}) + switch { + case n > 0: + return ErrWriteTooLong + case err == io.EOF: + return nil + default: + return err + } } diff --git a/src/archive/tar/writer_test.go b/src/archive/tar/writer_test.go index d88b8f41ca8..24e8da271c2 100644 --- a/src/archive/tar/writer_test.go +++ b/src/archive/tar/writer_test.go @@ -6,10 +6,12 @@ package tar import ( "bytes" - "fmt" + "encoding/hex" + "errors" "io" "io/ioutil" "os" + "path" "reflect" "sort" "strings" @@ -18,120 +20,127 @@ import ( "time" ) -// Render byte array in a two-character hexadecimal string, spaced for easy visual inspection. -func bytestr(offset int, b []byte) string { - const rowLen = 32 - s := fmt.Sprintf("%04x ", offset) - for _, ch := range b { - switch { - case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z': - s += fmt.Sprintf(" %c", ch) - default: - s += fmt.Sprintf(" %02x", ch) +func bytediff(a, b []byte) string { + const ( + uniqueA = "- " + uniqueB = "+ " + identity = " " + ) + var ss []string + sa := strings.Split(strings.TrimSpace(hex.Dump(a)), "\n") + sb := strings.Split(strings.TrimSpace(hex.Dump(b)), "\n") + for len(sa) > 0 && len(sb) > 0 { + if sa[0] == sb[0] { + ss = append(ss, identity+sa[0]) + } else { + ss = append(ss, uniqueA+sa[0]) + ss = append(ss, uniqueB+sb[0]) } + sa, sb = sa[1:], sb[1:] } - return s -} - -// Render a pseudo-diff between two blocks of bytes. -func bytediff(a []byte, b []byte) string { - const rowLen = 32 - s := fmt.Sprintf("(%d bytes vs. 
%d bytes)\n", len(a), len(b)) - for offset := 0; len(a)+len(b) > 0; offset += rowLen { - na, nb := rowLen, rowLen - if na > len(a) { - na = len(a) - } - if nb > len(b) { - nb = len(b) - } - sa := bytestr(offset, a[0:na]) - sb := bytestr(offset, b[0:nb]) - if sa != sb { - s += fmt.Sprintf("-%v\n+%v\n", sa, sb) - } - a = a[na:] - b = b[nb:] + for len(sa) > 0 { + ss = append(ss, uniqueA+sa[0]) + sa = sa[1:] } - return s + for len(sb) > 0 { + ss = append(ss, uniqueB+sb[0]) + sb = sb[1:] + } + return strings.Join(ss, "\n") } func TestWriter(t *testing.T) { - type entry struct { - header *Header - contents string - } + type ( + testHeader struct { // WriteHeader(hdr) == wantErr + hdr Header + wantErr error + } + testWrite struct { // Write(str) == (wantCnt, wantErr) + str string + wantCnt int + wantErr error + } + testReadFrom struct { // ReadFrom(testFile{ops}) == (wantCnt, wantErr) + ops fileOps + wantCnt int64 + wantErr error + } + testClose struct { // Close() == wantErr + wantErr error + } + testFnc interface{} // testHeader | testWrite | testReadFrom | testClose + ) vectors := []struct { - file string // filename of expected output - entries []*entry + file string // Optional filename of expected output + tests []testFnc }{{ // The writer test file was produced with this command: // tar (GNU tar) 1.26 // ln -s small.txt link.txt // tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt file: "testdata/writer.tar", - entries: []*entry{{ - header: &Header{ + tests: []testFnc{ + testHeader{Header{ + Typeflag: TypeReg, Name: "small.txt", - Mode: 0640, - Uid: 73025, - Gid: 5000, Size: 5, - ModTime: time.Unix(1246508266, 0), - Typeflag: '0', - Uname: "dsymonds", - Gname: "eng", - }, - contents: "Kilts", - }, { - header: &Header{ - Name: "small2.txt", Mode: 0640, Uid: 73025, Gid: 5000, - Size: 11, - ModTime: time.Unix(1245217492, 0), - Typeflag: '0', Uname: "dsymonds", Gname: "eng", - }, - contents: "Google.com\n", - }, { - header: &Header{ + ModTime: 
time.Unix(1246508266, 0), + }, nil}, + testWrite{"Kilts", 5, nil}, + + testHeader{Header{ + Typeflag: TypeReg, + Name: "small2.txt", + Size: 11, + Mode: 0640, + Uid: 73025, + Uname: "dsymonds", + Gname: "eng", + Gid: 5000, + ModTime: time.Unix(1245217492, 0), + }, nil}, + testWrite{"Google.com\n", 11, nil}, + + testHeader{Header{ + Typeflag: TypeSymlink, Name: "link.txt", + Linkname: "small.txt", Mode: 0777, Uid: 1000, Gid: 1000, - Size: 0, - ModTime: time.Unix(1314603082, 0), - Typeflag: '2', - Linkname: "small.txt", Uname: "strings", Gname: "strings", - }, - // no contents - }}, + ModTime: time.Unix(1314603082, 0), + }, nil}, + testWrite{"", 0, nil}, + + testClose{nil}, + }, }, { // The truncated test file was produced using these commands: // dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt // tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar file: "testdata/writer-big.tar", - entries: []*entry{{ - header: &Header{ + tests: []testFnc{ + testHeader{Header{ + Typeflag: TypeReg, Name: "tmp/16gig.txt", + Size: 16 << 30, Mode: 0640, Uid: 73025, Gid: 5000, - Size: 16 << 30, - ModTime: time.Unix(1254699560, 0), - Typeflag: '0', Uname: "dsymonds", Gname: "eng", - }, - // fake contents - contents: strings.Repeat("\x00", 4<<10), - }}, + ModTime: time.Unix(1254699560, 0), + Format: FormatGNU, + }, nil}, + }, }, { // This truncated file was produced using this library. // It was verified to work with GNU tar 1.27.1 and BSD tar 3.1.2. @@ -141,117 +150,377 @@ func TestWriter(t *testing.T) { // // This file is in PAX format. 
file: "testdata/writer-big-long.tar", - entries: []*entry{{ - header: &Header{ + tests: []testFnc{ + testHeader{Header{ + Typeflag: TypeReg, Name: strings.Repeat("longname/", 15) + "16gig.txt", + Size: 16 << 30, Mode: 0644, Uid: 1000, Gid: 1000, - Size: 16 << 30, - ModTime: time.Unix(1399583047, 0), - Typeflag: '0', Uname: "guillaume", Gname: "guillaume", - }, - // fake contents - contents: strings.Repeat("\x00", 4<<10), - }}, + ModTime: time.Unix(1399583047, 0), + }, nil}, + }, }, { - // TODO(dsnet): The Writer output should match the following file. - // To fix an issue (see https://golang.org/issue/12594), we disabled - // prefix support, which alters the generated output. - /* - // This file was produced using gnu tar 1.17 - // gnutar -b 4 --format=ustar (longname/)*15 + file.txt - file: "testdata/ustar.tar" - */ - file: "testdata/ustar.issue12594.tar", // This is a valid tar file, but not expected - entries: []*entry{{ - header: &Header{ + // This file was produced using GNU tar v1.17. 
+ // gnutar -b 4 --format=ustar (longname/)*15 + file.txt + file: "testdata/ustar.tar", + tests: []testFnc{ + testHeader{Header{ + Typeflag: TypeReg, Name: strings.Repeat("longname/", 15) + "file.txt", + Size: 6, Mode: 0644, - Uid: 0765, - Gid: 024, - Size: 06, - ModTime: time.Unix(1360135598, 0), - Typeflag: '0', + Uid: 501, + Gid: 20, Uname: "shane", Gname: "staff", - }, - contents: "hello\n", - }}, + ModTime: time.Unix(1360135598, 0), + }, nil}, + testWrite{"hello\n", 6, nil}, + testClose{nil}, + }, }, { - // This file was produced using gnu tar 1.26 - // echo "Slartibartfast" > file.txt - // ln file.txt hard.txt - // tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt + // This file was produced using GNU tar v1.26: + // echo "Slartibartfast" > file.txt + // ln file.txt hard.txt + // tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt file: "testdata/hardlink.tar", - entries: []*entry{{ - header: &Header{ + tests: []testFnc{ + testHeader{Header{ + Typeflag: TypeReg, Name: "file.txt", - Mode: 0644, - Uid: 1000, - Gid: 100, Size: 15, - ModTime: time.Unix(1425484303, 0), - Typeflag: '0', - Uname: "vbatts", - Gname: "users", - }, - contents: "Slartibartfast\n", - }, { - header: &Header{ - Name: "hard.txt", Mode: 0644, Uid: 1000, Gid: 100, - Size: 0, - ModTime: time.Unix(1425484303, 0), - Typeflag: '1', - Linkname: "file.txt", Uname: "vbatts", Gname: "users", - }, - // no contents - }}, + ModTime: time.Unix(1425484303, 0), + }, nil}, + testWrite{"Slartibartfast\n", 15, nil}, + + testHeader{Header{ + Typeflag: TypeLink, + Name: "hard.txt", + Linkname: "file.txt", + Mode: 0644, + Uid: 1000, + Gid: 100, + Uname: "vbatts", + Gname: "users", + ModTime: time.Unix(1425484303, 0), + }, nil}, + testWrite{"", 0, nil}, + + testClose{nil}, + }, + }, { + tests: []testFnc{ + testHeader{Header{ + Typeflag: TypeReg, + Name: "bad-null.txt", + Xattrs: map[string]string{"null\x00null\x00": "fizzbuzz"}, + }, headerError{}}, + }, + }, { + tests: []testFnc{ + 
testHeader{Header{ + Typeflag: TypeReg, + Name: "null\x00.txt", + }, headerError{}}, + }, + }, { + file: "testdata/pax-records.tar", + tests: []testFnc{ + testHeader{Header{ + Typeflag: TypeReg, + Name: "file", + Uname: strings.Repeat("long", 10), + PAXRecords: map[string]string{ + "path": "FILE", // Should be ignored + "GNU.sparse.map": "0,0", // Should be ignored + "comment": "Hello, 世界", + "GOLANG.pkg": "tar", + }, + }, nil}, + testClose{nil}, + }, + }, { + // Craft a theoretically valid PAX archive with global headers. + // The GNU and BSD tar tools do not parse these the same way. + // + // BSD tar v3.1.2 parses and ignores all global headers; + // the behavior is verified by researching the source code. + // + // $ bsdtar -tvf pax-global-records.tar + // ---------- 0 0 0 0 Dec 31 1969 file1 + // ---------- 0 0 0 0 Dec 31 1969 file2 + // ---------- 0 0 0 0 Dec 31 1969 file3 + // ---------- 0 0 0 0 May 13 2014 file4 + // + // GNU tar v1.27.1 applies global headers to subsequent records, + // but does not do the following properly: + // * It does not treat an empty record as deletion. + // * It does not use subsequent global headers to update previous ones. + // + // $ gnutar -tvf pax-global-records.tar + // ---------- 0/0 0 2017-07-13 19:40 global1 + // ---------- 0/0 0 2017-07-13 19:40 file2 + // gnutar: Substituting `.' for empty member name + // ---------- 0/0 0 1969-12-31 16:00 + // gnutar: Substituting `.' 
for empty member name + // ---------- 0/0 0 2014-05-13 09:53 + // + // According to the PAX specification, this should have been the result: + // ---------- 0/0 0 2017-07-13 19:40 global1 + // ---------- 0/0 0 2017-07-13 19:40 file2 + // ---------- 0/0 0 2017-07-13 19:40 file3 + // ---------- 0/0 0 2014-05-13 09:53 file4 + file: "testdata/pax-global-records.tar", + tests: []testFnc{ + testHeader{Header{ + Typeflag: TypeXGlobalHeader, + PAXRecords: map[string]string{"path": "global1", "mtime": "1500000000.0"}, + }, nil}, + testHeader{Header{ + Typeflag: TypeReg, Name: "file1", + }, nil}, + testHeader{Header{ + Typeflag: TypeReg, + Name: "file2", + PAXRecords: map[string]string{"path": "file2"}, + }, nil}, + testHeader{Header{ + Typeflag: TypeXGlobalHeader, + PAXRecords: map[string]string{"path": ""}, // Should delete "path", but keep "mtime" + }, nil}, + testHeader{Header{ + Typeflag: TypeReg, Name: "file3", + }, nil}, + testHeader{Header{ + Typeflag: TypeReg, + Name: "file4", + ModTime: time.Unix(1400000000, 0), + PAXRecords: map[string]string{"mtime": "1400000000"}, + }, nil}, + testClose{nil}, + }, + }, { + file: "testdata/gnu-utf8.tar", + tests: []testFnc{ + testHeader{Header{ + Typeflag: TypeReg, + Name: "☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹☺☻☹", + Mode: 0644, + Uid: 1000, Gid: 1000, + Uname: "☺", + Gname: "⚹", + ModTime: time.Unix(0, 0), + Format: FormatGNU, + }, nil}, + testClose{nil}, + }, + }, { + file: "testdata/gnu-not-utf8.tar", + tests: []testFnc{ + testHeader{Header{ + Typeflag: TypeReg, + Name: "hi\x80\x81\x82\x83bye", + Mode: 0644, + Uid: 1000, + Gid: 1000, + Uname: "rawr", + Gname: "dsnet", + ModTime: time.Unix(0, 0), + Format: FormatGNU, + }, nil}, + testClose{nil}, + }, + // TODO(dsnet): Re-enable this test when adding sparse support. 
+ // See https://golang.org/issue/22735 + /* + }, { + file: "testdata/gnu-nil-sparse-data.tar", + tests: []testFnc{ + testHeader{Header{ + Typeflag: TypeGNUSparse, + Name: "sparse.db", + Size: 1000, + SparseHoles: []sparseEntry{{Offset: 1000, Length: 0}}, + }, nil}, + testWrite{strings.Repeat("0123456789", 100), 1000, nil}, + testClose{}, + }, + }, { + file: "testdata/gnu-nil-sparse-hole.tar", + tests: []testFnc{ + testHeader{Header{ + Typeflag: TypeGNUSparse, + Name: "sparse.db", + Size: 1000, + SparseHoles: []sparseEntry{{Offset: 0, Length: 1000}}, + }, nil}, + testWrite{strings.Repeat("\x00", 1000), 1000, nil}, + testClose{}, + }, + }, { + file: "testdata/pax-nil-sparse-data.tar", + tests: []testFnc{ + testHeader{Header{ + Typeflag: TypeReg, + Name: "sparse.db", + Size: 1000, + SparseHoles: []sparseEntry{{Offset: 1000, Length: 0}}, + }, nil}, + testWrite{strings.Repeat("0123456789", 100), 1000, nil}, + testClose{}, + }, + }, { + file: "testdata/pax-nil-sparse-hole.tar", + tests: []testFnc{ + testHeader{Header{ + Typeflag: TypeReg, + Name: "sparse.db", + Size: 1000, + SparseHoles: []sparseEntry{{Offset: 0, Length: 1000}}, + }, nil}, + testWrite{strings.Repeat("\x00", 1000), 1000, nil}, + testClose{}, + }, + }, { + file: "testdata/gnu-sparse-big.tar", + tests: []testFnc{ + testHeader{Header{ + Typeflag: TypeGNUSparse, + Name: "gnu-sparse", + Size: 6e10, + SparseHoles: []sparseEntry{ + {Offset: 0e10, Length: 1e10 - 100}, + {Offset: 1e10, Length: 1e10 - 100}, + {Offset: 2e10, Length: 1e10 - 100}, + {Offset: 3e10, Length: 1e10 - 100}, + {Offset: 4e10, Length: 1e10 - 100}, + {Offset: 5e10, Length: 1e10 - 100}, + }, + }, nil}, + testReadFrom{fileOps{ + int64(1e10 - blockSize), + strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), + int64(1e10 - blockSize), + strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), + int64(1e10 - blockSize), + strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), + int64(1e10 - 
blockSize), + strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), + int64(1e10 - blockSize), + strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), + int64(1e10 - blockSize), + strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), + }, 6e10, nil}, + testClose{nil}, + }, + }, { + file: "testdata/pax-sparse-big.tar", + tests: []testFnc{ + testHeader{Header{ + Typeflag: TypeReg, + Name: "pax-sparse", + Size: 6e10, + SparseHoles: []sparseEntry{ + {Offset: 0e10, Length: 1e10 - 100}, + {Offset: 1e10, Length: 1e10 - 100}, + {Offset: 2e10, Length: 1e10 - 100}, + {Offset: 3e10, Length: 1e10 - 100}, + {Offset: 4e10, Length: 1e10 - 100}, + {Offset: 5e10, Length: 1e10 - 100}, + }, + }, nil}, + testReadFrom{fileOps{ + int64(1e10 - blockSize), + strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), + int64(1e10 - blockSize), + strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), + int64(1e10 - blockSize), + strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), + int64(1e10 - blockSize), + strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), + int64(1e10 - blockSize), + strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), + int64(1e10 - blockSize), + strings.Repeat("\x00", blockSize-100) + strings.Repeat("0123456789", 10), + }, 6e10, nil}, + testClose{nil}, + }, + */ + }, { + file: "testdata/trailing-slash.tar", + tests: []testFnc{ + testHeader{Header{Name: strings.Repeat("123456789/", 30)}, nil}, + testClose{nil}, + }, }} -testLoop: - for i, v := range vectors { - expected, err := ioutil.ReadFile(v.file) - if err != nil { - t.Errorf("test %d: Unexpected error: %v", i, err) - continue + equalError := func(x, y error) bool { + _, ok1 := x.(headerError) + _, ok2 := y.(headerError) + if ok1 || ok2 { + return ok1 && ok2 } + return x == y + } + for _, v := range vectors { + t.Run(path.Base(v.file), func(t *testing.T) { + 
const maxSize = 10 << 10 // 10KiB + buf := new(bytes.Buffer) + tw := NewWriter(iotest.TruncateWriter(buf, maxSize)) - buf := new(bytes.Buffer) - tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB - big := false - for j, entry := range v.entries { - big = big || entry.header.Size > 1<<10 - if err := tw.WriteHeader(entry.header); err != nil { - t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err) - continue testLoop + for i, tf := range v.tests { + switch tf := tf.(type) { + case testHeader: + err := tw.WriteHeader(&tf.hdr) + if !equalError(err, tf.wantErr) { + t.Fatalf("test %d, WriteHeader() = %v, want %v", i, err, tf.wantErr) + } + case testWrite: + got, err := tw.Write([]byte(tf.str)) + if got != tf.wantCnt || !equalError(err, tf.wantErr) { + t.Fatalf("test %d, Write() = (%d, %v), want (%d, %v)", i, got, err, tf.wantCnt, tf.wantErr) + } + case testReadFrom: + f := &testFile{ops: tf.ops} + got, err := tw.readFrom(f) + if _, ok := err.(testError); ok { + t.Errorf("test %d, ReadFrom(): %v", i, err) + } else if got != tf.wantCnt || !equalError(err, tf.wantErr) { + t.Errorf("test %d, ReadFrom() = (%d, %v), want (%d, %v)", i, got, err, tf.wantCnt, tf.wantErr) + } + if len(f.ops) > 0 { + t.Errorf("test %d, expected %d more operations", i, len(f.ops)) + } + case testClose: + err := tw.Close() + if !equalError(err, tf.wantErr) { + t.Fatalf("test %d, Close() = %v, want %v", i, err, tf.wantErr) + } + default: + t.Fatalf("test %d, unknown test operation: %T", i, tf) + } } - if _, err := io.WriteString(tw, entry.contents); err != nil { - t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err) - continue testLoop - } - } - // Only interested in Close failures for the small tests. 
- if err := tw.Close(); err != nil && !big { - t.Errorf("test %d: Failed closing archive: %v", i, err) - continue testLoop - } - actual := buf.Bytes() - if !bytes.Equal(expected, actual) { - t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v", - i, bytediff(expected, actual)) - } - if testing.Short() { // The second test is expensive. - break - } + if v.file != "" { + want, err := ioutil.ReadFile(v.file) + if err != nil { + t.Fatalf("ReadFile() = %v, want nil", err) + } + got := buf.Bytes() + if !bytes.Equal(want, got) { + t.Fatalf("incorrect result: (-got +want)\n%v", bytediff(got, want)) + } + } + }) } } @@ -546,21 +815,104 @@ func TestValidTypeflagWithPAXHeader(t *testing.T) { } } -func TestWriteAfterClose(t *testing.T) { - var buffer bytes.Buffer - tw := NewWriter(&buffer) +// failOnceWriter fails exactly once and then always reports success. +type failOnceWriter bool - hdr := &Header{ - Name: "small.txt", - Size: 5, - } - if err := tw.WriteHeader(hdr); err != nil { - t.Fatalf("Failed to write header: %s", err) - } - tw.Close() - if _, err := tw.Write([]byte("Kilts")); err != ErrWriteAfterClose { - t.Fatalf("Write: got %v; want ErrWriteAfterClose", err) +func (w *failOnceWriter) Write(b []byte) (int, error) { + if !*w { + return 0, io.ErrShortWrite } + *w = true + return len(b), nil +} + +func TestWriterErrors(t *testing.T) { + t.Run("HeaderOnly", func(t *testing.T) { + tw := NewWriter(new(bytes.Buffer)) + hdr := &Header{Name: "dir/", Typeflag: TypeDir} + if err := tw.WriteHeader(hdr); err != nil { + t.Fatalf("WriteHeader() = %v, want nil", err) + } + if _, err := tw.Write([]byte{0x00}); err != ErrWriteTooLong { + t.Fatalf("Write() = %v, want %v", err, ErrWriteTooLong) + } + }) + + t.Run("NegativeSize", func(t *testing.T) { + tw := NewWriter(new(bytes.Buffer)) + hdr := &Header{Name: "small.txt", Size: -1} + if err := tw.WriteHeader(hdr); err == nil { + t.Fatalf("WriteHeader() = nil, want non-nil error") + } + }) + + t.Run("BeforeHeader", func(t 
*testing.T) { + tw := NewWriter(new(bytes.Buffer)) + if _, err := tw.Write([]byte("Kilts")); err != ErrWriteTooLong { + t.Fatalf("Write() = %v, want %v", err, ErrWriteTooLong) + } + }) + + t.Run("AfterClose", func(t *testing.T) { + tw := NewWriter(new(bytes.Buffer)) + hdr := &Header{Name: "small.txt"} + if err := tw.WriteHeader(hdr); err != nil { + t.Fatalf("WriteHeader() = %v, want nil", err) + } + if err := tw.Close(); err != nil { + t.Fatalf("Close() = %v, want nil", err) + } + if _, err := tw.Write([]byte("Kilts")); err != ErrWriteAfterClose { + t.Fatalf("Write() = %v, want %v", err, ErrWriteAfterClose) + } + if err := tw.Flush(); err != ErrWriteAfterClose { + t.Fatalf("Flush() = %v, want %v", err, ErrWriteAfterClose) + } + if err := tw.Close(); err != nil { + t.Fatalf("Close() = %v, want nil", err) + } + }) + + t.Run("PrematureFlush", func(t *testing.T) { + tw := NewWriter(new(bytes.Buffer)) + hdr := &Header{Name: "small.txt", Size: 5} + if err := tw.WriteHeader(hdr); err != nil { + t.Fatalf("WriteHeader() = %v, want nil", err) + } + if err := tw.Flush(); err == nil { + t.Fatalf("Flush() = %v, want non-nil error", err) + } + }) + + t.Run("PrematureClose", func(t *testing.T) { + tw := NewWriter(new(bytes.Buffer)) + hdr := &Header{Name: "small.txt", Size: 5} + if err := tw.WriteHeader(hdr); err != nil { + t.Fatalf("WriteHeader() = %v, want nil", err) + } + if err := tw.Close(); err == nil { + t.Fatalf("Close() = %v, want non-nil error", err) + } + }) + + t.Run("Persistence", func(t *testing.T) { + tw := NewWriter(new(failOnceWriter)) + if err := tw.WriteHeader(&Header{}); err != io.ErrShortWrite { + t.Fatalf("WriteHeader() = %v, want %v", err, io.ErrShortWrite) + } + if err := tw.WriteHeader(&Header{Name: "small.txt"}); err == nil { + t.Errorf("WriteHeader() = got %v, want non-nil error", err) + } + if _, err := tw.Write(nil); err == nil { + t.Errorf("Write() = %v, want non-nil error", err) + } + if err := tw.Flush(); err == nil { + t.Errorf("Flush() = %v, want 
non-nil error", err) + } + if err := tw.Close(); err == nil { + t.Errorf("Close() = %v, want non-nil error", err) + } + }) } func TestSplitUSTARPath(t *testing.T) { @@ -631,7 +983,7 @@ func TestIssue12594(t *testing.T) { if i := strings.IndexByte(prefix, 0); i >= 0 { prefix = prefix[:i] // Truncate at the NUL terminator } - if blk.GetFormat() == formatGNU && len(prefix) > 0 && strings.HasPrefix(name, prefix) { + if blk.GetFormat() == FormatGNU && len(prefix) > 0 && strings.HasPrefix(name, prefix) { t.Errorf("test %d, found prefix in GNU format: %s", i, prefix) } @@ -645,3 +997,306 @@ func TestIssue12594(t *testing.T) { } } } + +// testNonEmptyWriter wraps an io.Writer and ensures that +// Write is never called with an empty buffer. +type testNonEmptyWriter struct{ io.Writer } + +func (w testNonEmptyWriter) Write(b []byte) (int, error) { + if len(b) == 0 { + return 0, errors.New("unexpected empty Write call") + } + return w.Writer.Write(b) +} + +func TestFileWriter(t *testing.T) { + type ( + testWrite struct { // Write(str) == (wantCnt, wantErr) + str string + wantCnt int + wantErr error + } + testReadFrom struct { // ReadFrom(testFile{ops}) == (wantCnt, wantErr) + ops fileOps + wantCnt int64 + wantErr error + } + testRemaining struct { // LogicalRemaining() == wantLCnt, PhysicalRemaining() == wantPCnt + wantLCnt int64 + wantPCnt int64 + } + testFnc interface{} // testWrite | testReadFrom | testRemaining + ) + + type ( + makeReg struct { + size int64 + wantStr string + } + makeSparse struct { + makeReg makeReg + sph sparseHoles + size int64 + } + fileMaker interface{} // makeReg | makeSparse + ) + + vectors := []struct { + maker fileMaker + tests []testFnc + }{{ + maker: makeReg{0, ""}, + tests: []testFnc{ + testRemaining{0, 0}, + testWrite{"", 0, nil}, + testWrite{"a", 0, ErrWriteTooLong}, + testReadFrom{fileOps{""}, 0, nil}, + testReadFrom{fileOps{"a"}, 0, ErrWriteTooLong}, + testRemaining{0, 0}, + }, + }, { + maker: makeReg{1, "a"}, + tests: []testFnc{ + 
testRemaining{1, 1}, + testWrite{"", 0, nil}, + testWrite{"a", 1, nil}, + testWrite{"bcde", 0, ErrWriteTooLong}, + testWrite{"", 0, nil}, + testReadFrom{fileOps{""}, 0, nil}, + testReadFrom{fileOps{"a"}, 0, ErrWriteTooLong}, + testRemaining{0, 0}, + }, + }, { + maker: makeReg{5, "hello"}, + tests: []testFnc{ + testRemaining{5, 5}, + testWrite{"hello", 5, nil}, + testRemaining{0, 0}, + }, + }, { + maker: makeReg{5, "\x00\x00\x00\x00\x00"}, + tests: []testFnc{ + testRemaining{5, 5}, + testReadFrom{fileOps{"\x00\x00\x00\x00\x00"}, 5, nil}, + testRemaining{0, 0}, + }, + }, { + maker: makeReg{5, "\x00\x00\x00\x00\x00"}, + tests: []testFnc{ + testRemaining{5, 5}, + testReadFrom{fileOps{"\x00\x00\x00\x00\x00extra"}, 5, ErrWriteTooLong}, + testRemaining{0, 0}, + }, + }, { + maker: makeReg{5, "abc\x00\x00"}, + tests: []testFnc{ + testRemaining{5, 5}, + testWrite{"abc", 3, nil}, + testRemaining{2, 2}, + testReadFrom{fileOps{"\x00\x00"}, 2, nil}, + testRemaining{0, 0}, + }, + }, { + maker: makeReg{5, "\x00\x00abc"}, + tests: []testFnc{ + testRemaining{5, 5}, + testWrite{"\x00\x00", 2, nil}, + testRemaining{3, 3}, + testWrite{"abc", 3, nil}, + testReadFrom{fileOps{"z"}, 0, ErrWriteTooLong}, + testWrite{"z", 0, ErrWriteTooLong}, + testRemaining{0, 0}, + }, + }, { + maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8}, + tests: []testFnc{ + testRemaining{8, 5}, + testWrite{"ab\x00\x00\x00cde", 8, nil}, + testWrite{"a", 0, ErrWriteTooLong}, + testRemaining{0, 0}, + }, + }, { + maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8}, + tests: []testFnc{ + testWrite{"ab\x00\x00\x00cdez", 8, ErrWriteTooLong}, + testRemaining{0, 0}, + }, + }, { + maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8}, + tests: []testFnc{ + testWrite{"ab\x00", 3, nil}, + testRemaining{5, 3}, + testWrite{"\x00\x00cde", 5, nil}, + testWrite{"a", 0, ErrWriteTooLong}, + testRemaining{0, 0}, + }, + }, { + maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8}, + tests: 
[]testFnc{ + testWrite{"ab", 2, nil}, + testRemaining{6, 3}, + testReadFrom{fileOps{int64(3), "cde"}, 6, nil}, + testRemaining{0, 0}, + }, + }, { + maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8}, + tests: []testFnc{ + testReadFrom{fileOps{"ab", int64(3), "cde"}, 8, nil}, + testRemaining{0, 0}, + }, + }, { + maker: makeSparse{makeReg{5, "abcde"}, sparseHoles{{2, 3}}, 8}, + tests: []testFnc{ + testReadFrom{fileOps{"ab", int64(3), "cdeX"}, 8, ErrWriteTooLong}, + testRemaining{0, 0}, + }, + }, { + maker: makeSparse{makeReg{4, "abcd"}, sparseHoles{{2, 3}}, 8}, + tests: []testFnc{ + testReadFrom{fileOps{"ab", int64(3), "cd"}, 7, io.ErrUnexpectedEOF}, + testRemaining{1, 0}, + }, + }, { + maker: makeSparse{makeReg{4, "abcd"}, sparseHoles{{2, 3}}, 8}, + tests: []testFnc{ + testReadFrom{fileOps{"ab", int64(3), "cde"}, 7, errMissData}, + testRemaining{1, 0}, + }, + }, { + maker: makeSparse{makeReg{6, "abcde"}, sparseHoles{{2, 3}}, 8}, + tests: []testFnc{ + testReadFrom{fileOps{"ab", int64(3), "cde"}, 8, errUnrefData}, + testRemaining{0, 1}, + }, + }, { + maker: makeSparse{makeReg{4, "abcd"}, sparseHoles{{2, 3}}, 8}, + tests: []testFnc{ + testWrite{"ab", 2, nil}, + testRemaining{6, 2}, + testWrite{"\x00\x00\x00", 3, nil}, + testRemaining{3, 2}, + testWrite{"cde", 2, errMissData}, + testRemaining{1, 0}, + }, + }, { + maker: makeSparse{makeReg{6, "abcde"}, sparseHoles{{2, 3}}, 8}, + tests: []testFnc{ + testWrite{"ab", 2, nil}, + testRemaining{6, 4}, + testWrite{"\x00\x00\x00", 3, nil}, + testRemaining{3, 4}, + testWrite{"cde", 3, errUnrefData}, + testRemaining{0, 1}, + }, + }, { + maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7}, + tests: []testFnc{ + testRemaining{7, 3}, + testWrite{"\x00\x00abc\x00\x00", 7, nil}, + testRemaining{0, 0}, + }, + }, { + maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7}, + tests: []testFnc{ + testRemaining{7, 3}, + testReadFrom{fileOps{int64(2), "abc", int64(1), "\x00"}, 7, nil}, + 
testRemaining{0, 0}, + }, + }, { + maker: makeSparse{makeReg{3, ""}, sparseHoles{{0, 2}, {5, 2}}, 7}, + tests: []testFnc{ + testWrite{"abcdefg", 0, errWriteHole}, + }, + }, { + maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7}, + tests: []testFnc{ + testWrite{"\x00\x00abcde", 5, errWriteHole}, + }, + }, { + maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7}, + tests: []testFnc{ + testWrite{"\x00\x00abc\x00\x00z", 7, ErrWriteTooLong}, + testRemaining{0, 0}, + }, + }, { + maker: makeSparse{makeReg{3, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7}, + tests: []testFnc{ + testWrite{"\x00\x00", 2, nil}, + testRemaining{5, 3}, + testWrite{"abc", 3, nil}, + testRemaining{2, 0}, + testWrite{"\x00\x00", 2, nil}, + testRemaining{0, 0}, + }, + }, { + maker: makeSparse{makeReg{2, "ab"}, sparseHoles{{0, 2}, {5, 2}}, 7}, + tests: []testFnc{ + testWrite{"\x00\x00", 2, nil}, + testWrite{"abc", 2, errMissData}, + testWrite{"\x00\x00", 0, errMissData}, + }, + }, { + maker: makeSparse{makeReg{4, "abc"}, sparseHoles{{0, 2}, {5, 2}}, 7}, + tests: []testFnc{ + testWrite{"\x00\x00", 2, nil}, + testWrite{"abc", 3, nil}, + testWrite{"\x00\x00", 2, errUnrefData}, + }, + }} + + for i, v := range vectors { + var wantStr string + bb := new(bytes.Buffer) + w := testNonEmptyWriter{bb} + var fw fileWriter + switch maker := v.maker.(type) { + case makeReg: + fw = ®FileWriter{w, maker.size} + wantStr = maker.wantStr + case makeSparse: + if !validateSparseEntries(maker.sph, maker.size) { + t.Fatalf("invalid sparse map: %v", maker.sph) + } + spd := invertSparseEntries(maker.sph, maker.size) + fw = ®FileWriter{w, maker.makeReg.size} + fw = &sparseFileWriter{fw, spd, 0} + wantStr = maker.makeReg.wantStr + default: + t.Fatalf("test %d, unknown make operation: %T", i, maker) + } + + for j, tf := range v.tests { + switch tf := tf.(type) { + case testWrite: + got, err := fw.Write([]byte(tf.str)) + if got != tf.wantCnt || err != tf.wantErr { + t.Errorf("test %d.%d, 
Write(%s):\ngot (%d, %v)\nwant (%d, %v)", i, j, tf.str, got, err, tf.wantCnt, tf.wantErr) + } + case testReadFrom: + f := &testFile{ops: tf.ops} + got, err := fw.ReadFrom(f) + if _, ok := err.(testError); ok { + t.Errorf("test %d.%d, ReadFrom(): %v", i, j, err) + } else if got != tf.wantCnt || err != tf.wantErr { + t.Errorf("test %d.%d, ReadFrom() = (%d, %v), want (%d, %v)", i, j, got, err, tf.wantCnt, tf.wantErr) + } + if len(f.ops) > 0 { + t.Errorf("test %d.%d, expected %d more operations", i, j, len(f.ops)) + } + case testRemaining: + if got := fw.LogicalRemaining(); got != tf.wantLCnt { + t.Errorf("test %d.%d, LogicalRemaining() = %d, want %d", i, j, got, tf.wantLCnt) + } + if got := fw.PhysicalRemaining(); got != tf.wantPCnt { + t.Errorf("test %d.%d, PhysicalRemaining() = %d, want %d", i, j, got, tf.wantPCnt) + } + default: + t.Fatalf("test %d.%d, unknown test operation: %T", i, j, tf) + } + } + + if got := bb.String(); got != wantStr { + t.Fatalf("test %d, String() = %q, want %q", i, got, wantStr) + } + } +} diff --git a/src/archive/zip/reader.go b/src/archive/zip/reader.go index f6c3ead3bea..1563e74dfce 100644 --- a/src/archive/zip/reader.go +++ b/src/archive/zip/reader.go @@ -13,6 +13,7 @@ import ( "hash/crc32" "io" "os" + "time" ) var ( @@ -94,7 +95,7 @@ func (z *Reader) init(r io.ReaderAt, size int64) error { // The count of files inside a zip is truncated to fit in a uint16. // Gloss over this by reading headers until we encounter - // a bad one, and then only report a ErrFormat or UnexpectedEOF if + // a bad one, and then only report an ErrFormat or UnexpectedEOF if // the file count modulo 65536 is incorrect. for { f := &File{zip: z, zipr: r, zipsize: size} @@ -280,52 +281,128 @@ func readDirectoryHeader(f *File, r io.Reader) error { f.Extra = d[filenameLen : filenameLen+extraLen] f.Comment = string(d[filenameLen+extraLen:]) + // Determine the character encoding. 
+ utf8Valid1, utf8Require1 := detectUTF8(f.Name) + utf8Valid2, utf8Require2 := detectUTF8(f.Comment) + switch { + case !utf8Valid1 || !utf8Valid2: + // Name and Comment definitely not UTF-8. + f.NonUTF8 = true + case !utf8Require1 && !utf8Require2: + // Name and Comment use only single-byte runes that overlap with UTF-8. + f.NonUTF8 = false + default: + // Might be UTF-8, might be some other encoding; preserve existing flag. + // Some ZIP writers use UTF-8 encoding without setting the UTF-8 flag. + // Since it is impossible to always distinguish valid UTF-8 from some + // other encoding (e.g., GBK or Shift-JIS), we trust the flag. + f.NonUTF8 = f.Flags&0x800 == 0 + } + needUSize := f.UncompressedSize == ^uint32(0) needCSize := f.CompressedSize == ^uint32(0) needHeaderOffset := f.headerOffset == int64(^uint32(0)) - if len(f.Extra) > 0 { - // Best effort to find what we need. - // Other zip authors might not even follow the basic format, - // and we'll just ignore the Extra content in that case. - b := readBuf(f.Extra) - for len(b) >= 4 { // need at least tag and size - tag := b.uint16() - size := b.uint16() - if int(size) > len(b) { - break - } - if tag == zip64ExtraId { - // update directory values from the zip64 extra block. - // They should only be consulted if the sizes read earlier - // are maxed out. - // See golang.org/issue/13367. - eb := readBuf(b[:size]) + // Best effort to find what we need. + // Other zip authors might not even follow the basic format, + // and we'll just ignore the Extra content in that case. 
+ var modified time.Time +parseExtras: + for extra := readBuf(f.Extra); len(extra) >= 4; { // need at least tag and size + fieldTag := extra.uint16() + fieldSize := int(extra.uint16()) + if len(extra) < fieldSize { + break + } + fieldBuf := extra.sub(fieldSize) - if needUSize { - needUSize = false - if len(eb) < 8 { - return ErrFormat - } - f.UncompressedSize64 = eb.uint64() + switch fieldTag { + case zip64ExtraID: + // update directory values from the zip64 extra block. + // They should only be consulted if the sizes read earlier + // are maxed out. + // See golang.org/issue/13367. + if needUSize { + needUSize = false + if len(fieldBuf) < 8 { + return ErrFormat } - if needCSize { - needCSize = false - if len(eb) < 8 { - return ErrFormat - } - f.CompressedSize64 = eb.uint64() - } - if needHeaderOffset { - needHeaderOffset = false - if len(eb) < 8 { - return ErrFormat - } - f.headerOffset = int64(eb.uint64()) - } - break + f.UncompressedSize64 = fieldBuf.uint64() } - b = b[size:] + if needCSize { + needCSize = false + if len(fieldBuf) < 8 { + return ErrFormat + } + f.CompressedSize64 = fieldBuf.uint64() + } + if needHeaderOffset { + needHeaderOffset = false + if len(fieldBuf) < 8 { + return ErrFormat + } + f.headerOffset = int64(fieldBuf.uint64()) + } + case ntfsExtraID: + if len(fieldBuf) < 4 { + continue parseExtras + } + fieldBuf.uint32() // reserved (ignored) + for len(fieldBuf) >= 4 { // need at least tag and size + attrTag := fieldBuf.uint16() + attrSize := int(fieldBuf.uint16()) + if len(fieldBuf) < attrSize { + continue parseExtras + } + attrBuf := fieldBuf.sub(attrSize) + if attrTag != 1 || attrSize != 24 { + continue // Ignore irrelevant attributes + } + + const ticksPerSecond = 1e7 // Windows timestamp resolution + ts := int64(attrBuf.uint64()) // ModTime since Windows epoch + secs := int64(ts / ticksPerSecond) + nsecs := (1e9 / ticksPerSecond) * int64(ts%ticksPerSecond) + epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC) + modified = 
time.Unix(epoch.Unix()+secs, nsecs) + } + case unixExtraID: + if len(fieldBuf) < 8 { + continue parseExtras + } + fieldBuf.uint32() // AcTime (ignored) + ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch + modified = time.Unix(ts, 0) + case extTimeExtraID: + if len(fieldBuf) < 5 || fieldBuf.uint8()&1 == 0 { + continue parseExtras + } + ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch + modified = time.Unix(ts, 0) + case infoZipUnixExtraID: + if len(fieldBuf) < 4 { + continue parseExtras + } + ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch + modified = time.Unix(ts, 0) + } + } + + msdosModified := msDosTimeToTime(f.ModifiedDate, f.ModifiedTime) + f.Modified = msdosModified + if !modified.IsZero() { + f.Modified = modified.UTC() + + // If legacy MS-DOS timestamps are set, we can use the delta between + // the legacy and extended versions to estimate timezone offset. + // + // A non-UTC timezone is always used (even if offset is zero). + // Thus, FileHeader.Modified.Location() == time.UTC is useful for + // determining whether extended timestamps are present. + // This is necessary for users that need to do additional time + // calculations when dealing with legacy ZIP formats. 
+ if f.ModifiedTime != 0 || f.ModifiedDate != 0 { + f.Modified = modified.In(timeZone(msdosModified.Sub(modified))) } } @@ -508,6 +585,12 @@ func findSignatureInBlock(b []byte) int { type readBuf []byte +func (b *readBuf) uint8() uint8 { + v := (*b)[0] + *b = (*b)[1:] + return v +} + func (b *readBuf) uint16() uint16 { v := binary.LittleEndian.Uint16(*b) *b = (*b)[2:] @@ -525,3 +608,9 @@ func (b *readBuf) uint64() uint64 { *b = (*b)[8:] return v } + +func (b *readBuf) sub(n int) readBuf { + b2 := (*b)[:n] + *b = (*b)[n:] + return b2 +} diff --git a/src/archive/zip/reader_test.go b/src/archive/zip/reader_test.go index dfaae784361..0d9040f7674 100644 --- a/src/archive/zip/reader_test.go +++ b/src/archive/zip/reader_test.go @@ -27,9 +27,11 @@ type ZipTest struct { } type ZipTestFile struct { - Name string - Mode os.FileMode - Mtime string // optional, modified time in format "mm-dd-yy hh:mm:ss" + Name string + Mode os.FileMode + NonUTF8 bool + ModTime time.Time + Modified time.Time // Information describing expected zip file content. // First, reading the entire content should produce the error ContentErr. @@ -47,32 +49,22 @@ type ZipTestFile struct { Size uint64 } -// Caution: The Mtime values found for the test files should correspond to -// the values listed with unzip -l . However, the values -// listed by unzip appear to be off by some hours. When creating -// fresh test files and testing them, this issue is not present. -// The test files were created in Sydney, so there might be a time -// zone issue. The time zone information does have to be encoded -// somewhere, because otherwise unzip -l could not provide a different -// time from what the archive/zip package provides, but there appears -// to be no documentation about this. 
- var tests = []ZipTest{ { Name: "test.zip", Comment: "This is a zipfile comment.", File: []ZipTestFile{ { - Name: "test.txt", - Content: []byte("This is a test text file.\n"), - Mtime: "09-05-10 12:12:02", - Mode: 0644, + Name: "test.txt", + Content: []byte("This is a test text file.\n"), + Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)), + Mode: 0644, }, { - Name: "gophercolor16x16.png", - File: "gophercolor16x16.png", - Mtime: "09-05-10 15:52:58", - Mode: 0644, + Name: "gophercolor16x16.png", + File: "gophercolor16x16.png", + Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)), + Mode: 0644, }, }, }, @@ -81,16 +73,16 @@ var tests = []ZipTest{ Comment: "This is a zipfile comment.", File: []ZipTestFile{ { - Name: "test.txt", - Content: []byte("This is a test text file.\n"), - Mtime: "09-05-10 12:12:02", - Mode: 0644, + Name: "test.txt", + Content: []byte("This is a test text file.\n"), + Modified: time.Date(2010, 9, 5, 12, 12, 1, 0, timeZone(+10*time.Hour)), + Mode: 0644, }, { - Name: "gophercolor16x16.png", - File: "gophercolor16x16.png", - Mtime: "09-05-10 15:52:58", - Mode: 0644, + Name: "gophercolor16x16.png", + File: "gophercolor16x16.png", + Modified: time.Date(2010, 9, 5, 15, 52, 58, 0, timeZone(+10*time.Hour)), + Mode: 0644, }, }, }, @@ -99,10 +91,10 @@ var tests = []ZipTest{ Source: returnRecursiveZip, File: []ZipTestFile{ { - Name: "r/r.zip", - Content: rZipBytes(), - Mtime: "03-04-10 00:24:16", - Mode: 0666, + Name: "r/r.zip", + Content: rZipBytes(), + Modified: time.Date(2010, 3, 4, 0, 24, 16, 0, time.UTC), + Mode: 0666, }, }, }, @@ -110,9 +102,10 @@ var tests = []ZipTest{ Name: "symlink.zip", File: []ZipTestFile{ { - Name: "symlink", - Content: []byte("../target"), - Mode: 0777 | os.ModeSymlink, + Name: "symlink", + Content: []byte("../target"), + Modified: time.Date(2012, 2, 3, 19, 56, 48, 0, timeZone(-2*time.Hour)), + Mode: 0777 | os.ModeSymlink, }, }, }, @@ -127,22 +120,72 @@ var tests = []ZipTest{ Name: 
"dd.zip", File: []ZipTestFile{ { - Name: "filename", - Content: []byte("This is a test textfile.\n"), - Mtime: "02-02-11 13:06:20", - Mode: 0666, + Name: "filename", + Content: []byte("This is a test textfile.\n"), + Modified: time.Date(2011, 2, 2, 13, 6, 20, 0, time.UTC), + Mode: 0666, }, }, }, { // created in windows XP file manager. Name: "winxp.zip", - File: crossPlatform, + File: []ZipTestFile{ + { + Name: "hello", + Content: []byte("world \r\n"), + Modified: time.Date(2011, 12, 8, 10, 4, 24, 0, time.UTC), + Mode: 0666, + }, + { + Name: "dir/bar", + Content: []byte("foo \r\n"), + Modified: time.Date(2011, 12, 8, 10, 4, 50, 0, time.UTC), + Mode: 0666, + }, + { + Name: "dir/empty/", + Content: []byte{}, + Modified: time.Date(2011, 12, 8, 10, 8, 6, 0, time.UTC), + Mode: os.ModeDir | 0777, + }, + { + Name: "readonly", + Content: []byte("important \r\n"), + Modified: time.Date(2011, 12, 8, 10, 6, 8, 0, time.UTC), + Mode: 0444, + }, + }, }, { // created by Zip 3.0 under Linux Name: "unix.zip", - File: crossPlatform, + File: []ZipTestFile{ + { + Name: "hello", + Content: []byte("world \r\n"), + Modified: time.Date(2011, 12, 8, 10, 4, 24, 0, timeZone(0)), + Mode: 0666, + }, + { + Name: "dir/bar", + Content: []byte("foo \r\n"), + Modified: time.Date(2011, 12, 8, 10, 4, 50, 0, timeZone(0)), + Mode: 0666, + }, + { + Name: "dir/empty/", + Content: []byte{}, + Modified: time.Date(2011, 12, 8, 10, 8, 6, 0, timeZone(0)), + Mode: os.ModeDir | 0777, + }, + { + Name: "readonly", + Content: []byte("important \r\n"), + Modified: time.Date(2011, 12, 8, 10, 6, 8, 0, timeZone(0)), + Mode: 0444, + }, + }, }, { // created by Go, before we wrote the "optional" data @@ -150,16 +193,16 @@ var tests = []ZipTest{ Name: "go-no-datadesc-sig.zip", File: []ZipTestFile{ { - Name: "foo.txt", - Content: []byte("foo\n"), - Mtime: "03-08-12 16:59:10", - Mode: 0644, + Name: "foo.txt", + Content: []byte("foo\n"), + Modified: time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)), + Mode: 0644, 
}, { - Name: "bar.txt", - Content: []byte("bar\n"), - Mtime: "03-08-12 16:59:12", - Mode: 0644, + Name: "bar.txt", + Content: []byte("bar\n"), + Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)), + Mode: 0644, }, }, }, @@ -169,14 +212,16 @@ var tests = []ZipTest{ Name: "go-with-datadesc-sig.zip", File: []ZipTestFile{ { - Name: "foo.txt", - Content: []byte("foo\n"), - Mode: 0666, + Name: "foo.txt", + Content: []byte("foo\n"), + Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC), + Mode: 0666, }, { - Name: "bar.txt", - Content: []byte("bar\n"), - Mode: 0666, + Name: "bar.txt", + Content: []byte("bar\n"), + Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC), + Mode: 0666, }, }, }, @@ -187,13 +232,15 @@ var tests = []ZipTest{ { Name: "foo.txt", Content: []byte("foo\n"), + Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC), Mode: 0666, ContentErr: ErrChecksum, }, { - Name: "bar.txt", - Content: []byte("bar\n"), - Mode: 0666, + Name: "bar.txt", + Content: []byte("bar\n"), + Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC), + Mode: 0666, }, }, }, @@ -203,16 +250,16 @@ var tests = []ZipTest{ Name: "crc32-not-streamed.zip", File: []ZipTestFile{ { - Name: "foo.txt", - Content: []byte("foo\n"), - Mtime: "03-08-12 16:59:10", - Mode: 0644, + Name: "foo.txt", + Content: []byte("foo\n"), + Modified: time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)), + Mode: 0644, }, { - Name: "bar.txt", - Content: []byte("bar\n"), - Mtime: "03-08-12 16:59:12", - Mode: 0644, + Name: "bar.txt", + Content: []byte("bar\n"), + Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)), + Mode: 0644, }, }, }, @@ -225,15 +272,15 @@ var tests = []ZipTest{ { Name: "foo.txt", Content: []byte("foo\n"), - Mtime: "03-08-12 16:59:10", + Modified: time.Date(2012, 3, 8, 16, 59, 10, 0, timeZone(-8*time.Hour)), Mode: 0644, ContentErr: ErrChecksum, }, { - Name: "bar.txt", - Content: []byte("bar\n"), - Mtime: "03-08-12 16:59:12", - Mode: 0644, + Name: 
"bar.txt", + Content: []byte("bar\n"), + Modified: time.Date(2012, 3, 8, 16, 59, 12, 0, timeZone(-8*time.Hour)), + Mode: 0644, }, }, }, @@ -241,10 +288,10 @@ var tests = []ZipTest{ Name: "zip64.zip", File: []ZipTestFile{ { - Name: "README", - Content: []byte("This small file is in ZIP64 format.\n"), - Mtime: "08-10-12 14:33:32", - Mode: 0644, + Name: "README", + Content: []byte("This small file is in ZIP64 format.\n"), + Modified: time.Date(2012, 8, 10, 14, 33, 32, 0, time.UTC), + Mode: 0644, }, }, }, @@ -253,10 +300,10 @@ var tests = []ZipTest{ Name: "zip64-2.zip", File: []ZipTestFile{ { - Name: "README", - Content: []byte("This small file is in ZIP64 format.\n"), - Mtime: "08-10-12 14:33:32", - Mode: 0644, + Name: "README", + Content: []byte("This small file is in ZIP64 format.\n"), + Modified: time.Date(2012, 8, 10, 14, 33, 32, 0, timeZone(-4*time.Hour)), + Mode: 0644, }, }, }, @@ -266,41 +313,179 @@ var tests = []ZipTest{ Source: returnBigZipBytes, File: []ZipTestFile{ { - Name: "big.file", - Content: nil, - Size: 1<<32 - 1, - Mode: 0666, + Name: "big.file", + Content: nil, + Size: 1<<32 - 1, + Modified: time.Date(1979, 11, 30, 0, 0, 0, 0, time.UTC), + Mode: 0666, + }, + }, + }, + { + Name: "utf8-7zip.zip", + File: []ZipTestFile{ + { + Name: "世界", + Content: []byte{}, + Mode: 0666, + Modified: time.Date(2017, 11, 6, 13, 9, 27, 867862500, timeZone(-8*time.Hour)), + }, + }, + }, + { + Name: "utf8-infozip.zip", + File: []ZipTestFile{ + { + Name: "世界", + Content: []byte{}, + Mode: 0644, + // Name is valid UTF-8, but format does not have UTF-8 flag set. + // We don't do UTF-8 detection for multi-byte runes due to + // false-positives with other encodings (e.g., Shift-JIS). + // Format says encoding is not UTF-8, so we trust it. 
+ NonUTF8: true, + Modified: time.Date(2017, 11, 6, 13, 9, 27, 0, timeZone(-8*time.Hour)), + }, + }, + }, + { + Name: "utf8-osx.zip", + File: []ZipTestFile{ + { + Name: "世界", + Content: []byte{}, + Mode: 0644, + // Name is valid UTF-8, but format does not have UTF-8 set. + NonUTF8: true, + Modified: time.Date(2017, 11, 6, 13, 9, 27, 0, timeZone(-8*time.Hour)), + }, + }, + }, + { + Name: "utf8-winrar.zip", + File: []ZipTestFile{ + { + Name: "世界", + Content: []byte{}, + Mode: 0666, + Modified: time.Date(2017, 11, 6, 13, 9, 27, 867862500, timeZone(-8*time.Hour)), + }, + }, + }, + { + Name: "utf8-winzip.zip", + File: []ZipTestFile{ + { + Name: "世界", + Content: []byte{}, + Mode: 0666, + Modified: time.Date(2017, 11, 6, 13, 9, 27, 867000000, timeZone(-8*time.Hour)), + }, + }, + }, + { + Name: "time-7zip.zip", + File: []ZipTestFile{ + { + Name: "test.txt", + Content: []byte{}, + Size: 1<<32 - 1, + Modified: time.Date(2017, 10, 31, 21, 11, 57, 244817900, timeZone(-7*time.Hour)), + Mode: 0666, + }, + }, + }, + { + Name: "time-infozip.zip", + File: []ZipTestFile{ + { + Name: "test.txt", + Content: []byte{}, + Size: 1<<32 - 1, + Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)), + Mode: 0644, + }, + }, + }, + { + Name: "time-osx.zip", + File: []ZipTestFile{ + { + Name: "test.txt", + Content: []byte{}, + Size: 1<<32 - 1, + Modified: time.Date(2017, 10, 31, 21, 17, 27, 0, timeZone(-7*time.Hour)), + Mode: 0644, + }, + }, + }, + { + Name: "time-win7.zip", + File: []ZipTestFile{ + { + Name: "test.txt", + Content: []byte{}, + Size: 1<<32 - 1, + Modified: time.Date(2017, 10, 31, 21, 11, 58, 0, time.UTC), + Mode: 0666, + }, + }, + }, + { + Name: "time-winrar.zip", + File: []ZipTestFile{ + { + Name: "test.txt", + Content: []byte{}, + Size: 1<<32 - 1, + Modified: time.Date(2017, 10, 31, 21, 11, 57, 244817900, timeZone(-7*time.Hour)), + Mode: 0666, + }, + }, + }, + { + Name: "time-winzip.zip", + File: []ZipTestFile{ + { + Name: "test.txt", + Content: []byte{}, + 
Size: 1<<32 - 1, + Modified: time.Date(2017, 10, 31, 21, 11, 57, 244000000, timeZone(-7*time.Hour)), + Mode: 0666, + }, + }, + }, + { + Name: "time-go.zip", + File: []ZipTestFile{ + { + Name: "test.txt", + Content: []byte{}, + Size: 1<<32 - 1, + Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)), + Mode: 0666, + }, + }, + }, + { + Name: "time-22738.zip", + File: []ZipTestFile{ + { + Name: "file", + Content: []byte{}, + Mode: 0666, + Modified: time.Date(1999, 12, 31, 19, 0, 0, 0, timeZone(-5*time.Hour)), + ModTime: time.Date(1999, 12, 31, 19, 0, 0, 0, time.UTC), }, }, }, } -var crossPlatform = []ZipTestFile{ - { - Name: "hello", - Content: []byte("world \r\n"), - Mode: 0666, - }, - { - Name: "dir/bar", - Content: []byte("foo \r\n"), - Mode: 0666, - }, - { - Name: "dir/empty/", - Content: []byte{}, - Mode: os.ModeDir | 0777, - }, - { - Name: "readonly", - Content: []byte("important \r\n"), - Mode: 0444, - }, -} - func TestReader(t *testing.T) { for _, zt := range tests { - readTestZip(t, zt) + t.Run(zt.Name, func(t *testing.T) { + readTestZip(t, zt) + }) } } @@ -319,7 +504,7 @@ func readTestZip(t *testing.T, zt ZipTest) { } } if err != zt.Error { - t.Errorf("%s: error=%v, want %v", zt.Name, err, zt.Error) + t.Errorf("error=%v, want %v", err, zt.Error) return } @@ -335,16 +520,19 @@ func readTestZip(t *testing.T, zt ZipTest) { } if z.Comment != zt.Comment { - t.Errorf("%s: comment=%q, want %q", zt.Name, z.Comment, zt.Comment) + t.Errorf("comment=%q, want %q", z.Comment, zt.Comment) } if len(z.File) != len(zt.File) { - t.Fatalf("%s: file count=%d, want %d", zt.Name, len(z.File), len(zt.File)) + t.Fatalf("file count=%d, want %d", len(z.File), len(zt.File)) } // test read of each file for i, ft := range zt.File { readTestFile(t, zt, ft, z.File[i]) } + if t.Failed() { + return + } // test simultaneous reads n := 0 @@ -363,23 +551,24 @@ func readTestZip(t *testing.T, zt ZipTest) { } } +func equalTimeAndZone(t1, t2 time.Time) bool { + name1, offset1 := 
t1.Zone() + name2, offset2 := t2.Zone() + return t1.Equal(t2) && name1 == name2 && offset1 == offset2 +} + func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File) { if f.Name != ft.Name { - t.Errorf("%s: name=%q, want %q", zt.Name, f.Name, ft.Name) + t.Errorf("name=%q, want %q", f.Name, ft.Name) + } + if !ft.Modified.IsZero() && !equalTimeAndZone(f.Modified, ft.Modified) { + t.Errorf("%s: Modified=%s, want %s", f.Name, f.Modified, ft.Modified) + } + if !ft.ModTime.IsZero() && !equalTimeAndZone(f.ModTime(), ft.ModTime) { + t.Errorf("%s: ModTime=%s, want %s", f.Name, f.ModTime(), ft.ModTime) } - if ft.Mtime != "" { - mtime, err := time.Parse("01-02-06 15:04:05", ft.Mtime) - if err != nil { - t.Error(err) - return - } - if ft := f.ModTime(); !ft.Equal(mtime) { - t.Errorf("%s: %s: mtime=%s, want %s", zt.Name, f.Name, ft, mtime) - } - } - - testFileMode(t, zt.Name, f, ft.Mode) + testFileMode(t, f, ft.Mode) size := uint64(f.UncompressedSize) if size == uint32max { @@ -390,7 +579,7 @@ func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File) { r, err := f.Open() if err != nil { - t.Errorf("%s: %v", zt.Name, err) + t.Errorf("%v", err) return } @@ -408,7 +597,7 @@ func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File) { var b bytes.Buffer _, err = io.Copy(&b, r) if err != ft.ContentErr { - t.Errorf("%s: copying contents: %v (want %v)", zt.Name, err, ft.ContentErr) + t.Errorf("copying contents: %v (want %v)", err, ft.ContentErr) } if err != nil { return @@ -440,12 +629,12 @@ func readTestFile(t *testing.T, zt ZipTest, ft ZipTestFile, f *File) { } } -func testFileMode(t *testing.T, zipName string, f *File, want os.FileMode) { +func testFileMode(t *testing.T, f *File, want os.FileMode) { mode := f.Mode() if want == 0 { - t.Errorf("%s: %s mode: got %v, want none", zipName, f.Name, mode) + t.Errorf("%s mode: got %v, want none", f.Name, mode) } else if mode != want { - t.Errorf("%s: %s mode: want %v, got %v", zipName, f.Name, want, mode) + 
t.Errorf("%s mode: want %v, got %v", f.Name, want, mode) } } diff --git a/src/archive/zip/struct.go b/src/archive/zip/struct.go index 0be210e8e73..f613ebdc344 100644 --- a/src/archive/zip/struct.go +++ b/src/archive/zip/struct.go @@ -27,8 +27,8 @@ import ( // Compression methods. const ( - Store uint16 = 0 - Deflate uint16 = 8 + Store uint16 = 0 // no compression + Deflate uint16 = 8 // DEFLATE compressed ) const ( @@ -46,40 +46,79 @@ const ( directory64LocLen = 20 // directory64EndLen = 56 // + extra - // Constants for the first byte in CreatorVersion + // Constants for the first byte in CreatorVersion. creatorFAT = 0 creatorUnix = 3 creatorNTFS = 11 creatorVFAT = 14 creatorMacOSX = 19 - // version numbers + // Version numbers. zipVersion20 = 20 // 2.0 zipVersion45 = 45 // 4.5 (reads and writes zip64 archives) - // limits for non zip64 files + // Limits for non zip64 files. uint16max = (1 << 16) - 1 uint32max = (1 << 32) - 1 - // extra header id's - zip64ExtraId = 0x0001 // zip64 Extended Information Extra Field + // Extra header IDs. + // + // IDs 0..31 are reserved for official use by PKWARE. + // IDs above that range are defined by third-party vendors. + // Since ZIP lacked high precision timestamps (nor a official specification + // of the timezone used for the date fields), many competing extra fields + // have been invented. Pervasive use effectively makes them "official". + // + // See http://mdfs.net/Docs/Comp/Archiving/Zip/ExtraField + zip64ExtraID = 0x0001 // Zip64 extended information + ntfsExtraID = 0x000a // NTFS + unixExtraID = 0x000d // UNIX + extTimeExtraID = 0x5455 // Extended timestamp + infoZipUnixExtraID = 0x5855 // Info-ZIP Unix extension ) // FileHeader describes a file within a zip file. // See the zip spec for details. type FileHeader struct { // Name is the name of the file. - // It must be a relative path: it must not start with a drive - // letter (e.g. C:) or leading slash, and only forward slashes - // are allowed. 
+ // It must be a relative path, not start with a drive letter (e.g. C:), + // and must use forward slashes instead of back slashes. Name string - CreatorVersion uint16 - ReaderVersion uint16 - Flags uint16 - Method uint16 - ModifiedTime uint16 // MS-DOS time - ModifiedDate uint16 // MS-DOS date + // Comment is any arbitrary user-defined string shorter than 64KiB. + Comment string + + // NonUTF8 indicates that Name and Comment are not encoded in UTF-8. + // + // By specification, the only other encoding permitted should be CP-437, + // but historically many ZIP readers interpret Name and Comment as whatever + // the system's local character encoding happens to be. + // + // This flag should only be set if the user intends to encode a non-portable + // ZIP file for a specific localized region. Otherwise, the Writer + // automatically sets the ZIP format's UTF-8 flag for valid UTF-8 strings. + NonUTF8 bool + + CreatorVersion uint16 + ReaderVersion uint16 + Flags uint16 + + // Method is the compression method. If zero, Store is used. + Method uint16 + + // Modified is the modified time of the file. + // + // When reading, an extended timestamp is preferred over the legacy MS-DOS + // date field, and the offset between the times is used as the timezone. + // If only the MS-DOS date is present, the timezone is assumed to be UTC. + // + // When writing, an extended timestamp (which is timezone-agnostic) is + // always emitted. The legacy MS-DOS date field is encoded according to the + // location of the Modified time. + Modified time.Time + ModifiedTime uint16 // Deprecated: Legacy MS-DOS date; use Modified instead. + ModifiedDate uint16 // Deprecated: Legacy MS-DOS time; use Modified instead. + CRC32 uint32 CompressedSize uint32 // Deprecated: Use CompressedSize64 instead. UncompressedSize uint32 // Deprecated: Use UncompressedSize64 instead. 
@@ -87,7 +126,6 @@ type FileHeader struct { UncompressedSize64 uint64 Extra []byte ExternalAttrs uint32 // Meaning depends on CreatorVersion - Comment string } // FileInfo returns an os.FileInfo for the FileHeader. @@ -117,6 +155,8 @@ func (fi headerFileInfo) Sys() interface{} { return fi.fh } // Because os.FileInfo's Name method returns only the base name of // the file it describes, it may be necessary to modify the Name field // of the returned header to provide the full path name of the file. +// If compression is desired, callers should set the FileHeader.Method +// field; it is unset by default. func FileInfoHeader(fi os.FileInfo) (*FileHeader, error) { size := fi.Size() fh := &FileHeader{ @@ -144,6 +184,21 @@ type directoryEnd struct { comment string } +// timeZone returns a *time.Location based on the provided offset. +// If the offset is non-sensible, then this uses an offset of zero. +func timeZone(offset time.Duration) *time.Location { + const ( + minOffset = -12 * time.Hour // E.g., Baker island at -12:00 + maxOffset = +14 * time.Hour // E.g., Line island at +14:00 + offsetAlias = 15 * time.Minute // E.g., Nepal at +5:45 + ) + offset = offset.Round(offsetAlias) + if offset < minOffset || maxOffset < offset { + offset = 0 + } + return time.FixedZone("", int(offset/time.Second)) +} + // msDosTimeToTime converts an MS-DOS date and time into a time.Time. // The resolution is 2s. // See: http://msdn.microsoft.com/en-us/library/ms724247(v=VS.85).aspx @@ -168,21 +223,26 @@ func msDosTimeToTime(dosDate, dosTime uint16) time.Time { // The resolution is 2s. // See: http://msdn.microsoft.com/en-us/library/ms724274(v=VS.85).aspx func timeToMsDosTime(t time.Time) (fDate uint16, fTime uint16) { - t = t.In(time.UTC) fDate = uint16(t.Day() + int(t.Month())<<5 + (t.Year()-1980)<<9) fTime = uint16(t.Second()/2 + t.Minute()<<5 + t.Hour()<<11) return } -// ModTime returns the modification time in UTC. -// The resolution is 2s. 
+// ModTime returns the modification time in UTC using the legacy +// ModifiedDate and ModifiedTime fields. +// +// Deprecated: Use Modified instead. func (h *FileHeader) ModTime() time.Time { return msDosTimeToTime(h.ModifiedDate, h.ModifiedTime) } -// SetModTime sets the ModifiedTime and ModifiedDate fields to the given time in UTC. -// The resolution is 2s. +// SetModTime sets the Modified, ModifiedTime, and ModifiedDate fields +// to the given time in UTC. +// +// Deprecated: Use Modified instead. func (h *FileHeader) SetModTime(t time.Time) { + t = t.UTC() // Convert to UTC for compatibility + h.Modified = t h.ModifiedDate, h.ModifiedTime = timeToMsDosTime(t) } diff --git a/src/archive/zip/testdata/time-22738.zip b/src/archive/zip/testdata/time-22738.zip new file mode 100644 index 00000000000..eb85b57103e Binary files /dev/null and b/src/archive/zip/testdata/time-22738.zip differ diff --git a/src/archive/zip/testdata/time-7zip.zip b/src/archive/zip/testdata/time-7zip.zip new file mode 100644 index 00000000000..4f74819d11d Binary files /dev/null and b/src/archive/zip/testdata/time-7zip.zip differ diff --git a/src/archive/zip/testdata/time-go.zip b/src/archive/zip/testdata/time-go.zip new file mode 100644 index 00000000000..f008805fa42 Binary files /dev/null and b/src/archive/zip/testdata/time-go.zip differ diff --git a/src/archive/zip/testdata/time-infozip.zip b/src/archive/zip/testdata/time-infozip.zip new file mode 100644 index 00000000000..8e6394891f0 Binary files /dev/null and b/src/archive/zip/testdata/time-infozip.zip differ diff --git a/src/archive/zip/testdata/time-osx.zip b/src/archive/zip/testdata/time-osx.zip new file mode 100644 index 00000000000..e82c5c229e0 Binary files /dev/null and b/src/archive/zip/testdata/time-osx.zip differ diff --git a/src/archive/zip/testdata/time-win7.zip b/src/archive/zip/testdata/time-win7.zip new file mode 100644 index 00000000000..8ba222b2246 Binary files /dev/null and b/src/archive/zip/testdata/time-win7.zip differ 
diff --git a/src/archive/zip/testdata/time-winrar.zip b/src/archive/zip/testdata/time-winrar.zip new file mode 100644 index 00000000000..a8a19b0f8e2 Binary files /dev/null and b/src/archive/zip/testdata/time-winrar.zip differ diff --git a/src/archive/zip/testdata/time-winzip.zip b/src/archive/zip/testdata/time-winzip.zip new file mode 100644 index 00000000000..f6e8f8ba067 Binary files /dev/null and b/src/archive/zip/testdata/time-winzip.zip differ diff --git a/src/archive/zip/testdata/utf8-7zip.zip b/src/archive/zip/testdata/utf8-7zip.zip new file mode 100644 index 00000000000..0e97884559f Binary files /dev/null and b/src/archive/zip/testdata/utf8-7zip.zip differ diff --git a/src/archive/zip/testdata/utf8-infozip.zip b/src/archive/zip/testdata/utf8-infozip.zip new file mode 100644 index 00000000000..25a892646ce Binary files /dev/null and b/src/archive/zip/testdata/utf8-infozip.zip differ diff --git a/src/archive/zip/testdata/utf8-osx.zip b/src/archive/zip/testdata/utf8-osx.zip new file mode 100644 index 00000000000..9b0c058b5b5 Binary files /dev/null and b/src/archive/zip/testdata/utf8-osx.zip differ diff --git a/src/archive/zip/testdata/utf8-winrar.zip b/src/archive/zip/testdata/utf8-winrar.zip new file mode 100644 index 00000000000..4bad6c3a5e0 Binary files /dev/null and b/src/archive/zip/testdata/utf8-winrar.zip differ diff --git a/src/archive/zip/testdata/utf8-winzip.zip b/src/archive/zip/testdata/utf8-winzip.zip new file mode 100644 index 00000000000..909d52ed2d9 Binary files /dev/null and b/src/archive/zip/testdata/utf8-winzip.zip differ diff --git a/src/archive/zip/writer.go b/src/archive/zip/writer.go index 9f4fceee844..14a5ee48c11 100644 --- a/src/archive/zip/writer.go +++ b/src/archive/zip/writer.go @@ -14,6 +14,11 @@ import ( "unicode/utf8" ) +var ( + errLongName = errors.New("zip: FileHeader.Name too long") + errLongExtra = errors.New("zip: FileHeader.Extra too long") +) + // Writer implements a zip file writer. 
type Writer struct { cw *countWriter @@ -21,6 +26,7 @@ type Writer struct { last *fileWriter closed bool compressors map[uint16]Compressor + comment string // testHookCloseSizeOffset if non-nil is called with the size // of offset of the central directory at Close. @@ -54,6 +60,16 @@ func (w *Writer) Flush() error { return w.cw.w.(*bufio.Writer).Flush() } +// SetComment sets the end-of-central-directory comment field. +// It can only be called before Close. +func (w *Writer) SetComment(comment string) error { + if len(comment) > uint16max { + return errors.New("zip: Writer.Comment too long") + } + w.comment = comment + return nil +} + // Close finishes writing the zip file by writing the central directory. // It does not (and cannot) close the underlying writer. func (w *Writer) Close() error { @@ -91,7 +107,7 @@ func (w *Writer) Close() error { // append a zip64 extra block to Extra var buf [28]byte // 2x uint16 + 3x uint64 eb := writeBuf(buf[:]) - eb.uint16(zip64ExtraId) + eb.uint16(zip64ExtraID) eb.uint16(24) // size = 3x uint64 eb.uint64(h.UncompressedSize64) eb.uint64(h.CompressedSize64) @@ -172,21 +188,25 @@ func (w *Writer) Close() error { var buf [directoryEndLen]byte b := writeBuf(buf[:]) b.uint32(uint32(directoryEndSignature)) - b = b[4:] // skip over disk number and first disk number (2x uint16) - b.uint16(uint16(records)) // number of entries this disk - b.uint16(uint16(records)) // number of entries total - b.uint32(uint32(size)) // size of directory - b.uint32(uint32(offset)) // start of directory - // skipped size of comment (always zero) + b = b[4:] // skip over disk number and first disk number (2x uint16) + b.uint16(uint16(records)) // number of entries this disk + b.uint16(uint16(records)) // number of entries total + b.uint32(uint32(size)) // size of directory + b.uint32(uint32(offset)) // start of directory + b.uint16(uint16(len(w.comment))) // byte size of EOCD comment if _, err := w.cw.Write(buf[:]); err != nil { return err } + if _, err := 
io.WriteString(w.cw, w.comment); err != nil { + return err + } return w.cw.w.(*bufio.Writer).Flush() } // Create adds a file to the zip file using the provided name. // It returns a Writer to which the file contents should be written. +// The file contents will be compressed using the Deflate method. // The name must be a relative path: it must not start with a drive // letter (e.g. C:) or leading slash, and only forward slashes are // allowed. @@ -200,27 +220,36 @@ func (w *Writer) Create(name string) (io.Writer, error) { return w.CreateHeader(header) } -func hasValidUTF8(s string) bool { - n := 0 - for _, r := range s { - // By default, ZIP uses CP437, which is only identical to ASCII for the printable characters. - if r < 0x20 || r >= 0x7f { - if !utf8.ValidRune(r) { - return false +// detectUTF8 reports whether s is a valid UTF-8 string, and whether the string +// must be considered UTF-8 encoding (i.e., not compatible with CP-437, ASCII, +// or any other common encoding). +func detectUTF8(s string) (valid, require bool) { + for i := 0; i < len(s); { + r, size := utf8.DecodeRuneInString(s[i:]) + i += size + // Officially, ZIP uses CP-437, but many readers use the system's + // local character encoding. Most encoding are compatible with a large + // subset of CP-437, which itself is ASCII-like. + // + // Forbid 0x7e and 0x5c since EUC-KR and Shift-JIS replace those + // characters with localized currency and overline characters. + if r < 0x20 || r > 0x7d || r == 0x5c { + if !utf8.ValidRune(r) || (r == utf8.RuneError && size == 1) { + return false, false } - n++ + require = true } } - return n > 0 + return true, require } -// CreateHeader adds a file to the zip file using the provided FileHeader -// for the file metadata. -// It returns a Writer to which the file contents should be written. +// CreateHeader adds a file to the zip archive using the provided FileHeader +// for the file metadata. Writer takes ownership of fh and may mutate +// its fields. 
The caller must not modify fh after calling CreateHeader. // +// This returns a Writer to which the file contents should be written. // The file's contents must be written to the io.Writer before the next -// call to Create, CreateHeader, or Close. The provided FileHeader fh -// must not be modified after a call to CreateHeader. +// call to Create, CreateHeader, or Close. func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) { if w.last != nil && !w.last.closed { if err := w.last.close(); err != nil { @@ -234,13 +263,62 @@ func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) { fh.Flags |= 0x8 // we will write a data descriptor - if hasValidUTF8(fh.Name) || hasValidUTF8(fh.Comment) { - fh.Flags |= 0x800 // filename or comment have valid utf-8 string + // The ZIP format has a sad state of affairs regarding character encoding. + // Officially, the name and comment fields are supposed to be encoded + // in CP-437 (which is mostly compatible with ASCII), unless the UTF-8 + // flag bit is set. However, there are several problems: + // + // * Many ZIP readers still do not support UTF-8. + // * If the UTF-8 flag is cleared, several readers simply interpret the + // name and comment fields as whatever the local system encoding is. + // + // In order to avoid breaking readers without UTF-8 support, + // we avoid setting the UTF-8 flag if the strings are CP-437 compatible. + // However, if the strings require multibyte UTF-8 encoding and is a + // valid UTF-8 string, then we set the UTF-8 bit. + // + // For the case, where the user explicitly wants to specify the encoding + // as UTF-8, they will need to set the flag bit themselves. 
+ utf8Valid1, utf8Require1 := detectUTF8(fh.Name) + utf8Valid2, utf8Require2 := detectUTF8(fh.Comment) + switch { + case fh.NonUTF8: + fh.Flags &^= 0x800 + case (utf8Require1 || utf8Require2) && (utf8Valid1 && utf8Valid2): + fh.Flags |= 0x800 } fh.CreatorVersion = fh.CreatorVersion&0xff00 | zipVersion20 // preserve compatibility byte fh.ReaderVersion = zipVersion20 + // If Modified is set, this takes precedence over MS-DOS timestamp fields. + if !fh.Modified.IsZero() { + // Contrary to the FileHeader.SetModTime method, we intentionally + // do not convert to UTC, because we assume the user intends to encode + // the date using the specified timezone. A user may want this control + // because many legacy ZIP readers interpret the timestamp according + // to the local timezone. + // + // The timezone is only non-UTC if a user directly sets the Modified + // field directly themselves. All other approaches sets UTC. + fh.ModifiedDate, fh.ModifiedTime = timeToMsDosTime(fh.Modified) + + // Use "extended timestamp" format since this is what Info-ZIP uses. + // Nearly every major ZIP implementation uses a different format, + // but at least most seem to be able to understand the other formats. + // + // This format happens to be identical for both local and central header + // if modification time is the only timestamp being encoded. + var mbuf [9]byte // 2*SizeOf(uint16) + SizeOf(uint8) + SizeOf(uint32) + mt := uint32(fh.Modified.Unix()) + eb := writeBuf(mbuf[:]) + eb.uint16(extTimeExtraID) + eb.uint16(5) // Size: SizeOf(uint8) + SizeOf(uint32) + eb.uint8(1) // Flags: ModTime + eb.uint32(mt) // ModTime + fh.Extra = append(fh.Extra, mbuf[:]...) 
+ } + fw := &fileWriter{ zipw: w.cw, compCount: &countWriter{w: w.cw}, @@ -273,6 +351,14 @@ func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) { } func writeHeader(w io.Writer, h *FileHeader) error { + const maxUint16 = 1<<16 - 1 + if len(h.Name) > maxUint16 { + return errLongName + } + if len(h.Extra) > maxUint16 { + return errLongExtra + } + var buf [fileHeaderLen]byte b := writeBuf(buf[:]) b.uint32(uint32(fileHeaderSignature)) @@ -402,6 +488,11 @@ func (w nopCloser) Close() error { type writeBuf []byte +func (b *writeBuf) uint8(v uint8) { + (*b)[0] = v + *b = (*b)[1:] +} + func (b *writeBuf) uint16(v uint16) { binary.LittleEndian.PutUint16(*b, v) *b = (*b)[2:] diff --git a/src/archive/zip/writer_test.go b/src/archive/zip/writer_test.go index 92fb6ecf0ed..38f32296fa8 100644 --- a/src/archive/zip/writer_test.go +++ b/src/archive/zip/writer_test.go @@ -6,11 +6,14 @@ package zip import ( "bytes" + "fmt" "io" "io/ioutil" "math/rand" "os" + "strings" "testing" + "time" ) // TODO(adg): a more sophisticated test suite @@ -57,8 +60,8 @@ var writeTests = []WriteTest{ func TestWriter(t *testing.T) { largeData := make([]byte, 1<<17) - for i := range largeData { - largeData[i] = byte(rand.Int()) + if _, err := rand.Read(largeData); err != nil { + t.Fatal("rand.Read failed:", err) } writeTests[1].Data = largeData defer func() { @@ -87,31 +90,100 @@ func TestWriter(t *testing.T) { } } +// TestWriterComment is test for EOCD comment read/write. 
+func TestWriterComment(t *testing.T) { + var tests = []struct { + comment string + ok bool + }{ + {"hi, hello", true}, + {"hi, こんにちわ", true}, + {strings.Repeat("a", uint16max), true}, + {strings.Repeat("a", uint16max+1), false}, + } + + for _, test := range tests { + // write a zip file + buf := new(bytes.Buffer) + w := NewWriter(buf) + if err := w.SetComment(test.comment); err != nil { + if test.ok { + t.Fatalf("SetComment: unexpected error %v", err) + } + continue + } else { + if !test.ok { + t.Fatalf("SetComment: unexpected success, want error") + } + } + + if err := w.Close(); test.ok == (err != nil) { + t.Fatal(err) + } + + if w.closed != test.ok { + t.Fatalf("Writer.closed: got %v, want %v", w.closed, test.ok) + } + + // skip read test in failure cases + if !test.ok { + continue + } + + // read it back + r, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len())) + if err != nil { + t.Fatal(err) + } + if r.Comment != test.comment { + t.Fatalf("Reader.Comment: got %v, want %v", r.Comment, test.comment) + } + } +} + func TestWriterUTF8(t *testing.T) { var utf8Tests = []struct { name string comment string - expect uint16 + nonUTF8 bool + flags uint16 }{ { name: "hi, hello", comment: "in the world", - expect: 0x8, + flags: 0x8, }, { name: "hi, こんにちわ", comment: "in the world", - expect: 0x808, + flags: 0x808, + }, + { + name: "hi, こんにちわ", + comment: "in the world", + nonUTF8: true, + flags: 0x8, }, { name: "hi, hello", comment: "in the 世界", - expect: 0x808, + flags: 0x808, }, { name: "hi, こんにちわ", comment: "in the 世界", - expect: 0x808, + flags: 0x808, + }, + { + name: "the replacement rune is �", + comment: "the replacement rune is �", + flags: 0x808, + }, + { + // Name is Japanese encoded in Shift JIS. 
+ name: "\x93\xfa\x96{\x8c\xea.txt", + comment: "in the 世界", + flags: 0x008, // UTF-8 must not be set }, } @@ -123,6 +195,7 @@ func TestWriterUTF8(t *testing.T) { h := &FileHeader{ Name: test.name, Comment: test.comment, + NonUTF8: test.nonUTF8, Method: Deflate, } w, err := w.CreateHeader(h) @@ -142,18 +215,41 @@ func TestWriterUTF8(t *testing.T) { t.Fatal(err) } for i, test := range utf8Tests { - got := r.File[i].Flags - t.Logf("name %v, comment %v", test.name, test.comment) - if got != test.expect { - t.Fatalf("Flags: got %v, want %v", got, test.expect) + flags := r.File[i].Flags + if flags != test.flags { + t.Errorf("CreateHeader(name=%q comment=%q nonUTF8=%v): flags=%#x, want %#x", test.name, test.comment, test.nonUTF8, flags, test.flags) } } } +func TestWriterTime(t *testing.T) { + var buf bytes.Buffer + h := &FileHeader{ + Name: "test.txt", + Modified: time.Date(2017, 10, 31, 21, 11, 57, 0, timeZone(-7*time.Hour)), + } + w := NewWriter(&buf) + if _, err := w.CreateHeader(h); err != nil { + t.Fatalf("unexpected CreateHeader error: %v", err) + } + if err := w.Close(); err != nil { + t.Fatalf("unexpected Close error: %v", err) + } + + want, err := ioutil.ReadFile("testdata/time-go.zip") + if err != nil { + t.Fatalf("unexpected ReadFile error: %v", err) + } + if got := buf.Bytes(); !bytes.Equal(got, want) { + fmt.Printf("%x\n%x\n", got, want) + t.Error("contents of time-go.zip differ") + } +} + func TestWriterOffset(t *testing.T) { largeData := make([]byte, 1<<17) - for i := range largeData { - largeData[i] = byte(rand.Int()) + if _, err := rand.Read(largeData); err != nil { + t.Fatal("rand.Read failed:", err) } writeTests[1].Data = largeData defer func() { @@ -225,7 +321,7 @@ func testReadFile(t *testing.T, f *File, wt *WriteTest) { if f.Name != wt.Name { t.Fatalf("File name: got %q, want %q", f.Name, wt.Name) } - testFileMode(t, wt.Name, f, wt.Mode) + testFileMode(t, f, wt.Mode) rc, err := f.Open() if err != nil { t.Fatal("opening:", err) diff --git 
a/src/archive/zip/zip_test.go b/src/archive/zip/zip_test.go index 18c2171ba6c..7e02cb0eeaa 100644 --- a/src/archive/zip/zip_test.go +++ b/src/archive/zip/zip_test.go @@ -645,16 +645,54 @@ func TestHeaderTooShort(t *testing.T) { h := FileHeader{ Name: "foo.txt", Method: Deflate, - Extra: []byte{zip64ExtraId}, // missing size and second half of tag, but Extra is best-effort parsing + Extra: []byte{zip64ExtraID}, // missing size and second half of tag, but Extra is best-effort parsing } testValidHeader(&h, t) } +func TestHeaderTooLongErr(t *testing.T) { + var headerTests = []struct { + name string + extra []byte + wanterr error + }{ + { + name: strings.Repeat("x", 1<<16), + extra: []byte{}, + wanterr: errLongName, + }, + { + name: "long_extra", + extra: bytes.Repeat([]byte{0xff}, 1<<16), + wanterr: errLongExtra, + }, + } + + // write a zip file + buf := new(bytes.Buffer) + w := NewWriter(buf) + + for _, test := range headerTests { + h := &FileHeader{ + Name: test.name, + Extra: test.extra, + } + _, err := w.CreateHeader(h) + if err != test.wanterr { + t.Errorf("error=%v, want %v", err, test.wanterr) + } + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } +} + func TestHeaderIgnoredSize(t *testing.T) { h := FileHeader{ Name: "foo.txt", Method: Deflate, - Extra: []byte{zip64ExtraId & 0xFF, zip64ExtraId >> 8, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}, // bad size but shouldn't be consulted + Extra: []byte{zip64ExtraID & 0xFF, zip64ExtraID >> 8, 24, 0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8}, // bad size but shouldn't be consulted } testValidHeader(&h, t) } diff --git a/src/bootstrap.bash b/src/bootstrap.bash index da3dff461f4..7b4f57461fc 100755 --- a/src/bootstrap.bash +++ b/src/bootstrap.bash @@ -14,6 +14,15 @@ # # Only changes that have been committed to Git (at least locally, # not necessary reviewed and submitted to master) are included in 
the tree. +# +# As a special case for Go's internal use only, if the +# BOOTSTRAP_FORMAT environment variable is set to "mintgz", the +# resulting archive is intended for use by the Go build system and +# differs in that the mintgz file: +# * is a tar.gz file instead of bz2 +# * has many unnecessary files deleted to reduce its size +# * does not have a shared directory component for each tar entry +# Do not depend on the mintgz format. set -e @@ -28,6 +37,11 @@ if [ -e $targ ]; then exit 2 fi +if [ "$BOOTSTRAP_FORMAT" != "mintgz" -a "$BOOTSTRAP_FORMAT" != "" ]; then + echo "unknown BOOTSTRAP_FORMAT format" + exit 2 +fi + unset GOROOT src=$(cd .. && pwd) echo "#### Copying to $targ" @@ -62,8 +76,36 @@ else rmdir bin/*_* rm -rf "pkg/${gohostos}_${gohostarch}" "pkg/tool/${gohostos}_${gohostarch}" fi + +GITREV=$(git rev-parse --short HEAD) rm -rf pkg/bootstrap pkg/obj .git +# Support for building minimal tar.gz for the builders. +# The build system doesn't support bzip2, and by deleting more stuff, +# they start faster, especially on machines without fast filesystems +# and things like tmpfs configures. +# Do not depend on this format. It's for internal use only. +if [ "$BOOTSTRAP_FORMAT" = "mintgz" ]; then + OUTGZ="gobootstrap-${GOOS}-${GOARCH}-${GITREV}.tar.gz" + echo "Preparing to generate build system's ${OUTGZ}; cleaning ..." + rm -rf bin/gofmt + rm -rf src/runtime/race/race_*.syso + rm -rf api test doc misc/cgo/test misc/trace + rm -rf pkg/tool/*_*/{addr2line,api,cgo,cover,doc,fix,nm,objdump,pack,pprof,test2json,trace,vet} + rm -rf pkg/*_*/{image,database,cmd} + rm -rf $(find . -type d -name testdata) + find . -type f -name '*_test.go' -exec rm {} \; + # git clean doesn't clean symlinks apparently, and the buildlet + # rejects them, so: + find . -type l -exec rm {} \; + + echo "Writing ${OUTGZ} ..." + tar cf - . | gzip -9 > ../$OUTGZ + cd .. + ls -l "$(pwd)/$OUTGZ" + exit 0 +fi + echo ---- echo Bootstrap toolchain for "$GOOS/$GOARCH" installed in "$(pwd)". 
echo Building tbz. diff --git a/src/bufio/bufio.go b/src/bufio/bufio.go index da94a2503f0..ad9c9f5ddf7 100644 --- a/src/bufio/bufio.go +++ b/src/bufio/bufio.go @@ -62,6 +62,9 @@ func NewReader(rd io.Reader) *Reader { return NewReaderSize(rd, defaultBufSize) } +// Size returns the size of the underlying buffer in bytes. +func (r *Reader) Size() int { return len(r.buf) } + // Reset discards any buffered data, resets all state, and switches // the buffered reader to read from r. func (b *Reader) Reset(r io.Reader) { @@ -548,6 +551,9 @@ func NewWriter(w io.Writer) *Writer { return NewWriterSize(w, defaultBufSize) } +// Size returns the size of the underlying buffer in bytes. +func (b *Writer) Size() int { return len(b.buf) } + // Reset discards any unflushed buffered data, clears any error, and // resets b to write its output to w. func (b *Writer) Reset(w io.Writer) { diff --git a/src/bufio/bufio_test.go b/src/bufio/bufio_test.go index ef0f6c834e8..c829d2b0648 100644 --- a/src/bufio/bufio_test.go +++ b/src/bufio/bufio_test.go @@ -1418,6 +1418,24 @@ func TestReaderDiscard(t *testing.T) { } +func TestReaderSize(t *testing.T) { + if got, want := NewReader(nil).Size(), DefaultBufSize; got != want { + t.Errorf("NewReader's Reader.Size = %d; want %d", got, want) + } + if got, want := NewReaderSize(nil, 1234).Size(), 1234; got != want { + t.Errorf("NewReaderSize's Reader.Size = %d; want %d", got, want) + } +} + +func TestWriterSize(t *testing.T) { + if got, want := NewWriter(nil).Size(), DefaultBufSize; got != want { + t.Errorf("NewWriter's Writer.Size = %d; want %d", got, want) + } + if got, want := NewWriterSize(nil, 1234).Size(), 1234; got != want { + t.Errorf("NewWriterSize's Writer.Size = %d; want %d", got, want) + } +} + // An onlyReader only implements io.Reader, no matter what other methods the underlying implementation may have. 
type onlyReader struct { io.Reader diff --git a/src/bufio/export_test.go b/src/bufio/export_test.go index 3d3bb27d8da..1667f01a841 100644 --- a/src/bufio/export_test.go +++ b/src/bufio/export_test.go @@ -11,6 +11,8 @@ import ( var IsSpace = isSpace +const DefaultBufSize = defaultBufSize + func (s *Scanner) MaxTokenSize(n int) { if n < utf8.UTFMax || n > 1e9 { panic("bad max token size") diff --git a/src/bufio/scan.go b/src/bufio/scan.go index 9f741c98307..40aaa4ab817 100644 --- a/src/bufio/scan.go +++ b/src/bufio/scan.go @@ -123,8 +123,9 @@ var ErrFinalToken = errors.New("final token") // After Scan returns false, the Err method will return any error that // occurred during scanning, except that if it was io.EOF, Err // will return nil. -// Scan panics if the split function returns 100 empty tokens without -// advancing the input. This is a common error mode for scanners. +// Scan panics if the split function returns too many empty +// tokens without advancing the input. This is a common error mode for +// scanners. func (s *Scanner) Scan() bool { if s.done { return false @@ -156,8 +157,8 @@ func (s *Scanner) Scan() bool { } else { // Returning tokens not advancing input at EOF. s.empties++ - if s.empties > 100 { - panic("bufio.Scan: 100 empty tokens without progressing") + if s.empties > maxConsecutiveEmptyReads { + panic("bufio.Scan: too many empty tokens without progressing") } } return true diff --git a/src/builtin/builtin.go b/src/builtin/builtin.go index 1c7c041d680..4578c855a9e 100644 --- a/src/builtin/builtin.go +++ b/src/builtin/builtin.go @@ -171,8 +171,9 @@ func cap(v Type) int // Slice: The size specifies the length. The capacity of the slice is // equal to its length. A second integer argument may be provided to // specify a different capacity; it must be no smaller than the -// length, so make([]int, 0, 10) allocates a slice of length 0 and -// capacity 10. +// length. 
For example, make([]int, 0, 10) allocates an underlying array +// of size 10 and returns a slice of length 0 and capacity 10 that is +// backed by this underlying array. // Map: An empty map is allocated with enough space to hold the // specified number of elements. The size may be omitted, in which case // a small starting size is allocated. diff --git a/src/bytes/boundary_test.go b/src/bytes/boundary_test.go new file mode 100644 index 00000000000..ea84f1e40fd --- /dev/null +++ b/src/bytes/boundary_test.go @@ -0,0 +1,84 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +// +build linux + +package bytes_test + +import ( + . "bytes" + "syscall" + "testing" +) + +// This file tests the situation where byte operations are checking +// data very near to a page boundary. We want to make sure those +// operations do not read across the boundary and cause a page +// fault where they shouldn't. + +// These tests run only on linux. The code being tested is +// not OS-specific, so it does not need to be tested on all +// operating systems. + +// dangerousSlice returns a slice which is immediately +// preceded and followed by a faulting page. 
+func dangerousSlice(t *testing.T) []byte { + pagesize := syscall.Getpagesize() + b, err := syscall.Mmap(0, 0, 3*pagesize, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANONYMOUS|syscall.MAP_PRIVATE) + if err != nil { + t.Fatalf("mmap failed %s", err) + } + err = syscall.Mprotect(b[:pagesize], syscall.PROT_NONE) + if err != nil { + t.Fatalf("mprotect low failed %s\n", err) + } + err = syscall.Mprotect(b[2*pagesize:], syscall.PROT_NONE) + if err != nil { + t.Fatalf("mprotect high failed %s\n", err) + } + return b[pagesize : 2*pagesize] +} + +func TestEqualNearPageBoundary(t *testing.T) { + t.Parallel() + b := dangerousSlice(t) + for i := range b { + b[i] = 'A' + } + for i := 0; i <= len(b); i++ { + Equal(b[:i], b[len(b)-i:]) + Equal(b[len(b)-i:], b[:i]) + } +} + +func TestIndexByteNearPageBoundary(t *testing.T) { + t.Parallel() + b := dangerousSlice(t) + for i := range b { + idx := IndexByte(b[i:], 1) + if idx != -1 { + t.Fatalf("IndexByte(b[%d:])=%d, want -1\n", i, idx) + } + } +} + +func TestIndexNearPageBoundary(t *testing.T) { + t.Parallel() + var q [64]byte + b := dangerousSlice(t) + if len(b) > 256 { + // Only worry about when we're near the end of a page. + b = b[len(b)-256:] + } + for j := 1; j < len(q); j++ { + q[j-1] = 1 // difference is only found on the last byte + for i := range b { + idx := Index(b[i:], q[:j]) + if idx != -1 { + t.Fatalf("Index(b[%d:], q[:%d])=%d, want -1\n", i, j, idx) + } + } + q[j-1] = 0 + } +} diff --git a/src/bytes/buffer.go b/src/bytes/buffer.go index 20e42bbbbca..dc9d5e95d32 100644 --- a/src/bytes/buffer.go +++ b/src/bytes/buffer.go @@ -15,34 +15,37 @@ import ( // A Buffer is a variable-sized buffer of bytes with Read and Write methods. // The zero value for Buffer is an empty buffer ready to use. type Buffer struct { - buf []byte // contents are the bytes buf[off : len(buf)] - off int // read at &buf[off], write at &buf[len(buf)] - lastRead readOp // last read operation, so that Unread* can work correctly. 
- // FIXME: lastRead can fit in a single byte + buf []byte // contents are the bytes buf[off : len(buf)] + off int // read at &buf[off], write at &buf[len(buf)] + bootstrap [64]byte // memory to hold first slice; helps small buffers avoid allocation. + lastRead readOp // last read operation, so that Unread* can work correctly. - // memory to hold first slice; helps small buffers avoid allocation. // FIXME: it would be advisable to align Buffer to cachelines to avoid false // sharing. - bootstrap [64]byte } // The readOp constants describe the last action performed on // the buffer, so that UnreadRune and UnreadByte can check for // invalid usage. opReadRuneX constants are chosen such that // converted to int they correspond to the rune size that was read. -type readOp int +type readOp int8 +// Don't use iota for these, as the values need to correspond with the +// names and comments, which is easier to see when being explicit. const ( opRead readOp = -1 // Any other read operation. - opInvalid = 0 // Non-read operation. - opReadRune1 = 1 // Read rune of size 1. - opReadRune2 = 2 // Read rune of size 2. - opReadRune3 = 3 // Read rune of size 3. - opReadRune4 = 4 // Read rune of size 4. + opInvalid readOp = 0 // Non-read operation. + opReadRune1 readOp = 1 // Read rune of size 1. + opReadRune2 readOp = 2 // Read rune of size 2. + opReadRune3 readOp = 3 // Read rune of size 3. + opReadRune4 readOp = 4 // Read rune of size 4. ) // ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer. var ErrTooLarge = errors.New("bytes.Buffer: too large") +var errNegativeRead = errors.New("bytes.Buffer: reader returned negative count from Read") + +const maxInt = int(^uint(0) >> 1) // Bytes returns a slice of length b.Len() holding the unread portion of the buffer. 
// The slice is valid for use only until the next buffer modification (that is, @@ -53,6 +56,8 @@ func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } // String returns the contents of the unread portion of the buffer // as a string. If the Buffer is a nil pointer, it returns "". +// +// To build strings more efficiently, see the strings.Builder type. func (b *Buffer) String() string { if b == nil { // Special case, useful in debugging. @@ -61,6 +66,9 @@ func (b *Buffer) String() string { return string(b.buf[b.off:]) } +// empty returns whether the unread portion of the buffer is empty. +func (b *Buffer) empty() bool { return len(b.buf) <= b.off } + // Len returns the number of bytes of the unread portion of the buffer; // b.Len() == len(b.Bytes()). func (b *Buffer) Len() int { return len(b.buf) - b.off } @@ -81,7 +89,7 @@ func (b *Buffer) Truncate(n int) { if n < 0 || n > b.Len() { panic("bytes.Buffer: truncation out of range") } - b.buf = b.buf[0 : b.off+n] + b.buf = b.buf[:b.off+n] } // Reset resets the buffer to be empty, @@ -97,7 +105,7 @@ func (b *Buffer) Reset() { // internal buffer only needs to be resliced. // It returns the index where bytes should be written and whether it succeeded. func (b *Buffer) tryGrowByReslice(n int) (int, bool) { - if l := len(b.buf); l+n <= cap(b.buf) { + if l := len(b.buf); n <= cap(b.buf)-l { b.buf = b.buf[:l+n] return l, true } @@ -122,15 +130,18 @@ func (b *Buffer) grow(n int) int { b.buf = b.bootstrap[:n] return 0 } - if m+n <= cap(b.buf)/2 { + c := cap(b.buf) + if n <= c/2-m { // We can slide things down instead of allocating a new - // slice. We only need m+n <= cap(b.buf) to slide, but + // slice. We only need m+n <= c to slide, but // we instead let capacity get twice as large so we // don't spend all our time copying. - copy(b.buf[:], b.buf[b.off:]) + copy(b.buf, b.buf[b.off:]) + } else if c > maxInt-c-n { + panic(ErrTooLarge) } else { // Not enough space anywhere, we need to allocate. 
- buf := makeSlice(2*cap(b.buf) + n) + buf := makeSlice(2*c + n) copy(buf, b.buf[b.off:]) b.buf = buf } @@ -150,7 +161,7 @@ func (b *Buffer) Grow(n int) { panic("bytes.Buffer.Grow: negative count") } m := b.grow(n) - b.buf = b.buf[0:m] + b.buf = b.buf[:m] } // Write appends the contents of p to the buffer, growing the buffer as @@ -189,34 +200,22 @@ const MinRead = 512 // buffer becomes too large, ReadFrom will panic with ErrTooLarge. func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { b.lastRead = opInvalid - // If buffer is empty, reset to recover space. - if b.off >= len(b.buf) { - b.Reset() - } for { - if free := cap(b.buf) - len(b.buf); free < MinRead { - // not enough space at end - newBuf := b.buf - if b.off+free < MinRead { - // not enough space using beginning of buffer; - // double buffer capacity - newBuf = makeSlice(2*cap(b.buf) + MinRead) - } - copy(newBuf, b.buf[b.off:]) - b.buf = newBuf[:len(b.buf)-b.off] - b.off = 0 + i := b.grow(MinRead) + m, e := r.Read(b.buf[i:cap(b.buf)]) + if m < 0 { + panic(errNegativeRead) } - m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) - b.buf = b.buf[0 : len(b.buf)+m] + + b.buf = b.buf[:i+m] n += int64(m) if e == io.EOF { - break + return n, nil // e is EOF, so return nil explicitly } if e != nil { return n, e } } - return n, nil // err is EOF, so return nil explicitly } // makeSlice allocates a slice of size n. If the allocation fails, it panics @@ -237,8 +236,7 @@ func makeSlice(n int) []byte { // encountered during the write is also returned. func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { b.lastRead = opInvalid - if b.off < len(b.buf) { - nBytes := b.Len() + if nBytes := b.Len(); nBytes > 0 { m, e := w.Write(b.buf[b.off:]) if m > nBytes { panic("bytes.Buffer.WriteTo: invalid Write count") @@ -256,7 +254,7 @@ func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { } // Buffer is now empty; reset. 
b.Reset() - return + return n, nil } // WriteByte appends the byte c to the buffer, growing the buffer as needed. @@ -298,11 +296,11 @@ func (b *Buffer) WriteRune(r rune) (n int, err error) { // otherwise it is nil. func (b *Buffer) Read(p []byte) (n int, err error) { b.lastRead = opInvalid - if b.off >= len(b.buf) { + if b.empty() { // Buffer is empty, reset to recover space. b.Reset() if len(p) == 0 { - return + return 0, nil } return 0, io.EOF } @@ -311,7 +309,7 @@ func (b *Buffer) Read(p []byte) (n int, err error) { if n > 0 { b.lastRead = opRead } - return + return n, nil } // Next returns a slice containing the next n bytes from the buffer, @@ -335,8 +333,7 @@ func (b *Buffer) Next(n int) []byte { // ReadByte reads and returns the next byte from the buffer. // If no byte is available, it returns error io.EOF. func (b *Buffer) ReadByte() (byte, error) { - b.lastRead = opInvalid - if b.off >= len(b.buf) { + if b.empty() { // Buffer is empty, reset to recover space. b.Reset() return 0, io.EOF @@ -353,8 +350,7 @@ func (b *Buffer) ReadByte() (byte, error) { // If the bytes are an erroneous UTF-8 encoding, it // consumes one byte and returns U+FFFD, 1. func (b *Buffer) ReadRune() (r rune, size int, err error) { - b.lastRead = opInvalid - if b.off >= len(b.buf) { + if b.empty() { // Buffer is empty, reset to recover space. b.Reset() return 0, 0, io.EOF @@ -413,7 +409,7 @@ func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { // return a copy of slice. The buffer's backing array may // be overwritten by later calls. line = append(line, slice...) - return + return line, err } // readSlice is like ReadBytes but returns a reference to internal buffer data. diff --git a/src/bytes/buffer_test.go b/src/bytes/buffer_test.go index ce2f01a0ad3..e4bbc12f6a1 100644 --- a/src/bytes/buffer_test.go +++ b/src/bytes/buffer_test.go @@ -6,25 +6,27 @@ package bytes_test import ( . 
"bytes" - "internal/testenv" "io" "math/rand" - "os/exec" "runtime" "testing" "unicode/utf8" ) -const N = 10000 // make this bigger for a larger (and slower) test -var data string // test data for write tests -var testBytes []byte // test data; same as data but as a slice. +const N = 10000 // make this bigger for a larger (and slower) test +var testString string // test data for write tests +var testBytes []byte // test data; same as testString but as a slice. + +type negativeReader struct{} + +func (r *negativeReader) Read([]byte) (int, error) { return -1, nil } func init() { testBytes = make([]byte, N) for i := 0; i < N; i++ { testBytes[i] = 'a' + byte(i%26) } - data = string(testBytes) + testString = string(testBytes) } // Verify that contents of buf match the string s. @@ -88,12 +90,12 @@ func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub func TestNewBuffer(t *testing.T) { buf := NewBuffer(testBytes) - check(t, "NewBuffer", buf, data) + check(t, "NewBuffer", buf, testString) } func TestNewBufferString(t *testing.T) { - buf := NewBufferString(data) - check(t, "NewBufferString", buf, data) + buf := NewBufferString(testString) + check(t, "NewBufferString", buf, testString) } // Empty buf through repeated reads into fub. 
@@ -128,7 +130,7 @@ func TestBasicOperations(t *testing.T) { buf.Truncate(0) check(t, "TestBasicOperations (3)", &buf, "") - n, err := buf.Write([]byte(data[0:1])) + n, err := buf.Write(testBytes[0:1]) if n != 1 { t.Errorf("wrote 1 byte, but n == %d", n) } @@ -137,30 +139,30 @@ func TestBasicOperations(t *testing.T) { } check(t, "TestBasicOperations (4)", &buf, "a") - buf.WriteByte(data[1]) + buf.WriteByte(testString[1]) check(t, "TestBasicOperations (5)", &buf, "ab") - n, err = buf.Write([]byte(data[2:26])) + n, err = buf.Write(testBytes[2:26]) if n != 24 { - t.Errorf("wrote 25 bytes, but n == %d", n) + t.Errorf("wrote 24 bytes, but n == %d", n) } - check(t, "TestBasicOperations (6)", &buf, string(data[0:26])) + check(t, "TestBasicOperations (6)", &buf, testString[0:26]) buf.Truncate(26) - check(t, "TestBasicOperations (7)", &buf, string(data[0:26])) + check(t, "TestBasicOperations (7)", &buf, testString[0:26]) buf.Truncate(20) - check(t, "TestBasicOperations (8)", &buf, string(data[0:20])) + check(t, "TestBasicOperations (8)", &buf, testString[0:20]) - empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5)) + empty(t, "TestBasicOperations (9)", &buf, testString[0:20], make([]byte, 5)) empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100)) - buf.WriteByte(data[1]) + buf.WriteByte(testString[1]) c, err := buf.ReadByte() if err != nil { t.Error("ReadByte unexpected eof") } - if c != data[1] { + if c != testString[1] { t.Errorf("ReadByte wrong value c=%v", c) } c, err = buf.ReadByte() @@ -177,8 +179,8 @@ func TestLargeStringWrites(t *testing.T) { limit = 9 } for i := 3; i < limit; i += 3 { - s := fillString(t, "TestLargeWrites (1)", &buf, "", 5, data) - empty(t, "TestLargeStringWrites (2)", &buf, s, make([]byte, len(data)/i)) + s := fillString(t, "TestLargeWrites (1)", &buf, "", 5, testString) + empty(t, "TestLargeStringWrites (2)", &buf, s, make([]byte, len(testString)/i)) } check(t, "TestLargeStringWrites (3)", &buf, "") } @@ 
-191,7 +193,7 @@ func TestLargeByteWrites(t *testing.T) { } for i := 3; i < limit; i += 3 { s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes) - empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i)) + empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(testString)/i)) } check(t, "TestLargeByteWrites (3)", &buf, "") } @@ -199,8 +201,8 @@ func TestLargeByteWrites(t *testing.T) { func TestLargeStringReads(t *testing.T) { var buf Buffer for i := 3; i < 30; i += 3 { - s := fillString(t, "TestLargeReads (1)", &buf, "", 5, data[0:len(data)/i]) - empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data))) + s := fillString(t, "TestLargeReads (1)", &buf, "", 5, testString[0:len(testString)/i]) + empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString))) } check(t, "TestLargeStringReads (3)", &buf, "") } @@ -209,7 +211,7 @@ func TestLargeByteReads(t *testing.T) { var buf Buffer for i := 3; i < 30; i += 3 { s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) - empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data))) + empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(testString))) } check(t, "TestLargeByteReads (3)", &buf, "") } @@ -218,14 +220,14 @@ func TestMixedReadsAndWrites(t *testing.T) { var buf Buffer s := "" for i := 0; i < 50; i++ { - wlen := rand.Intn(len(data)) + wlen := rand.Intn(len(testString)) if i%2 == 0 { - s = fillString(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, data[0:wlen]) + s = fillString(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testString[0:wlen]) } else { s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen]) } - rlen := rand.Intn(len(data)) + rlen := rand.Intn(len(testString)) fub := make([]byte, rlen) n, _ := buf.Read(fub) s = s[n:] @@ -263,17 +265,37 @@ func TestReadFrom(t *testing.T) { s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) var b Buffer b.ReadFrom(&buf) - empty(t, 
"TestReadFrom (2)", &b, s, make([]byte, len(data))) + empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(testString))) } } +func TestReadFromNegativeReader(t *testing.T) { + var b Buffer + defer func() { + switch err := recover().(type) { + case nil: + t.Fatal("bytes.Buffer.ReadFrom didn't panic") + case error: + // this is the error string of errNegativeRead + wantError := "bytes.Buffer: reader returned negative count from Read" + if err.Error() != wantError { + t.Fatalf("recovered panic: got %v, want %v", err.Error(), wantError) + } + default: + t.Fatalf("unexpected panic value: %#v", err) + } + }() + + b.ReadFrom(new(negativeReader)) +} + func TestWriteTo(t *testing.T) { var buf Buffer for i := 3; i < 30; i += 3 { s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i]) var b Buffer buf.WriteTo(&b) - empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data))) + empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(testString))) } } @@ -473,6 +495,18 @@ func TestGrow(t *testing.T) { } } +func TestGrowOverflow(t *testing.T) { + defer func() { + if err := recover(); err != ErrTooLarge { + t.Errorf("after too-large Grow, recover() = %v; want %v", err, ErrTooLarge) + } + }() + + buf := NewBuffer(make([]byte, 1)) + const maxInt = int(^uint(0) >> 1) + buf.Grow(maxInt) +} + // Was a bug: used to give EOF reading empty slice at EOF. func TestReadEmptyAtEOF(t *testing.T) { b := new(Buffer) @@ -548,26 +582,6 @@ func TestBufferGrowth(t *testing.T) { } } -// Test that tryGrowByReslice is inlined. -// Only execute on "linux-amd64" builder in order to avoid breakage. 
-func TestTryGrowByResliceInlined(t *testing.T) { - targetBuilder := "linux-amd64" - if testenv.Builder() != targetBuilder { - t.Skipf("%q gets executed on %q builder only", t.Name(), targetBuilder) - } - t.Parallel() - goBin := testenv.GoToolPath(t) - out, err := exec.Command(goBin, "tool", "nm", goBin).CombinedOutput() - if err != nil { - t.Fatalf("go tool nm: %v: %s", err, out) - } - // Verify this doesn't exist: - sym := "bytes.(*Buffer).tryGrowByReslice" - if Contains(out, []byte(sym)) { - t.Errorf("found symbol %q in cmd/go, but should be inlined", sym) - } -} - func BenchmarkWriteByte(b *testing.B) { const n = 4 << 10 b.SetBytes(n) diff --git a/src/bytes/bytes.go b/src/bytes/bytes.go index 7c878af688c..9af177fa882 100644 --- a/src/bytes/bytes.go +++ b/src/bytes/bytes.go @@ -39,7 +39,7 @@ func explode(s []byte, n int) [][]byte { break } _, size = utf8.DecodeRune(s) - a[na] = s[0:size] + a[na] = s[0:size:size] s = s[size:] na++ } @@ -68,12 +68,12 @@ func Contains(b, subslice []byte) bool { return Index(b, subslice) != -1 } -// ContainsAny reports whether any of the UTF-8-encoded Unicode code points in chars are within b. +// ContainsAny reports whether any of the UTF-8-encoded code points in chars are within b. func ContainsAny(b []byte, chars string) bool { return IndexAny(b, chars) >= 0 } -// ContainsRune reports whether the Unicode code point r is within b. +// ContainsRune reports whether the rune is contained in the UTF-8-encoded byte slice b. func ContainsRune(b []byte, r rune) bool { return IndexRune(b, r) >= 0 } @@ -112,7 +112,7 @@ func LastIndexByte(s []byte, c byte) int { return -1 } -// IndexRune interprets s as a sequence of UTF-8-encoded Unicode code points. +// IndexRune interprets s as a sequence of UTF-8-encoded code points. // It returns the byte index of the first occurrence in s of the given rune. // It returns -1 if rune is not present in s. 
// If r is utf8.RuneError, it returns the first instance of any @@ -144,30 +144,32 @@ func IndexRune(s []byte, r rune) int { // code points in chars. It returns -1 if chars is empty or if there is no code // point in common. func IndexAny(s []byte, chars string) int { - if len(chars) > 0 { - if len(s) > 8 { - if as, isASCII := makeASCIISet(chars); isASCII { - for i, c := range s { - if as.contains(c) { - return i - } - } - return -1 - } - } - var width int - for i := 0; i < len(s); i += width { - r := rune(s[i]) - if r < utf8.RuneSelf { - width = 1 - } else { - r, width = utf8.DecodeRune(s[i:]) - } - for _, ch := range chars { - if r == ch { + if chars == "" { + // Avoid scanning all of s. + return -1 + } + if len(s) > 8 { + if as, isASCII := makeASCIISet(chars); isASCII { + for i, c := range s { + if as.contains(c) { return i } } + return -1 + } + } + var width int + for i := 0; i < len(s); i += width { + r := rune(s[i]) + if r < utf8.RuneSelf { + width = 1 + } else { + r, width = utf8.DecodeRune(s[i:]) + } + for _, ch := range chars { + if r == ch { + return i + } } } return -1 @@ -178,25 +180,27 @@ func IndexAny(s []byte, chars string) int { // the Unicode code points in chars. It returns -1 if chars is empty or if // there is no code point in common. func LastIndexAny(s []byte, chars string) int { - if len(chars) > 0 { - if len(s) > 8 { - if as, isASCII := makeASCIISet(chars); isASCII { - for i := len(s) - 1; i >= 0; i-- { - if as.contains(s[i]) { - return i - } - } - return -1 - } - } - for i := len(s); i > 0; { - r, size := utf8.DecodeLastRune(s[:i]) - i -= size - for _, c := range chars { - if r == c { + if chars == "" { + // Avoid scanning all of s. 
+ return -1 + } + if len(s) > 8 { + if as, isASCII := makeASCIISet(chars); isASCII { + for i := len(s) - 1; i >= 0; i-- { + if as.contains(s[i]) { return i } } + return -1 + } + } + for i := len(s); i > 0; { + r, size := utf8.DecodeLastRune(s[:i]) + i -= size + for _, c := range chars { + if r == c { + return i + } } } return -1 @@ -223,7 +227,7 @@ func genSplit(s, sep []byte, sepSave, n int) [][]byte { if m < 0 { break } - a[i] = s[:m+sepSave] + a[i] = s[: m+sepSave : m+sepSave] s = s[m+len(sep):] i++ } @@ -265,52 +269,112 @@ func SplitAfter(s, sep []byte) [][]byte { return genSplit(s, sep, len(sep), -1) } -// Fields splits the slice s around each instance of one or more consecutive white space -// characters, returning a slice of subslices of s or an empty list if s contains only white space. +var asciiSpace = [256]uint8{'\t': 1, '\n': 1, '\v': 1, '\f': 1, '\r': 1, ' ': 1} + +// Fields interprets s as a sequence of UTF-8-encoded code points. +// It splits the slice s around each instance of one or more consecutive white space +// characters, as defined by unicode.IsSpace, returning a slice of subslices of s or an +// empty slice if s contains only white space. func Fields(s []byte) [][]byte { - return FieldsFunc(s, unicode.IsSpace) + // First count the fields. + // This is an exact count if s is ASCII, otherwise it is an approximation. + n := 0 + wasSpace := 1 + // setBits is used to track which bits are set in the bytes of s. + setBits := uint8(0) + for i := 0; i < len(s); i++ { + r := s[i] + setBits |= r + isSpace := int(asciiSpace[r]) + n += wasSpace & ^isSpace + wasSpace = isSpace + } + + if setBits >= utf8.RuneSelf { + // Some runes in the input slice are not ASCII. + return FieldsFunc(s, unicode.IsSpace) + } + + // ASCII fast path + a := make([][]byte, n) + na := 0 + fieldStart := 0 + i := 0 + // Skip spaces in the front of the input. 
+ for i < len(s) && asciiSpace[s[i]] != 0 { + i++ + } + fieldStart = i + for i < len(s) { + if asciiSpace[s[i]] == 0 { + i++ + continue + } + a[na] = s[fieldStart:i:i] + na++ + i++ + // Skip spaces in between fields. + for i < len(s) && asciiSpace[s[i]] != 0 { + i++ + } + fieldStart = i + } + if fieldStart < len(s) { // Last field might end at EOF. + a[na] = s[fieldStart:len(s):len(s)] + } + return a } -// FieldsFunc interprets s as a sequence of UTF-8-encoded Unicode code points. +// FieldsFunc interprets s as a sequence of UTF-8-encoded code points. // It splits the slice s at each run of code points c satisfying f(c) and // returns a slice of subslices of s. If all code points in s satisfy f(c), or // len(s) == 0, an empty slice is returned. // FieldsFunc makes no guarantees about the order in which it calls f(c). // If f does not return consistent results for a given c, FieldsFunc may crash. func FieldsFunc(s []byte, f func(rune) bool) [][]byte { - n := 0 - inField := false + // A span is used to record a slice of s of the form s[start:end]. + // The start index is inclusive and the end index is exclusive. + type span struct { + start int + end int + } + spans := make([]span, 0, 32) + + // Find the field start and end indices. 
+ wasField := false + fromIndex := 0 for i := 0; i < len(s); { - r, size := utf8.DecodeRune(s[i:]) - wasInField := inField - inField = !f(r) - if inField && !wasInField { - n++ + size := 1 + r := rune(s[i]) + if r >= utf8.RuneSelf { + r, size = utf8.DecodeRune(s[i:]) + } + if f(r) { + if wasField { + spans = append(spans, span{start: fromIndex, end: i}) + wasField = false + } + } else { + if !wasField { + fromIndex = i + wasField = true + } } i += size } - a := make([][]byte, n) - na := 0 - fieldStart := -1 - for i := 0; i <= len(s) && na < n; { - r, size := utf8.DecodeRune(s[i:]) - if fieldStart < 0 && size > 0 && !f(r) { - fieldStart = i - i += size - continue - } - if fieldStart >= 0 && (size == 0 || f(r)) { - a[na] = s[fieldStart:i] - na++ - fieldStart = -1 - } - if size == 0 { - break - } - i += size + // Last field might end at EOF. + if wasField { + spans = append(spans, span{fromIndex, len(s)}) } - return a[0:na] + + // Create subslices from recorded field indices. + a := make([][]byte, len(spans)) + for i, span := range spans { + a[i] = s[span.start:span.end:span.end] + } + + return a } // Join concatenates the elements of s to create a new byte slice. The separator @@ -349,8 +413,8 @@ func HasSuffix(s, suffix []byte) bool { // Map returns a copy of the byte slice s with all its characters modified // according to the mapping function. If mapping returns a negative value, the character is -// dropped from the string with no replacement. The characters in s and the -// output are interpreted as UTF-8-encoded Unicode code points. +// dropped from the byte slice with no replacement. The characters in s and the +// output are interpreted as UTF-8-encoded code points. func Map(mapping func(r rune) rune, s []byte) []byte { // In the worst case, the slice can grow when mapped, making // things unpleasant. 
But it's so rare we barge in assuming it's @@ -408,28 +472,28 @@ func Repeat(b []byte, count int) []byte { return nb } -// ToUpper returns a copy of the byte slice s with all Unicode letters mapped to their upper case. +// ToUpper treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters within it mapped to their upper case. func ToUpper(s []byte) []byte { return Map(unicode.ToUpper, s) } -// ToLower returns a copy of the byte slice s with all Unicode letters mapped to their lower case. +// ToLower treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their lower case. func ToLower(s []byte) []byte { return Map(unicode.ToLower, s) } -// ToTitle returns a copy of the byte slice s with all Unicode letters mapped to their title case. +// ToTitle treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their title case. func ToTitle(s []byte) []byte { return Map(unicode.ToTitle, s) } -// ToUpperSpecial returns a copy of the byte slice s with all Unicode letters mapped to their +// ToUpperSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their // upper case, giving priority to the special casing rules. func ToUpperSpecial(c unicode.SpecialCase, s []byte) []byte { return Map(func(r rune) rune { return c.ToUpper(r) }, s) } -// ToLowerSpecial returns a copy of the byte slice s with all Unicode letters mapped to their +// ToLowerSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their // lower case, giving priority to the special casing rules. 
func ToLowerSpecial(c unicode.SpecialCase, s []byte) []byte { return Map(func(r rune) rune { return c.ToLower(r) }, s) } -// ToTitleSpecial returns a copy of the byte slice s with all Unicode letters mapped to their +// ToTitleSpecial treats s as UTF-8-encoded bytes and returns a copy with all the Unicode letters mapped to their // title case, giving priority to the special casing rules. func ToTitleSpecial(c unicode.SpecialCase, s []byte) []byte { return Map(func(r rune) rune { return c.ToTitle(r) }, s) @@ -460,8 +524,8 @@ func isSeparator(r rune) bool { return unicode.IsSpace(r) } -// Title returns a copy of s with all Unicode letters that begin words -// mapped to their title case. +// Title treats s as UTF-8-encoded bytes and returns a copy with all Unicode letters that begin +// words mapped to their title case. // // BUG(rsc): The rule Title uses for word boundaries does not handle Unicode punctuation properly. func Title(s []byte) []byte { @@ -481,8 +545,8 @@ func Title(s []byte) []byte { s) } -// TrimLeftFunc returns a subslice of s by slicing off all leading UTF-8-encoded -// Unicode code points c that satisfy f(c). +// TrimLeftFunc treats s as UTF-8-encoded bytes and returns a subslice of s by slicing off +// all leading UTF-8-encoded code points c that satisfy f(c). func TrimLeftFunc(s []byte, f func(r rune) bool) []byte { i := indexFunc(s, f, false) if i == -1 { @@ -491,8 +555,8 @@ func TrimLeftFunc(s []byte, f func(r rune) bool) []byte { return s[i:] } -// TrimRightFunc returns a subslice of s by slicing off all trailing UTF-8 -// encoded Unicode code points c that satisfy f(c). +// TrimRightFunc returns a subslice of s by slicing off all trailing +// UTF-8-encoded code points c that satisfy f(c). 
func TrimRightFunc(s []byte, f func(r rune) bool) []byte { i := lastIndexFunc(s, f, false) if i >= 0 && s[i] >= utf8.RuneSelf { @@ -505,7 +569,7 @@ func TrimRightFunc(s []byte, f func(r rune) bool) []byte { } // TrimFunc returns a subslice of s by slicing off all leading and trailing -// UTF-8-encoded Unicode code points c that satisfy f(c). +// UTF-8-encoded code points c that satisfy f(c). func TrimFunc(s []byte, f func(r rune) bool) []byte { return TrimRightFunc(TrimLeftFunc(s, f), f) } @@ -528,14 +592,14 @@ func TrimSuffix(s, suffix []byte) []byte { return s } -// IndexFunc interprets s as a sequence of UTF-8-encoded Unicode code points. +// IndexFunc interprets s as a sequence of UTF-8-encoded code points. // It returns the byte index in s of the first Unicode // code point satisfying f(c), or -1 if none do. func IndexFunc(s []byte, f func(r rune) bool) int { return indexFunc(s, f, true) } -// LastIndexFunc interprets s as a sequence of UTF-8-encoded Unicode code points. +// LastIndexFunc interprets s as a sequence of UTF-8-encoded code points. // It returns the byte index in s of the last Unicode // code point satisfying f(c), or -1 if none do. func LastIndexFunc(s []byte, f func(r rune) bool) int { @@ -626,19 +690,19 @@ func makeCutsetFunc(cutset string) func(r rune) bool { } // Trim returns a subslice of s by slicing off all leading and -// trailing UTF-8-encoded Unicode code points contained in cutset. +// trailing UTF-8-encoded code points contained in cutset. func Trim(s []byte, cutset string) []byte { return TrimFunc(s, makeCutsetFunc(cutset)) } // TrimLeft returns a subslice of s by slicing off all leading -// UTF-8-encoded Unicode code points contained in cutset. +// UTF-8-encoded code points contained in cutset. func TrimLeft(s []byte, cutset string) []byte { return TrimLeftFunc(s, makeCutsetFunc(cutset)) } // TrimRight returns a subslice of s by slicing off all trailing -// UTF-8-encoded Unicode code points that are contained in cutset. 
+// UTF-8-encoded code points that are contained in cutset. func TrimRight(s []byte, cutset string) []byte { return TrimRightFunc(s, makeCutsetFunc(cutset)) } @@ -649,7 +713,8 @@ func TrimSpace(s []byte) []byte { return TrimFunc(s, unicode.IsSpace) } -// Runes returns a slice of runes (Unicode code points) equivalent to s. +// Runes interprets s as a sequence of UTF-8-encoded code points. +// It returns a slice of runes (Unicode code points) equivalent to s. func Runes(s []byte) []rune { t := make([]rune, utf8.RuneCount(s)) i := 0 @@ -758,3 +823,46 @@ func EqualFold(s, t []byte) bool { // One string is empty. Are both? return len(s) == len(t) } + +func indexRabinKarp(s, sep []byte) int { + // Rabin-Karp search + hashsep, pow := hashStr(sep) + n := len(sep) + var h uint32 + for i := 0; i < n; i++ { + h = h*primeRK + uint32(s[i]) + } + if h == hashsep && Equal(s[:n], sep) { + return 0 + } + for i := n; i < len(s); { + h *= primeRK + h += uint32(s[i]) + h -= pow * uint32(s[i-n]) + i++ + if h == hashsep && Equal(s[i-n:i], sep) { + return i - n + } + } + return -1 +} + +// primeRK is the prime base used in Rabin-Karp algorithm. +const primeRK = 16777619 + +// hashStr returns the hash and the appropriate multiplicative +// factor for use in Rabin-Karp algorithm. 
+func hashStr(sep []byte) (uint32, uint32) { + hash := uint32(0) + for i := 0; i < len(sep); i++ { + hash = hash*primeRK + uint32(sep[i]) + } + var pow, sq uint32 = 1, primeRK + for i := len(sep); i > 0; i >>= 1 { + if i&1 != 0 { + pow *= sq + } + sq *= sq + } + return hash, pow +} diff --git a/src/bytes/bytes_amd64.go b/src/bytes/bytes_amd64.go index 77d5970152a..0c9d613ef9d 100644 --- a/src/bytes/bytes_amd64.go +++ b/src/bytes/bytes_amd64.go @@ -75,52 +75,14 @@ func Index(s, sep []byte) int { } return -1 } - // Rabin-Karp search - hashsep, pow := hashStr(sep) - var h uint32 - for i := 0; i < n; i++ { - h = h*primeRK + uint32(s[i]) - } - if h == hashsep && Equal(s[:n], sep) { - return 0 - } - for i := n; i < len(s); { - h *= primeRK - h += uint32(s[i]) - h -= pow * uint32(s[i-n]) - i++ - if h == hashsep && Equal(s[i-n:i], sep) { - return i - n - } - } - return -1 + return indexRabinKarp(s, sep) } // Count counts the number of non-overlapping instances of sep in s. -// If sep is an empty slice, Count returns 1 + the number of Unicode code points in s. +// If sep is an empty slice, Count returns 1 + the number of UTF-8-encoded code points in s. func Count(s, sep []byte) int { if len(sep) == 1 && cpu.X86.HasPOPCNT { return countByte(s, sep[0]) } return countGeneric(s, sep) } - -// primeRK is the prime base used in Rabin-Karp algorithm. -const primeRK = 16777619 - -// hashStr returns the hash and the appropriate multiplicative -// factor for use in Rabin-Karp algorithm. -func hashStr(sep []byte) (uint32, uint32) { - hash := uint32(0) - for i := 0; i < len(sep); i++ { - hash = hash*primeRK + uint32(sep[i]) - } - var pow, sq uint32 = 1, primeRK - for i := len(sep); i > 0; i >>= 1 { - if i&1 != 0 { - pow *= sq - } - sq *= sq - } - return hash, pow -} diff --git a/src/bytes/bytes_arm64.go b/src/bytes/bytes_arm64.go new file mode 100644 index 00000000000..846eeba486a --- /dev/null +++ b/src/bytes/bytes_arm64.go @@ -0,0 +1,68 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bytes + +func countByte(s []byte, c byte) int // bytes_arm64.s + +// Index returns the index of the first instance of sep in s, or -1 if sep is not present in s. +func Index(s, sep []byte) int { + n := len(sep) + switch { + case n == 0: + return 0 + case n == 1: + return IndexByte(s, sep[0]) + case n == len(s): + if Equal(sep, s) { + return 0 + } + return -1 + case n > len(s): + return -1 + } + c := sep[0] + i := 0 + fails := 0 + t := s[:len(s)-n+1] + for i < len(t) { + if t[i] != c { + o := IndexByte(t[i:], c) + if o < 0 { + break + } + i += o + } + if Equal(s[i:i+n], sep) { + return i + } + i++ + fails++ + if fails >= 4+i>>4 && i < len(t) { + // Give up on IndexByte, it isn't skipping ahead + // far enough to be better than Rabin-Karp. + // Experiments (using IndexPeriodic) suggest + // the cutover is about 16 byte skips. + // TODO: if large prefixes of sep are matching + // we should cutover at even larger average skips, + // because Equal becomes that much more expensive. + // This code does not take that effect into account. + j := indexRabinKarp(s[i:], sep) + if j < 0 { + return -1 + } + return i + j + } + } + return -1 +} + +// Count counts the number of non-overlapping instances of sep in s. +// If sep is an empty slice, Count returns 1 + the number of UTF-8-encoded code points in s. +func Count(s, sep []byte) int { + if len(sep) == 1 { + return countByte(s, sep[0]) + } + return countGeneric(s, sep) +} diff --git a/src/bytes/bytes_arm64.s b/src/bytes/bytes_arm64.s new file mode 100644 index 00000000000..5e229d772be --- /dev/null +++ b/src/bytes/bytes_arm64.s @@ -0,0 +1,74 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include "textflag.h" + +// countByte(s []byte, c byte) int +TEXT bytes·countByte(SB),NOSPLIT,$0-40 + MOVD s_base+0(FP), R0 + MOVD s_len+8(FP), R2 + MOVBU c+24(FP), R1 + // R11 = count of byte to search + MOVD $0, R11 + // short path to handle 0-byte case + CBZ R2, done + CMP $0x20, R2 + // jump directly to tail if length < 32 + BLO tail + ANDS $0x1f, R0, R9 + BEQ chunk + // Work with not 32-byte aligned head + BIC $0x1f, R0, R3 + ADD $0x20, R3 +head_loop: + MOVBU.P 1(R0), R5 + CMP R5, R1 + CINC EQ, R11, R11 + SUB $1, R2, R2 + CMP R0, R3 + BNE head_loop + // Work with 32-byte aligned chunks +chunk: + BIC $0x1f, R2, R9 + // The first chunk can also be the last + CBZ R9, tail + // R3 = end of 32-byte chunks + ADD R0, R9, R3 + MOVD $1, R5 + VMOV R5, V5.B16 + // R2 = length of tail + SUB R9, R2, R2 + // Duplicate R1 (byte to search) to 16 1-byte elements of V0 + VMOV R1, V0.B16 + // Clear the low 64-bit element of V7 and V8 + VEOR V7.B8, V7.B8, V7.B8 + VEOR V8.B8, V8.B8, V8.B8 + // Count the target byte in 32-byte chunk +chunk_loop: + VLD1.P (R0), [V1.B16, V2.B16] + CMP R0, R3 + VCMEQ V0.B16, V1.B16, V3.B16 + VCMEQ V0.B16, V2.B16, V4.B16 + // Clear the higher 7 bits + VAND V5.B16, V3.B16, V3.B16 + VAND V5.B16, V4.B16, V4.B16 + // Count lanes match the requested byte + VADDP V4.B16, V3.B16, V6.B16 // 32B->16B + VUADDLV V6.B16, V7 + // Accumulate the count in low 64-bit element of V8 when inside the loop + VADD V7, V8 + BNE chunk_loop + VMOV V8.D[0], R6 + ADD R6, R11, R11 + CBZ R2, done +tail: + // Work with tail shorter than 32 bytes + MOVBU.P 1(R0), R5 + SUB $1, R2, R2 + CMP R5, R1 + CINC EQ, R11, R11 + CBNZ R2, tail +done: + MOVD R11, ret+32(FP) + RET diff --git a/src/bytes/bytes_generic.go b/src/bytes/bytes_generic.go index 98454bc121b..0e7d33f09ad 100644 --- a/src/bytes/bytes_generic.go +++ b/src/bytes/bytes_generic.go @@ -2,27 +2,29 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !amd64,!s390x +// +build !amd64,!s390x,!arm64 package bytes -// TODO: implements short string optimization on non amd64 platforms -// and get rid of bytes_amd64.go - // Index returns the index of the first instance of sep in s, or -1 if sep is not present in s. func Index(s, sep []byte) int { n := len(sep) - if n == 0 { + switch { + case n == 0: return 0 - } - if n > len(s) { + case n == 1: + return IndexByte(s, sep[0]) + case n == len(s): + if Equal(sep, s) { + return 0 + } + return -1 + case n > len(s): return -1 } c := sep[0] - if n == 1 { - return IndexByte(s, c) - } i := 0 + fails := 0 t := s[:len(s)-n+1] for i < len(t) { if t[i] != c { @@ -36,12 +38,28 @@ func Index(s, sep []byte) int { return i } i++ + fails++ + if fails >= 4+i>>4 && i < len(t) { + // Give up on IndexByte, it isn't skipping ahead + // far enough to be better than Rabin-Karp. + // Experiments (using IndexPeriodic) suggest + // the cutover is about 16 byte skips. + // TODO: if large prefixes of sep are matching + // we should cutover at even larger average skips, + // because Equal becomes that much more expensive. + // This code does not take that effect into account. + j := indexRabinKarp(s[i:], sep) + if j < 0 { + return -1 + } + return i + j + } } return -1 } // Count counts the number of non-overlapping instances of sep in s. -// If sep is an empty slice, Count returns 1 + the number of Unicode code points in s. +// If sep is an empty slice, Count returns 1 + the number of UTF-8-encoded code points in s. 
func Count(s, sep []byte) int { return countGeneric(s, sep) } diff --git a/src/bytes/bytes_s390x.go b/src/bytes/bytes_s390x.go index 68b57301fe8..c59b891292f 100644 --- a/src/bytes/bytes_s390x.go +++ b/src/bytes/bytes_s390x.go @@ -76,49 +76,11 @@ func Index(s, sep []byte) int { } return -1 } - // Rabin-Karp search - hashsep, pow := hashStr(sep) - var h uint32 - for i := 0; i < n; i++ { - h = h*primeRK + uint32(s[i]) - } - if h == hashsep && Equal(s[:n], sep) { - return 0 - } - for i := n; i < len(s); { - h *= primeRK - h += uint32(s[i]) - h -= pow * uint32(s[i-n]) - i++ - if h == hashsep && Equal(s[i-n:i], sep) { - return i - n - } - } - return -1 + return indexRabinKarp(s, sep) } // Count counts the number of non-overlapping instances of sep in s. -// If sep is an empty slice, Count returns 1 + the number of Unicode code points in s. +// If sep is an empty slice, Count returns 1 + the number of UTF-8-encoded code points in s. func Count(s, sep []byte) int { return countGeneric(s, sep) } - -// primeRK is the prime base used in Rabin-Karp algorithm. -const primeRK = 16777619 - -// hashStr returns the hash and the appropriate multiplicative -// factor for use in Rabin-Karp algorithm. -func hashStr(sep []byte) (uint32, uint32) { - hash := uint32(0) - for i := 0; i < len(sep); i++ { - hash = hash*primeRK + uint32(sep[i]) - } - var pow, sq uint32 = 1, primeRK - for i := len(sep); i > 0; i >>= 1 { - if i&1 != 0 { - pow *= sq - } - sq *= sq - } - return hash, pow -} diff --git a/src/bytes/bytes_test.go b/src/bytes/bytes_test.go index ca0cdbb7c9f..1e56571c738 100644 --- a/src/bytes/bytes_test.go +++ b/src/bytes/bytes_test.go @@ -139,6 +139,9 @@ var indexTests = []BinOpTest{ {"barfoobarfooyyyzzzyyyzzzyyyzzzyyyxxxzzzyyy", "x", 33}, {"foofyfoobarfoobar", "y", 4}, {"oooooooooooooooooooooo", "r", -1}, + // test fallback to Rabin-Karp. 
+ {"oxoxoxoxoxoxoxoxoxoxoxoy", "oy", 22}, + {"oxoxoxoxoxoxoxoxoxoxoxox", "oy", -1}, } var lastIndexTests = []BinOpTest{ @@ -736,6 +739,13 @@ var splittests = []SplitTest{ func TestSplit(t *testing.T) { for _, tt := range splittests { a := SplitN([]byte(tt.s), []byte(tt.sep), tt.n) + + // Appending to the results should not change future results. + var x []byte + for _, v := range a { + x = append(v, 'z') + } + result := sliceOfString(a) if !eq(result, tt.a) { t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a) @@ -744,6 +754,11 @@ func TestSplit(t *testing.T) { if tt.n == 0 { continue } + + if want := tt.a[len(tt.a)-1] + "z"; string(x) != want { + t.Errorf("last appended result was %s; want %s", x, want) + } + s := Join(a, []byte(tt.sep)) if string(s) != tt.s { t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s) @@ -782,11 +797,23 @@ var splitaftertests = []SplitTest{ func TestSplitAfter(t *testing.T) { for _, tt := range splitaftertests { a := SplitAfterN([]byte(tt.s), []byte(tt.sep), tt.n) + + // Appending to the results should not change future results. + var x []byte + for _, v := range a { + x = append(v, 'z') + } + result := sliceOfString(a) if !eq(result, tt.a) { t.Errorf(`Split(%q, %q, %d) = %v; want %v`, tt.s, tt.sep, tt.n, result, tt.a) continue } + + if want := tt.a[len(tt.a)-1] + "z"; string(x) != want { + t.Errorf("last appended result was %s; want %s", x, want) + } + s := Join(a, nil) if string(s) != tt.s { t.Errorf(`Join(Split(%q, %q, %d), %q) = %q`, tt.s, tt.sep, tt.n, tt.sep, s) @@ -821,12 +848,29 @@ var fieldstests = []FieldsTest{ func TestFields(t *testing.T) { for _, tt := range fieldstests { - a := Fields([]byte(tt.s)) + b := []byte(tt.s) + a := Fields(b) + + // Appending to the results should not change future results. 
+ var x []byte + for _, v := range a { + x = append(v, 'z') + } + result := sliceOfString(a) if !eq(result, tt.a) { t.Errorf("Fields(%q) = %v; want %v", tt.s, a, tt.a) continue } + + if string(b) != tt.s { + t.Errorf("slice changed to %s; want %s", string(b), tt.s) + } + if len(tt.a) > 0 { + if want := tt.a[len(tt.a)-1] + "z"; string(x) != want { + t.Errorf("last appended result was %s; want %s", x, want) + } + } } } @@ -847,11 +891,28 @@ func TestFieldsFunc(t *testing.T) { {"aXXbXXXcX", []string{"a", "b", "c"}}, } for _, tt := range fieldsFuncTests { - a := FieldsFunc([]byte(tt.s), pred) + b := []byte(tt.s) + a := FieldsFunc(b, pred) + + // Appending to the results should not change future results. + var x []byte + for _, v := range a { + x = append(v, 'z') + } + result := sliceOfString(a) if !eq(result, tt.a) { t.Errorf("FieldsFunc(%q) = %v, want %v", tt.s, a, tt.a) } + + if string(b) != tt.s { + t.Errorf("slice changed to %s; want %s", b, tt.s) + } + if len(tt.a) > 0 { + if want := tt.a[len(tt.a)-1] + "z"; string(x) != want { + t.Errorf("last appended result was %s; want %s", x, want) + } + } } } @@ -1502,19 +1563,58 @@ var makeFieldsInput = func() []byte { return x } -var fieldsInput = makeFieldsInput() +var makeFieldsInputASCII = func() []byte { + x := make([]byte, 1<<20) + // Input is ~10% space, rest ASCII non-space. 
+ for i := range x { + if rand.Intn(10) == 0 { + x[i] = ' ' + } else { + x[i] = 'x' + } + } + return x +} + +var bytesdata = []struct { + name string + data []byte +}{ + {"ASCII", makeFieldsInputASCII()}, + {"Mixed", makeFieldsInput()}, +} func BenchmarkFields(b *testing.B) { - b.SetBytes(int64(len(fieldsInput))) - for i := 0; i < b.N; i++ { - Fields(fieldsInput) + for _, sd := range bytesdata { + b.Run(sd.name, func(b *testing.B) { + for j := 1 << 4; j <= 1<<20; j <<= 4 { + b.Run(fmt.Sprintf("%d", j), func(b *testing.B) { + b.ReportAllocs() + b.SetBytes(int64(j)) + data := sd.data[:j] + for i := 0; i < b.N; i++ { + Fields(data) + } + }) + } + }) } } func BenchmarkFieldsFunc(b *testing.B) { - b.SetBytes(int64(len(fieldsInput))) - for i := 0; i < b.N; i++ { - FieldsFunc(fieldsInput, unicode.IsSpace) + for _, sd := range bytesdata { + b.Run(sd.name, func(b *testing.B) { + for j := 1 << 4; j <= 1<<20; j <<= 4 { + b.Run(fmt.Sprintf("%d", j), func(b *testing.B) { + b.ReportAllocs() + b.SetBytes(int64(j)) + data := sd.data[:j] + for i := 0; i < b.N; i++ { + FieldsFunc(data, unicode.IsSpace) + } + }) + } + }) } } @@ -1633,3 +1733,18 @@ func BenchmarkTrimASCII(b *testing.B) { } } } + +func BenchmarkIndexPeriodic(b *testing.B) { + key := []byte{1, 1} + for _, skip := range [...]int{2, 4, 8, 16, 32, 64} { + b.Run(fmt.Sprintf("IndexPeriodic%d", skip), func(b *testing.B) { + buf := make([]byte, 1<<16) + for i := 0; i < len(buf); i += skip { + buf[i] = 1 + } + for i := 0; i < b.N; i++ { + Index(buf, key) + } + }) + } +} diff --git a/src/bytes/equal_test.go b/src/bytes/equal_test.go deleted file mode 100644 index 9fdead8a604..00000000000 --- a/src/bytes/equal_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// +build linux - -package bytes_test - -import ( - . 
"bytes" - "syscall" - "testing" - "unsafe" -) - -// This file tests the situation where memeq is checking -// data very near to a page boundary. We want to make sure -// equal does not read across the boundary and cause a page -// fault where it shouldn't. - -// This test runs only on linux. The code being tested is -// not OS-specific, so it does not need to be tested on all -// operating systems. - -func TestEqualNearPageBoundary(t *testing.T) { - pagesize := syscall.Getpagesize() - b := make([]byte, 4*pagesize) - i := pagesize - for ; uintptr(unsafe.Pointer(&b[i]))%uintptr(pagesize) != 0; i++ { - } - syscall.Mprotect(b[i-pagesize:i], 0) - syscall.Mprotect(b[i+pagesize:i+2*pagesize], 0) - defer syscall.Mprotect(b[i-pagesize:i], syscall.PROT_READ|syscall.PROT_WRITE) - defer syscall.Mprotect(b[i+pagesize:i+2*pagesize], syscall.PROT_READ|syscall.PROT_WRITE) - - // both of these should fault - //pagesize += int(b[i-1]) - //pagesize += int(b[i+pagesize]) - - for j := 0; j < pagesize; j++ { - b[i+j] = 'A' - } - for j := 0; j <= pagesize; j++ { - Equal(b[i:i+j], b[i+pagesize-j:i+pagesize]) - Equal(b[i+pagesize-j:i+pagesize], b[i:i+j]) - } -} diff --git a/src/bytes/example_test.go b/src/bytes/example_test.go index 93972770ab2..5b7a46058f5 100644 --- a/src/bytes/example_test.go +++ b/src/bytes/example_test.go @@ -119,6 +119,32 @@ func ExampleContains() { // true } +func ExampleContainsAny() { + fmt.Println(bytes.ContainsAny([]byte("I like seafood."), "fÄo!")) + fmt.Println(bytes.ContainsAny([]byte("I like seafood."), "去是伟大的.")) + fmt.Println(bytes.ContainsAny([]byte("I like seafood."), "")) + fmt.Println(bytes.ContainsAny([]byte(""), "")) + // Output: + // true + // true + // false + // false +} + +func ExampleContainsRune() { + fmt.Println(bytes.ContainsRune([]byte("I like seafood."), 'f')) + fmt.Println(bytes.ContainsRune([]byte("I like seafood."), 'ö')) + fmt.Println(bytes.ContainsRune([]byte("去是伟大的!"), '大')) + fmt.Println(bytes.ContainsRune([]byte("去是伟大的!"), '!')) + 
fmt.Println(bytes.ContainsRune([]byte(""), '@')) + // Output: + // true + // false + // true + // true + // false +} + func ExampleCount() { fmt.Println(bytes.Count([]byte("cheese"), []byte("e"))) fmt.Println(bytes.Count([]byte("five"), []byte(""))) // before & after each rune @@ -127,6 +153,14 @@ func ExampleCount() { // 5 } +func ExampleEqual() { + fmt.Println(bytes.Equal([]byte("Go"), []byte("Go"))) + fmt.Println(bytes.Equal([]byte("Go"), []byte("C++"))) + // Output: + // true + // false +} + func ExampleEqualFold() { fmt.Println(bytes.EqualFold([]byte("Go"), []byte("go"))) // Output: true @@ -162,6 +196,14 @@ func ExampleIndex() { // -1 } +func ExampleIndexByte() { + fmt.Println(bytes.IndexByte([]byte("chicken"), byte('k'))) + fmt.Println(bytes.IndexByte([]byte("chicken"), byte('g'))) + // Output: + // 4 + // -1 +} + func ExampleIndexFunc() { f := func(c rune) bool { return unicode.Is(unicode.Han, c) @@ -199,6 +241,36 @@ func ExampleLastIndex() { // -1 } +func ExampleLastIndexAny() { + fmt.Println(bytes.LastIndexAny([]byte("go gopher"), "MüQp")) + fmt.Println(bytes.LastIndexAny([]byte("go 地鼠"), "地大")) + fmt.Println(bytes.LastIndexAny([]byte("go gopher"), "z,!.")) + // Output: + // 5 + // 3 + // -1 +} + +func ExampleLastIndexByte() { + fmt.Println(bytes.LastIndexByte([]byte("go gopher"), byte('g'))) + fmt.Println(bytes.LastIndexByte([]byte("go gopher"), byte('r'))) + fmt.Println(bytes.LastIndexByte([]byte("go gopher"), byte('z'))) + // Output: + // 3 + // 8 + // -1 +} + +func ExampleLastIndexFunc() { + fmt.Println(bytes.LastIndexFunc([]byte("go gopher!"), unicode.IsLetter)) + fmt.Println(bytes.LastIndexFunc([]byte("go gopher!"), unicode.IsPunct)) + fmt.Println(bytes.LastIndexFunc([]byte("go gopher!"), unicode.IsNumber)) + // Output: + // 8 + // 9 + // -1 +} + func ExampleJoin() { s := [][]byte{[]byte("foo"), []byte("bar"), []byte("baz")} fmt.Printf("%s", bytes.Join(s, []byte(", "))) @@ -218,6 +290,23 @@ func ExampleReplace() { // moo moo moo } +func 
ExampleRunes() { + rs := bytes.Runes([]byte("go gopher")) + for _, r := range rs { + fmt.Printf("%#U\n", r) + } + // Output: + // U+0067 'g' + // U+006F 'o' + // U+0020 ' ' + // U+0067 'g' + // U+006F 'o' + // U+0070 'p' + // U+0068 'h' + // U+0065 'e' + // U+0072 'r' +} + func ExampleSplit() { fmt.Printf("%q\n", bytes.Split([]byte("a,b,c"), []byte(","))) fmt.Printf("%q\n", bytes.Split([]byte("a man a plan a canal panama"), []byte("a "))) @@ -267,6 +356,18 @@ func ExampleTrim() { // Output: ["Achtung! Achtung"] } +func ExampleTrimFunc() { + fmt.Println(string(bytes.TrimFunc([]byte("go-gopher!"), unicode.IsLetter))) + fmt.Println(string(bytes.TrimFunc([]byte("\"go-gopher!\""), unicode.IsLetter))) + fmt.Println(string(bytes.TrimFunc([]byte("go-gopher!"), unicode.IsPunct))) + fmt.Println(string(bytes.TrimFunc([]byte("1234go-gopher!567"), unicode.IsNumber))) + // Output: + // -gopher! + // "go-gopher!" + // go-gopher + // go-gopher! +} + func ExampleMap() { rot13 := func(r rune) rune { switch { @@ -281,11 +382,43 @@ func ExampleMap() { // Output: 'Gjnf oevyyvt naq gur fyvgul tbcure... } +func ExampleTrimLeft() { + fmt.Print(string(bytes.TrimLeft([]byte("453gopher8257"), "0123456789"))) + // Output: + // gopher8257 +} + +func ExampleTrimLeftFunc() { + fmt.Println(string(bytes.TrimLeftFunc([]byte("go-gopher"), unicode.IsLetter))) + fmt.Println(string(bytes.TrimLeftFunc([]byte("go-gopher!"), unicode.IsPunct))) + fmt.Println(string(bytes.TrimLeftFunc([]byte("1234go-gopher!567"), unicode.IsNumber))) + // Output: + // -gopher + // go-gopher! 
+ // go-gopher!567 +} + func ExampleTrimSpace() { fmt.Printf("%s", bytes.TrimSpace([]byte(" \t\n a lone gopher \n\t\r\n"))) // Output: a lone gopher } +func ExampleTrimRight() { + fmt.Print(string(bytes.TrimRight([]byte("453gopher8257"), "0123456789"))) + // Output: + // 453gopher +} + +func ExampleTrimRightFunc() { + fmt.Println(string(bytes.TrimRightFunc([]byte("go-gopher"), unicode.IsLetter))) + fmt.Println(string(bytes.TrimRightFunc([]byte("go-gopher!"), unicode.IsPunct))) + fmt.Println(string(bytes.TrimRightFunc([]byte("1234go-gopher!567"), unicode.IsNumber))) + // Output: + // go- + // go-gopher + // 1234go-gopher! +} + func ExampleToUpper() { fmt.Printf("%s", bytes.ToUpper([]byte("Gopher"))) // Output: GOPHER @@ -295,3 +428,11 @@ func ExampleToLower() { fmt.Printf("%s", bytes.ToLower([]byte("Gopher"))) // Output: gopher } + +func ExampleReader_Len() { + fmt.Println(bytes.NewReader([]byte("Hi!")).Len()) + fmt.Println(bytes.NewReader([]byte("こんにちは!")).Len()) + // Output: + // 3 + // 16 +} diff --git a/src/bytes/reader.go b/src/bytes/reader.go index 28cfc7a9788..08464c2402d 100644 --- a/src/bytes/reader.go +++ b/src/bytes/reader.go @@ -35,6 +35,7 @@ func (r *Reader) Len() int { // to any other method. func (r *Reader) Size() int64 { return int64(len(r.s)) } +// Read implements the io.Reader interface. func (r *Reader) Read(b []byte) (n int, err error) { if r.i >= int64(len(r.s)) { return 0, io.EOF @@ -45,6 +46,7 @@ func (r *Reader) Read(b []byte) (n int, err error) { return } +// ReadAt implements the io.ReaderAt interface. func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) { // cannot modify state - see io.ReaderAt if off < 0 { @@ -60,6 +62,7 @@ func (r *Reader) ReadAt(b []byte, off int64) (n int, err error) { return } +// ReadByte implements the io.ByteReader interface. 
func (r *Reader) ReadByte() (byte, error) { r.prevRune = -1 if r.i >= int64(len(r.s)) { @@ -70,6 +73,7 @@ func (r *Reader) ReadByte() (byte, error) { return b, nil } +// UnreadByte complements ReadByte in implementing the io.ByteScanner interface. func (r *Reader) UnreadByte() error { r.prevRune = -1 if r.i <= 0 { @@ -79,6 +83,7 @@ func (r *Reader) UnreadByte() error { return nil } +// ReadRune implements the io.RuneReader interface. func (r *Reader) ReadRune() (ch rune, size int, err error) { if r.i >= int64(len(r.s)) { r.prevRune = -1 @@ -94,6 +99,7 @@ func (r *Reader) ReadRune() (ch rune, size int, err error) { return } +// UnreadRune complements ReadRune in implementing the io.RuneScanner interface. func (r *Reader) UnreadRune() error { if r.prevRune < 0 { return errors.New("bytes.Reader.UnreadRune: previous operation was not ReadRune") diff --git a/src/bytes/reader_test.go b/src/bytes/reader_test.go index 7b3034d4e0d..8806876ff13 100644 --- a/src/bytes/reader_test.go +++ b/src/bytes/reader_test.go @@ -140,9 +140,9 @@ func TestReaderWriteTo(t *testing.T) { for i := 0; i < 30; i += 3 { var l int if i > 0 { - l = len(data) / i + l = len(testString) / i } - s := data[:l] + s := testString[:l] r := NewReader(testBytes[:l]) var b Buffer n, err := r.WriteTo(&b) diff --git a/src/cmd/api/goapi.go b/src/cmd/api/goapi.go index 936f9e55115..8cc78c01ed4 100644 --- a/src/cmd/api/goapi.go +++ b/src/cmd/api/goapi.go @@ -27,6 +27,18 @@ import ( "strings" ) +func goCmd() string { + var exeSuffix string + if runtime.GOOS == "windows" { + exeSuffix = ".exe" + } + path := filepath.Join(runtime.GOROOT(), "bin", "go"+exeSuffix) + if _, err := os.Stat(path); err == nil { + return path + } + return "go" +} + // Flags var ( checkFile = flag.String("c", "", "optional comma-separated filename(s) to check API against") @@ -127,7 +139,7 @@ func main() { if flag.NArg() > 0 { pkgNames = flag.Args() } else { - stds, err := exec.Command("go", "list", "std").Output() + stds, err := 
exec.Command(goCmd(), "list", "std").Output() if err != nil { log.Fatal(err) } diff --git a/src/cmd/api/goapi_test.go b/src/cmd/api/goapi_test.go index 0d00f6a2977..3c4e50a21a3 100644 --- a/src/cmd/api/goapi_test.go +++ b/src/cmd/api/goapi_test.go @@ -9,6 +9,7 @@ import ( "flag" "fmt" "go/build" + "internal/testenv" "io/ioutil" "os" "os/exec" @@ -163,7 +164,7 @@ func TestSkipInternal(t *testing.T) { } func BenchmarkAll(b *testing.B) { - stds, err := exec.Command("go", "list", "std").Output() + stds, err := exec.Command(testenv.GoToolPath(b), "list", "std").Output() if err != nil { b.Fatal(err) } diff --git a/src/cmd/api/run.go b/src/cmd/api/run.go index 20cddb704bf..219776cae40 100644 --- a/src/cmd/api/run.go +++ b/src/cmd/api/run.go @@ -14,8 +14,21 @@ import ( "os" "os/exec" "path/filepath" + "runtime" ) +func goCmd() string { + var exeSuffix string + if runtime.GOOS == "windows" { + exeSuffix = ".exe" + } + path := filepath.Join(runtime.GOROOT(), "bin", "go"+exeSuffix) + if _, err := os.Stat(path); err == nil { + return path + } + return "go" +} + var goroot string func main() { @@ -25,7 +38,7 @@ func main() { log.Fatal("No $GOROOT set.") } - out, err := exec.Command("go", "tool", "api", + out, err := exec.Command(goCmd(), "tool", "api", "-c", file("go1", "go1.1", "go1.2", "go1.3", "go1.4", "go1.5", "go1.6", "go1.7", "go1.8", "go1.9"), "-next", file("next"), "-except", file("except")).CombinedOutput() diff --git a/src/cmd/asm/doc.go b/src/cmd/asm/doc.go index aa621479579..c39cab3c195 100644 --- a/src/cmd/asm/doc.go +++ b/src/cmd/asm/doc.go @@ -19,23 +19,26 @@ The GOOS and GOARCH environment variables set the desired target. Flags: - -D value - predefined symbol with optional simple value -D=identifier=value; - can be set multiple times - -I value - include directory; can be set multiple times - -S print assembly and machine code + -D name[=value] + Predefine symbol name with an optional simple value. + Can be repeated to define multiple symbols. 
+ -I dir1 -I dir2 + Search for #include files in dir1, dir2, etc, + after consulting $GOROOT/pkg/$GOOS_$GOARCH. + -S + Print assembly and machine code. + -V + Print assembler version and exit. -debug - dump instructions as they are parsed + Dump instructions as they are parsed. -dynlink - support references to Go symbols defined in other shared libraries - -o string - output file; default foo.o for /a/b/c/foo.s + Support references to Go symbols defined in other shared libraries. + -o file + Write output to file. The default is foo.o for /a/b/c/foo.s. -shared - generate code that can be linked into a shared library - -trimpath string - remove prefix from recorded source file paths - + Generate code that can be linked into a shared library. + -trimpath prefix + Remove prefix from recorded source file paths. Input language: The assembler uses mostly the same syntax for all architectures, diff --git a/src/cmd/asm/internal/arch/amd64.go b/src/cmd/asm/internal/arch/amd64.go deleted file mode 100644 index ff20d32daaa..00000000000 --- a/src/cmd/asm/internal/arch/amd64.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file encapsulates some of the odd characteristics of the -// AMD64 instruction set, to minimize its interaction -// with the core of the assembler. - -package arch - -import ( - "cmd/internal/obj" - "cmd/internal/obj/x86" -) - -// IsAMD4OP reports whether the op (as defined by an amd64.A* constant) is -// a 4-operand instruction. 
-func IsAMD4OP(op obj.As) bool { - switch op { - case x86.AVPERM2F128, - x86.AVPALIGNR, - x86.AVPERM2I128, - x86.AVINSERTI128, - x86.AVPBLENDD: - return true - } - return false -} diff --git a/src/cmd/asm/internal/arch/arm.go b/src/cmd/asm/internal/arch/arm.go index 40443d5ecaf..6e86ac0fbe2 100644 --- a/src/cmd/asm/internal/arch/arm.go +++ b/src/cmd/asm/internal/arch/arm.go @@ -122,6 +122,16 @@ func IsARMMRC(op obj.As) bool { return false } +// IsARMBFX reports whether the op (as defined by an arm.A* constant) is one the +// BFX-like instructions which are in the form of "op $width, $LSB, (Reg,) Reg". +func IsARMBFX(op obj.As) bool { + switch op { + case arm.ABFX, arm.ABFXU, arm.ABFC, arm.ABFI: + return true + } + return false +} + // IsARMFloatCmp reports whether the op is a floating comparison instruction. func IsARMFloatCmp(op obj.As) bool { switch op { diff --git a/src/cmd/asm/internal/arch/arm64.go b/src/cmd/asm/internal/arch/arm64.go index dd04719451e..2fd21b58b8f 100644 --- a/src/cmd/asm/internal/arch/arm64.go +++ b/src/cmd/asm/internal/arch/arm64.go @@ -11,6 +11,7 @@ package arch import ( "cmd/internal/obj" "cmd/internal/obj/arm64" + "errors" ) var arm64LS = map[string]uint8{ @@ -56,7 +57,9 @@ func jumpArm64(word string) bool { func IsARM64CMP(op obj.As) bool { switch op { case arm64.ACMN, arm64.ACMP, arm64.ATST, - arm64.ACMNW, arm64.ACMPW, arm64.ATSTW: + arm64.ACMNW, arm64.ACMPW, arm64.ATSTW, + arm64.AFCMPS, arm64.AFCMPD, + arm64.AFCMPES, arm64.AFCMPED: return true } return false @@ -67,7 +70,8 @@ func IsARM64CMP(op obj.As) bool { // handling. 
func IsARM64STLXR(op obj.As) bool { switch op { - case arm64.ASTLXRB, arm64.ASTLXRH, arm64.ASTLXRW, arm64.ASTLXR: + case arm64.ASTLXRB, arm64.ASTLXRH, arm64.ASTLXRW, arm64.ASTLXR, + arm64.ASTXRB, arm64.ASTXRH, arm64.ASTXRW, arm64.ASTXR: return true } return false @@ -115,3 +119,162 @@ func arm64RegisterNumber(name string, n int16) (int16, bool) { } return 0, false } + +// ARM64RegisterExtension parses an ARM64 register with extension or arrangment. +func ARM64RegisterExtension(a *obj.Addr, ext string, reg, num int16, isAmount, isIndex bool) error { + rm := uint32(reg) + switch ext { + case "UXTB": + if !isAmount { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_UXTB + (reg & 31) + int16(num<<5) + a.Offset = int64(((rm & 31) << 16) | (uint32(num) << 10)) + case "UXTH": + if !isAmount { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_UXTH + (num & 31) + int16(num<<5) + a.Offset = int64(((rm & 31) << 16) | (1 << 13) | (uint32(num) << 10)) + case "UXTW": + if !isAmount { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_UXTW + (reg & 31) + int16(num<<5) + a.Offset = int64(((rm & 31) << 16) | (2 << 13) | (uint32(num) << 10)) + case "UXTX": + if !isAmount { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_UXTX + (reg & 31) + int16(num<<5) + a.Offset = int64(((rm & 31) << 16) | (3 << 13) | (uint32(num) << 10)) + case "SXTB": + if !isAmount { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_SXTB + (reg & 31) + int16(num<<5) + a.Offset = int64(((rm & 31) << 16) | (4 << 13) | (uint32(num) << 10)) + case "SXTH": + if !isAmount { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_SXTH + (reg & 31) + int16(num<<5) + a.Offset = int64(((rm & 31) << 16) | (5 << 13) | (uint32(num) << 10)) + case "SXTW": + if !isAmount { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_SXTW + (reg & 31) + int16(num<<5) + 
a.Offset = int64(((rm & 31) << 16) | (6 << 13) | (uint32(num) << 10)) + case "SXTX": + if !isAmount { + return errors.New("invalid register extension") + } + a.Reg = arm64.REG_SXTX + (reg & 31) + int16(num<<5) + a.Offset = int64(((rm & 31) << 16) | (7 << 13) | (uint32(num) << 10)) + case "B8": + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8B & 15) << 5) + case "B16": + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_16B & 15) << 5) + case "H4": + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4H & 15) << 5) + case "H8": + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_8H & 15) << 5) + case "S2": + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2S & 15) << 5) + case "S4": + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_4S & 15) << 5) + case "D2": + a.Reg = arm64.REG_ARNG + (reg & 31) + ((arm64.ARNG_2D & 15) << 5) + case "B": + if !isIndex { + return nil + } + a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_B & 15) << 5) + a.Index = num + case "H": + if !isIndex { + return nil + } + a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_H & 15) << 5) + a.Index = num + case "S": + if !isIndex { + return nil + } + a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_S & 15) << 5) + a.Index = num + case "D": + if !isIndex { + return nil + } + a.Reg = arm64.REG_ELEM + (reg & 31) + ((arm64.ARNG_D & 15) << 5) + a.Index = num + default: + return errors.New("unsupported register extension type: " + ext) + } + a.Type = obj.TYPE_REG + return nil +} + +// ARM64RegisterArrangement parses an ARM64 vector register arrangment. 
+func ARM64RegisterArrangement(reg int16, name, arng string) (int64, error) { + var curQ, curSize uint16 + if name[0] != 'V' { + return 0, errors.New("expect V0 through V31; found: " + name) + } + if reg < 0 { + return 0, errors.New("invalid register number: " + name) + } + switch arng { + case "B8": + curSize = 0 + curQ = 0 + case "B16": + curSize = 0 + curQ = 1 + case "H4": + curSize = 1 + curQ = 0 + case "H8": + curSize = 1 + curQ = 1 + case "S2": + curSize = 1 + curQ = 0 + case "S4": + curSize = 2 + curQ = 1 + case "D1": + curSize = 3 + curQ = 0 + case "D2": + curSize = 3 + curQ = 1 + default: + return 0, errors.New("invalid arrangement in ARM64 register list") + } + return (int64(curQ) & 1 << 30) | (int64(curSize&3) << 10), nil +} + +// ARM64RegisterListOffset generates offset encoding according to AArch64 specification. +func ARM64RegisterListOffset(firstReg, regCnt int, arrangement int64) (int64, error) { + offset := int64(firstReg) + switch regCnt { + case 1: + offset |= 0x7 << 12 + case 2: + offset |= 0xa << 12 + case 3: + offset |= 0x6 << 12 + case 4: + offset |= 0x2 << 12 + default: + return 0, errors.New("invalid register numbers in ARM64 register list") + } + offset |= arrangement + // arm64 uses the 60th bit to differentiate from other archs + // For more details, refer to: obj/arm64/list7.go + offset |= 1 << 60 + return offset, nil +} diff --git a/src/cmd/asm/internal/asm/asm.go b/src/cmd/asm/internal/asm/asm.go index df23856c474..bf3545b32fa 100644 --- a/src/cmd/asm/internal/asm/asm.go +++ b/src/cmd/asm/internal/asm/asm.go @@ -384,7 +384,7 @@ func (p *Parser) asmJump(op obj.As, cond string, a []obj.Addr) { prog.Reg = p.getRegister(prog, op, &a[1]) } else { // Compare register with immediate and jump. - prog.From3 = newAddr(a[1]) + prog.SetFrom3(a[1]) } break } @@ -507,27 +507,6 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { break } // Strange special cases. 
- if arch.IsARMSTREX(op) { - /* - STREX x, (y) - from=(y) reg=x to=x - STREX (x), y - from=(x) reg=y to=y - */ - if a[0].Type == obj.TYPE_REG && a[1].Type != obj.TYPE_REG { - prog.From = a[1] - prog.Reg = a[0].Reg - prog.To = a[0] - break - } else if a[0].Type != obj.TYPE_REG && a[1].Type == obj.TYPE_REG { - prog.From = a[0] - prog.Reg = a[1].Reg - prog.To = a[1] - break - } - p.errorf("unrecognized addressing for %s", op) - return - } if arch.IsARMFloatCmp(op) { prog.From = a[0] prog.Reg = p.getRegister(prog, op, &a[1]) @@ -564,18 +543,20 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { prog.To = a[2] break } + if arch.IsARMBFX(op) { + // a[0] and a[1] must be constants, a[2] must be a register + prog.From = a[0] + prog.SetFrom3(a[1]) + prog.To = a[2] + break + } // Otherwise the 2nd operand (a[1]) must be a register. prog.From = a[0] prog.Reg = p.getRegister(prog, op, &a[1]) prog.To = a[2] case sys.AMD64: - // Catch missing operand here, because we store immediate as part of From3, and can't distinguish - // missing operand from legal value 0 in obj/x86/asm6. - if arch.IsAMD4OP(op) { - p.errorf("4 operands required, but only 3 are provided for %s instruction", op) - } prog.From = a[0] - prog.From3 = newAddr(a[1]) + prog.SetFrom3(a[1]) prog.To = a[2] case sys.ARM64: // ARM64 instructions with one input and two outputs. 
@@ -594,7 +575,7 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { prog.To = a[2] case sys.I386: prog.From = a[0] - prog.From3 = newAddr(a[1]) + prog.SetFrom3(a[1]) prog.To = a[2] case sys.PPC64: if arch.IsPPC64CMP(op) { @@ -616,7 +597,7 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { prog.To = a[2] case obj.TYPE_CONST: prog.From = a[0] - prog.From3 = newAddr(a[1]) + prog.SetFrom3(a[1]) prog.To = a[2] default: p.errorf("invalid addressing modes for %s instruction", op) @@ -627,7 +608,7 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { if a[1].Type == obj.TYPE_REG { prog.Reg = p.getRegister(prog, op, &a[1]) } else { - prog.From3 = newAddr(a[1]) + prog.SetFrom3(a[1]) } prog.To = a[2] default: @@ -635,40 +616,39 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { return } case 4: - if p.arch.Family == sys.ARM && arch.IsARMMULA(op) { - // All must be registers. - p.getRegister(prog, op, &a[0]) - r1 := p.getRegister(prog, op, &a[1]) - r2 := p.getRegister(prog, op, &a[2]) - p.getRegister(prog, op, &a[3]) - prog.From = a[0] - prog.To = a[3] - prog.To.Type = obj.TYPE_REGREG2 - prog.To.Offset = int64(r2) - prog.Reg = r1 - break + if p.arch.Family == sys.ARM { + if arch.IsARMBFX(op) { + // a[0] and a[1] must be constants, a[2] and a[3] must be registers + prog.From = a[0] + prog.SetFrom3(a[1]) + prog.Reg = p.getRegister(prog, op, &a[2]) + prog.To = a[3] + break + } + if arch.IsARMMULA(op) { + // All must be registers. 
+ p.getRegister(prog, op, &a[0]) + r1 := p.getRegister(prog, op, &a[1]) + r2 := p.getRegister(prog, op, &a[2]) + p.getRegister(prog, op, &a[3]) + prog.From = a[0] + prog.To = a[3] + prog.To.Type = obj.TYPE_REGREG2 + prog.To.Offset = int64(r2) + prog.Reg = r1 + break + } } if p.arch.Family == sys.AMD64 { - // 4 operand instruction have form ymm1, ymm2, ymm3/m256, imm8 - // So From3 is always just a register, so we store imm8 in Offset field, - // to avoid increasing size of Prog. - prog.From = a[1] - prog.From3 = newAddr(a[2]) - if a[0].Type != obj.TYPE_CONST { - p.errorf("first operand must be an immediate in %s instruction", op) - } - if prog.From3.Type != obj.TYPE_REG { - p.errorf("third operand must be a register in %s instruction", op) - } - prog.From3.Offset = int64(p.getImmediate(prog, op, &a[0])) + prog.From = a[0] + prog.RestArgs = []obj.Addr{a[1], a[2]} prog.To = a[3] - prog.RegTo2 = -1 break } if p.arch.Family == sys.ARM64 { prog.From = a[0] prog.Reg = p.getRegister(prog, op, &a[1]) - prog.From3 = newAddr(a[2]) + prog.SetFrom3(a[2]) prog.To = a[3] break } @@ -676,12 +656,12 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { if arch.IsPPC64RLD(op) { prog.From = a[0] prog.Reg = p.getRegister(prog, op, &a[1]) - prog.From3 = newAddr(a[2]) + prog.SetFrom3(a[2]) prog.To = a[3] break } else if arch.IsPPC64ISEL(op) { // ISEL BC,RB,RA,RT becomes isel rt,ra,rb,bc - prog.From3 = newAddr(a[2]) // ra + prog.SetFrom3(a[2]) // ra prog.From = a[0] // bc prog.Reg = p.getRegister(prog, op, &a[1]) // rb prog.To = a[3] // rt @@ -695,13 +675,13 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { if a[1].Type == obj.TYPE_REG { prog.From = a[0] prog.Reg = p.getRegister(prog, op, &a[1]) - prog.From3 = newAddr(a[2]) + prog.SetFrom3(a[2]) prog.To = a[3] break } else if a[1].Type == obj.TYPE_CONST { prog.From = a[0] prog.Reg = p.getRegister(prog, op, &a[2]) - prog.From3 = newAddr(a[1]) + prog.SetFrom3(a[1]) prog.To = a[3] break } else 
{ @@ -716,7 +696,7 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { } prog.From = a[0] prog.Reg = p.getRegister(prog, op, &a[1]) - prog.From3 = newAddr(a[2]) + prog.SetFrom3(a[2]) prog.To = a[3] break } @@ -735,10 +715,10 @@ func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) { } else { mask = (^uint32(0) >> uint(mask2+1)) & (^uint32(0) << uint(31-(mask1-1))) } - prog.From3 = &obj.Addr{ + prog.SetFrom3(obj.Addr{ Type: obj.TYPE_CONST, Offset: int64(mask), - } + }) prog.To = a[4] break } diff --git a/src/cmd/asm/internal/asm/endtoend_test.go b/src/cmd/asm/internal/asm/endtoend_test.go index e5bc34edec3..e877a531786 100644 --- a/src/cmd/asm/internal/asm/endtoend_test.go +++ b/src/cmd/asm/internal/asm/endtoend_test.go @@ -186,7 +186,7 @@ Diff: t.Errorf(format, args...) ok = false } - obj.Flushplist(ctxt, pList, nil) + obj.Flushplist(ctxt, pList, nil, "") for p := top; p != nil; p = p.Link { if p.As == obj.ATEXT { @@ -290,7 +290,7 @@ func testErrors(t *testing.T, goarch, file string) { errBuf.WriteString(s) } pList.Firstpc, ok = parser.Parse() - obj.Flushplist(ctxt, pList, nil) + obj.Flushplist(ctxt, pList, nil, "") if ok && !failed { t.Errorf("asm: %s had no errors", goarch) } @@ -391,6 +391,7 @@ func TestAMD64EndToEnd(t *testing.T) { func TestAMD64Encoder(t *testing.T) { testEndToEnd(t, "amd64", "amd64enc") + testEndToEnd(t, "amd64", "amd64enc_extra") } func TestAMD64Errors(t *testing.T) { diff --git a/src/cmd/asm/internal/asm/parse.go b/src/cmd/asm/internal/asm/parse.go index a6e13db7496..1d5d07344dd 100644 --- a/src/cmd/asm/internal/asm/parse.go +++ b/src/cmd/asm/internal/asm/parse.go @@ -258,11 +258,11 @@ func (p *Parser) parseScale(s string) int8 { } // operand parses a general operand and stores the result in *a. 
-func (p *Parser) operand(a *obj.Addr) bool { +func (p *Parser) operand(a *obj.Addr) { //fmt.Printf("Operand: %v\n", p.input) if len(p.input) == 0 { p.errorf("empty operand: cannot happen") - return false + return } // General address (with a few exceptions) looks like // $sym±offset(SB)(reg)(index*scale) @@ -290,7 +290,7 @@ func (p *Parser) operand(a *obj.Addr) bool { p.symbolReference(a, name, prefix) // fmt.Printf("SYM %s\n", obj.Dconv(&emptyProg, 0, a)) if p.peek() == scanner.EOF { - return true + return } } @@ -301,7 +301,7 @@ func (p *Parser) operand(a *obj.Addr) bool { } p.registerList(a) p.expectOperandEnd() - return true + return } // Register: R1 @@ -321,6 +321,10 @@ func (p *Parser) operand(a *obj.Addr) bool { a.Reg, _ = p.registerReference(name) p.get(')') } + } else if p.atRegisterExtension() { + p.registerExtension(a, tok.String(), prefix) + p.expectOperandEnd() + return } else if r1, r2, scale, ok := p.register(tok.String(), prefix); ok { if scale != 0 { p.errorf("expected simple register reference") @@ -335,7 +339,7 @@ func (p *Parser) operand(a *obj.Addr) bool { } // fmt.Printf("REG %s\n", obj.Dconv(&emptyProg, 0, a)) p.expectOperandEnd() - return true + return } // Constant. 
@@ -348,7 +352,7 @@ func (p *Parser) operand(a *obj.Addr) bool { tok := p.next() if tok.ScanToken == scanner.EOF { p.errorf("missing right parenthesis") - return false + return } rname := tok.String() p.back() @@ -367,12 +371,12 @@ func (p *Parser) operand(a *obj.Addr) bool { a.Val = p.floatExpr() // fmt.Printf("FCONST %s\n", obj.Dconv(&emptyProg, 0, a)) p.expectOperandEnd() - return true + return } if p.have(scanner.String) { if prefix != '$' { p.errorf("string constant must be an immediate") - return false + return } str, err := strconv.Unquote(p.get(scanner.String).String()) if err != nil { @@ -382,7 +386,7 @@ func (p *Parser) operand(a *obj.Addr) bool { a.Val = str // fmt.Printf("SCONST %s\n", obj.Dconv(&emptyProg, 0, a)) p.expectOperandEnd() - return true + return } a.Offset = int64(p.expr()) if p.peek() != '(' { @@ -396,7 +400,7 @@ func (p *Parser) operand(a *obj.Addr) bool { } // fmt.Printf("CONST %d %s\n", a.Offset, obj.Dconv(&emptyProg, 0, a)) p.expectOperandEnd() - return true + return } // fmt.Printf("offset %d \n", a.Offset) } @@ -406,7 +410,7 @@ func (p *Parser) operand(a *obj.Addr) bool { // fmt.Printf("DONE %s\n", p.arch.Dconv(&emptyProg, 0, a)) p.expectOperandEnd() - return true + return } // atStartOfRegister reports whether the parser is at the start of a register definition. @@ -439,6 +443,20 @@ func (p *Parser) atRegisterShift() bool { return p.at('(', scanner.Int, ')') && lex.IsRegisterShift(p.input[p.inputPos+3].ScanToken) } +// atRegisterExtension reports whether we are at the start of an ARM64 extended register. +// We have consumed the register or R prefix. +func (p *Parser) atRegisterExtension() bool { + // ARM64 only. + if p.arch.Family != sys.ARM64 { + return false + } + // R1.xxx + if p.peek() == '.' { + return true + } + return false +} + // registerReference parses a register given either the name, R10, or a parenthesized form, SPR(10). 
func (p *Parser) registerReference(name string) (int16, bool) { r, present := p.arch.Register[name] @@ -573,6 +591,59 @@ func (p *Parser) registerShift(name string, prefix rune) int64 { } } +// registerExtension parses a register with extension or arrangment. +// There is known to be a register (current token) and an extension operator (peeked token). +func (p *Parser) registerExtension(a *obj.Addr, name string, prefix rune) { + if prefix != 0 { + p.errorf("prefix %c not allowed for shifted register: $%s", prefix, name) + } + + reg, ok := p.registerReference(name) + if !ok { + p.errorf("unexpected %s in register extension", name) + return + } + + p.get('.') + tok := p.next() + ext := tok.String() + isIndex := false + num := int16(0) + isAmount := true // Amount is zero by default + if p.peek() == lex.LSH { + // parses left shift amount applied after extension: <(SB)(AX), AX // ERROR "invalid instruction" MOVL (AX)(SP*1), AX // ERROR "invalid instruction" + EXTRACTPS $4, X2, (BX) // ERROR "invalid instruction" + EXTRACTPS $-1, X2, (BX) // ERROR "invalid instruction" + // VSIB addressing does not permit non-vector (X/Y) + // scaled index register. + VPGATHERDQ X12,(R13)(AX*2), X11 // ERROR "invalid instruction" + VPGATHERDQ X2, 664(BX*1), X1 // ERROR "invalid instruction" + VPGATHERDQ Y2, (BP)(AX*2), Y1 // ERROR "invalid instruction" + VPGATHERDQ Y5, 664(DX*8), Y6 // ERROR "invalid instruction" + VPGATHERDQ Y5, (DX), Y0 // ERROR "invalid instruction" + // VM/X rejects Y index register. + VPGATHERDQ Y5, 664(Y14*8), Y6 // ERROR "invalid instruction" + VPGATHERQQ X2, (BP)(Y7*2), X1 // ERROR "invalid instruction" + // VM/Y rejects X index register. + VPGATHERQQ Y2, (BP)(X7*2), Y1 // ERROR "invalid instruction" + VPGATHERDD Y5, -8(X14*8), Y6 // ERROR "invalid instruction" + // No VSIB for legacy instructions. + MOVL (AX)(X0*1), AX // ERROR "invalid instruction" + MOVL (AX)(Y0*1), AX // ERROR "invalid instruction" + // AVX2GATHER mask/index/dest #UD cases. 
+ VPGATHERQQ Y2, (BP)(X2*2), Y2 // ERROR "mask, index, and destination registers should be distinct" + VPGATHERQQ Y2, (BP)(X2*2), Y7 // ERROR "mask, index, and destination registers should be distinct" + VPGATHERQQ Y2, (BP)(X7*2), Y2 // ERROR "mask, index, and destination registers should be distinct" + VPGATHERQQ Y7, (BP)(X2*2), Y2 // ERROR "mask, index, and destination registers should be distinct" + VPGATHERDQ X2, 664(X2*8), X2 // ERROR "mask, index, and destination registers should be distinct" + VPGATHERDQ X2, 664(X2*8), X7 // ERROR "mask, index, and destination registers should be distinct" + VPGATHERDQ X2, 664(X7*8), X2 // ERROR "mask, index, and destination registers should be distinct" + VPGATHERDQ X7, 664(X2*8), X2 // ERROR "mask, index, and destination registers should be distinct" RET diff --git a/src/cmd/asm/internal/asm/testdata/arm.s b/src/cmd/asm/internal/asm/testdata/arm.s index 8f743e7bfa6..bc6cf07e83a 100644 --- a/src/cmd/asm/internal/asm/testdata/arm.s +++ b/src/cmd/asm/internal/asm/testdata/arm.s @@ -57,7 +57,7 @@ TEXT foo(SB), DUPOK|NOSPLIT, $0 // outcode($1, $2, &$3, 0, &$5); // } MOVW.S R1, R2 - MOVW.S $1, R2 + MOVW $1, R2 MOVW.S R1<0, R8 // 7280a8e6 + XTAB R2@>8, R8 // 7284a8e6 + XTAB R2@>16, R8 // 7288a8e6 + XTAB R2@>24, R8 // 728ca8e6 + XTAH R3@>0, R9 // 7390b9e6 + XTAH R3@>8, R9 // 7394b9e6 + XTAH R3@>16, R9 // 7398b9e6 + XTAH R3@>24, R9 // 739cb9e6 + XTABU R4@>0, R7 // 7470e7e6 + XTABU R4@>8, R7 // 7474e7e6 + XTABU R4@>16, R7 // 7478e7e6 + XTABU R4@>24, R7 // 747ce7e6 + XTAHU R5@>0, R1 // 7510f1e6 + XTAHU R5@>8, R1 // 7514f1e6 + XTAHU R5@>16, R1 // 7518f1e6 + XTAHU R5@>24, R1 // 751cf1e6 + XTAB R2@>0, R4, R8 // 7280a4e6 + XTAB R2@>8, R4, R8 // 7284a4e6 + XTAB R2@>16, R4, R8 // 7288a4e6 + XTAB R2@>24, R4, R8 // 728ca4e6 + XTAH R3@>0, R4, R9 // 7390b4e6 + XTAH R3@>8, R4, R9 // 7394b4e6 + XTAH R3@>16, R4, R9 // 7398b4e6 + XTAH R3@>24, R4, R9 // 739cb4e6 + XTABU R4@>0, R0, R7 // 7470e0e6 + XTABU R4@>8, R0, R7 // 7474e0e6 + XTABU R4@>16, R0, 
R7 // 7478e0e6 + XTABU R4@>24, R0, R7 // 747ce0e6 + XTAHU R5@>0, R9, R1 // 7510f9e6 + XTAHU R5@>8, R9, R1 // 7514f9e6 + XTAHU R5@>16, R9, R1 // 7518f9e6 + XTAHU R5@>24, R9, R1 // 751cf9e6 + // DIVHW R0, R1, R2: R1 / R0 -> R2 DIVHW R0, R1, R2 // 11f012e7 DIVUHW R0, R1, R2 // 11f032e7 @@ -1007,6 +1029,15 @@ jmp_label_3: SWI $65535 // ffff00ef SWI // 000000ef +// BFX/BFXU/BFC/BFI + BFX $16, $8, R1, R2 // BFX $16, R1, $8, R2 // 5124afe7 + BFX $29, $2, R8 // 5881bce7 + BFXU $16, $8, R1, R2 // BFXU $16, R1, $8, R2 // 5124efe7 + BFXU $29, $2, R8 // 5881fce7 + BFC $29, $2, R8 // 1f81dee7 + BFI $29, $2, R8 // 1881dee7 + BFI $16, $8, R1, R2 // BFI $16, R1, $8, R2 // 1124d7e7 + // synthetic arithmatic ADD $0xffffffaa, R2, R3 // ADD $4294967210, R2, R3 // 55b0e0e30b3082e0 ADD $0xffffff55, R5 // ADD $4294967125, R5 // aab0e0e30b5085e0 @@ -1088,8 +1119,6 @@ jmp_label_3: // MVN MVN $0xff, R1 // MVN $255, R1 // ff10e0e3 MVN $0xff000000, R1 // MVN $4278190080, R1 // ff14e0e3 - MVN.S $0xff, R1 // MVN.S $255, R1 // ff10f0e3 - MVN.S $0xff000000, R1 // MVN.S $4278190080, R1 // ff14f0e3 MVN R9<<30, R7 // 097fe0e1 MVN R9>>30, R7 // 297fe0e1 MVN R9->30, R7 // 497fe0e1 @@ -1106,8 +1135,7 @@ jmp_label_3: MVN.S R9>>R8, R7 // 3978f0e1 MVN.S R9->R8, R7 // 5978f0e1 MVN.S R9@>R8, R7 // 7978f0e1 - MVN $0xffffffae, R5 // MVN $4294967214, R5 // 51b0e0e30b50e0e1 - MVN.S $0xffffffae, R5 // MVN.S $4294967214, R5 // 51b0e0e30b50f0e1 + MVN $0xffffffbe, R5 // MVN $4294967230, R5 // 4150a0e3 // MOVM MOVM.IA [R0,R2,R4,R6], (R1) // MOVM.U [R0,R2,R4,R6], (R1) // 550081e8 @@ -1145,11 +1173,23 @@ jmp_label_3: // MOVW MOVW R3, R4 // 0340a0e1 + MOVW.S R3, R4 // 0340b0e1 MOVW R9, R2 // 0920a0e1 + MOVW.S R9, R2 // 0920b0e1 + MOVW R5>>1, R2 // a520a0e1 + MOVW.S R5>>1, R2 // a520b0e1 + MOVW R5<<1, R2 // 8520a0e1 + MOVW.S R5<<1, R2 // 8520b0e1 + MOVW R5->1, R2 // c520a0e1 + MOVW.S R5->1, R2 // c520b0e1 + MOVW R5@>1, R2 // e520a0e1 + MOVW.S R5@>1, R2 // e520b0e1 MOVW $0xff, R9 // MOVW $255, R9 // ff90a0e3 MOVW 
$0xff000000, R9 // MOVW $4278190080, R9 // ff94a0e3 MOVW $0xff(R0), R1 // MOVW $255(R0), R1 // ff1080e2 + MOVW.S $0xff(R0), R1 // MOVW.S $255(R0), R1 // ff1090e2 MOVW $-0xff(R0), R1 // MOVW $-255(R0), R1 // ff1040e2 + MOVW.S $-0xff(R0), R1 // MOVW.S $-255(R0), R1 // ff1050e2 MOVW $0xffffffae, R1 // MOVW $4294967214, R1 // 5110e0e3 MOVW $0xaaaaaaaa, R1 // MOVW $2863311530, R1 MOVW R1, (R2) // 001082e5 @@ -1388,6 +1428,18 @@ jmp_label_3: MOVB.U R0<<0(R1), R2 // d02011e1 MOVB.W R0<<0(R1), R2 // d020b1e1 MOVB.P R0<<0(R1), R2 // d02091e0 + MOVBS R2@>0, R8 // 7280afe6 + MOVBS R2@>8, R8 // 7284afe6 + MOVBS R2@>16, R8 // 7288afe6 + MOVBS R2@>24, R8 // 728cafe6 + MOVB R2@>0, R8 // 7280afe6 + MOVB R2@>8, R8 // 7284afe6 + MOVB R2@>16, R8 // 7288afe6 + MOVB R2@>24, R8 // 728cafe6 + MOVBU R4@>0, R7 // 7470efe6 + MOVBU R4@>8, R7 // 7474efe6 + MOVBU R4@>16, R7 // 7478efe6 + MOVBU R4@>24, R7 // 747cefe6 // MOVH MOVH R3, R4 // 0340a0e1 @@ -1490,6 +1542,42 @@ jmp_label_3: MOVHS math·Exp(SB), R0 // MOVHS math.Exp(SB), R0 MOVHU R0, math·Exp(SB) // MOVHU R0, math.Exp(SB) MOVHU math·Exp(SB), R0 // MOVHU math.Exp(SB), R0 + MOVHS R0<<0(R1), R2 // f02091e1 + MOVHS.U R0<<0(R1), R2 // f02011e1 + MOVHS.W R0<<0(R1), R2 // f020b1e1 + MOVHS.P R0<<0(R1), R2 // f02091e0 + MOVH R0<<0(R1), R2 // f02091e1 + MOVH.U R0<<0(R1), R2 // f02011e1 + MOVH.W R0<<0(R1), R2 // f020b1e1 + MOVH.P R0<<0(R1), R2 // f02091e0 + MOVHU R0<<0(R1), R2 // b02091e1 + MOVHU.U R0<<0(R1), R2 // b02011e1 + MOVHU.W R0<<0(R1), R2 // b020b1e1 + MOVHU.P R0<<0(R1), R2 // b02091e0 + MOVHS R2, R5<<0(R1) // b52081e1 + MOVHS.U R2, R5<<0(R1) // b52001e1 + MOVHS.W R2, R5<<0(R1) // b520a1e1 + MOVHS.P R2, R5<<0(R1) // b52081e0 + MOVH R2, R5<<0(R1) // b52081e1 + MOVH.U R2, R5<<0(R1) // b52001e1 + MOVH.W R2, R5<<0(R1) // b520a1e1 + MOVH.P R2, R5<<0(R1) // b52081e0 + MOVHU R2, R5<<0(R1) // b52081e1 + MOVHU.U R2, R5<<0(R1) // b52001e1 + MOVHU.W R2, R5<<0(R1) // b520a1e1 + MOVHU.P R2, R5<<0(R1) // b52081e0 + MOVHS R3@>0, R9 // 7390bfe6 + MOVHS 
R3@>8, R9 // 7394bfe6 + MOVHS R3@>16, R9 // 7398bfe6 + MOVHS R3@>24, R9 // 739cbfe6 + MOVH R3@>0, R9 // 7390bfe6 + MOVH R3@>8, R9 // 7394bfe6 + MOVH R3@>16, R9 // 7398bfe6 + MOVH R3@>24, R9 // 739cbfe6 + MOVHU R5@>0, R1 // 7510ffe6 + MOVHU R5@>8, R1 // 7514ffe6 + MOVHU R5@>16, R1 // 7518ffe6 + MOVHU R5@>24, R1 // 751cffe6 // // END diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s index 1b6dc188c4c..ab6ad5bcb79 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64.s +++ b/src/cmd/asm/internal/asm/testdata/arm64.s @@ -29,6 +29,33 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 ADD R1<<22, R2, R3 ADD R1->33, R2, R3 AND R1@>33, R2, R3 + ADD R1.UXTB, R2, R3 // 4360218b + ADD R1.UXTB<<4, R2, R3 // 4370218b + VADDP V1.B16, V2.B16, V3.B16 // 43bc214e + VADDP V1.S4, V2.S4, V3.S4 // 43bca14e + VADDP V1.D2, V2.D2, V3.D2 // 43bce14e + VAND V21.B8, V12.B8, V3.B8 // 831d350e + VCMEQ V1.H4, V2.H4, V3.H4 // 438c612e + VORR V5.B16, V4.B16, V3.B16 // 831ca54e + VADD V16.S4, V5.S4, V9.S4 // a984b04e + VEOR V0.B16, V1.B16, V0.B16 // 201c206e + SHA256H V9.S4, V3, V2 // 6240095e + SHA256H2 V9.S4, V4, V3 // 8350095e + SHA256SU0 V8.S4, V7.S4 // 0729285e + SHA256SU1 V6.S4, V5.S4, V7.S4 // a760065e + SHA1SU0 V11.S4, V8.S4, V6.S4 // 06310b5e + SHA1SU1 V5.S4, V1.S4 // a118285e + SHA1C V1.S4, V2, V3 // 4300015e + SHA1H V5, V4 // a408285e + SHA1M V8.S4, V7, V6 // e620085e + SHA1P V11.S4, V10, V9 // 49110b5e + VADDV V0.S4, V0 // 00b8b14e + VMOVI $82, V0.B16 // 40e6024f + VUADDLV V6.B16, V6 // c638306e + VADD V1, V2, V3 // 4384e15e + VADD V1, V3, V3 // 6384e15e + VSUB V12, V30, V30 // de87ec7e + VSUB V12, V20, V30 // 9e86ec7e // LTYPE1 imsr ',' spreg ',' // { @@ -84,6 +111,18 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 MOVD $1, ZR MOVD $1, R1 MOVD ZR, (R1) + VLD1 (R8), [V1.B16, V2.B16] // 01a1404c + VLD1.P (R3), [V31.H8, V0.H8] // 7fa4df4c + VLD1.P (R8)(R20), [V21.B16, V22.B16] // VLD1.P (R8)(R20*1), [V21.B16,V22.B16] // 15a1d44c + VLD1.P 64(R1), [V5.B16, V6.B16, 
V7.B16, V8.B16] // 2520df4c + VST1.P [V4.S4, V5.S4], 32(R1) // 24a89f4c + VST1 [V0.S4, V1.S4], (R0) // 00a8004c + VMOVS V20, (R0) // 140000bd + VMOVS.P V20, 4(R0) // 144400bc + VMOVS.W V20, 4(R0) // 144c00bc + VMOVS (R0), V20 // 140040bd + VMOVS.P 8(R0), V20 // 148440bc + VMOVS.W 8(R0), V20 // 148c40bc // small offset fits into instructions MOVB 1(R1), R2 // 22048039 @@ -147,7 +186,16 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 // outcode($1, &$2, NREG, &$4); // } MOVK $1, R1 - + VMOV V8.S[1], R1 // 013d0c0e + VMOV V0.D[0], R11 // 0b3c084e + VMOV V0.D[1], R11 // 0b3c184e + VMOV R20, V1.S[0] // 811e044e + VMOV R1, V9.H4 // 290c020e + VMOV R22, V11.D2 // cb0e084e + VMOV V2.B16, V4.B16 // 441ca24e + VMOV V20.S[0], V20 // 9406045e + VREV32 V5.B16, V5.B16 // a508206e + VDUP V19.S[0], V17.S4 // 7106044e // // B/BL // @@ -193,6 +241,7 @@ TEXT foo(SB), DUPOK|NOSPLIT, $-8 CMP R1->11, R2 CMP R1>>22, R2 CMP R1<<33, R2 + CMP R22.SXTX, RSP // ffe336eb // // CBZ // @@ -338,6 +387,20 @@ again: JMP foo(SB) CALL foo(SB) +// LDP/STP + LDP (R0), (R1, R2) + LDP 8(R0), (R1, R2) + LDP.W 8(R0), (R1, R2) + LDP.P 8(R0), (R1, R2) + LDP x(SB), (R1, R2) + LDP x+8(SB), (R1, R2) + STP (R3, R4), (R5) + STP (R3, R4), 8(R5) + STP.W (R3, R4), 8(R5) + STP.P (R3, R4), 8(R5) + STP (R3, R4), x(SB) + STP (R3, R4), x+8(SB) + // END // // LTYPEE comma diff --git a/src/cmd/asm/internal/asm/testdata/arm64enc.s b/src/cmd/asm/internal/asm/testdata/arm64enc.s index ec89474990b..b02e0b32ec0 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64enc.s +++ b/src/cmd/asm/internal/asm/testdata/arm64enc.s @@ -57,7 +57,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8 CALL -1(PC) // ffffff97 CALL (R15) // e0013fd6 JMP (R29) // a0031fd6 - // BRK $35943 // e08c31d4 + BRK $35943 // e08c31d4 CBNZW R2, -1(PC) // e2ffff35 CBNZ R7, -1(PC) // e7ffffb5 CBZW R15, -1(PC) // efffff34 @@ -81,8 +81,8 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8 CINVW EQ, R2, R17 // 5110825a CINV VS, R12, R7 // 87718cda CINV VS, R30, R30 // de739eda - // CLREX $4 // 5f3403d5 
- // CLREX $0 // 5f3003d5 + CLREX $4 // 5f3403d5 + CLREX $0 // 5f3003d5 CLSW R15, R6 // e615c05a CLS R15, ZR // ff15c0da CLZW R1, R14 // 2e10c05a @@ -128,9 +128,9 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8 CSNEGW HS, R16, R29, R10 // 0a269d5a CSNEG NE, R21, R18, R11 // ab1692da //TODO DC - // DCPS1 $11378 // 418ea5d4 - // DCPS2 $10699 // 6239a5d4 - // DCPS3 $24415 // e3ebabd4 + DCPS1 $11378 // 418ea5d4 + DCPS2 $10699 // 6239a5d4 + DCPS3 $24415 // e3ebabd4 DMB $1 // bf3103d5 DMB $0 // bf3003d5 DRPS // e003bfd6 @@ -145,8 +145,8 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8 EXTR $35, R22, R12, R8 // 888dd693 SEVL // bf2003d5 HINT $6 // df2003d5 - // HLT $65509 // a0fc5fd4 - // HVC $61428 // 82fe1dd4 + HLT $65509 // a0fc5fd4 + HVC $61428 // 82fe1dd4 ISB $1 // df3103d5 ISB $15 // df3f03d5 LDARW (R12), R29 // 9dfddf88 @@ -242,15 +242,15 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8 ORRW $16252928, ZR, R21 // f5130d32 MOVD $-4260607558625, R11 // eb6b16b2 MOVD R30, R7 // e7031eaa - // MOVKW $(3905<<0), R21 // MOVKW $3905, R21 // 35e88172 - // MOVKW $(3905<<16), R21 // MOVKW $255918080, R21 // 35e8a172 - // MOVK $(3905<<32), R21 // MOVK $16771847290880, R21 // 35e8c1f2 + MOVKW $(3905<<0), R21 // MOVKW $3905, R21 // 35e88172 + MOVKW $(3905<<16), R21 // MOVKW $255918080, R21 // 35e8a172 + MOVK $(3905<<32), R21 // MOVK $16771847290880, R21 // 35e8c1f2 MOVD $0, R5 // 050080d2 - // MRS $4567, R16 // f03a32d5 - // MRS $32345, R6 // 26cb3fd5 - // MSR R25, $3452 // 99af11d5 - // MSR R25, $16896 // 194018d5 - // MSR $6, DAIFClr // ff4603d5 + MSR $1, SPSel // bf4100d5 + MSR $9, DAIFSet // df4903d5 + MSR $6, DAIFClr // ff4603d5 + MRS ELR_EL1, R8 // 284038d5 + MSR R16, ELR_EL1 // 304018d5 MSUBW R1, R1, R12, R5 // 8585011b MSUB R19, R16, R26, R2 // 42c3139b MULW R26, R5, R22 // b67c1a1b @@ -304,7 +304,7 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8 SMNEGL R26, R3, R15 // 6ffc3a9b SMULH R17, R21, R21 // b57e519b SMULL R0, R5, R0 // a07c209b - // SMC $37977 // 238b12d4 + SMC $37977 // 238b12d4 STLRW R16, (R22) // 
d0fe9f88 STLR R3, (R24) // 03ff9fc8 //TODO STLRB R11, (R22) // cbfe9f08 @@ -345,10 +345,10 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8 //TODO STTRH 9(R10), R18 // 52990078 //TODO STXP (R20), R18, R5, ZR // 854a3f88 //TODO STXP (R22), R9, R17, R0 // d12620c8 - // STXRW R2, (R19), R18 // 627e1288 - // STXR R15, (R21), R13 // af7e0dc8 - // STXRB R7, (R9), R24 // 277d1808 - // STXRH R12, (R3), R8 // 6c7c0848 + STXRW R2, (R19), R18 // 627e1288 + STXR R15, (R21), R13 // af7e0dc8 + STXRB R7, (R9), R24 // 277d1808 + STXRH R12, (R3), R8 // 6c7c0848 //TODO SUBW R20.UXTW<<7, R23, R18 // f25e344b //TODO SUB R5.SXTW<<2, R1, R26 // 3ac825cb SUB $(1923<<12), R4, R27 // SUB $7876608, R4, R27 // 9b0c5ed1 @@ -398,16 +398,16 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$-8 FCCMPD HI, F11, F15, $15 // ef856b1e FCCMPES HS, F28, F13, $13 // bd253c1e FCCMPED LT, F20, F4, $9 // 99b4741e - // FCMPS F3, F17 // 2022231e - // FCMPS $(0.0), F8 // 0821201e - // FCMPD F11, F27 // 60236b1e - // FCMPD $(0.0), F25 // 2823601e - // FCMPES F16, F30 // d023301e - // FCMPES $(0.0), F29 // b823201e - // FCMPED F13, F10 // 50216d1e - // FCMPED $(0.0), F25 // 3823601e - // FCSELS EQ, F26, F27, F25 // 590f3b1e - // FCSELD PL, F8, F22, F7 // 075d761e + FCMPS F3, F17 // 2022231e + FCMPS $(0.0), F8 // 0821201e + FCMPD F11, F27 // 60236b1e + FCMPD $(0.0), F25 // 2823601e + FCMPES F16, F30 // d023301e + FCMPES $(0.0), F29 // b823201e + FCMPED F13, F10 // 50216d1e + FCMPED $(0.0), F25 // 3823601e + FCSELS EQ, F26, F27, F25 // 590f3b1e + FCSELD PL, F8, F22, F7 // 075d761e //TODO FCVTASW F21, R15 // af02241e //TODO FCVTAS F20, ZR // 9f02249e //TODO FCVTASW F6, R11 // cb00641e diff --git a/src/cmd/asm/internal/asm/testdata/armerror.s b/src/cmd/asm/internal/asm/testdata/armerror.s index 2959a2f47bc..f2bed8d1c37 100644 --- a/src/cmd/asm/internal/asm/testdata/armerror.s +++ b/src/cmd/asm/internal/asm/testdata/armerror.s @@ -35,6 +35,22 @@ TEXT errors(SB),$0 BL 4(R4) // ERROR "non-zero offset" ADDF F0, R1, F2 // ERROR "illegal 
combination" SWI (R0) // ERROR "illegal combination" + MULAD F0, F1 // ERROR "illegal combination" + MULAF F0, F1 // ERROR "illegal combination" + MULSD F0, F1 // ERROR "illegal combination" + MULSF F0, F1 // ERROR "illegal combination" + NMULAD F0, F1 // ERROR "illegal combination" + NMULAF F0, F1 // ERROR "illegal combination" + NMULSD F0, F1 // ERROR "illegal combination" + NMULSF F0, F1 // ERROR "illegal combination" + FMULAD F0, F1 // ERROR "illegal combination" + FMULAF F0, F1 // ERROR "illegal combination" + FMULSD F0, F1 // ERROR "illegal combination" + FMULSF F0, F1 // ERROR "illegal combination" + FNMULAD F0, F1 // ERROR "illegal combination" + FNMULAF F0, F1 // ERROR "illegal combination" + FNMULSD F0, F1 // ERROR "illegal combination" + FNMULSF F0, F1 // ERROR "illegal combination" NEGF F0, F1, F2 // ERROR "illegal combination" NEGD F0, F1, F2 // ERROR "illegal combination" ABSF F0, F1, F2 // ERROR "illegal combination" @@ -63,6 +79,8 @@ TEXT errors(SB),$0 MOVW errors(SB), F0 // ERROR "illegal combination" MOVW $20, errors(SB) // ERROR "illegal combination" MOVW errors(SB), $20 // ERROR "illegal combination" + MOVW (R1), [R0-R4] // ERROR "illegal combination" + MOVW [R0-R4], (R1) // ERROR "illegal combination" MOVB $245, R1 // ERROR "illegal combination" MOVH $245, R1 // ERROR "illegal combination" MOVB $0xff000000, R1 // ERROR "illegal combination" @@ -85,10 +103,10 @@ TEXT errors(SB),$0 MOVH $0xffffff00, CPSR // ERROR "illegal combination" MOVB $0xfffffff0, FPSR // ERROR "illegal combination" MOVH $0xfffffff0, FPSR // ERROR "illegal combination" - MOVB.IA 4(R1), [R0-R4] // ERROR "illegal combination" - MOVB.DA 4(R1), [R0-R4] // ERROR "illegal combination" - MOVH.IA 4(R1), [R0-R4] // ERROR "illegal combination" - MOVH.DA 4(R1), [R0-R4] // ERROR "illegal combination" + MOVB (R1), [R0-R4] // ERROR "illegal combination" + MOVB [R0-R4], (R1) // ERROR "illegal combination" + MOVH (R1), [R0-R4] // ERROR "illegal combination" + MOVH [R0-R4], (R1) // ERROR 
"illegal combination" MOVB $0xff(R0), R1 // ERROR "illegal combination" MOVH $0xff(R0), R1 // ERROR "illegal combination" MOVB $errors(SB), R2 // ERROR "illegal combination" @@ -124,5 +142,123 @@ TEXT errors(SB),$0 MOVFW CPSR, R2 // ERROR "illegal combination" MOVDW R1, CPSR // ERROR "illegal combination" MOVFW R1, CPSR // ERROR "illegal combination" + BFX $12, $41, R2, R3 // ERROR "wrong width or LSB" + BFX $12, $-2, R2 // ERROR "wrong width or LSB" + BFXU $40, $4, R2, R3 // ERROR "wrong width or LSB" + BFXU $-40, $4, R2 // ERROR "wrong width or LSB" + BFX $-2, $4, R2, R3 // ERROR "wrong width or LSB" + BFXU $4, R2, R5, R2 // ERROR "missing or wrong LSB" + BFXU $4, R2, R5 // ERROR "missing or wrong LSB" + BFC $12, $8, R2, R3 // ERROR "illegal combination" + MOVB R0>>8, R2 // ERROR "illegal shift" + MOVH R0<<16, R2 // ERROR "illegal shift" + MOVBS R0->8, R2 // ERROR "illegal shift" + MOVHS R0<<24, R2 // ERROR "illegal shift" + MOVBU R0->24, R2 // ERROR "illegal shift" + MOVHU R0@>1, R2 // ERROR "illegal shift" + XTAB R0>>8, R2 // ERROR "illegal shift" + XTAH R0<<16, R2 // ERROR "illegal shift" + XTABU R0->24, R2 // ERROR "illegal shift" + XTAHU R0@>1, R2 // ERROR "illegal shift" + XTAB R0>>8, R5, R2 // ERROR "illegal shift" + XTAH R0<<16, R5, R2 // ERROR "illegal shift" + XTABU R0->24, R5, R2 // ERROR "illegal shift" + XTAHU R0@>1, R5, R2 // ERROR "illegal shift" + AND.W R0, R1 // ERROR "invalid .W suffix" + ORR.P R2, R3, R4 // ERROR "invalid .P suffix" + CMP.S R1, R2 // ERROR "invalid .S suffix" + BIC.P $124, R1, R2 // ERROR "invalid .P suffix" + MOVW.S $124, R1 // ERROR "invalid .S suffix" + MVN.S $123, g // ERROR "invalid .S suffix" + RSB.U $0, R9 // ERROR "invalid .U suffix" + CMP.S $29, g // ERROR "invalid .S suffix" + ADD.W R1<R2, R1 // ERROR "invalid .S suffix" + SLL.P R1, R2, R3 // ERROR "invalid .P suffix" + SRA.U R2, R8 // ERROR "invalid .U suffix" + SWI.S // ERROR "invalid .S suffix" + SWI.P $0 // ERROR "invalid .P suffix" + MOVW.S $0xaaaaaaaa, R7 // 
ERROR "invalid .S suffix" + MOVW.P $0xffffff44, R1 // ERROR "invalid .P suffix" + MOVW.S $0xffffff77, R1 // ERROR "invalid .S suffix" + MVN.S $0xffffffaa, R8 // ERROR "invalid .S suffix" + MVN.S $0xaaaaaaaa, R8 // ERROR "invalid .S suffix" + ADD.U $0xaaaaaaaa, R4 // ERROR "invalid .U suffix" + ORR.P $0x555555, R7, R3 // ERROR "invalid .P suffix" + TST.S $0xabcd1234, R2 // ERROR "invalid .S suffix" + MOVB.S R1, R2 // ERROR "invalid .S suffix" + MOVBU.P R1, R2 // ERROR "invalid .P suffix" + MOVBS.U R1, R2 // ERROR "invalid .U suffix" + MOVH.S R1, R2 // ERROR "invalid .S suffix" + MOVHU.P R1, R2 // ERROR "invalid .P suffix" + MOVHS.U R1, R2 // ERROR "invalid .U suffix" + MUL.P R0, R1, R2 // ERROR "invalid .P suffix" + MULU.W R1, R2 // ERROR "invalid .W suffix" + DIVHW.S R0, R1, R2 // ERROR "invalid .S suffix" + DIVHW.W R1, R2 // ERROR "invalid .W suffix" + MULL.W R2, R0, (R5, R8) // ERROR "invalid .W suffix" + MULLU.U R2, R0, (R5, R8) // ERROR "invalid .U suffix" + BFX.S $2, $4, R3 // ERROR "invalid .S suffix" + BFXU.W $2, $4, R3, R0 // ERROR "invalid .W suffix" + MOVB.S R1, 4(R2) // ERROR "invalid .S suffix" + MOVHU.S R1, 4(R2) // ERROR "invalid .S suffix" + MOVW.S R1, 4(R2) // ERROR "invalid .S suffix" + MOVBU.S 4(R2), R3 // ERROR "invalid .S suffix" + MOVH.S 4(R2), R3 // ERROR "invalid .S suffix" + MOVW.S 4(R2), R3 // ERROR "invalid .S suffix" + XTAB.S R0@>0, R2 // ERROR "invalid .S suffix" + XTAB.W R0@>8, R2, R9 // ERROR "invalid .W suffix" + MOVBU.S R0@>24, R1 // ERROR "invalid .S suffix" + MOVHS.S R0@>16, R1 // ERROR "invalid .S suffix" + MOVB.S R1, 0xaaaa(R2) // ERROR "invalid .S suffix" + MOVHU.S R1, 0xaaaa(R2) // ERROR "invalid .S suffix" + MOVW.S R1, 0xaaaa(R2) // ERROR "invalid .S suffix" + MOVBU.S 0xaaaa(R2), R3 // ERROR "invalid .S suffix" + MOVH.S 0xaaaa(R2), R3 // ERROR "invalid .S suffix" + MOVW.S 0xaaaa(R2), R3 // ERROR "invalid .S suffix" + MOVW.S CPSR, R1 // ERROR "invalid .S suffix" + MOVW.S R3, CPSR // ERROR "invalid .S suffix" + MOVW.S $0, CPSR 
// ERROR "invalid .S suffix" + MOVM.S (R0), [R2-R4] // ERROR "invalid .S suffix" + MOVM.S [R1-R6], (R9) // ERROR "invalid .S suffix" + SWPW.S R1, (R2), R3 // ERROR "invalid .S suffix" + MOVF.S (R0), F1 // ERROR "invalid .S suffix" + MOVF.S F9, (R4) // ERROR "invalid .S suffix" + MOVF.S 0xfff0(R0), F1 // ERROR "invalid .S suffix" + MOVF.S F9, 0xfff0(R4) // ERROR "invalid .S suffix" + ADDF.S F1, F2, F3 // ERROR "invalid .S suffix" + SUBD.U F1, F2 // ERROR "invalid .U suffix" + NEGF.W F9, F10 // ERROR "invalid .W suffix" + ABSD.P F9, F10 // ERROR "invalid .P suffix" + MOVW.S FPSR, R0 // ERROR "invalid .S suffix" + MOVW.P g, FPSR // ERROR "invalid .P suffix" + MOVW.S R1->4(R6), R2 // ERROR "invalid .S suffix" + MOVB.S R9, R2<<8(R4) // ERROR "invalid .S suffix" + MOVHU.S R9, R2<<0(R4) // ERROR "invalid .S suffix" + STREX.S R0, (R1), R2 // ERROR "invalid .S suffix" + LDREX.S (R2), R8 // ERROR "invalid .S suffix" + MOVF.S $0.0, F3 // ERROR "invalid .S suffix" + CMPF.S F1, F2 // ERROR "invalid .S suffix" + MOVFW.S F0, F9 // ERROR "invalid .S suffix" + MOVWF.W F3, F1 // ERROR "invalid .W suffix" + MOVFW.P F0, R9 // ERROR "invalid .P suffix" + MOVWF.W R3, F1 // ERROR "invalid .W suffix" + MOVW.S F0, R9 // ERROR "invalid .S suffix" + MOVW.U R3, F1 // ERROR "invalid .U suffix" + PLD.S 4(R1) // ERROR "invalid .S suffix" + CLZ.S R1, R2 // ERROR "invalid .S suffix" + MULBB.S R0, R1, R2 // ERROR "invalid .S suffix" + MULA.W R9, R6, R1, g // ERROR "invalid .W suffix" + MULS.S R2, R3, R4, g // ERROR "invalid .S suffix" + + STREX R1, (R0) // ERROR "illegal combination" + STREX (R1), R0 // ERROR "illegal combination" + STREX R1, (R0), R1 // ERROR "cannot use same register as both source and destination" + STREX R1, (R0), R0 // ERROR "cannot use same register as both source and destination" + STREXD R0, (R2), R0 // ERROR "cannot use same register as both source and destination" + STREXD R0, (R2), R1 // ERROR "cannot use same register as both source and destination" + STREXD R0, (R2), 
R2 // ERROR "cannot use same register as both source and destination" + STREXD R1, (R4), R7 // ERROR "must be even" END diff --git a/src/cmd/asm/internal/asm/testdata/armv6.s b/src/cmd/asm/internal/asm/testdata/armv6.s index cc79275f690..c6649bc1fb4 100644 --- a/src/cmd/asm/internal/asm/testdata/armv6.s +++ b/src/cmd/asm/internal/asm/testdata/armv6.s @@ -18,6 +18,26 @@ TEXT foo(SB), DUPOK|NOSPLIT, $0 MULD.EQ F3, F4, F5 // 035b240e MULF.NE F0, F2 // 002a221e MULD F3, F5 // 035b25ee + NMULF F0, F1, F2 // 402a21ee + NMULF F3, F7 // 437a27ee + NMULD F0, F1, F2 // 402b21ee + NMULD F3, F7 // 437b27ee + MULAF F5, F6, F7 // 057a06ee + MULAD F5, F6, F7 // 057b06ee + MULSF F5, F6, F7 // 457a06ee + MULSD F5, F6, F7 // 457b06ee + NMULAF F5, F6, F7 // 057a16ee + NMULAD F5, F6, F7 // 057b16ee + NMULSF F5, F6, F7 // 457a16ee + NMULSD F5, F6, F7 // 457b16ee + FMULAF F5, F6, F7 // 057aa6ee + FMULAD F5, F6, F7 // 057ba6ee + FMULSF F5, F6, F7 // 457aa6ee + FMULSD F5, F6, F7 // 457ba6ee + FNMULAF F5, F6, F7 // 457a96ee + FNMULAD F5, F6, F7 // 457b96ee + FNMULSF F5, F6, F7 // 057a96ee + FNMULSD F5, F6, F7 // 057b96ee DIVF F0, F1, F2 // 002a81ee DIVD.EQ F3, F4, F5 // 035b840e DIVF.NE F0, F2 // 002a821e diff --git a/src/cmd/asm/internal/asm/testdata/ppc64.s b/src/cmd/asm/internal/asm/testdata/ppc64.s index 30fb0f2c02b..2909c390945 100644 --- a/src/cmd/asm/internal/asm/testdata/ppc64.s +++ b/src/cmd/asm/internal/asm/testdata/ppc64.s @@ -550,6 +550,14 @@ label1: // ftsqrt BF, FRB FTSQRT F2,$7 +// FCFID +// FCFIDS + + FCFID F2,F3 + FCFIDCC F3,F3 + FCFIDS F2,F3 + FCFIDSCC F2,F3 + // // CMP // @@ -581,6 +589,10 @@ label1: // cmpb RA,RS,RB CMPB R2,R2,R1 +// CMPEQB RA,RB,BF produces +// cmpeqb BF,RA,RB + CMPEQB R1, R2, CR0 + // // rotate extended mnemonics map onto other shift instructions // @@ -706,6 +718,14 @@ label1: // } DCBF (R1) DCBF (R1+R2) // DCBF (R1)(R2*1) + DCBF (R1), $1 + DCBF (R1)(R2*1), $1 + DCBT (R1), $1 + DCBT (R1)(R2*1), $1 + +// LDMX (RB)(RA*1),RT produces +// ldmx RT,RA,RB + 
LDMX (R2)(R1*1), R3 // Population count, X-form // RS,RA produces @@ -714,6 +734,20 @@ label1: POPCNTW R1,R2 POPCNTB R1,R2 +// Copysign + FCPSGN F1,F2,F3 + +// Random number generator, X-form +// DARN L,RT produces +// darn RT,L + DARN $1, R1 + +// Copy/Paste facility +// RB,RA produces +// RA,RB + COPY R2,R1 + PASTECC R2,R1 + // VMX instructions // Described as: @@ -788,6 +822,11 @@ label1: VPMSUMW V2, V3, V1 VPMSUMD V2, V3, V1 +// Vector multiply-sum, VA-form +// VRA, VRB, VRC, VRT produces +// VRT, VRA, VRB, VRC + VMSUMUDM V4, V3, V2, V1 + // Vector SUB, VX-form // VRA,VRB,VRT produces // VRT,VRA,VRB @@ -885,12 +924,20 @@ label1: VCMPGTSWCC V3, V2, V1 VCMPGTSD V3, V2, V1 VCMPGTSDCC V3, V2, V1 + VCMPNEZB V3, V2, V1 + VCMPNEZBCC V3, V2, V1 // Vector permute, VA-form // VRA,VRB,VRC,VRT produces // VRT,VRA,VRB,VRC VPERM V3, V2, V1, V0 +// Vector bit permute, VX-form +// VRA,VRB,VRT produces +// VRT,VRA,VRB + VBPERMQ V3,V1,V2 + VBPERMD V3,V1,V2 + // Vector select, VA-form // VRA,VRB,VRC,VRT produces // VRT,VRA,VRB,VRC @@ -958,6 +1005,7 @@ label1: // RA,XS MFVSRD VS0, R1 MFVSRWZ VS33, R1 + MFVSRLD VS63, R1 // VSX move to VSR, XX1-form // RA,XT produces @@ -965,6 +1013,8 @@ label1: MTVSRD R1, VS0 MTVSRWA R1, VS31 MTVSRWZ R1, VS63 + MTVSRDD R1, R2, VS0 + MTVSRWS R1, VS32 // VSX AND, XX3-form // XA,XB,XT produces @@ -1062,6 +1112,17 @@ label1: XVCVUXDSP VS0,VS32 XVCVUXWSP VS0,VS32 +// Multiply-Add High Doubleword +// RA,RB,RC,RT produces +// RT,RA,RB,RC + MADDHD R1,R2,R3,R4 + MADDHDU R1,R2,R3,R4 + +// Add Extended using alternate carry bit +// ADDEX RA,RB,CY,RT produces +// addex RT, RA, RB, CY + ADDEX R1, R2, $0, R3 + // // NOP // diff --git a/src/cmd/asm/internal/asm/testdata/s390x.s b/src/cmd/asm/internal/asm/testdata/s390x.s index 6cc129ccc51..884f6b23cf8 100644 --- a/src/cmd/asm/internal/asm/testdata/s390x.s +++ b/src/cmd/asm/internal/asm/testdata/s390x.s @@ -213,6 +213,11 @@ TEXT main·foo(SB),DUPOK|NOSPLIT,$16-0 // TEXT main.foo(SB), DUPOK|NOSPLIT, $16- CMPWU R1, 
R2 // 1512 CMPWU R3, $4294967295 // c23fffffffff + TMHH R1, $65535 // a712ffff + TMHL R2, $1 // a7230001 + TMLH R3, $0 // a7300000 + TMLL R4, $32768 // a7418000 + BNE 0(PC) // a7740000 BEQ 0(PC) // a7840000 BLT 0(PC) // a7440000 @@ -296,6 +301,9 @@ TEXT main·foo(SB),DUPOK|NOSPLIT,$16-0 // TEXT main.foo(SB), DUPOK|NOSPLIT, $16- FMADDS F1, F2, F3 // b30e3012 FMSUB F4, F5, F5 // b31f5045 FMSUBS F6, F6, F7 // b30f7066 + LPDFR F1, F2 // b3700021 + LNDFR F3, F4 // b3710043 + CPSDR F5, F6, F7 // b3725076 VL (R15), V1 // e710f0000006 VST V1, (R15) // e710f000000e diff --git a/src/cmd/asm/internal/flags/flags.go b/src/cmd/asm/internal/flags/flags.go index bd90b82bf6a..6acde294326 100644 --- a/src/cmd/asm/internal/flags/flags.go +++ b/src/cmd/asm/internal/flags/flags.go @@ -6,6 +6,7 @@ package flags import ( + "cmd/internal/objabi" "flag" "fmt" "os" @@ -31,6 +32,7 @@ var ( func init() { flag.Var(&D, "D", "predefined symbol with optional simple value -D=identifier=value; can be set multiple times") flag.Var(&I, "I", "include directory; can be set multiple times") + objabi.AddVersionFlag() // -V } // MultiFlag allows setting a value multiple times to collect a list, as in -I=dir1 -I=dir2. 
diff --git a/src/cmd/asm/internal/lex/input.go b/src/cmd/asm/internal/lex/input.go index ddfcddf36d8..666611e1799 100644 --- a/src/cmd/asm/internal/lex/input.go +++ b/src/cmd/asm/internal/lex/input.go @@ -13,6 +13,7 @@ import ( "text/scanner" "cmd/asm/internal/flags" + "cmd/internal/objabi" "cmd/internal/src" ) @@ -454,7 +455,7 @@ func (in *Input) line() { in.Error("unexpected token at end of #line: ", tok) } pos := src.MakePos(in.Base(), uint(in.Line()), uint(in.Col())) - in.Stack.SetBase(src.NewLinePragmaBase(pos, file, uint(line))) + in.Stack.SetBase(src.NewLinePragmaBase(pos, file, objabi.AbsFile(objabi.WorkingDir(), file, *flags.TrimPath), uint(line))) } // #undef processing diff --git a/src/cmd/asm/main.go b/src/cmd/asm/main.go index 2e799163af4..04f56f96467 100644 --- a/src/cmd/asm/main.go +++ b/src/cmd/asm/main.go @@ -72,7 +72,7 @@ func main() { break } // reports errors to parser.Errorf - obj.Flushplist(ctxt, pList, nil) + obj.Flushplist(ctxt, pList, nil, "") } if ok { obj.WriteObjFile(ctxt, buf) diff --git a/src/cmd/buildid/buildid.go b/src/cmd/buildid/buildid.go new file mode 100644 index 00000000000..8d810ffdd99 --- /dev/null +++ b/src/cmd/buildid/buildid.go @@ -0,0 +1,73 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "flag" + "fmt" + "log" + "os" + "strings" + + "cmd/internal/buildid" +) + +func usage() { + fmt.Fprintf(os.Stderr, "usage: go tool buildid [-w] file\n") + flag.PrintDefaults() + os.Exit(2) +} + +var wflag = flag.Bool("w", false, "write build ID") + +func main() { + log.SetPrefix("buildid: ") + log.SetFlags(0) + flag.Usage = usage + flag.Parse() + if flag.NArg() != 1 { + usage() + } + + file := flag.Arg(0) + id, err := buildid.ReadFile(file) + if err != nil { + log.Fatal(err) + } + if !*wflag { + fmt.Printf("%s\n", id) + return + } + + f, err := os.Open(file) + if err != nil { + log.Fatal(err) + } + matches, hash, err := buildid.FindAndHash(f, id, 0) + if err != nil { + log.Fatal(err) + } + f.Close() + + tail := id + if i := strings.LastIndex(id, "."); i >= 0 { + tail = tail[i+1:] + } + if len(tail) != len(hash)*2 { + log.Fatalf("%s: cannot find %d-byte hash in id %s", file, len(hash), id) + } + newID := id[:len(id)-len(tail)] + fmt.Sprintf("%x", hash) + + f, err = os.OpenFile(file, os.O_WRONLY, 0) + if err != nil { + log.Fatal(err) + } + if err := buildid.Rewrite(f, matches, newID); err != nil { + log.Fatal(err) + } + if err := f.Close(); err != nil { + log.Fatal(err) + } +} diff --git a/src/cmd/buildid/doc.go b/src/cmd/buildid/doc.go new file mode 100644 index 00000000000..d1ec155c976 --- /dev/null +++ b/src/cmd/buildid/doc.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Buildid displays or updates the build ID stored in a Go package or binary. + +Usage: + go tool buildid [-w] file + +By default, buildid prints the build ID found in the named file. +If the -w option is given, buildid rewrites the build ID found in +the file to accurately record a content hash of the file. + +This tool is only intended for use by the go command or +other build systems. 
+*/ +package main diff --git a/src/cmd/cgo/ast.go b/src/cmd/cgo/ast.go index 7122a9dbbeb..58e0ee78cb7 100644 --- a/src/cmd/cgo/ast.go +++ b/src/cmd/cgo/ast.go @@ -58,11 +58,14 @@ func (f *File) ParseGo(name string, src []byte) { // so we use ast1 to look for the doc comments on import "C" // and on exported functions, and we use ast2 for translating // and reprinting. + // In cgo mode, we ignore ast2 and just apply edits directly + // the text behind ast1. In godefs mode we modify and print ast2. ast1 := parse(name, src, parser.ParseComments) ast2 := parse(name, src, 0) f.Package = ast1.Name.Name f.Name = make(map[string]*Name) + f.NamePos = make(map[*Name]token.Pos) // In ast1, find the import "C" line and get any extra C preamble. sawC := false @@ -96,36 +99,53 @@ func (f *File) ParseGo(name string, src []byte) { } // In ast2, strip the import "C" line. - w := 0 - for _, decl := range ast2.Decls { - d, ok := decl.(*ast.GenDecl) - if !ok { - ast2.Decls[w] = decl + if *godefs { + w := 0 + for _, decl := range ast2.Decls { + d, ok := decl.(*ast.GenDecl) + if !ok { + ast2.Decls[w] = decl + w++ + continue + } + ws := 0 + for _, spec := range d.Specs { + s, ok := spec.(*ast.ImportSpec) + if !ok || s.Path.Value != `"C"` { + d.Specs[ws] = spec + ws++ + } + } + if ws == 0 { + continue + } + d.Specs = d.Specs[0:ws] + ast2.Decls[w] = d w++ - continue } - ws := 0 - for _, spec := range d.Specs { - s, ok := spec.(*ast.ImportSpec) - if !ok || s.Path.Value != `"C"` { - d.Specs[ws] = spec - ws++ + ast2.Decls = ast2.Decls[0:w] + } else { + for _, decl := range ast2.Decls { + d, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + for _, spec := range d.Specs { + if s, ok := spec.(*ast.ImportSpec); ok && s.Path.Value == `"C"` { + // Replace "C" with _ "unsafe", to keep program valid. + // (Deleting import statement or clause is not safe if it is followed + // in the source by an explicit semicolon.) 
+ f.Edit.Replace(f.offset(s.Path.Pos()), f.offset(s.Path.End()), `_ "unsafe"`) + } } } - if ws == 0 { - continue - } - d.Specs = d.Specs[0:ws] - ast2.Decls[w] = d - w++ } - ast2.Decls = ast2.Decls[0:w] // Accumulate pointers to uses of C.x. if f.Ref == nil { f.Ref = make([]*Ref, 0, 8) } - f.walk(ast2, "prog", (*File).saveExprs) + f.walk(ast2, ctxProg, (*File).saveExprs) // Accumulate exported functions. // The comments are only on ast1 but we need to @@ -133,8 +153,8 @@ func (f *File) ParseGo(name string, src []byte) { // The first walk fills in ExpFunc, and the // second walk changes the entries to // refer to ast2 instead. - f.walk(ast1, "prog", (*File).saveExport) - f.walk(ast2, "prog", (*File).saveExport2) + f.walk(ast1, ctxProg, (*File).saveExport) + f.walk(ast2, ctxProg, (*File).saveExport2) f.Comments = ast1.Comments f.AST = ast2 @@ -143,9 +163,6 @@ func (f *File) ParseGo(name string, src []byte) { // Like ast.CommentGroup's Text method but preserves // leading blank lines, so that line numbers line up. func commentText(g *ast.CommentGroup) string { - if g == nil { - return "" - } var pieces []string for _, com := range g.List { c := com.Text @@ -165,7 +182,7 @@ func commentText(g *ast.CommentGroup) string { } // Save various references we are going to need later. -func (f *File) saveExprs(x interface{}, context string) { +func (f *File) saveExprs(x interface{}, context astContext) { switch x := x.(type) { case *ast.Expr: switch (*x).(type) { @@ -178,7 +195,7 @@ func (f *File) saveExprs(x interface{}, context string) { } // Save references to C.xxx for later processing. -func (f *File) saveRef(n *ast.Expr, context string) { +func (f *File) saveRef(n *ast.Expr, context astContext) { sel := (*n).(*ast.SelectorExpr) // For now, assume that the only instance of capital C is when // used as the imported package identifier. 
@@ -188,10 +205,10 @@ func (f *File) saveRef(n *ast.Expr, context string) { if l, ok := sel.X.(*ast.Ident); !ok || l.Name != "C" { return } - if context == "as2" { - context = "expr" + if context == ctxAssign2 { + context = ctxExpr } - if context == "embed-type" { + if context == ctxEmbedType { error_(sel.Pos(), "cannot embed C type") } goname := sel.Sel.Name @@ -212,6 +229,7 @@ func (f *File) saveRef(n *ast.Expr, context string) { Go: goname, } f.Name[goname] = name + f.NamePos[name] = sel.Pos() } f.Ref = append(f.Ref, &Ref{ Name: name, @@ -221,7 +239,7 @@ func (f *File) saveRef(n *ast.Expr, context string) { } // Save calls to C.xxx for later processing. -func (f *File) saveCall(call *ast.CallExpr, context string) { +func (f *File) saveCall(call *ast.CallExpr, context astContext) { sel, ok := call.Fun.(*ast.SelectorExpr) if !ok { return @@ -229,12 +247,12 @@ func (f *File) saveCall(call *ast.CallExpr, context string) { if l, ok := sel.X.(*ast.Ident); !ok || l.Name != "C" { return } - c := &Call{Call: call, Deferred: context == "defer"} + c := &Call{Call: call, Deferred: context == ctxDefer} f.Calls = append(f.Calls, c) } // If a function should be exported add it to ExpFunc. -func (f *File) saveExport(x interface{}, context string) { +func (f *File) saveExport(x interface{}, context astContext) { n, ok := x.(*ast.FuncDecl) if !ok { return @@ -274,7 +292,7 @@ func (f *File) saveExport(x interface{}, context string) { } // Make f.ExpFunc[i] point at the Func from this AST instead of the other one. 
-func (f *File) saveExport2(x interface{}, context string) { +func (f *File) saveExport2(x interface{}, context astContext) { n, ok := x.(*ast.FuncDecl) if !ok { return @@ -288,8 +306,30 @@ func (f *File) saveExport2(x interface{}, context string) { } } +type astContext int + +const ( + ctxProg astContext = iota + ctxEmbedType + ctxType + ctxStmt + ctxExpr + ctxField + ctxParam + ctxAssign2 // assignment of a single expression to two variables + ctxSwitch + ctxTypeSwitch + ctxFile + ctxDecl + ctxSpec + ctxDefer + ctxCall // any function call other than ctxCall2 + ctxCall2 // function call whose result is assigned to two variables + ctxSelector +) + // walk walks the AST x, calling visit(f, x, context) for each node. -func (f *File) walk(x interface{}, context string, visit func(*File, interface{}, string)) { +func (f *File) walk(x interface{}, context astContext, visit func(*File, interface{}, astContext)) { visit(f, x, context) switch n := x.(type) { case *ast.Expr: @@ -304,10 +344,10 @@ func (f *File) walk(x interface{}, context string, visit func(*File, interface{} // These are ordered and grouped to match ../../go/ast/ast.go case *ast.Field: - if len(n.Names) == 0 && context == "field" { - f.walk(&n.Type, "embed-type", visit) + if len(n.Names) == 0 && context == ctxField { + f.walk(&n.Type, ctxEmbedType, visit) } else { - f.walk(&n.Type, "type", visit) + f.walk(&n.Type, ctxType, visit) } case *ast.FieldList: for _, field := range n.List { @@ -318,163 +358,163 @@ func (f *File) walk(x interface{}, context string, visit func(*File, interface{} case *ast.Ellipsis: case *ast.BasicLit: case *ast.FuncLit: - f.walk(n.Type, "type", visit) - f.walk(n.Body, "stmt", visit) + f.walk(n.Type, ctxType, visit) + f.walk(n.Body, ctxStmt, visit) case *ast.CompositeLit: - f.walk(&n.Type, "type", visit) - f.walk(n.Elts, "expr", visit) + f.walk(&n.Type, ctxType, visit) + f.walk(n.Elts, ctxExpr, visit) case *ast.ParenExpr: f.walk(&n.X, context, visit) case *ast.SelectorExpr: - 
f.walk(&n.X, "selector", visit) + f.walk(&n.X, ctxSelector, visit) case *ast.IndexExpr: - f.walk(&n.X, "expr", visit) - f.walk(&n.Index, "expr", visit) + f.walk(&n.X, ctxExpr, visit) + f.walk(&n.Index, ctxExpr, visit) case *ast.SliceExpr: - f.walk(&n.X, "expr", visit) + f.walk(&n.X, ctxExpr, visit) if n.Low != nil { - f.walk(&n.Low, "expr", visit) + f.walk(&n.Low, ctxExpr, visit) } if n.High != nil { - f.walk(&n.High, "expr", visit) + f.walk(&n.High, ctxExpr, visit) } if n.Max != nil { - f.walk(&n.Max, "expr", visit) + f.walk(&n.Max, ctxExpr, visit) } case *ast.TypeAssertExpr: - f.walk(&n.X, "expr", visit) - f.walk(&n.Type, "type", visit) + f.walk(&n.X, ctxExpr, visit) + f.walk(&n.Type, ctxType, visit) case *ast.CallExpr: - if context == "as2" { - f.walk(&n.Fun, "call2", visit) + if context == ctxAssign2 { + f.walk(&n.Fun, ctxCall2, visit) } else { - f.walk(&n.Fun, "call", visit) + f.walk(&n.Fun, ctxCall, visit) } - f.walk(n.Args, "expr", visit) + f.walk(n.Args, ctxExpr, visit) case *ast.StarExpr: f.walk(&n.X, context, visit) case *ast.UnaryExpr: - f.walk(&n.X, "expr", visit) + f.walk(&n.X, ctxExpr, visit) case *ast.BinaryExpr: - f.walk(&n.X, "expr", visit) - f.walk(&n.Y, "expr", visit) + f.walk(&n.X, ctxExpr, visit) + f.walk(&n.Y, ctxExpr, visit) case *ast.KeyValueExpr: - f.walk(&n.Key, "expr", visit) - f.walk(&n.Value, "expr", visit) + f.walk(&n.Key, ctxExpr, visit) + f.walk(&n.Value, ctxExpr, visit) case *ast.ArrayType: - f.walk(&n.Len, "expr", visit) - f.walk(&n.Elt, "type", visit) + f.walk(&n.Len, ctxExpr, visit) + f.walk(&n.Elt, ctxType, visit) case *ast.StructType: - f.walk(n.Fields, "field", visit) + f.walk(n.Fields, ctxField, visit) case *ast.FuncType: - f.walk(n.Params, "param", visit) + f.walk(n.Params, ctxParam, visit) if n.Results != nil { - f.walk(n.Results, "param", visit) + f.walk(n.Results, ctxParam, visit) } case *ast.InterfaceType: - f.walk(n.Methods, "field", visit) + f.walk(n.Methods, ctxField, visit) case *ast.MapType: - f.walk(&n.Key, "type", 
visit) - f.walk(&n.Value, "type", visit) + f.walk(&n.Key, ctxType, visit) + f.walk(&n.Value, ctxType, visit) case *ast.ChanType: - f.walk(&n.Value, "type", visit) + f.walk(&n.Value, ctxType, visit) case *ast.BadStmt: case *ast.DeclStmt: - f.walk(n.Decl, "decl", visit) + f.walk(n.Decl, ctxDecl, visit) case *ast.EmptyStmt: case *ast.LabeledStmt: - f.walk(n.Stmt, "stmt", visit) + f.walk(n.Stmt, ctxStmt, visit) case *ast.ExprStmt: - f.walk(&n.X, "expr", visit) + f.walk(&n.X, ctxExpr, visit) case *ast.SendStmt: - f.walk(&n.Chan, "expr", visit) - f.walk(&n.Value, "expr", visit) + f.walk(&n.Chan, ctxExpr, visit) + f.walk(&n.Value, ctxExpr, visit) case *ast.IncDecStmt: - f.walk(&n.X, "expr", visit) + f.walk(&n.X, ctxExpr, visit) case *ast.AssignStmt: - f.walk(n.Lhs, "expr", visit) + f.walk(n.Lhs, ctxExpr, visit) if len(n.Lhs) == 2 && len(n.Rhs) == 1 { - f.walk(n.Rhs, "as2", visit) + f.walk(n.Rhs, ctxAssign2, visit) } else { - f.walk(n.Rhs, "expr", visit) + f.walk(n.Rhs, ctxExpr, visit) } case *ast.GoStmt: - f.walk(n.Call, "expr", visit) + f.walk(n.Call, ctxExpr, visit) case *ast.DeferStmt: - f.walk(n.Call, "defer", visit) + f.walk(n.Call, ctxDefer, visit) case *ast.ReturnStmt: - f.walk(n.Results, "expr", visit) + f.walk(n.Results, ctxExpr, visit) case *ast.BranchStmt: case *ast.BlockStmt: f.walk(n.List, context, visit) case *ast.IfStmt: - f.walk(n.Init, "stmt", visit) - f.walk(&n.Cond, "expr", visit) - f.walk(n.Body, "stmt", visit) - f.walk(n.Else, "stmt", visit) + f.walk(n.Init, ctxStmt, visit) + f.walk(&n.Cond, ctxExpr, visit) + f.walk(n.Body, ctxStmt, visit) + f.walk(n.Else, ctxStmt, visit) case *ast.CaseClause: - if context == "typeswitch" { - context = "type" + if context == ctxTypeSwitch { + context = ctxType } else { - context = "expr" + context = ctxExpr } f.walk(n.List, context, visit) - f.walk(n.Body, "stmt", visit) + f.walk(n.Body, ctxStmt, visit) case *ast.SwitchStmt: - f.walk(n.Init, "stmt", visit) - f.walk(&n.Tag, "expr", visit) - f.walk(n.Body, "switch", 
visit) + f.walk(n.Init, ctxStmt, visit) + f.walk(&n.Tag, ctxExpr, visit) + f.walk(n.Body, ctxSwitch, visit) case *ast.TypeSwitchStmt: - f.walk(n.Init, "stmt", visit) - f.walk(n.Assign, "stmt", visit) - f.walk(n.Body, "typeswitch", visit) + f.walk(n.Init, ctxStmt, visit) + f.walk(n.Assign, ctxStmt, visit) + f.walk(n.Body, ctxTypeSwitch, visit) case *ast.CommClause: - f.walk(n.Comm, "stmt", visit) - f.walk(n.Body, "stmt", visit) + f.walk(n.Comm, ctxStmt, visit) + f.walk(n.Body, ctxStmt, visit) case *ast.SelectStmt: - f.walk(n.Body, "stmt", visit) + f.walk(n.Body, ctxStmt, visit) case *ast.ForStmt: - f.walk(n.Init, "stmt", visit) - f.walk(&n.Cond, "expr", visit) - f.walk(n.Post, "stmt", visit) - f.walk(n.Body, "stmt", visit) + f.walk(n.Init, ctxStmt, visit) + f.walk(&n.Cond, ctxExpr, visit) + f.walk(n.Post, ctxStmt, visit) + f.walk(n.Body, ctxStmt, visit) case *ast.RangeStmt: - f.walk(&n.Key, "expr", visit) - f.walk(&n.Value, "expr", visit) - f.walk(&n.X, "expr", visit) - f.walk(n.Body, "stmt", visit) + f.walk(&n.Key, ctxExpr, visit) + f.walk(&n.Value, ctxExpr, visit) + f.walk(&n.X, ctxExpr, visit) + f.walk(n.Body, ctxStmt, visit) case *ast.ImportSpec: case *ast.ValueSpec: - f.walk(&n.Type, "type", visit) + f.walk(&n.Type, ctxType, visit) if len(n.Names) == 2 && len(n.Values) == 1 { - f.walk(&n.Values[0], "as2", visit) + f.walk(&n.Values[0], ctxAssign2, visit) } else { - f.walk(n.Values, "expr", visit) + f.walk(n.Values, ctxExpr, visit) } case *ast.TypeSpec: - f.walk(&n.Type, "type", visit) + f.walk(&n.Type, ctxType, visit) case *ast.BadDecl: case *ast.GenDecl: - f.walk(n.Specs, "spec", visit) + f.walk(n.Specs, ctxSpec, visit) case *ast.FuncDecl: if n.Recv != nil { - f.walk(n.Recv, "param", visit) + f.walk(n.Recv, ctxParam, visit) } - f.walk(n.Type, "type", visit) + f.walk(n.Type, ctxType, visit) if n.Body != nil { - f.walk(n.Body, "stmt", visit) + f.walk(n.Body, ctxStmt, visit) } case *ast.File: - f.walk(n.Decls, "decl", visit) + f.walk(n.Decls, ctxDecl, visit) case 
*ast.Package: for _, file := range n.Files { - f.walk(file, "file", visit) + f.walk(file, ctxFile, visit) } case []ast.Decl: diff --git a/src/cmd/cgo/doc.go b/src/cmd/cgo/doc.go index b2388829a87..c1bdf0659fa 100644 --- a/src/cmd/cgo/doc.go +++ b/src/cmd/cgo/doc.go @@ -102,11 +102,13 @@ the use of cgo, and to 0 to disable it. The go tool will set the build constraint "cgo" if cgo is enabled. When cross-compiling, you must specify a C cross-compiler for cgo to -use. You can do this by setting the CC_FOR_TARGET environment -variable when building the toolchain using make.bash, or by setting -the CC environment variable any time you run the go tool. The -CXX_FOR_TARGET and CXX environment variables work in a similar way for -C++ code. +use. You can do this by setting the generic CC_FOR_TARGET or the +more specific CC_FOR_${GOOS}_${GOARCH} (for example, CC_FOR_linux_arm) +environment variable when building the toolchain using make.bash, +or you can set the CC environment variable any time you run the go tool. + +The CXX_FOR_TARGET, CXX_FOR_${GOOS}_${GOARCH}, and CXX +environment variables work in a similar way for C++ code. Go references to C @@ -126,12 +128,29 @@ C.complexfloat (complex float), and C.complexdouble (complex double). The C type void* is represented by Go's unsafe.Pointer. The C types __int128_t and __uint128_t are represented by [16]byte. +A few special C types which would normally be represented by a pointer +type in Go are instead represented by a uintptr. See the Special +cases section below. + To access a struct, union, or enum type directly, prefix it with struct_, union_, or enum_, as in C.struct_stat. The size of any C type T is available as C.sizeof_T, as in C.sizeof_struct_stat. +A C function may be declared in the Go file with a parameter type of +the special name _GoString_. This function may be called with an +ordinary Go string value. 
The string length, and a pointer to the +string contents, may be accessed by calling the C functions + + size_t _GoStringLen(_GoString_ s); + const char *_GoStringPtr(_GoString_ s); + +These functions are only available in the preamble, not in other C +files. The C code must not modify the contents of the pointer returned +by _GoStringPtr. Note that the string contents may not have a trailing +NUL byte. + As Go doesn't have support for C's union type in the general case, C's union types are represented as a Go byte array with the same length. @@ -241,7 +260,16 @@ They will be available in the C code as: found in the _cgo_export.h generated header, after any preambles copied from the cgo input files. Functions with multiple return values are mapped to functions returning a struct. + Not all Go types can be mapped to C types in a useful way. +Go struct types are not supported; use a C struct type. +Go array types are not supported; use a C pointer. + +Go functions that take arguments of type string may be called with the +C type _GoString_, described above. The _GoString_ type will be +automatically defined in the preamble. Note that there is no way for C +code to create a value of this type; this is only useful for passing +string values from Go to C and back to Go. Using //export in a file places a restriction on the preamble: since it is copied into two different C output files, it must not @@ -264,6 +292,14 @@ pointer is a Go pointer or a C pointer is a dynamic property determined by how the memory was allocated; it has nothing to do with the type of the pointer. +Note that values of some Go types, other than the type's zero value, +always include Go pointers. This is true of string, slice, interface, +channel, map, and function types. A pointer type may hold a Go pointer +or a C pointer. Array and struct types may or may not include Go +pointers, depending on the element types. 
All the discussion below +about Go pointers applies not just to pointer types, but also to other +types that include Go pointers. + Go code may pass a Go pointer to C provided the Go memory to which it points does not contain any Go pointers. The C code must preserve this property: it must not store any Go pointers in Go memory, even @@ -274,14 +310,17 @@ the Go memory in question is the entire array or the entire backing array of the slice. C code may not keep a copy of a Go pointer after the call returns. +This includes the _GoString_ type, which, as noted above, includes a +Go pointer; _GoString_ values may not be retained by C code. -A Go function called by C code may not return a Go pointer. A Go -function called by C code may take C pointers as arguments, and it may -store non-pointer or C pointer data through those pointers, but it may -not store a Go pointer in memory pointed to by a C pointer. A Go -function called by C code may take a Go pointer as an argument, but it -must preserve the property that the Go memory to which it points does -not contain any Go pointers. +A Go function called by C code may not return a Go pointer (which +implies that it may not return a string, slice, channel, and so +forth). A Go function called by C code may take C pointers as +arguments, and it may store non-pointer or C pointer data through +those pointers, but it may not store a Go pointer in memory pointed to +by a C pointer. A Go function called by C code may take a Go pointer +as an argument, but it must preserve the property that the Go memory +to which it points does not contain any Go pointers. Go code may not store a Go pointer in C memory. C code may store Go pointers in C memory, subject to the rule above: it must stop storing @@ -299,6 +338,84 @@ and of course there is nothing stopping the C code from doing anything it likes. However, programs that break these rules are likely to fail in unexpected and unpredictable ways. 
+Special cases + +A few special C types which would normally be represented by a pointer +type in Go are instead represented by a uintptr. Those types are +the CF*Ref types from the CoreFoundation library on Darwin, including: + + CFAllocatorRef + CFArrayRef + CFAttributedStringRef + CFBagRef + CFBinaryHeapRef + CFBitVectorRef + CFBooleanRef + CFBundleRef + CFCalendarRef + CFCharacterSetRef + CFDataRef + CFDateFormatterRef + CFDateRef + CFDictionaryRef + CFErrorRef + CFFileDescriptorRef + CFFileSecurityRef + CFLocaleRef + CFMachPortRef + CFMessagePortRef + CFMutableArrayRef + CFMutableAttributedStringRef + CFMutableBagRef + CFMutableBitVectorRef + CFMutableCharacterSetRef + CFMutableDataRef + CFMutableDictionaryRef + CFMutableSetRef + CFMutableStringRef + CFNotificationCenterRef + CFNullRef + CFNumberFormatterRef + CFNumberRef + CFPlugInInstanceRef + CFPlugInRef + CFPropertyListRef + CFReadStreamRef + CFRunLoopObserverRef + CFRunLoopRef + CFRunLoopSourceRef + CFRunLoopTimerRef + CFSetRef + CFSocketRef + CFStringRef + CFStringTokenizerRef + CFTimeZoneRef + CFTreeRef + CFTypeRef + CFURLCreateFromFSRef + CFURLEnumeratorRef + CFURLGetFSRef + CFURLRef + CFUUIDRef + CFUserNotificationRef + CFWriteStreamRef + CFXMLNodeRef + CFXMLParserRef + CFXMLTreeRef + +These types are uintptr on the Go side because they would otherwise +confuse the Go garbage collector; they are sometimes not really +pointers but data structures encoded in a pointer type. All operations +on these types must happen in C. The proper constant to initialize an +empty such reference is 0, not nil. + +This special case was introduced in Go 1.10. For auto-updating code +from Go 1.9 and earlier, use the cftype rewrite in the Go fix tool: + + go tool fix -r cftype + +It will replace nil with 0 in the appropriate places. + Using cgo directly Usage: @@ -312,32 +429,35 @@ invoking the C compiler to compile the C parts of the package. 
The following options are available when running cgo directly: + -V + Print cgo version and exit. + -debug-define + Debugging option. Print #defines. + -debug-gcc + Debugging option. Trace C compiler execution and output. -dynimport file Write list of symbols imported by file. Write to -dynout argument or to standard output. Used by go build when building a cgo package. + -dynlinker + Write dynamic linker as part of -dynimport output. -dynout file Write -dynimport output to file. -dynpackage package Set Go package for -dynimport output. - -dynlinker - Write dynamic linker as part of -dynimport output. - -godefs - Write out input file in Go syntax replacing C package - names with real values. Used to generate files in the - syscall package when bootstrapping a new target. - -srcdir directory - Find the Go input files, listed on the command line, - in directory. - -objdir directory - Put all generated files in directory. - -importpath string - The import path for the Go package. Optional; used for - nicer comments in the generated files. -exportheader file If there are any exported functions, write the generated export declarations to file. C code can #include this to see the declarations. + -importpath string + The import path for the Go package. Optional; used for + nicer comments in the generated files. + -import_runtime_cgo + If set (which it is by default) import runtime/cgo in + generated output. + -import_syscall + If set (which it is by default) import syscall in + generated output. -gccgo Generate output for the gccgo compiler rather than the gc compiler. @@ -345,16 +465,13 @@ The following options are available when running cgo directly: The -fgo-prefix option to be used with gccgo. -gccgopkgpath path The -fgo-pkgpath option to be used with gccgo. - -import_runtime_cgo - If set (which it is by default) import runtime/cgo in - generated output. - -import_syscall - If set (which it is by default) import syscall in - generated output. 
- -debug-define - Debugging option. Print #defines. - -debug-gcc - Debugging option. Trace C compiler execution and output. + -godefs + Write out input file in Go syntax replacing C package + names with real values. Used to generate files in the + syscall package when bootstrapping a new target. + -objdir directory + Put all generated files in directory. + -srcdir directory */ package main @@ -403,21 +520,19 @@ about simple #defines for constants and the like. These are recorded for later use. Next, cgo needs to identify the kinds for each identifier. For the -identifiers C.foo and C.bar, cgo generates this C program: +identifiers C.foo, cgo generates this C program: #line 1 "not-declared" - void __cgo_f_xxx_1(void) { __typeof__(foo) *__cgo_undefined__; } + void __cgo_f_1_1(void) { __typeof__(foo) *__cgo_undefined__1; } #line 1 "not-type" - void __cgo_f_xxx_2(void) { foo *__cgo_undefined__; } - #line 1 "not-const" - void __cgo_f_xxx_3(void) { enum { __cgo_undefined__ = (foo)*1 }; } - #line 2 "not-declared" - void __cgo_f_xxx_1(void) { __typeof__(bar) *__cgo_undefined__; } - #line 2 "not-type" - void __cgo_f_xxx_2(void) { bar *__cgo_undefined__; } - #line 2 "not-const" - void __cgo_f_xxx_3(void) { enum { __cgo_undefined__ = (bar)*1 }; } + void __cgo_f_1_2(void) { foo *__cgo_undefined__2; } + #line 1 "not-int-const" + void __cgo_f_1_3(void) { enum { __cgo_undefined__3 = (foo)*1 }; } + #line 1 "not-num-const" + void __cgo_f_1_4(void) { static const double __cgo_undefined__4 = (foo); } + #line 1 "not-str-lit" + void __cgo_f_1_5(void) { static const char __cgo_undefined__5[] = (foo); } This program will not compile, but cgo can use the presence or absence of an error message on a given line to deduce the information it @@ -427,45 +542,72 @@ errors that might stop parsing early. An error on not-declared:1 indicates that foo is undeclared. An error on not-type:1 indicates that foo is not a type (if declared at all, it is an identifier). 
-An error on not-const:1 indicates that foo is not an integer constant. +An error on not-int-const:1 indicates that foo is not an integer constant. +An error on not-num-const:1 indicates that foo is not a number constant. +An error on not-str-lit:1 indicates that foo is not a string literal. +An error on not-signed-int-const:1 indicates that foo is not a signed integer constant. -The line number specifies the name involved. In the example, 1 is foo and 2 is bar. +The line number specifies the name involved. In the example, 1 is foo. Next, cgo must learn the details of each type, variable, function, or constant. It can do this by reading object files. If cgo has decided -that t1 is a type, v2 and v3 are variables or functions, and c4, c5, -and c6 are constants, it generates: +that t1 is a type, v2 and v3 are variables or functions, and i4, i5 +are integer constants, u6 is an unsigned integer constant, and f7 and f8 +are float constants, and s9 and s10 are string constants, it generates: __typeof__(t1) *__cgo__1; __typeof__(v2) *__cgo__2; __typeof__(v3) *__cgo__3; - __typeof__(c4) *__cgo__4; - enum { __cgo_enum__4 = c4 }; - __typeof__(c5) *__cgo__5; - enum { __cgo_enum__5 = c5 }; - __typeof__(c6) *__cgo__6; - enum { __cgo_enum__6 = c6 }; + __typeof__(i4) *__cgo__4; + enum { __cgo_enum__4 = i4 }; + __typeof__(i5) *__cgo__5; + enum { __cgo_enum__5 = i5 }; + __typeof__(u6) *__cgo__6; + enum { __cgo_enum__6 = u6 }; + __typeof__(f7) *__cgo__7; + __typeof__(f8) *__cgo__8; + __typeof__(s9) *__cgo__9; + __typeof__(s10) *__cgo__10; - long long __cgo_debug_data[] = { + long long __cgodebug_ints[] = { 0, // t1 0, // v2 0, // v3 - c4, - c5, - c6, + i4, + i5, + u6, + 0, // f7 + 0, // f8 + 0, // s9 + 0, // s10 1 }; + double __cgodebug_floats[] = { + 0, // t1 + 0, // v2 + 0, // v3 + 0, // i4 + 0, // i5 + 0, // u6 + f7, + f8, + 0, // s9 + 0, // s10 + 1 + }; + + const char __cgodebug_str__9[] = s9; + const unsigned long long __cgodebug_strlen__9 = sizeof(s9)-1; + const char 
__cgodebug_str__10[] = s10; + const unsigned long long __cgodebug_strlen__10 = sizeof(s10)-1; + and again invokes the system C compiler, to produce an object file containing debug information. Cgo parses the DWARF debug information for __cgo__N to learn the type of each identifier. (The types also -distinguish functions from global variables.) If using a standard gcc, -cgo can parse the DWARF debug information for the __cgo_enum__N to -learn the identifier's value. The LLVM-based gcc on OS X emits -incomplete DWARF information for enums; in that case cgo reads the -constant values from the __cgo_debug_data from the object file's data -segment. +distinguish functions from global variables.) Cgo reads the constant +values from the __cgodebug_* from the object file's data segment. At this point cgo knows the meaning of each C.xxx well enough to start the translation process. @@ -550,9 +692,12 @@ _cgo_main.c: int main() { return 0; } void crosscall2(void(*fn)(void*, int, uintptr_t), void *a, int c, uintptr_t ctxt) { } - uintptr_t _cgo_wait_runtime_init_done() { } + uintptr_t _cgo_wait_runtime_init_done() { return 0; } + void _cgo_release_context(uintptr_t ctxt) { } + char* _cgo_topofstack(void) { return (char*)0; } void _cgo_allocate(void *a, int c) { } void _cgo_panic(void *a, int c) { } + void _cgo_reginit(void) { } The extra functions here are stubs to satisfy the references in the C code generated for gcc. The build process links this stub, along with diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go index c104067a93c..5cd6ac953c3 100644 --- a/src/cmd/cgo/gcc.go +++ b/src/cmd/cgo/gcc.go @@ -169,21 +169,8 @@ func (p *Package) Translate(f *File) { p.loadDWARF(f, needType) } if p.rewriteCalls(f) { - // Add `import _cgo_unsafe "unsafe"` as the first decl - // after the package statement. 
- imp := &ast.GenDecl{ - Tok: token.IMPORT, - Specs: []ast.Spec{ - &ast.ImportSpec{ - Name: ast.NewIdent("_cgo_unsafe"), - Path: &ast.BasicLit{ - Kind: token.STRING, - Value: `"unsafe"`, - }, - }, - }, - } - f.AST.Decls = append([]ast.Decl{imp}, f.AST.Decls...) + // Add `import _cgo_unsafe "unsafe"` after the package statement. + f.Edit.Insert(f.offset(f.AST.Name.End()), "; import _cgo_unsafe \"unsafe\"") } p.rewriteRef(f) } @@ -192,8 +179,8 @@ func (p *Package) Translate(f *File) { // in the file f and saves relevant renamings in f.Name[name].Define. func (p *Package) loadDefines(f *File) { var b bytes.Buffer - b.WriteString(f.Preamble) b.WriteString(builtinProlog) + b.WriteString(f.Preamble) stdout := p.gccDefines(b.Bytes()) for _, line := range strings.Split(stdout, "\n") { @@ -264,10 +251,6 @@ func (p *Package) guessKinds(f *File) []*Name { if n.IsConst() { continue } - - if isName(n.Define) { - n.C = n.Define - } } // If this is a struct, union, or enum type name, no need to guess the kind. @@ -316,8 +299,8 @@ func (p *Package) guessKinds(f *File) []*Name { // whether name denotes a type or an expression. var b bytes.Buffer - b.WriteString(f.Preamble) b.WriteString(builtinProlog) + b.WriteString(f.Preamble) for i, n := range names { fmt.Fprintf(&b, "#line %d \"not-declared\"\n"+ @@ -423,14 +406,7 @@ func (p *Package) guessKinds(f *File) []*Name { for i, n := range names { switch sniff[i] { default: - var tpos token.Pos - for _, ref := range f.Ref { - if ref.Name == n { - tpos = ref.Pos() - break - } - } - error_(tpos, "could not determine kind of name for C.%s", fixGo(n.Go)) + error_(f.NamePos[n], "could not determine kind of name for C.%s", fixGo(n.Go)) case notStrLiteral | notType: n.Kind = "iconst" case notIntConst | notStrLiteral | notType: @@ -472,8 +448,8 @@ func (p *Package) loadDWARF(f *File, names []*Name) { // for each entry in names and then dereference the type we // learn for __cgo__i. 
var b bytes.Buffer - b.WriteString(f.Preamble) b.WriteString(builtinProlog) + b.WriteString(f.Preamble) b.WriteString("#line 1 \"cgo-dwarf-inference\"\n") for i, n := range names { fmt.Fprintf(&b, "__typeof__(%s) *__cgo__%d;\n", n.C, i) @@ -524,14 +500,6 @@ func (p *Package) loadDWARF(f *File, names []*Name) { // Scan DWARF info for top-level TagVariable entries with AttrName __cgo__i. types := make([]dwarf.Type, len(names)) - nameToIndex := make(map[*Name]int) - for i, n := range names { - nameToIndex[n] = i - } - nameToRef := make(map[*Name]*Ref) - for _, ref := range f.Ref { - nameToRef[ref.Name] = ref - } r := d.Reader() for { e, err := r.Next() @@ -582,10 +550,7 @@ func (p *Package) loadDWARF(f *File, names []*Name) { if types[i] == nil { continue } - pos := token.NoPos - if ref, ok := nameToRef[n]; ok { - pos = ref.Pos() - } + pos := f.NamePos[n] f, fok := types[i].(*dwarf.FuncType) if n.Kind != "type" && fok { n.Kind = "func" @@ -740,8 +705,9 @@ func (p *Package) rewriteCall(f *File, call *Call, name *Name) bool { stmts = append(stmts, stmt) } + const cgoMarker = "__cgo__###__marker__" fcall := &ast.CallExpr{ - Fun: call.Call.Fun, + Fun: ast.NewIdent(cgoMarker), Args: nargs, } ftype := &ast.FuncType{ @@ -763,31 +729,26 @@ func (p *Package) rewriteCall(f *File, call *Call, name *Name) bool { } } - // There is a Ref pointing to the old call.Call.Fun. + // If this call expects two results, we have to + // adjust the results of the function we generated. for _, ref := range f.Ref { - if ref.Expr == &call.Call.Fun { - ref.Expr = &fcall.Fun - - // If this call expects two results, we have to - // adjust the results of the function we generated. - if ref.Context == "call2" { - if ftype.Results == nil { - // An explicit void argument - // looks odd but it seems to - // be how cgo has worked historically. 
- ftype.Results = &ast.FieldList{ - List: []*ast.Field{ - &ast.Field{ - Type: ast.NewIdent("_Ctype_void"), - }, + if ref.Expr == &call.Call.Fun && ref.Context == ctxCall2 { + if ftype.Results == nil { + // An explicit void argument + // looks odd but it seems to + // be how cgo has worked historically. + ftype.Results = &ast.FieldList{ + List: []*ast.Field{ + &ast.Field{ + Type: ast.NewIdent("_Ctype_void"), }, - } + }, } - ftype.Results.List = append(ftype.Results.List, - &ast.Field{ - Type: ast.NewIdent("error"), - }) } + ftype.Results.List = append(ftype.Results.List, + &ast.Field{ + Type: ast.NewIdent("error"), + }) } } @@ -801,14 +762,16 @@ func (p *Package) rewriteCall(f *File, call *Call, name *Name) bool { Results: []ast.Expr{fcall}, } } - call.Call.Fun = &ast.FuncLit{ + lit := &ast.FuncLit{ Type: ftype, Body: &ast.BlockStmt{ List: append(stmts, fbody), }, } - call.Call.Lparen = token.NoPos - call.Call.Rparen = token.NoPos + text := strings.Replace(gofmt(lit), "\n", ";", -1) + repl := strings.Split(text, cgoMarker) + f.Edit.Insert(f.offset(call.Call.Fun.Pos()), repl[0]) + f.Edit.Insert(f.offset(call.Call.Fun.End()), repl[1]) return needsUnsafe } @@ -962,8 +925,8 @@ func (p *Package) checkAddrArgs(f *File, args []ast.Expr, x ast.Expr) []ast.Expr // effect is a function call. func (p *Package) hasSideEffects(f *File, x ast.Expr) bool { found := false - f.walk(x, "expr", - func(f *File, x interface{}, context string) { + f.walk(x, ctxExpr, + func(f *File, x interface{}, context astContext) { switch x.(type) { case *ast.CallExpr: found = true @@ -1072,7 +1035,17 @@ func (p *Package) rewriteRef(f *File) { // Assign mangled names. 
for _, n := range f.Name { if n.Kind == "not-type" { - n.Kind = "var" + if n.Define == "" { + n.Kind = "var" + } else { + n.Kind = "macro" + n.FuncType = &FuncType{ + Result: n.Type, + Go: &ast.FuncType{ + Results: &ast.FieldList{List: []*ast.Field{{Type: n.Type.Go}}}, + }, + } + } } if n.Mangle == "" { p.mangleName(n) @@ -1092,10 +1065,10 @@ func (p *Package) rewriteRef(f *File) { } var expr ast.Expr = ast.NewIdent(r.Name.Mangle) // default switch r.Context { - case "call", "call2": + case ctxCall, ctxCall2: if r.Name.Kind != "func" { if r.Name.Kind == "type" { - r.Context = "type" + r.Context = ctxType if r.Name.Type == nil { error_(r.Pos(), "invalid conversion to C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C) break @@ -1107,7 +1080,7 @@ func (p *Package) rewriteRef(f *File) { break } functions[r.Name.Go] = true - if r.Context == "call2" { + if r.Context == ctxCall2 { if r.Name.Go == "_CMalloc" { error_(r.Pos(), "no two-result form for C.malloc") break @@ -1125,8 +1098,9 @@ func (p *Package) rewriteRef(f *File) { r.Name = n break } - case "expr": - if r.Name.Kind == "func" { + case ctxExpr: + switch r.Name.Kind { + case "func": if builtinDefs[r.Name.C] != "" { error_(r.Pos(), "use of builtin '%s' not in function call", fixGo(r.Name.C)) } @@ -1153,25 +1127,25 @@ func (p *Package) rewriteRef(f *File) { Fun: &ast.Ident{NamePos: (*r.Expr).Pos(), Name: "_Cgo_ptr"}, Args: []ast.Expr{ast.NewIdent(name.Mangle)}, } - } else if r.Name.Kind == "type" { + case "type": // Okay - might be new(T) if r.Name.Type == nil { error_(r.Pos(), "expression C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C) break } expr = r.Name.Type.Go - } else if r.Name.Kind == "var" { + case "var": expr = &ast.StarExpr{Star: (*r.Expr).Pos(), X: expr} + case "macro": + expr = &ast.CallExpr{Fun: expr} } - - case "selector": + case ctxSelector: if r.Name.Kind == "var" { expr = &ast.StarExpr{Star: (*r.Expr).Pos(), X: expr} } else { error_(r.Pos(), "only C variables allowed in selector 
expression %s", fixGo(r.Name.Go)) } - - case "type": + case ctxType: if r.Name.Kind != "type" { error_(r.Pos(), "expression C.%s used as type", fixGo(r.Name.Go)) } else if r.Name.Type == nil { @@ -1186,6 +1160,7 @@ func (p *Package) rewriteRef(f *File) { error_(r.Pos(), "must call C.%s", fixGo(r.Name.Go)) } } + if *godefs { // Substitute definition for mangled type name. if id, ok := expr.(*ast.Ident); ok { @@ -1207,7 +1182,17 @@ func (p *Package) rewriteRef(f *File) { expr = &ast.Ident{NamePos: pos, Name: x.Name} } + // Change AST, because some later processing depends on it, + // and also because -godefs mode still prints the AST. + old := *r.Expr *r.Expr = expr + + // Record source-level edit for cgo output. + repl := gofmt(expr) + if r.Name.Kind != "type" { + repl = "(" + repl + ")" + } + f.Edit.Replace(f.offset(old.Pos()), f.offset(old.End()), repl) } // Remove functions only used as expressions, so their respective @@ -1232,7 +1217,7 @@ func (p *Package) gccBaseCmd() []string { if ret := strings.Fields(os.Getenv("GCC")); len(ret) > 0 { return ret } - return strings.Fields(defaultCC) + return strings.Fields(defaultCC(goos, goarch)) } // gccMachine returns the gcc -m flag to use, either "-m32", "-m64" or "-marm". @@ -2072,6 +2057,12 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type { name := c.Ident("_Ctype_" + dt.Name) goIdent[name.Name] = name sub := c.Type(dt.Type, pos) + if badPointerTypedef(dt.Name) { + // Treat this typedef as a uintptr. 
+ s := *sub + s.Go = c.uintptr + sub = &s + } t.Go = name if unionWithPointer[sub.Go] { unionWithPointer[t.Go] = true @@ -2152,7 +2143,7 @@ func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type { if ss, ok := dwarfToName[s]; ok { s = ss } - s = strings.Join(strings.Split(s, " "), "") // strip spaces + s = strings.Replace(s, " ", "", -1) name := c.Ident("_Ctype_" + s) tt := *t typedef[name.Name] = &tt @@ -2230,6 +2221,17 @@ func (c *typeConv) FuncArg(dtype dwarf.Type, pos token.Pos) *Type { if _, void := base(ptr.Type).(*dwarf.VoidType); void { break } + // ...or the typedef is one in which we expect bad pointers. + // It will be a uintptr instead of *X. + if badPointerTypedef(dt.Name) { + break + } + + // If we already know the typedef for t just use that. + // See issue 19832. + if def := typedef[t.Go.(*ast.Ident).Name]; def != nil { + break + } t = c.Type(ptr, pos) if t == nil { @@ -2386,7 +2388,9 @@ func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.Struct size := t.Size talign := t.Align if f.BitSize > 0 { - if f.BitSize%8 != 0 { + switch f.BitSize { + case 8, 16, 32, 64: + default: continue } size = f.BitSize / 8 @@ -2562,3 +2566,51 @@ func fieldPrefix(fld []*ast.Field) string { } return prefix } + +// badPointerTypedef reports whether t is a C typedef that should not be considered a pointer in Go. +// A typedef is bad if C code sometimes stores non-pointers in this type. +// TODO: Currently our best solution is to find these manually and list them as +// they come up. A better solution is desired. +func badPointerTypedef(t string) bool { + // The real bad types are CFNumberRef and CFTypeRef. + // Sometimes non-pointers are stored in these types. + // CFTypeRef is a supertype of those, so it can have bad pointers in it as well. + // We return true for the other CF*Ref types just so casting between them is easier. + // See comment below for details about the bad pointers. 
+ return goos == "darwin" && strings.HasPrefix(t, "CF") && strings.HasSuffix(t, "Ref") +} + +// Comment from Darwin's CFInternal.h +/* +// Tagged pointer support +// Low-bit set means tagged object, next 3 bits (currently) +// define the tagged object class, next 4 bits are for type +// information for the specific tagged object class. Thus, +// the low byte is for type info, and the rest of a pointer +// (32 or 64-bit) is for payload, whatever the tagged class. +// +// Note that the specific integers used to identify the +// specific tagged classes can and will change from release +// to release (that's why this stuff is in CF*Internal*.h), +// as can the definition of type info vs payload above. +// +#if __LP64__ +#define CF_IS_TAGGED_OBJ(PTR) ((uintptr_t)(PTR) & 0x1) +#define CF_TAGGED_OBJ_TYPE(PTR) ((uintptr_t)(PTR) & 0xF) +#else +#define CF_IS_TAGGED_OBJ(PTR) 0 +#define CF_TAGGED_OBJ_TYPE(PTR) 0 +#endif + +enum { + kCFTaggedObjectID_Invalid = 0, + kCFTaggedObjectID_Atom = (0 << 1) + 1, + kCFTaggedObjectID_Undefined3 = (1 << 1) + 1, + kCFTaggedObjectID_Undefined2 = (2 << 1) + 1, + kCFTaggedObjectID_Integer = (3 << 1) + 1, + kCFTaggedObjectID_DateTS = (4 << 1) + 1, + kCFTaggedObjectID_ManagedObjectID = (5 << 1) + 1, // Core Data + kCFTaggedObjectID_Date = (6 << 1) + 1, + kCFTaggedObjectID_Undefined7 = (7 << 1) + 1, +}; +*/ diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go index 3ad13ef9c73..0c1c863a7a0 100644 --- a/src/cmd/cgo/main.go +++ b/src/cmd/cgo/main.go @@ -24,6 +24,9 @@ import ( "runtime" "sort" "strings" + + "cmd/internal/edit" + "cmd/internal/objabi" ) // A Package collects information about the package we're going to write. 
@@ -54,6 +57,12 @@ type File struct { Calls []*Call // all calls to C.xxx in AST ExpFunc []*ExpFunc // exported functions for this file Name map[string]*Name // map from Go name to Name + NamePos map[*Name]token.Pos // map from Name to position of the first reference + Edit *edit.Buffer +} + +func (f *File) offset(p token.Pos) int { + return fset.Position(p).Offset } func nameKeys(m map[string]*Name) []string { @@ -75,7 +84,7 @@ type Call struct { type Ref struct { Name *Name Expr *ast.Expr - Context string // "type", "expr", "call", or "call2" + Context astContext } func (r *Ref) Pos() token.Pos { @@ -88,7 +97,7 @@ type Name struct { Mangle string // name used in generated Go C string // name used in C Define string // #define expansion - Kind string // "iconst", "fconst", "sconst", "type", "var", "fpvar", "func", "not-type" + Kind string // "iconst", "fconst", "sconst", "type", "var", "fpvar", "func", "macro", "not-type" Type *Type // the type of xxx FuncType *FuncType AddError bool @@ -105,7 +114,7 @@ func (n *Name) IsConst() bool { return strings.HasSuffix(n.Kind, "const") } -// A ExpFunc is an exported function, callable from C. +// An ExpFunc is an exported function, callable from C. 
// Such functions are identified in the Go input file // by doc comments containing the line //export ExpName type ExpFunc struct { @@ -200,6 +209,7 @@ var importSyscall = flag.Bool("import_syscall", true, "import syscall in generat var goarch, goos string func main() { + objabi.AddVersionFlag() // -V flag.Usage = usage flag.Parse() @@ -280,6 +290,7 @@ func main() { } f := new(File) + f.Edit = edit.NewBuffer(b) f.ParseGo(input, b) f.DiscardCgoDirectives() fs[i] = f @@ -300,11 +311,13 @@ func main() { p.Translate(f) for _, cref := range f.Ref { switch cref.Context { - case "call", "call2": + case ctxCall, ctxCall2: if cref.Name.Kind != "type" { break } + old := *cref.Expr *cref.Expr = cref.Name.Type.Go + f.Edit.Replace(f.offset(old.Pos()), f.offset(old.End()), gofmt(cref.Name.Type.Go)) } } if nerrors > 0 { diff --git a/src/cmd/cgo/out.go b/src/cmd/cgo/out.go index 9ab6bd8f977..8834c3db5a1 100644 --- a/src/cmd/cgo/out.go +++ b/src/cmd/cgo/out.go @@ -15,6 +15,7 @@ import ( "go/token" "io" "os" + "path/filepath" "sort" "strings" ) @@ -110,7 +111,13 @@ func (p *Package) writeDefs() { // Which is not useful. Moreover we never override source info, // so subsequent source code uses the same source info. // Moreover, empty file name makes compile emit no source debug info at all. - noSourceConf.Fprint(fgo2, fset, def.Go) + var buf bytes.Buffer + noSourceConf.Fprint(&buf, fset, def.Go) + if bytes.HasPrefix(buf.Bytes(), []byte("_Ctype_")) { + // This typedef is of the form `typedef a b` and should be an alias. 
+ fmt.Fprintf(fgo2, "= ") + } + fmt.Fprintf(fgo2, "%s", buf.Bytes()) fmt.Fprintf(fgo2, "\n\n") } if *gccgo { @@ -400,10 +407,12 @@ func (p *Package) writeDefsFunc(fgo2 io.Writer, n *Name, callsMalloc *bool) { inProlog := builtinDefs[name] != "" cname := fmt.Sprintf("_cgo%s%s", cPrefix, n.Mangle) paramnames := []string(nil) - for i, param := range d.Type.Params.List { - paramName := fmt.Sprintf("p%d", i) - param.Names = []*ast.Ident{ast.NewIdent(paramName)} - paramnames = append(paramnames, paramName) + if d.Type.Params != nil { + for i, param := range d.Type.Params.List { + paramName := fmt.Sprintf("p%d", i) + param.Names = []*ast.Ident{ast.NewIdent(paramName)} + paramnames = append(paramnames, paramName) + } } if *gccgo { @@ -502,8 +511,10 @@ func (p *Package) writeDefsFunc(fgo2 io.Writer, n *Name, callsMalloc *bool) { fmt.Fprintf(fgo2, "\tif errno != 0 { r2 = syscall.Errno(errno) }\n") } fmt.Fprintf(fgo2, "\tif _Cgo_always_false {\n") - for i := range d.Type.Params.List { - fmt.Fprintf(fgo2, "\t\t_Cgo_use(p%d)\n", i) + if d.Type.Params != nil { + for i := range d.Type.Params.List { + fmt.Fprintf(fgo2, "\t\t_Cgo_use(p%d)\n", i) + } } fmt.Fprintf(fgo2, "\t}\n") fmt.Fprintf(fgo2, "\treturn\n") @@ -516,7 +527,7 @@ func (p *Package) writeOutput(f *File, srcfile string) { if strings.HasSuffix(base, ".go") { base = base[0 : len(base)-3] } - base = strings.Map(slashToUnderscore, base) + base = filepath.Base(base) fgo1 := creat(*objDir + base + ".cgo1.go") fgcc := creat(*objDir + base + ".cgo2.c") @@ -525,10 +536,12 @@ func (p *Package) writeOutput(f *File, srcfile string) { // Write Go output: Go input with rewrites of C.xxx to _C_xxx. fmt.Fprintf(fgo1, "// Created by cgo - DO NOT EDIT\n\n") - conf.Fprint(fgo1, fset, f.AST) + fmt.Fprintf(fgo1, "//line %s:1\n", srcfile) + fgo1.Write(f.Edit.Bytes()) // While we process the vars and funcs, also write gcc output. // Gcc output starts with the preamble. 
+ fmt.Fprintf(fgcc, "%s\n", builtinProlog) fmt.Fprintf(fgcc, "%s\n", f.Preamble) fmt.Fprintf(fgcc, "%s\n", gccProlog) fmt.Fprintf(fgcc, "%s\n", tsanProlog) @@ -615,14 +628,18 @@ func (p *Package) writeOutputFunc(fgcc *os.File, n *Name) { fmt.Fprint(fgcc, "(__typeof__(a->r)) ") } } - fmt.Fprintf(fgcc, "%s(", n.C) - for i := range n.FuncType.Params { - if i > 0 { - fmt.Fprintf(fgcc, ", ") + if n.Kind == "macro" { + fmt.Fprintf(fgcc, "%s;\n", n.C) + } else { + fmt.Fprintf(fgcc, "%s(", n.C) + for i := range n.FuncType.Params { + if i > 0 { + fmt.Fprintf(fgcc, ", ") + } + fmt.Fprintf(fgcc, "a->p%d", i) } - fmt.Fprintf(fgcc, "a->p%d", i) + fmt.Fprintf(fgcc, ");\n") } - fmt.Fprintf(fgcc, ");\n") if n.AddError { fmt.Fprintf(fgcc, "\t_cgo_errno = errno;\n") } @@ -985,7 +1002,7 @@ func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) { default: // Declare a result struct. fmt.Fprintf(fgcch, "\n/* Return type for %s */\n", exp.ExpName) - fmt.Fprintf(fgcch, "struct %s_result {\n", exp.ExpName) + fmt.Fprintf(fgcch, "struct %s_return {\n", exp.ExpName) forFieldList(fntype.Results, func(i int, aname string, atype ast.Expr) { t := p.cgoType(atype) @@ -996,7 +1013,7 @@ func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) { fmt.Fprint(fgcch, "\n") }) fmt.Fprintf(fgcch, "};\n") - fmt.Fprintf(cdeclBuf, "struct %s_result", exp.ExpName) + fmt.Fprintf(cdeclBuf, "struct %s_return", exp.ExpName) } cRet := cdeclBuf.String() @@ -1022,7 +1039,7 @@ func (p *Package) writeGccgoExports(fgo2, fm, fgcc, fgcch io.Writer) { fmt.Fprintf(fgcch, "\n%s", exp.Doc) } - fmt.Fprintf(fgcch, "extern %s %s %s;\n", cRet, exp.ExpName, cParams) + fmt.Fprintf(fgcch, "extern %s %s%s;\n", cRet, exp.ExpName, cParams) // We need to use a name that will be exported by the // Go code; otherwise gccgo will make it static and we @@ -1131,6 +1148,7 @@ func (p *Package) writeExportHeader(fgcch io.Writer) { pkg = p.PackagePath } fmt.Fprintf(fgcch, "/* package %s */\n\n", pkg) + 
fmt.Fprintf(fgcch, "%s\n", builtinExportProlog) fmt.Fprintf(fgcch, "/* Start of preamble from import \"C\" comments. */\n\n") fmt.Fprintf(fgcch, "%s\n", p.Preamble) @@ -1223,8 +1241,9 @@ func (p *Package) cgoType(e ast.Expr) *Type { // Slice: pointer, len, cap. return &Type{Size: p.PtrSize * 3, Align: p.PtrSize, C: c("GoSlice")} } + // Non-slice array types are not supported. case *ast.StructType: - // TODO + // Not supported. case *ast.FuncType: return &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("void*")} case *ast.InterfaceType: @@ -1374,7 +1393,7 @@ const builtinProlog = ` /* Define intgo when compiling with GCC. */ typedef ptrdiff_t intgo; -typedef struct { char *p; intgo n; } _GoString_; +typedef struct { const char *p; intgo n; } _GoString_; typedef struct { char *p; intgo n; intgo c; } _GoBytes_; _GoString_ GoString(char *p); _GoString_ GoStringN(char *p, int l); @@ -1382,6 +1401,12 @@ _GoBytes_ GoBytes(void *p, int n); char *CString(_GoString_); void *CBytes(_GoBytes_); void *_CMalloc(size_t); + +__attribute__ ((unused)) +static size_t _GoStringLen(_GoString_ s) { return s.n; } + +__attribute__ ((unused)) +static const char *_GoStringPtr(_GoString_ s) { return s.p; } ` const goProlog = ` @@ -1613,6 +1638,27 @@ void localCgoCheckResult(Eface val) { } ` +// builtinExportProlog is a shorter version of builtinProlog, +// to be put into the _cgo_export.h file. +// For historical reasons we can't use builtinProlog in _cgo_export.h, +// because _cgo_export.h defines GoString as a struct while builtinProlog +// defines it as a function. We don't change this to avoid unnecessarily +// breaking existing code. 
+const builtinExportProlog = ` +#line 1 "cgo-builtin-prolog" + +#include /* for ptrdiff_t below */ + +#ifndef GO_CGO_EXPORT_PROLOGUE_H +#define GO_CGO_EXPORT_PROLOGUE_H + +typedef ptrdiff_t intgo; + +typedef struct { const char *p; intgo n; } _GoString_; + +#endif +` + func (p *Package) gccExportHeaderProlog() string { return strings.Replace(gccExportHeaderProlog, "GOINTBITS", fmt.Sprint(8*p.IntSize), -1) } @@ -1646,7 +1692,7 @@ typedef double _Complex GoComplex128; */ typedef char _check_for_GOINTBITS_bit_pointer_matching_GoInt[sizeof(void*)==GOINTBITS/8 ? 1:-1]; -typedef struct { const char *p; GoInt n; } GoString; +typedef _GoString_ GoString; typedef void *GoMap; typedef void *GoChan; typedef struct { void *t; void *v; } GoInterface; diff --git a/src/cmd/compile/fmt_test.go b/src/cmd/compile/fmt_test.go index 59de326a919..3f329dea9cc 100644 --- a/src/cmd/compile/fmt_test.go +++ b/src/cmd/compile/fmt_test.go @@ -229,7 +229,7 @@ func TestFormats(t *testing.T) { } } if mismatch { - t.Errorf("knownFormats is out of date; please run with -v to regenerate") + t.Errorf("knownFormats is out of date; please 'go test -v fmt_test.go > foo', then extract new definition of knownFormats from foo") } } @@ -419,7 +419,7 @@ func stringVal(tv types.TypeAndValue) (string, bool) { // formatIter iterates through the string s in increasing // index order and calls f for each format specifier '%..v'. // The arguments for f describe the specifier's index range. -// If a format specifier contains a "*", f is called with +// If a format specifier contains a "*", f is called with // the index range for "*" alone, before being called for // the entire specifier. The result of f is the index of // the rune at which iteration continues. 
@@ -571,9 +571,14 @@ var knownFormats = map[string]string{ "*cmd/compile/internal/ssa.Block %s": "", "*cmd/compile/internal/ssa.Block %v": "", "*cmd/compile/internal/ssa.Func %s": "", + "*cmd/compile/internal/ssa.Func %v": "", + "*cmd/compile/internal/ssa.LocalSlot %+v": "", + "*cmd/compile/internal/ssa.LocalSlot %v": "", + "*cmd/compile/internal/ssa.Register %s": "", "*cmd/compile/internal/ssa.SparseTreeNode %v": "", "*cmd/compile/internal/ssa.Value %s": "", "*cmd/compile/internal/ssa.Value %v": "", + "*cmd/compile/internal/ssa.VarLoc %v": "", "*cmd/compile/internal/ssa.sparseTreeMapEntry %v": "", "*cmd/compile/internal/types.Field %p": "", "*cmd/compile/internal/types.Field %v": "", @@ -592,25 +597,30 @@ var knownFormats = map[string]string{ "*cmd/compile/internal/types.Type %p": "", "*cmd/compile/internal/types.Type %s": "", "*cmd/compile/internal/types.Type %v": "", + "*cmd/internal/dwarf.Location %#v": "", "*cmd/internal/obj.Addr %v": "", "*cmd/internal/obj.LSym %v": "", - "*cmd/internal/obj.Prog %s": "", "*math/big.Int %#x": "", "*math/big.Int %s": "", "[16]byte %x": "", "[]*cmd/compile/internal/gc.Node %v": "", "[]*cmd/compile/internal/gc.Sig %#v": "", "[]*cmd/compile/internal/ssa.Value %v": "", + "[][]cmd/compile/internal/ssa.SlotID %v": "", "[]byte %s": "", "[]byte %x": "", "[]cmd/compile/internal/ssa.Edge %v": "", "[]cmd/compile/internal/ssa.ID %v": "", + "[]cmd/compile/internal/ssa.VarLocList %v": "", + "[]cmd/compile/internal/syntax.token %s": "", "[]string %v": "", "bool %v": "", "byte %08b": "", "byte %c": "", "cmd/compile/internal/arm.shift %d": "", "cmd/compile/internal/gc.Class %d": "", + "cmd/compile/internal/gc.Class %s": "", + "cmd/compile/internal/gc.Class %v": "", "cmd/compile/internal/gc.Ctype %d": "", "cmd/compile/internal/gc.Ctype %v": "", "cmd/compile/internal/gc.Level %d": "", @@ -620,21 +630,25 @@ var knownFormats = map[string]string{ "cmd/compile/internal/gc.Nodes %.v": "", "cmd/compile/internal/gc.Nodes %v": "", 
"cmd/compile/internal/gc.Op %#v": "", + "cmd/compile/internal/gc.Op %d": "", "cmd/compile/internal/gc.Op %v": "", "cmd/compile/internal/gc.Val %#v": "", "cmd/compile/internal/gc.Val %T": "", "cmd/compile/internal/gc.Val %v": "", "cmd/compile/internal/gc.fmtMode %d": "", "cmd/compile/internal/gc.initKind %d": "", + "cmd/compile/internal/gc.locID %v": "", "cmd/compile/internal/ssa.BranchPrediction %d": "", "cmd/compile/internal/ssa.Edge %v": "", "cmd/compile/internal/ssa.GCNode %v": "", "cmd/compile/internal/ssa.ID %d": "", - "cmd/compile/internal/ssa.LocalSlot %v": "", - "cmd/compile/internal/ssa.Location %v": "", + "cmd/compile/internal/ssa.ID %v": "", + "cmd/compile/internal/ssa.LocalSlot %s": "", + "cmd/compile/internal/ssa.Location %s": "", "cmd/compile/internal/ssa.Op %s": "", "cmd/compile/internal/ssa.Op %v": "", "cmd/compile/internal/ssa.ValAndOff %s": "", + "cmd/compile/internal/ssa.VarLocList %v": "", "cmd/compile/internal/ssa.rbrank %d": "", "cmd/compile/internal/ssa.regMask %d": "", "cmd/compile/internal/ssa.register %d": "", @@ -648,6 +662,7 @@ var knownFormats = map[string]string{ "cmd/compile/internal/types.EType %d": "", "cmd/compile/internal/types.EType %s": "", "cmd/compile/internal/types.EType %v": "", + "cmd/internal/dwarf.Location %#v": "", "cmd/internal/src.Pos %s": "", "cmd/internal/src.Pos %v": "", "error %v": "", @@ -670,6 +685,7 @@ var knownFormats = map[string]string{ "int32 %x": "", "int64 %+d": "", "int64 %-10d": "", + "int64 %.5d": "", "int64 %X": "", "int64 %d": "", "int64 %v": "", @@ -687,6 +703,7 @@ var knownFormats = map[string]string{ "rune %c": "", "string %-*s": "", "string %-16s": "", + "string %-6s": "", "string %.*s": "", "string %q": "", "string %s": "", diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go index e294bce66b9..df0a69a4417 100644 --- a/src/cmd/compile/internal/amd64/ggen.go +++ b/src/cmd/compile/internal/amd64/ggen.go @@ -14,14 +14,14 @@ import ( // no floating point in 
note handlers on Plan 9 var isPlan9 = objabi.GOOS == "plan9" -// DUFFZERO consists of repeated blocks of 4 MOVUPSs + ADD, +// DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ, // See runtime/mkduff.go. const ( dzBlocks = 16 // number of MOV/ADD blocks dzBlockLen = 4 // number of clears per block dzBlockSize = 19 // size of instructions in a single block dzMovSize = 4 // size of single MOV instruction w/ offset - dzAddSize = 4 // size of single ADD instruction + dzLeaqSize = 4 // size of single LEAQ instruction dzClearStep = 16 // number of bytes cleared by each MOV instruction dzClearLen = dzClearStep * dzBlockLen // bytes cleared by one block @@ -35,7 +35,7 @@ func dzOff(b int64) int64 { off -= b / dzClearLen * dzBlockSize tailLen := b % dzClearLen if tailLen >= dzClearStep { - off -= dzAddSize + dzMovSize*(tailLen/dzClearStep) + off -= dzLeaqSize + dzMovSize*(tailLen/dzClearStep) } return off } diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go index 2d7727b2700..ce322e5e990 100644 --- a/src/cmd/compile/internal/amd64/ssa.go +++ b/src/cmd/compile/internal/amd64/ssa.go @@ -117,7 +117,7 @@ func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog { return p } -// DUFFZERO consists of repeated blocks of 4 MOVUPSs + ADD, +// DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ, // See runtime/mkduff.go. func duffStart(size int64) int64 { x, _ := duff(size) @@ -140,7 +140,7 @@ func duff(size int64) (int64, int64) { off := dzBlockSize * (dzBlocks - blocks) var adj int64 if steps != 0 { - off -= dzAddSize + off -= dzLeaqSize off -= dzMovSize * steps adj -= dzClearStep * (dzBlockLen - steps) } @@ -494,6 +494,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Reg = v.Args[0].Reg() case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst: x := v.Reg() + + // If flags aren't live (indicated by v.Aux == nil), + // then we can rewrite MOV $0, AX into XOR AX, AX. 
+ if v.AuxInt == 0 && v.Aux == nil { + p := s.Prog(x86.AXORL) + p.From.Type = obj.TYPE_REG + p.From.Reg = x + p.To.Type = obj.TYPE_REG + p.To.Reg = x + break + } + asm := v.Op.Asm() // Use MOVL to move a small constant into a register // when the constant is positive and fits into 32 bits. @@ -506,11 +518,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = x - // If flags are live at this instruction, suppress the - // MOV $0,AX -> XOR AX,AX optimization. - if v.Aux != nil { - p.Mark |= x86.PRESERVEFLAGS - } case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst: x := v.Reg() p := s.Prog(v.Op.Asm()) @@ -525,7 +532,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { gc.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() - case ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8: + case ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_MEM p.From.Reg = v.Args[0].Reg() @@ -573,7 +580,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() gc.AddAux(&p.To, v) - case ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8: + case ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[2].Reg() @@ -614,6 +621,29 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Scale = 1 p.To.Index = i gc.AddAux(&p.To, v) + case ssa.OpAMD64ADDQconstmem, ssa.OpAMD64ADDLconstmem: + sc := v.AuxValAndOff() + off := sc.Off() + val := sc.Val() + if val == 1 { + var asm obj.As + if v.Op == ssa.OpAMD64ADDQconstmem { + asm = x86.AINCQ + } else { + asm = x86.AINCL + } + p := s.Prog(asm) + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + gc.AddAux2(&p.To, v, off) + } else { + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = val + p.To.Type = obj.TYPE_MEM + p.To.Reg = 
v.Args[0].Reg() + gc.AddAux2(&p.To, v, off) + } case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST @@ -655,6 +685,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // Break false dependency on destination register. opregreg(s, x86.AXORPS, r, r) opregreg(s, v.Op.Asm(), r, v.Args[0].Reg()) + case ssa.OpAMD64MOVQi2f, ssa.OpAMD64MOVQf2i: + p := s.Prog(x86.AMOVQ) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpAMD64MOVLi2f, ssa.OpAMD64MOVLf2i: + p := s.Prog(x86.AMOVL) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() case ssa.OpAMD64ADDQmem, ssa.OpAMD64ADDLmem, ssa.OpAMD64SUBQmem, ssa.OpAMD64SUBLmem, ssa.OpAMD64ANDQmem, ssa.OpAMD64ANDLmem, ssa.OpAMD64ORQmem, ssa.OpAMD64ORLmem, ssa.OpAMD64XORQmem, ssa.OpAMD64XORLmem, ssa.OpAMD64ADDSDmem, ssa.OpAMD64ADDSSmem, @@ -673,9 +715,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { adj := duffAdj(v.AuxInt) var p *obj.Prog if adj != 0 { - p = s.Prog(x86.AADDQ) - p.From.Type = obj.TYPE_CONST + p = s.Prog(x86.ALEAQ) + p.From.Type = obj.TYPE_MEM p.From.Offset = adj + p.From.Reg = x86.REG_DI p.To.Type = obj.TYPE_REG p.To.Reg = x86.REG_DI } @@ -695,7 +738,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Sym = gc.Duffcopy p.To.Offset = v.AuxInt - case ssa.OpCopy, ssa.OpAMD64MOVQconvert, ssa.OpAMD64MOVLconvert: // TODO: use MOVQreg for reg->reg copies instead of OpCopy? + case ssa.OpAMD64MOVQconvert, ssa.OpAMD64MOVLconvert: + if v.Args[0].Reg() != v.Reg() { + v.Fatalf("MOVXconvert should be a no-op") + } + case ssa.OpCopy: // TODO: use MOVQreg for reg->reg copies instead of OpCopy? 
if v.Type.IsMemory() { return } @@ -755,6 +802,34 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { } case ssa.OpAMD64CALLstatic, ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter: s.Call(v) + + case ssa.OpAMD64LoweredGetCallerPC: + p := s.Prog(x86.AMOVQ) + p.From.Type = obj.TYPE_MEM + p.From.Offset = -8 // PC is stored 8 bytes below first parameter. + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpAMD64LoweredGetCallerSP: + // caller's SP is the address of the first arg + mov := x86.AMOVQ + if gc.Widthptr == 4 { + mov = x86.AMOVL + } + p := s.Prog(mov) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpAMD64LoweredWB: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = v.Aux.(*obj.LSym) + case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL, ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL, ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL: @@ -777,6 +852,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[0].Reg() p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() + case ssa.OpAMD64ROUNDSD: + p := s.Prog(v.Op.Asm()) + val := v.AuxInt + // 0 means math.RoundToEven, 1 Floor, 2 Ceil, 3 Trunc + if val != 0 && val != 1 && val != 2 && val != 3 { + v.Fatalf("Invalid rounding mode") + } + p.From.Offset = val + p.From.Type = obj.TYPE_CONST + p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[0].Reg()}) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() case ssa.OpAMD64POPCNTQ, ssa.OpAMD64POPCNTL: if v.Args[0].Reg() != v.Reg() { // POPCNT on Intel has a false dependency on the destination register. 
@@ -792,6 +879,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = v.Args[0].Reg() p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() + case ssa.OpAMD64SETEQ, ssa.OpAMD64SETNE, ssa.OpAMD64SETL, ssa.OpAMD64SETLE, ssa.OpAMD64SETG, ssa.OpAMD64SETGE, @@ -803,6 +891,16 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() + case ssa.OpAMD64SETEQmem, ssa.OpAMD64SETNEmem, + ssa.OpAMD64SETLmem, ssa.OpAMD64SETLEmem, + ssa.OpAMD64SETGmem, ssa.OpAMD64SETGEmem, + ssa.OpAMD64SETBmem, ssa.OpAMD64SETBEmem, + ssa.OpAMD64SETAmem, ssa.OpAMD64SETAEmem: + p := s.Prog(v.Op.Asm()) + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + gc.AddAux(&p.To, v) + case ssa.OpAMD64SETNEF: p := s.Prog(v.Op.Asm()) p.To.Type = obj.TYPE_REG @@ -838,7 +936,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpAMD64LoweredNilCheck: // Issue a load which will fault if the input is nil. // TODO: We currently use the 2-byte instruction TESTB AX, (reg). - // Should we use the 3-byte TESTB $0, (reg) instead? It is larger + // Should we use the 3-byte TESTB $0, (reg) instead? It is larger // but it doesn't have false dependency on AX. // Or maybe allocate an output register and use MOVL (reg),reg2 ? // That trades clobbering flags for clobbering a register. diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go index 93abee3da0f..300672d9cf8 100644 --- a/src/cmd/compile/internal/arm/ssa.go +++ b/src/cmd/compile/internal/arm/ssa.go @@ -13,6 +13,7 @@ import ( "cmd/compile/internal/types" "cmd/internal/obj" "cmd/internal/obj/arm" + "cmd/internal/objabi" ) // loadByType returns the load instruction of the given type. 
@@ -184,6 +185,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ssa.OpARMSUBD, ssa.OpARMMULF, ssa.OpARMMULD, + ssa.OpARMNMULF, + ssa.OpARMNMULD, ssa.OpARMDIVF, ssa.OpARMDIVD: r := v.Reg() @@ -195,6 +198,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r + case ssa.OpARMMULAF, ssa.OpARMMULAD, ssa.OpARMMULSF, ssa.OpARMMULSD: + r := v.Reg() + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + r2 := v.Args[2].Reg() + if r != r0 { + v.Fatalf("result and addend are not in the same register: %v", v.LongString()) + } + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r2 + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r case ssa.OpARMADDS, ssa.OpARMSUBS: r := v.Reg0() @@ -242,6 +259,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r + case ssa.OpARMBFX, ssa.OpARMBFXU: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt >> 8 + p.SetFrom3(obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt & 0xff}) + p.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() case ssa.OpARMADDconst, ssa.OpARMADCconst, ssa.OpARMSUBconst, @@ -402,7 +427,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_REGREG p.To.Reg = v.Reg0() // high 32-bit p.To.Offset = int64(v.Reg1()) // low 32-bit - case ssa.OpARMMULA: + case ssa.OpARMMULA, ssa.OpARMMULS: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() @@ -449,17 +474,17 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() - case ssa.OpARMCMPshiftLL: + case ssa.OpARMCMPshiftLL, ssa.OpARMCMNshiftLL, ssa.OpARMTSTshiftLL, ssa.OpARMTEQshiftLL: genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt) - case ssa.OpARMCMPshiftRL: + case ssa.OpARMCMPshiftRL, ssa.OpARMCMNshiftRL, ssa.OpARMTSTshiftRL, 
ssa.OpARMTEQshiftRL: genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt) - case ssa.OpARMCMPshiftRA: + case ssa.OpARMCMPshiftRA, ssa.OpARMCMNshiftRA, ssa.OpARMTSTshiftRA, ssa.OpARMTEQshiftRA: genshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt) - case ssa.OpARMCMPshiftLLreg: + case ssa.OpARMCMPshiftLLreg, ssa.OpARMCMNshiftLLreg, ssa.OpARMTSTshiftLLreg, ssa.OpARMTEQshiftLLreg: genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL) - case ssa.OpARMCMPshiftRLreg: + case ssa.OpARMCMPshiftRLreg, ssa.OpARMCMNshiftRLreg, ssa.OpARMTSTshiftRLreg, ssa.OpARMTEQshiftRLreg: genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR) - case ssa.OpARMCMPshiftRAreg: + case ssa.OpARMCMPshiftRAreg, ssa.OpARMCMNshiftRAreg, ssa.OpARMTSTshiftRAreg, ssa.OpARMTEQshiftRAreg: genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR) case ssa.OpARMMOVWaddr: p := s.Prog(arm.AMOVW) @@ -477,10 +502,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { switch v.Aux.(type) { default: v.Fatalf("aux is of unknown type %T", v.Aux) - case *ssa.ExternSymbol: + case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case *ssa.ArgSymbol, *ssa.AutoSymbol: + case *gc.Node: wantreg = "SP" gc.AddAux(&p.From, v) case nil: @@ -516,7 +541,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() gc.AddAux(&p.To, v) - case ssa.OpARMMOVWloadidx: + case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx: // this is just shift 0 bits fallthrough case ssa.OpARMMOVWloadshiftLL: @@ -528,7 +553,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpARMMOVWloadshiftRA: p := genshift(s, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt) p.From.Reg = v.Args[0].Reg() - case ssa.OpARMMOVWstoreidx: + case 
ssa.OpARMMOVWstoreidx, ssa.OpARMMOVBstoreidx, ssa.OpARMMOVHstoreidx: // this is just shift 0 bits fallthrough case ssa.OpARMMOVWstoreshiftLL: @@ -580,6 +605,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { default: } } + if objabi.GOARM >= 6 { + // generate more efficient "MOVB/MOVBU/MOVH/MOVHU Reg@>0, Reg" on ARMv6 & ARMv7 + genshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, 0) + return + } fallthrough case ssa.OpARMMVN, ssa.OpARMCLZ, @@ -754,6 +784,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpARMLoweredGetClosurePtr: // Closure pointer is R7 (arm.REGCTXT). gc.CheckLoweredGetClosurePtr(v) + case ssa.OpARMLoweredGetCallerSP: + // caller's SP is FixedFrameSize below the address of the first arg + p := s.Prog(arm.AMOVW) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -gc.Ctxt.FixedFrameSize() + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() case ssa.OpARMFlagEQ, ssa.OpARMFlagLT_ULT, ssa.OpARMFlagLT_UGT, diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go index 52a8e3f3e37..f7b3851398f 100644 --- a/src/cmd/compile/internal/arm64/ggen.go +++ b/src/cmd/compile/internal/arm64/ggen.go @@ -31,13 +31,18 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i) } } else if cnt <= int64(128*gc.Widthptr) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend + if cnt%(2*int64(gc.Widthptr)) != 0 { + p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off) + off += int64(gc.Widthptr) + cnt -= int64(gc.Widthptr) + } p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0) - p = pp.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, arm64.REGRT1, 0) + p = pp.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, 
arm64.REGRT1, 0) p.Reg = arm64.REGRT1 p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0) p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Duffzero - p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr)) + p.To.Offset = 4 * (64 - cnt/(2*int64(gc.Widthptr))) } else { p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, arm64.REGTMP, 0) p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0) diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go index 0f9e82c727d..6fa01912f50 100644 --- a/src/cmd/compile/internal/arm64/ssa.go +++ b/src/cmd/compile/internal/arm64/ssa.go @@ -273,10 +273,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { switch v.Aux.(type) { default: v.Fatalf("aux is of unknown type %T", v.Aux) - case *ssa.ExternSymbol: + case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case *ssa.ArgSymbol, *ssa.AutoSymbol: + case *gc.Node: wantreg = "SP" gc.AddAux(&p.From, v) case nil: @@ -324,6 +324,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() gc.AddAux(&p.To, v) + case ssa.OpARM64STP: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REGREG + p.From.Reg = v.Args[1].Reg() + p.From.Offset = int64(v.Args[2].Reg()) + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + gc.AddAux(&p.To, v) case ssa.OpARM64MOVBstorezero, ssa.OpARM64MOVHstorezero, ssa.OpARM64MOVWstorezero, @@ -334,6 +342,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() gc.AddAux(&p.To, v) + case ssa.OpARM64MOVQstorezero: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REGREG + p.From.Reg = arm64.REGZERO + p.From.Offset = int64(arm64.REGZERO) + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + gc.AddAux(&p.To, v) case ssa.OpARM64LoweredAtomicExchange64, ssa.OpARM64LoweredAtomicExchange32: // LDAXR (Rarg0), Rout @@ -555,34 +571,29 @@ func ssaGenValue(s 
*gc.SSAGenState, v *ssa.Value) { p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg p.From.Reg = arm64.COND_LO p.Reg = v.Args[0].Reg() - p.From3 = &obj.Addr{Type: obj.TYPE_REG, Reg: r1} + p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: r1}) p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpARM64DUFFZERO: - // runtime.duffzero expects start address - 8 in R16 - p := s.Prog(arm64.ASUB) - p.From.Type = obj.TYPE_CONST - p.From.Offset = 8 - p.Reg = v.Args[0].Reg() - p.To.Type = obj.TYPE_REG - p.To.Reg = arm64.REG_R16 - p = s.Prog(obj.ADUFFZERO) + // runtime.duffzero expects start address in R16 + p := s.Prog(obj.ADUFFZERO) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = gc.Duffzero p.To.Offset = v.AuxInt case ssa.OpARM64LoweredZero: - // MOVD.P ZR, 8(R16) + // STP.P (ZR,ZR), 16(R16) // CMP Rarg1, R16 // BLE -2(PC) - // arg1 is the address of the last element to zero - p := s.Prog(arm64.AMOVD) + // arg1 is the address of the last 16-byte unit to zero + p := s.Prog(arm64.ASTP) p.Scond = arm64.C_XPOST - p.From.Type = obj.TYPE_REG + p.From.Type = obj.TYPE_REGREG p.From.Reg = arm64.REGZERO + p.From.Offset = int64(arm64.REGZERO) p.To.Type = obj.TYPE_MEM p.To.Reg = arm64.REG_R16 - p.To.Offset = 8 + p.To.Offset = 16 p2 := s.Prog(arm64.ACMP) p2.From.Type = obj.TYPE_REG p2.From.Reg = v.Args[1].Reg() @@ -655,6 +666,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpARM64LoweredGetClosurePtr: // Closure pointer is R26 (arm64.REGCTXT). 
gc.CheckLoweredGetClosurePtr(v) + case ssa.OpARM64LoweredGetCallerSP: + // caller's SP is FixedFrameSize below the address of the first arg + p := s.Prog(arm64.AMOVD) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -gc.Ctxt.FixedFrameSize() + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() case ssa.OpARM64FlagEQ, ssa.OpARM64FlagLT_ULT, ssa.OpARM64FlagLT_UGT, @@ -686,20 +705,22 @@ var condBits = map[ssa.Op]int16{ var blockJump = map[ssa.BlockKind]struct { asm, invasm obj.As }{ - ssa.BlockARM64EQ: {arm64.ABEQ, arm64.ABNE}, - ssa.BlockARM64NE: {arm64.ABNE, arm64.ABEQ}, - ssa.BlockARM64LT: {arm64.ABLT, arm64.ABGE}, - ssa.BlockARM64GE: {arm64.ABGE, arm64.ABLT}, - ssa.BlockARM64LE: {arm64.ABLE, arm64.ABGT}, - ssa.BlockARM64GT: {arm64.ABGT, arm64.ABLE}, - ssa.BlockARM64ULT: {arm64.ABLO, arm64.ABHS}, - ssa.BlockARM64UGE: {arm64.ABHS, arm64.ABLO}, - ssa.BlockARM64UGT: {arm64.ABHI, arm64.ABLS}, - ssa.BlockARM64ULE: {arm64.ABLS, arm64.ABHI}, - ssa.BlockARM64Z: {arm64.ACBZ, arm64.ACBNZ}, - ssa.BlockARM64NZ: {arm64.ACBNZ, arm64.ACBZ}, - ssa.BlockARM64ZW: {arm64.ACBZW, arm64.ACBNZW}, - ssa.BlockARM64NZW: {arm64.ACBNZW, arm64.ACBZW}, + ssa.BlockARM64EQ: {arm64.ABEQ, arm64.ABNE}, + ssa.BlockARM64NE: {arm64.ABNE, arm64.ABEQ}, + ssa.BlockARM64LT: {arm64.ABLT, arm64.ABGE}, + ssa.BlockARM64GE: {arm64.ABGE, arm64.ABLT}, + ssa.BlockARM64LE: {arm64.ABLE, arm64.ABGT}, + ssa.BlockARM64GT: {arm64.ABGT, arm64.ABLE}, + ssa.BlockARM64ULT: {arm64.ABLO, arm64.ABHS}, + ssa.BlockARM64UGE: {arm64.ABHS, arm64.ABLO}, + ssa.BlockARM64UGT: {arm64.ABHI, arm64.ABLS}, + ssa.BlockARM64ULE: {arm64.ABLS, arm64.ABHI}, + ssa.BlockARM64Z: {arm64.ACBZ, arm64.ACBNZ}, + ssa.BlockARM64NZ: {arm64.ACBNZ, arm64.ACBZ}, + ssa.BlockARM64ZW: {arm64.ACBZW, arm64.ACBNZW}, + ssa.BlockARM64NZW: {arm64.ACBNZW, arm64.ACBZW}, + ssa.BlockARM64TBZ: {arm64.ATBZ, arm64.ATBNZ}, + ssa.BlockARM64TBNZ: {arm64.ATBNZ, arm64.ATBZ}, } func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { @@ -770,6 +791,35 @@ 
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { p.From.Type = obj.TYPE_REG p.From.Reg = b.Control.Reg() } + case ssa.BlockARM64TBZ, ssa.BlockARM64TBNZ: + jmp := blockJump[b.Kind] + var p *obj.Prog + switch next { + case b.Succs[0].Block(): + p = s.Prog(jmp.invasm) + p.To.Type = obj.TYPE_BRANCH + p.From.Offset = b.Aux.(int64) + p.From.Type = obj.TYPE_CONST + p.Reg = b.Control.Reg() + s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) + case b.Succs[1].Block(): + p = s.Prog(jmp.asm) + p.To.Type = obj.TYPE_BRANCH + p.From.Offset = b.Aux.(int64) + p.From.Type = obj.TYPE_CONST + p.Reg = b.Control.Reg() + s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + default: + p = s.Prog(jmp.asm) + p.To.Type = obj.TYPE_BRANCH + p.From.Offset = b.Aux.(int64) + p.From.Type = obj.TYPE_CONST + p.Reg = b.Control.Reg() + s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + q := s.Prog(obj.AJMP) + q.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()}) + } default: b.Fatalf("branch not implemented: %s. 
Control: %s", b.LongString(), b.Control.LongString()) diff --git a/src/cmd/compile/internal/gc/alg.go b/src/cmd/compile/internal/gc/alg.go index 0b4c9c7b3f6..e98df71b34b 100644 --- a/src/cmd/compile/internal/gc/alg.go +++ b/src/cmd/compile/internal/gc/alg.go @@ -292,7 +292,7 @@ func genhash(sym *types.Sym, t *types.Type) { dumplist("genhash body", fn.Nbody) } - funcbody(fn) + funcbody() Curfn = fn fn.Func.SetDupok(true) fn = typecheck(fn, Etop) @@ -476,7 +476,7 @@ func geneq(sym *types.Sym, t *types.Type) { dumplist("geneq body", fn.Nbody) } - funcbody(fn) + funcbody() Curfn = fn fn.Func.SetDupok(true) fn = typecheck(fn, Etop) diff --git a/src/cmd/compile/internal/gc/align.go b/src/cmd/compile/internal/gc/align.go index f29c587877b..dc2d04a8ed8 100644 --- a/src/cmd/compile/internal/gc/align.go +++ b/src/cmd/compile/internal/gc/align.go @@ -176,6 +176,7 @@ func dowidth(t *types.Type) { } t.Width = 0 + t.Align = 1 return } @@ -290,6 +291,7 @@ func dowidth(t *types.Type) { case TFORW: // should have been filled in if !t.Broke() { + t.SetBroke(true) yyerror("invalid recursive type %v", t) } w = 1 // anything will do diff --git a/src/cmd/compile/internal/gc/asm_test.go b/src/cmd/compile/internal/gc/asm_test.go index 08ec638f44e..a7b817da257 100644 --- a/src/cmd/compile/internal/gc/asm_test.go +++ b/src/cmd/compile/internal/gc/asm_test.go @@ -18,6 +18,52 @@ import ( "testing" ) +// This file contains code generation tests. +// +// Each test is defined in a variable of type asmTest. Tests are +// architecture-specific, and they are grouped in arrays of tests, one +// for each architecture. +// +// Each asmTest consists of a function to compile, an array of +// positive regexps that must match the generated assembly and +// an array of negative regexps that must not match generated assembly. 
+// For example, the following amd64 test +// +// { +// fn: ` +// func f0(x int) int { +// return x * 64 +// } +// `, +// pos: []string{"\tSHLQ\t[$]6,"}, +// neg: []string{"MULQ"} +// } +// +// verifies that the code the compiler generates for a multiplication +// by 64 contains a 'SHLQ' instruction and does not contain a MULQ. +// +// Since all the tests for a given architecture are dumped in the same +// file, the function names must be unique. As a workaround for this +// restriction, the test harness supports the use of a '$' placeholder +// for function names. The func f0 above can be also written as +// +// { +// fn: ` +// func $(x int) int { +// return x * 64 +// } +// `, +// pos: []string{"\tSHLQ\t[$]6,"}, +// neg: []string{"MULQ"} +// } +// +// Each '$'-function will be given a unique name of form f_, +// where is the test index in the test array, and is the +// test's architecture. +// +// It is allowed to mix named and unnamed functions in the same test +// array; the named functions will retain their original names. + // TestAssembly checks to make sure the assembly generated for // functions contains certain expected instructions. func TestAssembly(t *testing.T) { @@ -41,8 +87,13 @@ func TestAssembly(t *testing.T) { asm := ats.compileToAsm(tt, dir) - for _, at := range ats.tests { - funcName := nameRegexp.FindString(at.function)[len("func "):] + for i, at := range ats.tests { + var funcName string + if strings.Contains(at.fn, "func $") { + funcName = fmt.Sprintf("f%d_%s", i, ats.arch) + } else { + funcName = nameRegexp.FindString(at.fn)[len("func "):] + } fa := funcAsm(tt, asm, funcName) if fa != "" { at.verifyAsm(tt, fa) @@ -74,17 +125,23 @@ func funcAsm(t *testing.T, asm string, funcName string) string { } type asmTest struct { - // function to compile, must be named fX, - // where X is this test's index in asmTests.tests. 
- function string - // regexps that must match the generated assembly - regexps []string + // function to compile + fn string + // regular expressions that must match the generated assembly + pos []string + // regular expressions that must not match the generated assembly + neg []string } func (at asmTest) verifyAsm(t *testing.T, fa string) { - for _, r := range at.regexps { + for _, r := range at.pos { if b, err := regexp.MatchString(r, fa); !b || err != nil { - t.Errorf("expected:%s\ngo:%s\nasm:%s\n", r, at.function, fa) + t.Errorf("expected:%s\ngo:%s\nasm:%s\n", r, at.fn, fa) + } + } + for _, r := range at.neg { + if b, err := regexp.MatchString(r, fa); b || err != nil { + t.Errorf("not expected:%s\ngo:%s\nasm:%s\n", r, at.fn, fa) } } } @@ -103,8 +160,9 @@ func (ats *asmTests) generateCode() []byte { fmt.Fprintf(&buf, "import %q\n", s) } - for _, t := range ats.tests { - fmt.Fprintln(&buf, t.function) + for i, t := range ats.tests { + function := strings.Replace(t.fn, "func $", fmt.Sprintf("func f%d_%s", i, ats.arch), 1) + fmt.Fprintln(&buf, function) } return buf.Bytes() @@ -166,7 +224,7 @@ var allAsmTests = []*asmTests{ { arch: "amd64", os: "linux", - imports: []string{"encoding/binary", "math/bits", "unsafe"}, + imports: []string{"encoding/binary", "math", "math/bits", "unsafe", "runtime"}, tests: linuxAMD64Tests, }, { @@ -178,13 +236,13 @@ var allAsmTests = []*asmTests{ { arch: "s390x", os: "linux", - imports: []string{"encoding/binary", "math/bits"}, + imports: []string{"encoding/binary", "math", "math/bits"}, tests: linuxS390XTests, }, { arch: "arm", os: "linux", - imports: []string{"math/bits"}, + imports: []string{"math/bits", "runtime"}, tests: linuxARMTests, }, { @@ -200,173 +258,196 @@ var allAsmTests = []*asmTests{ tests: linuxMIPSTests, }, { - arch: "ppc64le", + arch: "mips64", os: "linux", - tests: linuxPPC64LETests, + tests: linuxMIPS64Tests, + }, + { + arch: "ppc64le", + os: "linux", + imports: []string{"encoding/binary", "math", "math/bits"}, + 
tests: linuxPPC64LETests, + }, + { + arch: "amd64", + os: "plan9", + tests: plan9AMD64Tests, }, } var linuxAMD64Tests = []*asmTest{ + // multiplication by powers of two { - ` - func f0(x int) int { - return x * 64 + fn: ` + func $(n int) int { + return n * 64 } `, - []string{"\tSHLQ\t\\$6,"}, + pos: []string{"\tSHLQ\t\\$6,"}, + neg: []string{"IMULQ"}, }, { - ` - func f1(x int) int { + fn: ` + func $(n int) int { + return -128*n + } + `, + pos: []string{"SHLQ"}, + neg: []string{"IMULQ"}, + }, + + { + fn: ` + func $(x int) int { return x * 96 } `, - []string{"\tSHLQ\t\\$5,", "\tLEAQ\t\\(.*\\)\\(.*\\*2\\),"}, + pos: []string{"\tSHLQ\t\\$5,", "\tLEAQ\t\\(.*\\)\\(.*\\*2\\),"}, }, // Load-combining tests. { - ` + fn: ` func f2(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } `, - []string{"\tMOVQ\t\\(.*\\),"}, + pos: []string{"\tMOVQ\t\\(.*\\),"}, }, { - ` + fn: ` func f3(b []byte, i int) uint64 { return binary.LittleEndian.Uint64(b[i:]) } `, - []string{"\tMOVQ\t\\(.*\\)\\(.*\\*1\\),"}, + pos: []string{"\tMOVQ\t\\(.*\\)\\(.*\\*1\\),"}, }, { - ` + fn: ` func f4(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } `, - []string{"\tMOVL\t\\(.*\\),"}, + pos: []string{"\tMOVL\t\\(.*\\),"}, }, { - ` + fn: ` func f5(b []byte, i int) uint32 { return binary.LittleEndian.Uint32(b[i:]) } `, - []string{"\tMOVL\t\\(.*\\)\\(.*\\*1\\),"}, + pos: []string{"\tMOVL\t\\(.*\\)\\(.*\\*1\\),"}, }, { - ` + fn: ` func f6(b []byte) uint64 { return binary.BigEndian.Uint64(b) } `, - []string{"\tBSWAPQ\t"}, + pos: []string{"\tBSWAPQ\t"}, }, { - ` + fn: ` func f7(b []byte, i int) uint64 { return binary.BigEndian.Uint64(b[i:]) } `, - []string{"\tBSWAPQ\t"}, + pos: []string{"\tBSWAPQ\t"}, }, { - ` + fn: ` func f8(b []byte, v uint64) { binary.BigEndian.PutUint64(b, v) } `, - []string{"\tBSWAPQ\t"}, + pos: []string{"\tBSWAPQ\t"}, }, { - ` + fn: ` func f9(b []byte, i int, v uint64) { binary.BigEndian.PutUint64(b[i:], v) } `, - []string{"\tBSWAPQ\t"}, + pos: []string{"\tBSWAPQ\t"}, }, { - ` 
+ fn: ` func f10(b []byte) uint32 { return binary.BigEndian.Uint32(b) } `, - []string{"\tBSWAPL\t"}, + pos: []string{"\tBSWAPL\t"}, }, { - ` + fn: ` func f11(b []byte, i int) uint32 { return binary.BigEndian.Uint32(b[i:]) } `, - []string{"\tBSWAPL\t"}, + pos: []string{"\tBSWAPL\t"}, }, { - ` + fn: ` func f12(b []byte, v uint32) { binary.BigEndian.PutUint32(b, v) } `, - []string{"\tBSWAPL\t"}, + pos: []string{"\tBSWAPL\t"}, }, { - ` + fn: ` func f13(b []byte, i int, v uint32) { binary.BigEndian.PutUint32(b[i:], v) } `, - []string{"\tBSWAPL\t"}, + pos: []string{"\tBSWAPL\t"}, }, { - ` + fn: ` func f14(b []byte) uint16 { return binary.BigEndian.Uint16(b) } `, - []string{"\tROLW\t\\$8,"}, + pos: []string{"\tROLW\t\\$8,"}, }, { - ` + fn: ` func f15(b []byte, i int) uint16 { return binary.BigEndian.Uint16(b[i:]) } `, - []string{"\tROLW\t\\$8,"}, + pos: []string{"\tROLW\t\\$8,"}, }, { - ` + fn: ` func f16(b []byte, v uint16) { binary.BigEndian.PutUint16(b, v) } `, - []string{"\tROLW\t\\$8,"}, + pos: []string{"\tROLW\t\\$8,"}, }, { - ` + fn: ` func f17(b []byte, i int, v uint16) { binary.BigEndian.PutUint16(b[i:], v) } `, - []string{"\tROLW\t\\$8,"}, + pos: []string{"\tROLW\t\\$8,"}, }, // Structure zeroing. See issue #18370. { - ` + fn: ` type T1 struct { a, b, c int } - func f18(t *T1) { + func $(t *T1) { *t = T1{} } `, - []string{"\tMOVQ\t\\$0, \\(.*\\)", "\tMOVQ\t\\$0, 8\\(.*\\)", "\tMOVQ\t\\$0, 16\\(.*\\)"}, + pos: []string{"\tXORPS\tX., X", "\tMOVUPS\tX., \\(.*\\)", "\tMOVQ\t\\$0, 16\\(.*\\)"}, }, // SSA-able composite literal initialization. Issue 18872. { - ` + fn: ` type T18872 struct { a, b, c, d int } @@ -375,11 +456,11 @@ var linuxAMD64Tests = []*asmTest{ *p = T18872{1, 2, 3, 4} } `, - []string{"\tMOVQ\t[$]1", "\tMOVQ\t[$]2", "\tMOVQ\t[$]3", "\tMOVQ\t[$]4"}, + pos: []string{"\tMOVQ\t[$]1", "\tMOVQ\t[$]2", "\tMOVQ\t[$]3", "\tMOVQ\t[$]4"}, }, // Also test struct containing pointers (this was special because of write barriers). 
{ - ` + fn: ` type T2 struct { a, b, c *int } @@ -387,108 +468,108 @@ var linuxAMD64Tests = []*asmTest{ *t = T2{} } `, - []string{"\tMOVQ\t\\$0, \\(.*\\)", "\tMOVQ\t\\$0, 8\\(.*\\)", "\tMOVQ\t\\$0, 16\\(.*\\)", "\tCALL\truntime\\.writebarrierptr\\(SB\\)"}, + pos: []string{"\tXORPS\tX., X", "\tMOVUPS\tX., \\(.*\\)", "\tMOVQ\t\\$0, 16\\(.*\\)", "\tCALL\truntime\\.(writebarrierptr|gcWriteBarrier)\\(SB\\)"}, }, // Rotate tests { - ` + fn: ` func f20(x uint64) uint64 { return x<<7 | x>>57 } `, - []string{"\tROLQ\t[$]7,"}, + pos: []string{"\tROLQ\t[$]7,"}, }, { - ` + fn: ` func f21(x uint64) uint64 { return x<<7 + x>>57 } `, - []string{"\tROLQ\t[$]7,"}, + pos: []string{"\tROLQ\t[$]7,"}, }, { - ` + fn: ` func f22(x uint64) uint64 { return x<<7 ^ x>>57 } `, - []string{"\tROLQ\t[$]7,"}, + pos: []string{"\tROLQ\t[$]7,"}, }, { - ` + fn: ` func f23(x uint32) uint32 { return x<<7 + x>>25 } `, - []string{"\tROLL\t[$]7,"}, + pos: []string{"\tROLL\t[$]7,"}, }, { - ` + fn: ` func f24(x uint32) uint32 { return x<<7 | x>>25 } `, - []string{"\tROLL\t[$]7,"}, + pos: []string{"\tROLL\t[$]7,"}, }, { - ` + fn: ` func f25(x uint32) uint32 { return x<<7 ^ x>>25 } `, - []string{"\tROLL\t[$]7,"}, + pos: []string{"\tROLL\t[$]7,"}, }, { - ` + fn: ` func f26(x uint16) uint16 { return x<<7 + x>>9 } `, - []string{"\tROLW\t[$]7,"}, + pos: []string{"\tROLW\t[$]7,"}, }, { - ` + fn: ` func f27(x uint16) uint16 { return x<<7 | x>>9 } `, - []string{"\tROLW\t[$]7,"}, + pos: []string{"\tROLW\t[$]7,"}, }, { - ` + fn: ` func f28(x uint16) uint16 { return x<<7 ^ x>>9 } `, - []string{"\tROLW\t[$]7,"}, + pos: []string{"\tROLW\t[$]7,"}, }, { - ` + fn: ` func f29(x uint8) uint8 { return x<<7 + x>>1 } `, - []string{"\tROLB\t[$]7,"}, + pos: []string{"\tROLB\t[$]7,"}, }, { - ` + fn: ` func f30(x uint8) uint8 { return x<<7 | x>>1 } `, - []string{"\tROLB\t[$]7,"}, + pos: []string{"\tROLB\t[$]7,"}, }, { - ` + fn: ` func f31(x uint8) uint8 { return x<<7 ^ x>>1 } `, - []string{"\tROLB\t[$]7,"}, + pos: 
[]string{"\tROLB\t[$]7,"}, }, // Rotate after inlining (see issue 18254). { - ` + fn: ` func f32(x uint32) uint32 { return g(x, 7) } @@ -496,46 +577,46 @@ var linuxAMD64Tests = []*asmTest{ return x<>(32-k) } `, - []string{"\tROLL\t[$]7,"}, + pos: []string{"\tROLL\t[$]7,"}, }, { - ` + fn: ` func f33(m map[int]int) int { return m[5] } `, - []string{"\tMOVQ\t[$]5,"}, + pos: []string{"\tMOVQ\t[$]5,"}, }, // Direct use of constants in fast map access calls. Issue 19015. { - ` + fn: ` func f34(m map[int]int) bool { _, ok := m[5] return ok } `, - []string{"\tMOVQ\t[$]5,"}, + pos: []string{"\tMOVQ\t[$]5,"}, }, { - ` + fn: ` func f35(m map[string]int) int { return m["abc"] } `, - []string{"\"abc\""}, + pos: []string{"\"abc\""}, }, { - ` + fn: ` func f36(m map[string]int) bool { _, ok := m["abc"] return ok } `, - []string{"\"abc\""}, + pos: []string{"\"abc\""}, }, // Bit test ops on amd64, issue 18943. { - ` + fn: ` func f37(a, b uint64) int { if a&(1<<(b&63)) != 0 { return 1 @@ -543,18 +624,18 @@ var linuxAMD64Tests = []*asmTest{ return -1 } `, - []string{"\tBTQ\t"}, + pos: []string{"\tBTQ\t"}, }, { - ` + fn: ` func f38(a, b uint64) bool { return a&(1<<(b&63)) != 0 } `, - []string{"\tBTQ\t"}, + pos: []string{"\tBTQ\t"}, }, { - ` + fn: ` func f39(a uint64) int { if a&(1<<60) != 0 { return 1 @@ -562,1107 +643,2408 @@ var linuxAMD64Tests = []*asmTest{ return -1 } `, - []string{"\tBTQ\t\\$60"}, + pos: []string{"\tBTQ\t\\$60"}, }, { - ` + fn: ` func f40(a uint64) bool { return a&(1<<60) != 0 } `, - []string{"\tBTQ\t\\$60"}, + pos: []string{"\tBTQ\t\\$60"}, }, // Intrinsic tests for math/bits { - ` + fn: ` func f41(a uint64) int { return bits.TrailingZeros64(a) } `, - []string{"\tBSFQ\t", "\tMOVL\t\\$64,", "\tCMOVQEQ\t"}, + pos: []string{"\tBSFQ\t", "\tMOVL\t\\$64,", "\tCMOVQEQ\t"}, }, { - ` + fn: ` func f42(a uint32) int { return bits.TrailingZeros32(a) } `, - []string{"\tBSFQ\t", "\tORQ\t[^$]", "\tMOVQ\t\\$4294967296,"}, + pos: []string{"\tBSFQ\t", "\tORQ\t[^$]", 
"\tMOVQ\t\\$4294967296,"}, }, { - ` + fn: ` func f43(a uint16) int { return bits.TrailingZeros16(a) } `, - []string{"\tBSFQ\t", "\tORQ\t\\$65536,"}, + pos: []string{"\tBSFQ\t", "\tORQ\t\\$65536,"}, }, { - ` + fn: ` func f44(a uint8) int { return bits.TrailingZeros8(a) } `, - []string{"\tBSFQ\t", "\tORQ\t\\$256,"}, + pos: []string{"\tBSFQ\t", "\tORQ\t\\$256,"}, }, { - ` + fn: ` func f45(a uint64) uint64 { return bits.ReverseBytes64(a) } `, - []string{"\tBSWAPQ\t"}, + pos: []string{"\tBSWAPQ\t"}, }, { - ` + fn: ` func f46(a uint32) uint32 { return bits.ReverseBytes32(a) } `, - []string{"\tBSWAPL\t"}, + pos: []string{"\tBSWAPL\t"}, }, { - ` + fn: ` func f47(a uint16) uint16 { return bits.ReverseBytes16(a) } `, - []string{"\tROLW\t\\$8,"}, + pos: []string{"\tROLW\t\\$8,"}, }, { - ` + fn: ` func f48(a uint64) int { return bits.Len64(a) } `, - []string{"\tBSRQ\t"}, + pos: []string{"\tBSRQ\t"}, }, { - ` + fn: ` func f49(a uint32) int { return bits.Len32(a) } `, - []string{"\tBSRQ\t"}, + pos: []string{"\tBSRQ\t"}, }, { - ` + fn: ` func f50(a uint16) int { return bits.Len16(a) } `, - []string{"\tBSRQ\t"}, + pos: []string{"\tBSRQ\t"}, }, /* see ssa.go { - ` + fn:` func f51(a uint8) int { return bits.Len8(a) } `, - []string{"\tBSRQ\t"}, + pos:[]string{"\tBSRQ\t"}, }, */ { - ` + fn: ` func f52(a uint) int { return bits.Len(a) } `, - []string{"\tBSRQ\t"}, + pos: []string{"\tBSRQ\t"}, }, { - ` + fn: ` func f53(a uint64) int { return bits.LeadingZeros64(a) } `, - []string{"\tBSRQ\t"}, + pos: []string{"\tBSRQ\t"}, }, { - ` + fn: ` func f54(a uint32) int { return bits.LeadingZeros32(a) } `, - []string{"\tBSRQ\t"}, + pos: []string{"\tBSRQ\t"}, }, { - ` + fn: ` func f55(a uint16) int { return bits.LeadingZeros16(a) } `, - []string{"\tBSRQ\t"}, + pos: []string{"\tBSRQ\t"}, }, /* see ssa.go { - ` + fn:` func f56(a uint8) int { return bits.LeadingZeros8(a) } `, - []string{"\tBSRQ\t"}, + pos:[]string{"\tBSRQ\t"}, }, */ { - ` + fn: ` func f57(a uint) int { return bits.LeadingZeros(a) } `, 
- []string{"\tBSRQ\t"}, + pos: []string{"\tBSRQ\t"}, }, { - ` + fn: ` func pop1(x uint64) int { return bits.OnesCount64(x) }`, - []string{"\tPOPCNTQ\t", "support_popcnt"}, + pos: []string{"\tPOPCNTQ\t", "support_popcnt"}, }, { - ` + fn: ` func pop2(x uint32) int { return bits.OnesCount32(x) }`, - []string{"\tPOPCNTL\t", "support_popcnt"}, + pos: []string{"\tPOPCNTL\t", "support_popcnt"}, }, { - ` + fn: ` func pop3(x uint16) int { return bits.OnesCount16(x) }`, - []string{"\tPOPCNTL\t", "support_popcnt"}, + pos: []string{"\tPOPCNTL\t", "support_popcnt"}, }, { - ` + fn: ` func pop4(x uint) int { return bits.OnesCount(x) }`, - []string{"\tPOPCNTQ\t", "support_popcnt"}, + pos: []string{"\tPOPCNTQ\t", "support_popcnt"}, }, + // multiplication merging tests + { + fn: ` + func mul1(n int) int { + return 15*n + 31*n + }`, + pos: []string{"\tIMULQ\t[$]46"}, // 46*n + }, + { + fn: ` + func mul2(n int) int { + return 5*n + 7*(n+1) + 11*(n+2) + }`, + pos: []string{"\tIMULQ\t[$]23", "\tADDQ\t[$]29"}, // 23*n + 29 + }, + { + fn: ` + func mul3(a, n int) int { + return a*n + 19*n + }`, + pos: []string{"\tADDQ\t[$]19", "\tIMULQ"}, // (a+19)*n + }, + { + fn: ` + func mul4(n int) int { + return 23*n - 9*n + }`, + pos: []string{"\tIMULQ\t[$]14"}, // 14*n + }, + { + fn: ` + func mul5(a, n int) int { + return a*n - 19*n + }`, + pos: []string{"\tADDQ\t[$]-19", "\tIMULQ"}, // (a-19)*n + }, + // see issue 19595. // We want to merge load+op in f58, but not in f59. 
{ - ` + fn: ` func f58(p, q *int) { x := *p *q += x }`, - []string{"\tADDQ\t\\("}, + pos: []string{"\tADDQ\t\\("}, }, { - ` + fn: ` func f59(p, q *int) { x := *p for i := 0; i < 10; i++ { *q += x } }`, - []string{"\tADDQ\t[A-Z]"}, + pos: []string{"\tADDQ\t[A-Z]"}, }, // Floating-point strength reduction { - ` + fn: ` func f60(f float64) float64 { return f * 2.0 }`, - []string{"\tADDSD\t"}, + pos: []string{"\tADDSD\t"}, }, { - ` + fn: ` func f62(f float64) float64 { return f / 16.0 }`, - []string{"\tMULSD\t"}, + pos: []string{"\tMULSD\t"}, }, { - ` + fn: ` func f63(f float64) float64 { return f / 0.125 }`, - []string{"\tMULSD\t"}, + pos: []string{"\tMULSD\t"}, }, { - ` + fn: ` func f64(f float64) float64 { return f / 0.5 }`, - []string{"\tADDSD\t"}, + pos: []string{"\tADDSD\t"}, }, // Check that compare to constant string uses 2/4/8 byte compares { - ` + fn: ` func f65(a string) bool { return a == "xx" }`, - []string{"\tCMPW\t[A-Z]"}, + pos: []string{"\tCMPW\t[A-Z]"}, }, { - ` + fn: ` func f66(a string) bool { return a == "xxxx" }`, - []string{"\tCMPL\t[A-Z]"}, + pos: []string{"\tCMPL\t[A-Z]"}, }, { - ` + fn: ` func f67(a string) bool { return a == "xxxxxxxx" }`, - []string{"\tCMPQ\t[A-Z]"}, + pos: []string{"\tCMPQ\t[A-Z]"}, }, // Non-constant rotate { - `func rot64l(x uint64, y int) uint64 { + fn: `func rot64l(x uint64, y int) uint64 { z := uint(y & 63) return x << z | x >> (64-z) }`, - []string{"\tROLQ\t"}, + pos: []string{"\tROLQ\t"}, }, { - `func rot64r(x uint64, y int) uint64 { + fn: `func rot64r(x uint64, y int) uint64 { z := uint(y & 63) return x >> z | x << (64-z) }`, - []string{"\tRORQ\t"}, + pos: []string{"\tRORQ\t"}, }, { - `func rot32l(x uint32, y int) uint32 { + fn: `func rot32l(x uint32, y int) uint32 { z := uint(y & 31) return x << z | x >> (32-z) }`, - []string{"\tROLL\t"}, + pos: []string{"\tROLL\t"}, }, { - `func rot32r(x uint32, y int) uint32 { + fn: `func rot32r(x uint32, y int) uint32 { z := uint(y & 31) return x >> z | x << (32-z) }`, - 
[]string{"\tRORL\t"}, + pos: []string{"\tRORL\t"}, }, { - `func rot16l(x uint16, y int) uint16 { + fn: `func rot16l(x uint16, y int) uint16 { z := uint(y & 15) return x << z | x >> (16-z) }`, - []string{"\tROLW\t"}, + pos: []string{"\tROLW\t"}, }, { - `func rot16r(x uint16, y int) uint16 { + fn: `func rot16r(x uint16, y int) uint16 { z := uint(y & 15) return x >> z | x << (16-z) }`, - []string{"\tRORW\t"}, + pos: []string{"\tRORW\t"}, }, { - `func rot8l(x uint8, y int) uint8 { + fn: `func rot8l(x uint8, y int) uint8 { z := uint(y & 7) return x << z | x >> (8-z) }`, - []string{"\tROLB\t"}, + pos: []string{"\tROLB\t"}, }, { - `func rot8r(x uint8, y int) uint8 { + fn: `func rot8r(x uint8, y int) uint8 { z := uint(y & 7) return x >> z | x << (8-z) }`, - []string{"\tRORB\t"}, + pos: []string{"\tRORB\t"}, }, // Check that array compare uses 2/4/8 byte compares { - ` + fn: ` func f68(a,b [2]byte) bool { return a == b }`, - []string{"\tCMPW\t[A-Z]"}, + pos: []string{"\tCMPW\t[A-Z]"}, }, { - ` + fn: ` func f69(a,b [3]uint16) bool { return a == b }`, - []string{"\tCMPL\t[A-Z]"}, + pos: []string{"\tCMPL\t[A-Z]"}, }, { - ` + fn: ` func f70(a,b [15]byte) bool { return a == b }`, - []string{"\tCMPQ\t[A-Z]"}, + pos: []string{"\tCMPQ\t[A-Z]"}, }, { - ` + fn: ` func f71(a,b unsafe.Pointer) bool { // This was a TODO in mapaccess1_faststr return *((*[4]byte)(a)) != *((*[4]byte)(b)) }`, - []string{"\tCMPL\t[A-Z]"}, + pos: []string{"\tCMPL\t[A-Z]"}, }, { // make sure assembly output has matching offset and base register. 
- ` + fn: ` func f72(a, b int) int { - var x [16]byte // use some frame - _ = x + runtime.GC() // use some frame return b } `, - []string{"b\\+40\\(SP\\)"}, + pos: []string{"b\\+24\\(SP\\)"}, + }, + { + // check load combining + fn: ` + func f73(a, b byte) (byte,byte) { + return f73(f73(a,b)) + } + `, + pos: []string{"\tMOVW\t"}, + }, + { + fn: ` + func f74(a, b uint16) (uint16,uint16) { + return f74(f74(a,b)) + } + `, + pos: []string{"\tMOVL\t"}, + }, + { + fn: ` + func f75(a, b uint32) (uint32,uint32) { + return f75(f75(a,b)) + } + `, + pos: []string{"\tMOVQ\t"}, + }, + { + fn: ` + func f76(a, b uint64) (uint64,uint64) { + return f76(f76(a,b)) + } + `, + pos: []string{"\tMOVUPS\t"}, + }, + // Make sure we don't put pointers in SSE registers across safe points. + { + fn: ` + func $(p, q *[2]*int) { + a, b := p[0], p[1] + runtime.GC() + q[0], q[1] = a, b + } + `, + neg: []string{"MOVUPS"}, + }, + { + // check that stack store is optimized away + fn: ` + func $() int { + var x int + return *(&x) + } + `, + pos: []string{"TEXT\t.*, [$]0-8"}, + }, + // math.Abs using integer registers + { + fn: ` + func $(x float64) float64 { + return math.Abs(x) + } + `, + pos: []string{"\tSHLQ\t[$]1,", "\tSHRQ\t[$]1,"}, + }, + // math.Copysign using integer registers + { + fn: ` + func $(x, y float64) float64 { + return math.Copysign(x, y) + } + `, + pos: []string{"\tSHLQ\t[$]1,", "\tSHRQ\t[$]1,", "\tSHRQ\t[$]63,", "\tSHLQ\t[$]63,", "\tORQ\t"}, + }, + // int <-> fp moves + { + fn: ` + func $(x float64) uint64 { + return math.Float64bits(x+1) + 1 + } + `, + pos: []string{"\tMOVQ\tX.*, [^X].*"}, + }, + { + fn: ` + func $(x float32) uint32 { + return math.Float32bits(x+1) + 1 + } + `, + pos: []string{"\tMOVL\tX.*, [^X].*"}, + }, + { + fn: ` + func $(x uint64) float64 { + return math.Float64frombits(x+1) + 1 + } + `, + pos: []string{"\tMOVQ\t[^X].*, X.*"}, + }, + { + fn: ` + func $(x uint32) float32 { + return math.Float32frombits(x+1) + 1 + } + `, + pos: []string{"\tMOVL\t[^X].*, 
X.*"}, + }, + { + fn: ` + func $(x uint32) bool { + return x > 4 + } + `, + pos: []string{"\tSETHI\t\\("}, + }, + // Check that len() and cap() div by a constant power of two + // are compiled into SHRQ. + { + fn: ` + func $(a []int) int { + return len(a) / 1024 + } + `, + pos: []string{"\tSHRQ\t\\$10,"}, + }, + { + fn: ` + func $(s string) int { + return len(s) / (4097 >> 1) + } + `, + pos: []string{"\tSHRQ\t\\$11,"}, + }, + { + fn: ` + func $(a []int) int { + return cap(a) / ((1 << 11) + 2048) + } + `, + pos: []string{"\tSHRQ\t\\$12,"}, + }, + // Check that len() and cap() mod by a constant power of two + // are compiled into ANDQ. + { + fn: ` + func $(a []int) int { + return len(a) % 1024 + } + `, + pos: []string{"\tANDQ\t\\$1023,"}, + }, + { + fn: ` + func $(s string) int { + return len(s) % (4097 >> 1) + } + `, + pos: []string{"\tANDQ\t\\$2047,"}, + }, + { + fn: ` + func $(a []int) int { + return cap(a) % ((1 << 11) + 2048) + } + `, + pos: []string{"\tANDQ\t\\$4095,"}, + }, + { + // Test that small memmove was replaced with direct movs + fn: ` + func $() { + x := [...]byte{1, 2, 3, 4, 5, 6, 7} + copy(x[1:], x[:]) + } + `, + neg: []string{"memmove"}, + }, + { + // Same as above but with different size + fn: ` + func $() { + x := [...]byte{1, 2, 3, 4} + copy(x[1:], x[:]) + } + `, + neg: []string{"memmove"}, + }, + { + // Same as above but with different size + fn: ` + func $() { + x := [...]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + copy(x[1:], x[:]) + } + `, + neg: []string{"memmove"}, + }, + // Nil checks before calling interface methods + { + fn: ` + type I interface { + foo000() + foo001() + foo002() + foo003() + foo004() + foo005() + foo006() + foo007() + foo008() + foo009() + foo010() + foo011() + foo012() + foo013() + foo014() + foo015() + foo016() + foo017() + foo018() + foo019() + foo020() + foo021() + foo022() + foo023() + foo024() + foo025() + foo026() + foo027() + foo028() + foo029() + foo030() + foo031() + foo032() + foo033() + 
foo034() + foo035() + foo036() + foo037() + foo038() + foo039() + foo040() + foo041() + foo042() + foo043() + foo044() + foo045() + foo046() + foo047() + foo048() + foo049() + foo050() + foo051() + foo052() + foo053() + foo054() + foo055() + foo056() + foo057() + foo058() + foo059() + foo060() + foo061() + foo062() + foo063() + foo064() + foo065() + foo066() + foo067() + foo068() + foo069() + foo070() + foo071() + foo072() + foo073() + foo074() + foo075() + foo076() + foo077() + foo078() + foo079() + foo080() + foo081() + foo082() + foo083() + foo084() + foo085() + foo086() + foo087() + foo088() + foo089() + foo090() + foo091() + foo092() + foo093() + foo094() + foo095() + foo096() + foo097() + foo098() + foo099() + foo100() + foo101() + foo102() + foo103() + foo104() + foo105() + foo106() + foo107() + foo108() + foo109() + foo110() + foo111() + foo112() + foo113() + foo114() + foo115() + foo116() + foo117() + foo118() + foo119() + foo120() + foo121() + foo122() + foo123() + foo124() + foo125() + foo126() + foo127() + foo128() + foo129() + foo130() + foo131() + foo132() + foo133() + foo134() + foo135() + foo136() + foo137() + foo138() + foo139() + foo140() + foo141() + foo142() + foo143() + foo144() + foo145() + foo146() + foo147() + foo148() + foo149() + foo150() + foo151() + foo152() + foo153() + foo154() + foo155() + foo156() + foo157() + foo158() + foo159() + foo160() + foo161() + foo162() + foo163() + foo164() + foo165() + foo166() + foo167() + foo168() + foo169() + foo170() + foo171() + foo172() + foo173() + foo174() + foo175() + foo176() + foo177() + foo178() + foo179() + foo180() + foo181() + foo182() + foo183() + foo184() + foo185() + foo186() + foo187() + foo188() + foo189() + foo190() + foo191() + foo192() + foo193() + foo194() + foo195() + foo196() + foo197() + foo198() + foo199() + foo200() + foo201() + foo202() + foo203() + foo204() + foo205() + foo206() + foo207() + foo208() + foo209() + foo210() + foo211() + foo212() + foo213() + foo214() + foo215() 
+ foo216() + foo217() + foo218() + foo219() + foo220() + foo221() + foo222() + foo223() + foo224() + foo225() + foo226() + foo227() + foo228() + foo229() + foo230() + foo231() + foo232() + foo233() + foo234() + foo235() + foo236() + foo237() + foo238() + foo239() + foo240() + foo241() + foo242() + foo243() + foo244() + foo245() + foo246() + foo247() + foo248() + foo249() + foo250() + foo251() + foo252() + foo253() + foo254() + foo255() + foo256() + foo257() + foo258() + foo259() + foo260() + foo261() + foo262() + foo263() + foo264() + foo265() + foo266() + foo267() + foo268() + foo269() + foo270() + foo271() + foo272() + foo273() + foo274() + foo275() + foo276() + foo277() + foo278() + foo279() + foo280() + foo281() + foo282() + foo283() + foo284() + foo285() + foo286() + foo287() + foo288() + foo289() + foo290() + foo291() + foo292() + foo293() + foo294() + foo295() + foo296() + foo297() + foo298() + foo299() + foo300() + foo301() + foo302() + foo303() + foo304() + foo305() + foo306() + foo307() + foo308() + foo309() + foo310() + foo311() + foo312() + foo313() + foo314() + foo315() + foo316() + foo317() + foo318() + foo319() + foo320() + foo321() + foo322() + foo323() + foo324() + foo325() + foo326() + foo327() + foo328() + foo329() + foo330() + foo331() + foo332() + foo333() + foo334() + foo335() + foo336() + foo337() + foo338() + foo339() + foo340() + foo341() + foo342() + foo343() + foo344() + foo345() + foo346() + foo347() + foo348() + foo349() + foo350() + foo351() + foo352() + foo353() + foo354() + foo355() + foo356() + foo357() + foo358() + foo359() + foo360() + foo361() + foo362() + foo363() + foo364() + foo365() + foo366() + foo367() + foo368() + foo369() + foo370() + foo371() + foo372() + foo373() + foo374() + foo375() + foo376() + foo377() + foo378() + foo379() + foo380() + foo381() + foo382() + foo383() + foo384() + foo385() + foo386() + foo387() + foo388() + foo389() + foo390() + foo391() + foo392() + foo393() + foo394() + foo395() + foo396() + 
foo397() + foo398() + foo399() + foo400() + foo401() + foo402() + foo403() + foo404() + foo405() + foo406() + foo407() + foo408() + foo409() + foo410() + foo411() + foo412() + foo413() + foo414() + foo415() + foo416() + foo417() + foo418() + foo419() + foo420() + foo421() + foo422() + foo423() + foo424() + foo425() + foo426() + foo427() + foo428() + foo429() + foo430() + foo431() + foo432() + foo433() + foo434() + foo435() + foo436() + foo437() + foo438() + foo439() + foo440() + foo441() + foo442() + foo443() + foo444() + foo445() + foo446() + foo447() + foo448() + foo449() + foo450() + foo451() + foo452() + foo453() + foo454() + foo455() + foo456() + foo457() + foo458() + foo459() + foo460() + foo461() + foo462() + foo463() + foo464() + foo465() + foo466() + foo467() + foo468() + foo469() + foo470() + foo471() + foo472() + foo473() + foo474() + foo475() + foo476() + foo477() + foo478() + foo479() + foo480() + foo481() + foo482() + foo483() + foo484() + foo485() + foo486() + foo487() + foo488() + foo489() + foo490() + foo491() + foo492() + foo493() + foo494() + foo495() + foo496() + foo497() + foo498() + foo499() + foo500() + foo501() + foo502() + foo503() + foo504() + foo505() + foo506() + foo507() + foo508() + foo509() + foo510() + foo511() + } + func $(i I) { + i.foo511() + } + `, + pos: []string{"TESTB"}, + }, + { + fn: ` + func $(i I) { + i.foo001() + } + `, + neg: []string{"TESTB"}, }, } var linux386Tests = []*asmTest{ { - ` + fn: ` func f0(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } `, - []string{"\tMOVL\t\\(.*\\),"}, + pos: []string{"\tMOVL\t\\(.*\\),"}, }, { - ` + fn: ` func f1(b []byte, i int) uint32 { return binary.LittleEndian.Uint32(b[i:]) } `, - []string{"\tMOVL\t\\(.*\\)\\(.*\\*1\\),"}, + pos: []string{"\tMOVL\t\\(.*\\)\\(.*\\*1\\),"}, + }, + + // multiplication by powers of two + { + fn: ` + func $(n int) int { + return 32*n + } + `, + pos: []string{"SHLL"}, + neg: []string{"IMULL"}, + }, + { + fn: ` + func $(n int) int { + return 
-64*n + } + `, + pos: []string{"SHLL"}, + neg: []string{"IMULL"}, + }, + + // multiplication merging tests + { + fn: ` + func $(n int) int { + return 9*n + 14*n + }`, + pos: []string{"\tIMULL\t[$]23"}, // 23*n + }, + { + fn: ` + func $(a, n int) int { + return 19*a + a*n + }`, + pos: []string{"\tADDL\t[$]19", "\tIMULL"}, // (n+19)*a + }, + { + // check that stack store is optimized away + fn: ` + func $() int { + var x int + return *(&x) + } + `, + pos: []string{"TEXT\t.*, [$]0-4"}, + }, + { + fn: ` + func mul3(n int) int { + return 23*n - 9*n + }`, + pos: []string{"\tIMULL\t[$]14"}, // 14*n + }, + { + fn: ` + func mul4(a, n int) int { + return n*a - a*19 + }`, + pos: []string{"\tADDL\t[$]-19", "\tIMULL"}, // (n-19)*a + }, + // Check that len() and cap() div by a constant power of two + // are compiled into SHRL. + { + fn: ` + func $(a []int) int { + return len(a) / 1024 + } + `, + pos: []string{"\tSHRL\t\\$10,"}, + }, + { + fn: ` + func $(s string) int { + return len(s) / (4097 >> 1) + } + `, + pos: []string{"\tSHRL\t\\$11,"}, + }, + { + fn: ` + func $(a []int) int { + return cap(a) / ((1 << 11) + 2048) + } + `, + pos: []string{"\tSHRL\t\\$12,"}, + }, + // Check that len() and cap() mod by a constant power of two + // are compiled into ANDL. 
+ { + fn: ` + func $(a []int) int { + return len(a) % 1024 + } + `, + pos: []string{"\tANDL\t\\$1023,"}, + }, + { + fn: ` + func $(s string) int { + return len(s) % (4097 >> 1) + } + `, + pos: []string{"\tANDL\t\\$2047,"}, + }, + { + fn: ` + func $(a []int) int { + return cap(a) % ((1 << 11) + 2048) + } + `, + pos: []string{"\tANDL\t\\$4095,"}, + }, + { + // Test that small memmove was replaced with direct movs + fn: ` + func $() { + x := [...]byte{1, 2, 3, 4, 5, 6, 7} + copy(x[1:], x[:]) + } + `, + neg: []string{"memmove"}, + }, + { + // Same as above but with different size + fn: ` + func $() { + x := [...]byte{1, 2, 3, 4} + copy(x[1:], x[:]) + } + `, + neg: []string{"memmove"}, }, } var linuxS390XTests = []*asmTest{ { - ` + fn: ` func f0(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } `, - []string{"\tMOVWBR\t\\(.*\\),"}, + pos: []string{"\tMOVWBR\t\\(.*\\),"}, }, { - ` + fn: ` func f1(b []byte, i int) uint32 { return binary.LittleEndian.Uint32(b[i:]) } `, - []string{"\tMOVWBR\t\\(.*\\)\\(.*\\*1\\),"}, + pos: []string{"\tMOVWBR\t\\(.*\\)\\(.*\\*1\\),"}, }, { - ` + fn: ` func f2(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } `, - []string{"\tMOVDBR\t\\(.*\\),"}, + pos: []string{"\tMOVDBR\t\\(.*\\),"}, }, { - ` + fn: ` func f3(b []byte, i int) uint64 { return binary.LittleEndian.Uint64(b[i:]) } `, - []string{"\tMOVDBR\t\\(.*\\)\\(.*\\*1\\),"}, + pos: []string{"\tMOVDBR\t\\(.*\\)\\(.*\\*1\\),"}, }, { - ` + fn: ` func f4(b []byte) uint32 { return binary.BigEndian.Uint32(b) } `, - []string{"\tMOVWZ\t\\(.*\\),"}, + pos: []string{"\tMOVWZ\t\\(.*\\),"}, }, { - ` + fn: ` func f5(b []byte, i int) uint32 { return binary.BigEndian.Uint32(b[i:]) } `, - []string{"\tMOVWZ\t\\(.*\\)\\(.*\\*1\\),"}, + pos: []string{"\tMOVWZ\t\\(.*\\)\\(.*\\*1\\),"}, }, { - ` + fn: ` func f6(b []byte) uint64 { return binary.BigEndian.Uint64(b) } `, - []string{"\tMOVD\t\\(.*\\),"}, + pos: []string{"\tMOVD\t\\(.*\\),"}, }, { - ` + fn: ` func f7(b []byte, i int) uint64 { return 
binary.BigEndian.Uint64(b[i:]) } `, - []string{"\tMOVD\t\\(.*\\)\\(.*\\*1\\),"}, + pos: []string{"\tMOVD\t\\(.*\\)\\(.*\\*1\\),"}, }, { - ` + fn: ` func f8(x uint64) uint64 { return x<<7 + x>>57 } `, - []string{"\tRLLG\t[$]7,"}, + pos: []string{"\tRLLG\t[$]7,"}, }, { - ` + fn: ` func f9(x uint64) uint64 { return x<<7 | x>>57 } `, - []string{"\tRLLG\t[$]7,"}, + pos: []string{"\tRLLG\t[$]7,"}, }, { - ` + fn: ` func f10(x uint64) uint64 { return x<<7 ^ x>>57 } `, - []string{"\tRLLG\t[$]7,"}, + pos: []string{"\tRLLG\t[$]7,"}, }, { - ` + fn: ` func f11(x uint32) uint32 { return x<<7 + x>>25 } `, - []string{"\tRLL\t[$]7,"}, + pos: []string{"\tRLL\t[$]7,"}, }, { - ` + fn: ` func f12(x uint32) uint32 { return x<<7 | x>>25 } `, - []string{"\tRLL\t[$]7,"}, + pos: []string{"\tRLL\t[$]7,"}, }, { - ` + fn: ` func f13(x uint32) uint32 { return x<<7 ^ x>>25 } `, - []string{"\tRLL\t[$]7,"}, + pos: []string{"\tRLL\t[$]7,"}, }, // Fused multiply-add/sub instructions. { - ` + fn: ` func f14(x, y, z float64) float64 { return x * y + z } `, - []string{"\tFMADD\t"}, + pos: []string{"\tFMADD\t"}, }, { - ` + fn: ` func f15(x, y, z float64) float64 { return x * y - z } `, - []string{"\tFMSUB\t"}, + pos: []string{"\tFMSUB\t"}, }, { - ` + fn: ` func f16(x, y, z float32) float32 { return x * y + z } `, - []string{"\tFMADDS\t"}, + pos: []string{"\tFMADDS\t"}, }, { - ` + fn: ` func f17(x, y, z float32) float32 { return x * y - z } `, - []string{"\tFMSUBS\t"}, + pos: []string{"\tFMSUBS\t"}, }, // Intrinsic tests for math/bits { - ` + fn: ` func f18(a uint64) int { return bits.TrailingZeros64(a) } `, - []string{"\tFLOGR\t"}, + pos: []string{"\tFLOGR\t"}, }, { - ` + fn: ` func f19(a uint32) int { return bits.TrailingZeros32(a) } `, - []string{"\tFLOGR\t", "\tMOVWZ\t"}, + pos: []string{"\tFLOGR\t", "\tMOVWZ\t"}, }, { - ` + fn: ` func f20(a uint16) int { return bits.TrailingZeros16(a) } `, - []string{"\tFLOGR\t", "\tOR\t\\$65536,"}, + pos: []string{"\tFLOGR\t", "\tOR\t\\$65536,"}, }, { - ` + fn: ` 
func f21(a uint8) int { return bits.TrailingZeros8(a) } `, - []string{"\tFLOGR\t", "\tOR\t\\$256,"}, + pos: []string{"\tFLOGR\t", "\tOR\t\\$256,"}, }, // Intrinsic tests for math/bits { - ` + fn: ` func f22(a uint64) uint64 { return bits.ReverseBytes64(a) } `, - []string{"\tMOVDBR\t"}, + pos: []string{"\tMOVDBR\t"}, }, { - ` + fn: ` func f23(a uint32) uint32 { return bits.ReverseBytes32(a) } `, - []string{"\tMOVWBR\t"}, + pos: []string{"\tMOVWBR\t"}, }, { - ` + fn: ` func f24(a uint64) int { return bits.Len64(a) } `, - []string{"\tFLOGR\t"}, + pos: []string{"\tFLOGR\t"}, }, { - ` + fn: ` func f25(a uint32) int { return bits.Len32(a) } `, - []string{"\tFLOGR\t"}, + pos: []string{"\tFLOGR\t"}, }, { - ` + fn: ` func f26(a uint16) int { return bits.Len16(a) } `, - []string{"\tFLOGR\t"}, + pos: []string{"\tFLOGR\t"}, }, { - ` + fn: ` func f27(a uint8) int { return bits.Len8(a) } `, - []string{"\tFLOGR\t"}, + pos: []string{"\tFLOGR\t"}, }, { - ` + fn: ` func f28(a uint) int { return bits.Len(a) } `, - []string{"\tFLOGR\t"}, + pos: []string{"\tFLOGR\t"}, }, { - ` + fn: ` func f29(a uint64) int { return bits.LeadingZeros64(a) } `, - []string{"\tFLOGR\t"}, + pos: []string{"\tFLOGR\t"}, }, { - ` + fn: ` func f30(a uint32) int { return bits.LeadingZeros32(a) } `, - []string{"\tFLOGR\t"}, + pos: []string{"\tFLOGR\t"}, }, { - ` + fn: ` func f31(a uint16) int { return bits.LeadingZeros16(a) } `, - []string{"\tFLOGR\t"}, + pos: []string{"\tFLOGR\t"}, }, { - ` + fn: ` func f32(a uint8) int { return bits.LeadingZeros8(a) } `, - []string{"\tFLOGR\t"}, + pos: []string{"\tFLOGR\t"}, }, { - ` + fn: ` func f33(a uint) int { return bits.LeadingZeros(a) } `, - []string{"\tFLOGR\t"}, + pos: []string{"\tFLOGR\t"}, + }, + // Intrinsic tests for math. 
+ { + fn: ` + func ceil(x float64) float64 { + return math.Ceil(x) + } + `, + pos: []string{"\tFIDBR\t[$]6"}, + }, + { + fn: ` + func floor(x float64) float64 { + return math.Floor(x) + } + `, + pos: []string{"\tFIDBR\t[$]7"}, + }, + { + fn: ` + func round(x float64) float64 { + return math.Round(x) + } + `, + pos: []string{"\tFIDBR\t[$]1"}, + }, + { + fn: ` + func trunc(x float64) float64 { + return math.Trunc(x) + } + `, + pos: []string{"\tFIDBR\t[$]5"}, + }, + { + fn: ` + func roundToEven(x float64) float64 { + return math.RoundToEven(x) + } + `, + pos: []string{"\tFIDBR\t[$]4"}, + }, + { + // check that stack store is optimized away + fn: ` + func $() int { + var x int + return *(&x) + } + `, + pos: []string{"TEXT\t.*, [$]0-8"}, + }, + // Constant propagation through raw bits conversions. + { + // uint32 constant converted to float32 constant + fn: ` + func $(x float32) float32 { + if x > math.Float32frombits(0x3f800000) { + return -x + } + return x + } + `, + pos: []string{"\tFMOVS\t[$]f32.3f800000\\(SB\\)"}, + }, + { + // float32 constant converted to uint32 constant + fn: ` + func $(x uint32) uint32 { + if x > math.Float32bits(1) { + return -x + } + return x + } + `, + neg: []string{"\tFMOVS\t"}, + }, + // Constant propagation through float comparisons. 
+ { + fn: ` + func $() bool { + return 0.5 == float64(uint32(1)) || + 1.5 > float64(uint64(1<<63)) || + math.NaN() == math.NaN() + } + `, + pos: []string{"\tMOV(B|BZ|D)\t[$]0,"}, + neg: []string{"\tFCMPU\t", "\tMOV(B|BZ|D)\t[$]1,"}, + }, + { + fn: ` + func $() bool { + return float32(0.5) <= float32(int64(1)) && + float32(1.5) >= float32(int32(-1<<31)) && + float32(math.NaN()) != float32(math.NaN()) + } + `, + pos: []string{"\tMOV(B|BZ|D)\t[$]1,"}, + neg: []string{"\tCEBR\t", "\tMOV(B|BZ|D)\t[$]0,"}, + }, + // math tests + { + fn: ` + func $(x float64) float64 { + return math.Abs(x) + } + `, + pos: []string{"\tLPDFR\t"}, + neg: []string{"\tMOVD\t"}, // no integer loads/stores + }, + { + fn: ` + func $(x float32) float32 { + return float32(math.Abs(float64(x))) + } + `, + pos: []string{"\tLPDFR\t"}, + neg: []string{"\tLDEBR\t", "\tLEDBR\t"}, // no float64 conversion + }, + { + fn: ` + func $(x float64) float64 { + return math.Float64frombits(math.Float64bits(x)|1<<63) + } + `, + pos: []string{"\tLNDFR\t"}, + neg: []string{"\tMOVD\t"}, // no integer loads/stores + }, + { + fn: ` + func $(x float64) float64 { + return -math.Abs(x) + } + `, + pos: []string{"\tLNDFR\t"}, + neg: []string{"\tMOVD\t"}, // no integer loads/stores + }, + { + fn: ` + func $(x, y float64) float64 { + return math.Copysign(x, y) + } + `, + pos: []string{"\tCPSDR\t"}, + neg: []string{"\tMOVD\t"}, // no integer loads/stores + }, + { + fn: ` + func $(x float64) float64 { + return math.Copysign(x, -1) + } + `, + pos: []string{"\tLNDFR\t"}, + neg: []string{"\tMOVD\t"}, // no integer loads/stores + }, + { + fn: ` + func $(x float64) float64 { + return math.Copysign(-1, x) + } + `, + pos: []string{"\tCPSDR\t"}, + neg: []string{"\tMOVD\t"}, // no integer loads/stores }, } var linuxARMTests = []*asmTest{ + // multiplication by powers of two { - ` + fn: ` + func $(n int) int { + return 16*n + } + `, + pos: []string{"\tSLL\t[$]4"}, + neg: []string{"\tMUL\t"}, + }, + { + fn: ` + func $(n int) int { + return 
-32*n + } + `, + pos: []string{"\tSLL\t[$]5"}, + neg: []string{"\tMUL\t"}, + }, + + { + fn: ` func f0(x uint32) uint32 { return x<<7 + x>>25 } `, - []string{"\tMOVW\tR[0-9]+@>25,"}, + pos: []string{"\tMOVW\tR[0-9]+@>25,"}, }, { - ` + fn: ` func f1(x uint32) uint32 { return x<<7 | x>>25 } `, - []string{"\tMOVW\tR[0-9]+@>25,"}, + pos: []string{"\tMOVW\tR[0-9]+@>25,"}, }, { - ` + fn: ` func f2(x uint32) uint32 { return x<<7 ^ x>>25 } `, - []string{"\tMOVW\tR[0-9]+@>25,"}, + pos: []string{"\tMOVW\tR[0-9]+@>25,"}, }, { - ` + fn: ` func f3(a uint64) int { return bits.Len64(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f4(a uint32) int { return bits.Len32(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f5(a uint16) int { return bits.Len16(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f6(a uint8) int { return bits.Len8(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f7(a uint) int { return bits.Len(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f8(a uint64) int { return bits.LeadingZeros64(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f9(a uint32) int { return bits.LeadingZeros32(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f10(a uint16) int { return bits.LeadingZeros16(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f11(a uint8) int { return bits.LeadingZeros8(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f12(a uint) int { return bits.LeadingZeros(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { // make sure assembly output has matching offset and base register. 
- ` + fn: ` func f13(a, b int) int { - var x [16]byte // use some frame - _ = x + runtime.GC() // use some frame return b } `, - []string{"b\\+4\\(FP\\)"}, + pos: []string{"b\\+4\\(FP\\)"}, + }, + { + // check that stack store is optimized away + fn: ` + func $() int { + var x int + return *(&x) + } + `, + pos: []string{"TEXT\t.*, [$]-4-4"}, }, } var linuxARM64Tests = []*asmTest{ + // multiplication by powers of two { - ` + fn: ` + func $(n int) int { + return 64*n + } + `, + pos: []string{"\tLSL\t[$]6"}, + neg: []string{"\tMUL\t"}, + }, + { + fn: ` + func $(n int) int { + return -128*n + } + `, + pos: []string{"\tLSL\t[$]7"}, + neg: []string{"\tMUL\t"}, + }, + + { + fn: ` func f0(x uint64) uint64 { return x<<7 + x>>57 } `, - []string{"\tROR\t[$]57,"}, + pos: []string{"\tROR\t[$]57,"}, }, { - ` + fn: ` func f1(x uint64) uint64 { return x<<7 | x>>57 } `, - []string{"\tROR\t[$]57,"}, + pos: []string{"\tROR\t[$]57,"}, }, { - ` + fn: ` func f2(x uint64) uint64 { return x<<7 ^ x>>57 } `, - []string{"\tROR\t[$]57,"}, + pos: []string{"\tROR\t[$]57,"}, }, { - ` + fn: ` func f3(x uint32) uint32 { return x<<7 + x>>25 } `, - []string{"\tRORW\t[$]25,"}, + pos: []string{"\tRORW\t[$]25,"}, }, { - ` + fn: ` func f4(x uint32) uint32 { return x<<7 | x>>25 } `, - []string{"\tRORW\t[$]25,"}, + pos: []string{"\tRORW\t[$]25,"}, }, { - ` + fn: ` func f5(x uint32) uint32 { return x<<7 ^ x>>25 } `, - []string{"\tRORW\t[$]25,"}, + pos: []string{"\tRORW\t[$]25,"}, }, { - ` + fn: ` func f22(a uint64) uint64 { return bits.ReverseBytes64(a) } `, - []string{"\tREV\t"}, + pos: []string{"\tREV\t"}, }, { - ` + fn: ` func f23(a uint32) uint32 { return bits.ReverseBytes32(a) } `, - []string{"\tREVW\t"}, + pos: []string{"\tREVW\t"}, }, { - ` + fn: ` func f24(a uint64) int { return bits.Len64(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f25(a uint32) int { return bits.Len32(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f26(a 
uint16) int { return bits.Len16(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f27(a uint8) int { return bits.Len8(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f28(a uint) int { return bits.Len(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f29(a uint64) int { return bits.LeadingZeros64(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f30(a uint32) int { return bits.LeadingZeros32(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f31(a uint16) int { return bits.LeadingZeros16(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f32(a uint8) int { return bits.LeadingZeros8(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f33(a uint) int { return bits.LeadingZeros(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f34(a uint64) uint64 { return a & ((1<<63)-1) } `, - []string{"\tAND\t"}, + pos: []string{"\tAND\t"}, }, { - ` + fn: ` func f35(a uint64) uint64 { return a & (1<<63) } `, - []string{"\tAND\t"}, + pos: []string{"\tAND\t"}, }, { // make sure offsets are folded into load and store. 
- ` + fn: ` func f36(_, a [20]byte) (b [20]byte) { b = a return } `, - []string{"\tMOVD\t\"\"\\.a\\+[0-9]+\\(FP\\), R[0-9]+", "\tMOVD\tR[0-9]+, \"\"\\.b\\+[0-9]+\\(FP\\)"}, + pos: []string{"\tMOVD\t\"\"\\.a\\+[0-9]+\\(FP\\), R[0-9]+", "\tMOVD\tR[0-9]+, \"\"\\.b\\+[0-9]+\\(FP\\)"}, + }, + { + // check that stack store is optimized away + fn: ` + func $() int { + var x int + return *(&x) + } + `, + pos: []string{"TEXT\t.*, [$]-8-8"}, + }, + { + // check that we don't emit comparisons for constant shift + fn: ` +//go:nosplit + func $(x int) int { + return x << 17 + } + `, + pos: []string{"LSL\t\\$17"}, + neg: []string{"CMP"}, }, } var linuxMIPSTests = []*asmTest{ { - ` + fn: ` func f0(a uint64) int { return bits.Len64(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f1(a uint32) int { return bits.Len32(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f2(a uint16) int { return bits.Len16(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f3(a uint8) int { return bits.Len8(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f4(a uint) int { return bits.Len(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f5(a uint64) int { return bits.LeadingZeros64(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f6(a uint32) int { return bits.LeadingZeros32(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f7(a uint16) int { return bits.LeadingZeros16(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f8(a uint8) int { return bits.LeadingZeros8(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, }, { - ` + fn: ` func f9(a uint) int { return bits.LeadingZeros(a) } `, - []string{"\tCLZ\t"}, + pos: []string{"\tCLZ\t"}, + }, + { + // check that stack store is optimized away + fn: ` + func $() int { + var x int + return *(&x) + } + `, + 
pos: []string{"TEXT\t.*, [$]-4-4"}, + }, +} + +var linuxMIPS64Tests = []*asmTest{ + { + // check that we don't emit comparisons for constant shift + fn: ` + func $(x int) int { + return x << 17 + } + `, + pos: []string{"SLLV\t\\$17"}, + neg: []string{"SGT"}, }, } var linuxPPC64LETests = []*asmTest{ // Fused multiply-add/sub instructions. { - ` + fn: ` func f0(x, y, z float64) float64 { return x * y + z } `, - []string{"\tFMADD\t"}, + pos: []string{"\tFMADD\t"}, }, { - ` + fn: ` func f1(x, y, z float64) float64 { return x * y - z } `, - []string{"\tFMSUB\t"}, + pos: []string{"\tFMSUB\t"}, }, { - ` + fn: ` func f2(x, y, z float32) float32 { return x * y + z } `, - []string{"\tFMADDS\t"}, + pos: []string{"\tFMADDS\t"}, }, { - ` + fn: ` func f3(x, y, z float32) float32 { return x * y - z } `, - []string{"\tFMSUBS\t"}, + pos: []string{"\tFMSUBS\t"}, }, { - ` + fn: ` func f4(x uint32) uint32 { return x<<7 | x>>25 } `, - []string{"\tROTLW\t"}, + pos: []string{"\tROTLW\t"}, }, { - ` + fn: ` func f5(x uint32) uint32 { return x<<7 + x>>25 } `, - []string{"\tROTLW\t"}, + pos: []string{"\tROTLW\t"}, }, { - ` + fn: ` func f6(x uint32) uint32 { return x<<7 ^ x>>25 } `, - []string{"\tROTLW\t"}, + pos: []string{"\tROTLW\t"}, }, { - ` + fn: ` func f7(x uint64) uint64 { return x<<7 | x>>57 } `, - []string{"\tROTL\t"}, + pos: []string{"\tROTL\t"}, }, { - ` + fn: ` func f8(x uint64) uint64 { return x<<7 + x>>57 } `, - []string{"\tROTL\t"}, + pos: []string{"\tROTL\t"}, }, { - ` + fn: ` func f9(x uint64) uint64 { return x<<7 ^ x>>57 } `, - []string{"\tROTL\t"}, + pos: []string{"\tROTL\t"}, + }, + { + fn: ` + func f10(a uint32) uint32 { + return bits.RotateLeft32(a, 9) + } + `, + pos: []string{"\tROTLW\t"}, + }, + { + fn: ` + func f11(a uint64) uint64 { + return bits.RotateLeft64(a, 37) + } + `, + pos: []string{"\tROTL\t"}, + }, + + { + fn: ` + func f12(a, b float64) float64 { + return math.Copysign(a, b) + } + `, + pos: []string{"\tFCPSGN\t"}, + }, + + { + fn: ` + func f13(a float64) 
float64 { + return math.Abs(a) + } + `, + pos: []string{"\tFABS\t"}, + }, + + { + fn: ` + func f14(b []byte) uint16 { + return binary.LittleEndian.Uint16(b) + } + `, + pos: []string{"\tMOVHZ\t"}, + }, + { + fn: ` + func f15(b []byte) uint32 { + return binary.LittleEndian.Uint32(b) + } + `, + pos: []string{"\tMOVWZ\t"}, + }, + + { + fn: ` + func f16(b []byte) uint64 { + return binary.LittleEndian.Uint64(b) + } + `, + pos: []string{"\tMOVD\t"}, + neg: []string{"MOVBZ", "MOVHZ", "MOVWZ"}, + }, + + { + fn: ` + func f17(b []byte, v uint16) { + binary.LittleEndian.PutUint16(b, v) + } + `, + pos: []string{"\tMOVH\t"}, + }, + + { + fn: ` + func f18(b []byte, v uint32) { + binary.LittleEndian.PutUint32(b, v) + } + `, + pos: []string{"\tMOVW\t"}, + }, + + { + fn: ` + func f19(b []byte, v uint64) { + binary.LittleEndian.PutUint64(b, v) + } + `, + pos: []string{"\tMOVD\t"}, + neg: []string{"MOVB", "MOVH", "MOVW"}, + }, + + { + // check that stack store is optimized away + fn: ` + func $() int { + var x int + return *(&x) + } + `, + pos: []string{"TEXT\t.*, [$]0-8"}, + }, + // Constant propagation through raw bits conversions. + { + // uint32 constant converted to float32 constant + fn: ` + func $(x float32) float32 { + if x > math.Float32frombits(0x3f800000) { + return -x + } + return x + } + `, + pos: []string{"\tFMOVS\t[$]f32.3f800000\\(SB\\)"}, + }, + { + // float32 constant converted to uint32 constant + fn: ` + func $(x uint32) uint32 { + if x > math.Float32bits(1) { + return -x + } + return x + } + `, + neg: []string{"\tFMOVS\t"}, + }, +} + +var plan9AMD64Tests = []*asmTest{ + // We should make sure that the compiler doesn't generate floating point + // instructions for non-float operations on Plan 9, because floating point + // operations are not allowed in the note handler. + // Array zeroing. + { + fn: ` + func $() [16]byte { + var a [16]byte + return a + } + `, + pos: []string{"\tMOVQ\t\\$0, \"\""}, + }, + // Array copy. 
+ { + fn: ` + func $(a [16]byte) (b [16]byte) { + b = a + return + } + `, + pos: []string{"\tMOVQ\t\"\"\\.a\\+[0-9]+\\(SP\\), (AX|CX)", "\tMOVQ\t(AX|CX), \"\"\\.b\\+[0-9]+\\(SP\\)"}, }, } diff --git a/src/cmd/compile/internal/gc/bexport.go b/src/cmd/compile/internal/gc/bexport.go index 3ac81367048..52ee4defc2a 100644 --- a/src/cmd/compile/internal/gc/bexport.go +++ b/src/cmd/compile/internal/gc/bexport.go @@ -174,6 +174,8 @@ type exporter struct { typIndex map[*types.Type]int funcList []*Func + marked map[*types.Type]bool // types already seen by markType + // position encoding posInfoFormat bool prevFile string @@ -230,6 +232,23 @@ func export(out *bufio.Writer, trace bool) int { p.tracef("\n") } + // Mark all inlineable functions that the importer could call. + // This is done by tracking down all inlineable methods + // reachable from exported types. + p.marked = make(map[*types.Type]bool) + for _, n := range exportlist { + sym := n.Sym + if sym.Exported() { + // Closures are added to exportlist, but with Exported + // already set. The export code below skips over them, so + // we have to here as well. + // TODO(mdempsky): Investigate why. This seems suspicious. + continue + } + p.markType(asNode(sym.Def).Type) + } + p.marked = nil + // export objects // // First, export all exported (package-level) objects; i.e., all objects @@ -377,6 +396,7 @@ func export(out *bufio.Writer, trace bool) int { p.tracef("\n----\nfunc { %#v }\n", f.Inl) } p.int(i) + p.int(int(f.InlCost)) p.stmtList(f.Inl) if p.trace { p.tracef("\n") @@ -435,6 +455,72 @@ func unidealType(typ *types.Type, val Val) *types.Type { return typ } +// markType recursively visits types reachable from t to identify +// functions whose inline bodies may be needed. +func (p *exporter) markType(t *types.Type) { + if p.marked[t] { + return + } + p.marked[t] = true + + // If this is a named type, mark all of its associated + // methods. 
Skip interface types because t.Methods contains + // only their unexpanded method set (i.e., exclusive of + // interface embeddings), and the switch statement below + // handles their full method set. + if t.Sym != nil && t.Etype != TINTER { + for _, m := range t.Methods().Slice() { + if exportname(m.Sym.Name) { + p.markType(m.Type) + } + } + } + + // Recursively mark any types that can be produced given a + // value of type t: dereferencing a pointer; indexing an + // array, slice, or map; receiving from a channel; accessing a + // struct field or interface method; or calling a function. + // + // Notably, we don't mark map key or function parameter types, + // because the user already needs some way to construct values + // of those types. + // + // It's not critical for correctness that this algorithm is + // perfect. Worst case, we might miss opportunities to inline + // some function calls in downstream packages. + switch t.Etype { + case TPTR32, TPTR64, TARRAY, TSLICE, TCHAN: + p.markType(t.Elem()) + + case TMAP: + p.markType(t.Val()) + + case TSTRUCT: + for _, f := range t.FieldSlice() { + if exportname(f.Sym.Name) || f.Embedded != 0 { + p.markType(f.Type) + } + } + + case TFUNC: + // If t is the type of a function or method, then + // t.Nname() is its ONAME. Mark its inline body and + // any recursively called functions for export. 
+ inlFlood(asNode(t.Nname())) + + for _, f := range t.Results().FieldSlice() { + p.markType(f.Type) + } + + case TINTER: + for _, f := range t.FieldSlice() { + if exportname(f.Sym.Name) { + p.markType(f.Type) + } + } + } +} + func (p *exporter) obj(sym *types.Sym) { // Exported objects may be from different packages because they // may be re-exported via an exported alias or as dependencies in @@ -504,7 +590,7 @@ func (p *exporter) obj(sym *types.Sym) { p.paramList(sig.Results(), inlineable) var f *Func - if inlineable { + if inlineable && asNode(sym.Def).Func.ExportInline() { f = asNode(sym.Def).Func // TODO(gri) re-examine reexportdeplist: // Because we can trivially export types @@ -590,10 +676,28 @@ func fileLine(n *Node) (file string, line int) { } func isInlineable(n *Node) bool { - if exportInlined && n != nil && n.Func != nil && n.Func.Inl.Len() != 0 { - // when lazily typechecking inlined bodies, some re-exported ones may not have been typechecked yet. - // currently that can leave unresolved ONONAMEs in import-dot-ed packages in the wrong package - if Debug['l'] < 2 { + if exportInlined && n != nil && n.Func != nil { + // When lazily typechecking inlined bodies, some + // re-exported ones may not have been typechecked yet. + // Currently that can leave unresolved ONONAMEs in + // import-dot-ed packages in the wrong package. + // + // TODO(mdempsky): Having the ExportInline check here + // instead of the outer if statement means we end up + // exporting parameter names even for functions whose + // inline body won't be exported by this package. This + // is currently necessary because we might first + // import a function/method from a package where it + // doesn't need to be re-exported, and then from a + // package where it does. If this happens, we'll need + // the parameter names. + // + // We could initially do without the parameter names, + // and then fill them in when importing the inline + // body. 
But parameter names are attached to the + // function type, and modifying types after the fact + // is a little sketchy. + if Debug_typecheckinl == 0 && n.Func.ExportInline() { typecheckinl(n) } return true @@ -601,8 +705,6 @@ func isInlineable(n *Node) bool { return false } -var errorInterface *types.Type // lazily initialized - func (p *exporter) typ(t *types.Type) { if t == nil { Fatalf("exporter: nil type") @@ -654,19 +756,7 @@ func (p *exporter) typ(t *types.Type) { p.qualifiedName(tsym) // write underlying type - orig := t.Orig - if orig == types.Errortype { - // The error type is the only predeclared type which has - // a composite underlying type. When we encode that type, - // make sure to encode the underlying interface rather than - // the named type again. See also the comment in universe.go - // regarding the errortype and issue #15920. - if errorInterface == nil { - errorInterface = makeErrorInterface() - } - orig = errorInterface - } - p.typ(orig) + p.typ(t.Orig) // interfaces don't have associated methods if t.Orig.IsInterface() { @@ -677,9 +767,7 @@ func (p *exporter) typ(t *types.Type) { // TODO(gri) Determine if they are already sorted // in which case we can drop this step. var methods []*types.Field - for _, m := range t.Methods().Slice() { - methods = append(methods, m) - } + methods = append(methods, t.Methods().Slice()...) sort.Sort(methodbyname(methods)) p.int(len(methods)) @@ -708,7 +796,7 @@ func (p *exporter) typ(t *types.Type) { p.bool(m.Nointerface()) // record go:nointerface pragma value (see also #16243) var f *Func - if inlineable { + if inlineable && mfn.Func.ExportInline() { f = mfn.Func reexportdeplist(mfn.Func.Inl) } @@ -968,18 +1056,17 @@ func parName(f *types.Field, numbered bool) string { // Take the name from the original, lest we substituted it with ~r%d or ~b%d. // ~r%d is a (formerly) unnamed result. 
if asNode(f.Nname) != nil { - if asNode(f.Nname).Orig != nil { - s = asNode(f.Nname).Orig.Sym - if s != nil && s.Name[0] == '~' { - if s.Name[1] == 'r' { // originally an unnamed result - return "" // s = nil - } else if s.Name[1] == 'b' { // originally the blank identifier _ - return "_" // belongs to localpkg - } - } - } else { + if asNode(f.Nname).Orig == nil { return "" // s = nil } + s = asNode(f.Nname).Orig.Sym + if s != nil && s.Name[0] == '~' { + if s.Name[1] == 'r' { // originally an unnamed result + return "" // s = nil + } else if s.Name[1] == 'b' { // originally the blank identifier _ + return "_" // belongs to localpkg + } + } } if s == nil { @@ -1204,26 +1291,10 @@ func (p *exporter) expr(n *Node) { p.value(n.Val()) case ONAME: - // Special case: name used as local variable in export. - // _ becomes ~b%d internally; print as _ for export - if n.Sym != nil && n.Sym.Name[0] == '~' && n.Sym.Name[1] == 'b' { - p.op(ONAME) - p.pos(n) - p.string("_") // inlined and customized version of p.sym(n) - break - } - - if n.Sym != nil && !isblank(n) && n.Name.Vargen > 0 { - p.op(ONAME) - p.pos(n) - p.sym(n) - break - } - // Special case: explicit name of func (*T) method(...) is turned into pkg.(*T).method, // but for export, this should be rendered as (*pkg.T).meth. // These nodes have the special property that they are names with a left OTYPE and a right ONAME. 
- if n.Left != nil && n.Left.Op == OTYPE && n.Right != nil && n.Right.Op == ONAME { + if n.isMethodExpression() { p.op(OXDOT) p.pos(n) p.expr(n.Left) // n.Left.Op == OTYPE @@ -1241,11 +1312,7 @@ func (p *exporter) expr(n *Node) { case OTYPE: p.op(OTYPE) p.pos(n) - if p.bool(n.Type == nil) { - p.sym(n) - } else { - p.typ(n.Type) - } + p.typ(n.Type) // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC: // should have been resolved by typechecking - handled by default case @@ -1345,7 +1412,7 @@ func (p *exporter) expr(n *Node) { if op == OAPPEND { p.bool(n.Isddd()) } else if n.Isddd() { - Fatalf("exporter: unexpected '...' with %s call", opnames[op]) + Fatalf("exporter: unexpected '...' with %v call", op) } case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG: @@ -1520,8 +1587,8 @@ func (p *exporter) stmt(n *Node) { p.stmtList(n.List) p.stmtList(n.Nbody) - case OFALL, OXFALL: - p.op(OXFALL) + case OFALL: + p.op(OFALL) p.pos(n) case OBREAK, OCONTINUE: diff --git a/src/cmd/compile/internal/gc/bimport.go b/src/cmd/compile/internal/gc/bimport.go index 29629620898..71d20ec37bd 100644 --- a/src/cmd/compile/internal/gc/bimport.go +++ b/src/cmd/compile/internal/gc/bimport.go @@ -187,7 +187,8 @@ func Import(imp *types.Pkg, in *bufio.Reader) { // them only for functions with inlineable bodies. funchdr does // parameter renaming which doesn't matter if we don't have a body. 
- if f := p.funcList[i]; f != nil { + inlCost := p.int() + if f := p.funcList[i]; f != nil && f.Func.Inl.Len() == 0 { // function not yet imported - read body and set it funchdr(f) body := p.stmtList() @@ -200,7 +201,15 @@ func Import(imp *types.Pkg, in *bufio.Reader) { body = []*Node{nod(OEMPTY, nil, nil)} } f.Func.Inl.Set(body) - funcbody(f) + f.Func.InlCost = int32(inlCost) + if Debug['E'] > 0 && Debug['m'] > 2 && f.Func.Inl.Len() != 0 { + if Debug['m'] > 3 { + fmt.Printf("inl body for %v: %+v\n", f, f.Func.Inl) + } else { + fmt.Printf("inl body for %v: %v\n", f, f.Func.Inl) + } + } + funcbody() } else { // function already imported - read body but discard declarations dclcontext = PDISCARD // throw away any declarations @@ -326,55 +335,59 @@ func idealType(typ *types.Type) *types.Type { func (p *importer) obj(tag int) { switch tag { case constTag: - p.pos() + pos := p.pos() sym := p.qualifiedName() typ := p.typ() val := p.value(typ) - importconst(p.imp, sym, idealType(typ), nodlit(val)) + importconst(p.imp, sym, idealType(typ), npos(pos, nodlit(val))) case aliasTag: - p.pos() + pos := p.pos() sym := p.qualifiedName() typ := p.typ() - importalias(p.imp, sym, typ) + importalias(pos, p.imp, sym, typ) case typeTag: p.typ() case varTag: - p.pos() + pos := p.pos() sym := p.qualifiedName() typ := p.typ() - importvar(p.imp, sym, typ) + importvar(pos, p.imp, sym, typ) case funcTag: - p.pos() + pos := p.pos() sym := p.qualifiedName() params := p.paramList() result := p.paramList() sig := functypefield(nil, params, result) importsym(p.imp, sym, ONAME) - if asNode(sym.Def) != nil && asNode(sym.Def).Op == ONAME { + if old := asNode(sym.Def); old != nil && old.Op == ONAME { // function was imported before (via another import) - if !eqtype(sig, asNode(sym.Def).Type) { - p.formatErrorf("inconsistent definition for func %v during import\n\t%v\n\t%v", sym, asNode(sym.Def).Type, sig) + if !eqtype(sig, old.Type) { + p.formatErrorf("inconsistent definition for func %v during 
import\n\t%v\n\t%v", sym, old.Type, sig) } - p.funcList = append(p.funcList, nil) + n := asNode(old.Type.Nname()) + p.funcList = append(p.funcList, n) break } - n := newfuncname(sym) + n := newfuncnamel(pos, sym) n.Type = sig + // TODO(mdempsky): Stop clobbering n.Pos in declare. + savedlineno := lineno + lineno = pos declare(n, PFUNC) + lineno = savedlineno p.funcList = append(p.funcList, n) importlist = append(importlist, n) + sig.SetNname(asTypesNode(n)) + if Debug['E'] > 0 { fmt.Printf("import [%q] func %v \n", p.imp.Path, n) - if Debug['m'] > 2 && n.Func.Inl.Len() != 0 { - fmt.Printf("inl body: %v\n", n.Func.Inl) - } } default: @@ -479,15 +492,20 @@ func (p *importer) typ() *types.Type { var t *types.Type switch i { case namedTag: - p.pos() + pos := p.pos() tsym := p.qualifiedName() - t = pkgtype(p.imp, tsym) + t = pkgtype(pos, p.imp, tsym) p.typList = append(p.typList, t) + dup := !t.IsKind(types.TFORW) // type already imported // read underlying type t0 := p.typ() + // TODO(mdempsky): Stop clobbering n.Pos in declare. + savedlineno := lineno + lineno = pos p.importtype(t, t0) + lineno = savedlineno // interfaces don't have associated methods if t0.IsInterface() { @@ -501,7 +519,7 @@ func (p *importer) typ() *types.Type { // read associated methods for i := p.int(); i > 0; i-- { - p.pos() + mpos := p.pos() sym := p.fieldSym() // during import unexported method names should be in the type's package @@ -514,10 +532,21 @@ func (p *importer) typ() *types.Type { result := p.paramList() nointerface := p.bool() - n := newfuncname(methodname(sym, recv[0].Type)) - n.Type = functypefield(recv[0], params, result) + mt := functypefield(recv[0], params, result) + oldm := addmethod(sym, mt, false, nointerface) + + if dup { + // An earlier import already declared this type and its methods. + // Discard the duplicate method declaration. 
+ n := asNode(oldm.Type.Nname()) + p.funcList = append(p.funcList, n) + continue + } + + n := newfuncnamel(mpos, methodname(sym, recv[0].Type)) + n.Type = mt + n.SetClass(PFUNC) checkwidth(n.Type) - addmethod(sym, n.Type, false, nointerface) p.funcList = append(p.funcList, n) importlist = append(importlist, n) @@ -526,13 +555,10 @@ func (p *importer) typ() *types.Type { // (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled // out by typecheck's lookdot as this $$.ttype. So by providing // this back link here we avoid special casing there. - n.Type.FuncType().Nname = asTypesNode(n) + mt.SetNname(asTypesNode(n)) if Debug['E'] > 0 { fmt.Printf("import [%q] meth %v \n", p.imp.Path, n) - if Debug['m'] > 2 && n.Func.Inl.Len() != 0 { - fmt.Printf("inl body: %v\n", n.Func.Inl) - } } } @@ -616,7 +642,7 @@ func (p *importer) fieldList() (fields []*types.Field) { } func (p *importer) field() *types.Field { - p.pos() + pos := p.pos() sym, alias := p.fieldName() typ := p.typ() note := p.string() @@ -636,7 +662,7 @@ func (p *importer) field() *types.Field { } f.Sym = sym - f.Nname = asTypesNode(newname(sym)) + f.Nname = asTypesNode(newnamel(pos, sym)) f.Type = typ f.Note = note @@ -660,14 +686,14 @@ func (p *importer) methodList() (methods []*types.Field) { } func (p *importer) method() *types.Field { - p.pos() + pos := p.pos() sym := p.methodName() params := p.paramList() result := p.paramList() f := types.NewField() f.Sym = sym - f.Nname = asTypesNode(newname(sym)) + f.Nname = asTypesNode(newnamel(pos, sym)) f.Type = functypefield(fakeRecvField(), params, result) return f } @@ -922,10 +948,10 @@ func (p *importer) node() *Node { // again. Re-introduce explicit uintptr(c) conversion. // (issue 16317). 
if typ.IsUnsafePtr() { - n = nod(OCONV, n, nil) + n = nodl(pos, OCONV, n, nil) n.Type = types.Types[TUINTPTR] } - n = nod(OCONV, n, nil) + n = nodl(pos, OCONV, n, nil) n.Type = typ } return n @@ -937,11 +963,7 @@ func (p *importer) node() *Node { // unreachable - should have been resolved by typechecking case OTYPE: - pos := p.pos() - if p.bool() { - return npos(pos, mkname(p.sym())) - } - return npos(pos, typenod(p.typ())) + return npos(p.pos(), typenod(p.typ())) // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC: // unreachable - should have been resolved by typechecking @@ -950,21 +972,26 @@ func (p *importer) node() *Node { // unimplemented case OPTRLIT: - n := npos(p.pos(), p.expr()) + pos := p.pos() + n := npos(pos, p.expr()) if !p.bool() /* !implicit, i.e. '&' operator */ { if n.Op == OCOMPLIT { // Special case for &T{...}: turn into (*T){...}. - n.Right = nod(OIND, n.Right, nil) + n.Right = nodl(pos, OIND, n.Right, nil) n.Right.SetImplicit(true) } else { - n = nod(OADDR, n, nil) + n = nodl(pos, OADDR, n, nil) } } return n case OSTRUCTLIT: - n := nodl(p.pos(), OCOMPLIT, nil, typenod(p.typ())) + // TODO(mdempsky): Export position information for OSTRUCTKEY nodes. 
+ savedlineno := lineno + lineno = p.pos() + n := nodl(lineno, OCOMPLIT, nil, typenod(p.typ())) n.List.Set(p.elemList()) // special handling of field names + lineno = savedlineno return n // case OARRAYLIT, OSLICELIT, OMAPLIT: @@ -1128,62 +1155,50 @@ func (p *importer) node() *Node { return nodl(p.pos(), op, p.expr(), nil) case OIF: - types.Markdcl() n := nodl(p.pos(), OIF, nil, nil) n.Ninit.Set(p.stmtList()) n.Left = p.expr() n.Nbody.Set(p.stmtList()) n.Rlist.Set(p.stmtList()) - types.Popdcl() return n case OFOR: - types.Markdcl() n := nodl(p.pos(), OFOR, nil, nil) n.Ninit.Set(p.stmtList()) n.Left, n.Right = p.exprsOrNil() n.Nbody.Set(p.stmtList()) - types.Popdcl() return n case ORANGE: - types.Markdcl() n := nodl(p.pos(), ORANGE, nil, nil) n.List.Set(p.stmtList()) n.Right = p.expr() n.Nbody.Set(p.stmtList()) - types.Popdcl() return n case OSELECT, OSWITCH: - types.Markdcl() n := nodl(p.pos(), op, nil, nil) n.Ninit.Set(p.stmtList()) n.Left, _ = p.exprsOrNil() n.List.Set(p.stmtList()) - types.Popdcl() return n // case OCASE, OXCASE: // unreachable - mapped to OXCASE case below by exporter case OXCASE: - types.Markdcl() n := nodl(p.pos(), OXCASE, nil, nil) - n.Xoffset = int64(types.Block) n.List.Set(p.exprList()) // TODO(gri) eventually we must declare variables for type switch // statements (type switch statements are not yet exported) n.Nbody.Set(p.stmtList()) - types.Popdcl() return n // case OFALL: // unreachable - mapped to OXFALL case below by exporter - case OXFALL: - n := nodl(p.pos(), OXFALL, nil, nil) - n.Xoffset = int64(types.Block) + case OFALL: + n := nodl(p.pos(), OFALL, nil, nil) return n case OBREAK, OCONTINUE: diff --git a/src/cmd/compile/internal/gc/bitset.go b/src/cmd/compile/internal/gc/bitset.go index 90babd5a9f6..ed5eea0a11b 100644 --- a/src/cmd/compile/internal/gc/bitset.go +++ b/src/cmd/compile/internal/gc/bitset.go @@ -14,6 +14,16 @@ func (f *bitset8) set(mask uint8, b bool) { } } +type bitset16 uint16 + +func (f *bitset16) set(mask uint16, 
b bool) { + if b { + *(*uint16)(f) |= mask + } else { + *(*uint16)(f) &^= mask + } +} + type bitset32 uint32 func (f *bitset32) set(mask uint32, b bool) { diff --git a/src/cmd/compile/internal/gc/builtin.go b/src/cmd/compile/internal/gc/builtin.go index f21a4da4913..0733a460d5b 100644 --- a/src/cmd/compile/internal/gc/builtin.go +++ b/src/cmd/compile/internal/gc/builtin.go @@ -39,116 +39,122 @@ var runtimeDecls = [...]struct { {"concatstring5", funcTag, 29}, {"concatstrings", funcTag, 31}, {"cmpstring", funcTag, 33}, - {"eqstring", funcTag, 34}, - {"intstring", funcTag, 37}, - {"slicebytetostring", funcTag, 39}, - {"slicebytetostringtmp", funcTag, 40}, - {"slicerunetostring", funcTag, 43}, - {"stringtoslicebyte", funcTag, 44}, - {"stringtoslicerune", funcTag, 47}, - {"decoderune", funcTag, 48}, - {"slicecopy", funcTag, 50}, - {"slicestringcopy", funcTag, 51}, - {"convI2I", funcTag, 52}, - {"convT2E", funcTag, 53}, - {"convT2E16", funcTag, 53}, - {"convT2E32", funcTag, 53}, - {"convT2E64", funcTag, 53}, - {"convT2Estring", funcTag, 53}, - {"convT2Eslice", funcTag, 53}, - {"convT2Enoptr", funcTag, 53}, - {"convT2I", funcTag, 53}, - {"convT2I16", funcTag, 53}, - {"convT2I32", funcTag, 53}, - {"convT2I64", funcTag, 53}, - {"convT2Istring", funcTag, 53}, - {"convT2Islice", funcTag, 53}, - {"convT2Inoptr", funcTag, 53}, - {"assertE2I", funcTag, 52}, - {"assertE2I2", funcTag, 54}, - {"assertI2I", funcTag, 52}, - {"assertI2I2", funcTag, 54}, - {"panicdottypeE", funcTag, 55}, - {"panicdottypeI", funcTag, 55}, - {"panicnildottype", funcTag, 56}, - {"ifaceeq", funcTag, 59}, - {"efaceeq", funcTag, 59}, - {"makemap", funcTag, 61}, - {"mapaccess1", funcTag, 62}, - {"mapaccess1_fast32", funcTag, 63}, - {"mapaccess1_fast64", funcTag, 63}, - {"mapaccess1_faststr", funcTag, 63}, - {"mapaccess1_fat", funcTag, 64}, - {"mapaccess2", funcTag, 65}, - {"mapaccess2_fast32", funcTag, 66}, - {"mapaccess2_fast64", funcTag, 66}, - {"mapaccess2_faststr", funcTag, 66}, - {"mapaccess2_fat", 
funcTag, 67}, - {"mapassign", funcTag, 62}, - {"mapassign_fast32", funcTag, 63}, - {"mapassign_fast64", funcTag, 63}, - {"mapassign_faststr", funcTag, 63}, - {"mapiterinit", funcTag, 68}, - {"mapdelete", funcTag, 68}, - {"mapdelete_fast32", funcTag, 69}, - {"mapdelete_fast64", funcTag, 69}, - {"mapdelete_faststr", funcTag, 69}, - {"mapiternext", funcTag, 70}, - {"makechan", funcTag, 72}, - {"chanrecv1", funcTag, 74}, - {"chanrecv2", funcTag, 75}, - {"chansend1", funcTag, 77}, + {"intstring", funcTag, 36}, + {"slicebytetostring", funcTag, 38}, + {"slicebytetostringtmp", funcTag, 39}, + {"slicerunetostring", funcTag, 42}, + {"stringtoslicebyte", funcTag, 43}, + {"stringtoslicerune", funcTag, 46}, + {"decoderune", funcTag, 47}, + {"slicecopy", funcTag, 49}, + {"slicestringcopy", funcTag, 50}, + {"convI2I", funcTag, 51}, + {"convT2E", funcTag, 52}, + {"convT2E16", funcTag, 52}, + {"convT2E32", funcTag, 52}, + {"convT2E64", funcTag, 52}, + {"convT2Estring", funcTag, 52}, + {"convT2Eslice", funcTag, 52}, + {"convT2Enoptr", funcTag, 52}, + {"convT2I", funcTag, 52}, + {"convT2I16", funcTag, 52}, + {"convT2I32", funcTag, 52}, + {"convT2I64", funcTag, 52}, + {"convT2Istring", funcTag, 52}, + {"convT2Islice", funcTag, 52}, + {"convT2Inoptr", funcTag, 52}, + {"assertE2I", funcTag, 51}, + {"assertE2I2", funcTag, 53}, + {"assertI2I", funcTag, 51}, + {"assertI2I2", funcTag, 53}, + {"panicdottypeE", funcTag, 54}, + {"panicdottypeI", funcTag, 54}, + {"panicnildottype", funcTag, 55}, + {"ifaceeq", funcTag, 58}, + {"efaceeq", funcTag, 58}, + {"fastrand", funcTag, 60}, + {"makemap64", funcTag, 62}, + {"makemap", funcTag, 63}, + {"makemap_small", funcTag, 64}, + {"mapaccess1", funcTag, 65}, + {"mapaccess1_fast32", funcTag, 66}, + {"mapaccess1_fast64", funcTag, 66}, + {"mapaccess1_faststr", funcTag, 66}, + {"mapaccess1_fat", funcTag, 67}, + {"mapaccess2", funcTag, 68}, + {"mapaccess2_fast32", funcTag, 69}, + {"mapaccess2_fast64", funcTag, 69}, + {"mapaccess2_faststr", funcTag, 69}, + 
{"mapaccess2_fat", funcTag, 70}, + {"mapassign", funcTag, 65}, + {"mapassign_fast32", funcTag, 66}, + {"mapassign_fast32ptr", funcTag, 66}, + {"mapassign_fast64", funcTag, 66}, + {"mapassign_fast64ptr", funcTag, 66}, + {"mapassign_faststr", funcTag, 66}, + {"mapiterinit", funcTag, 71}, + {"mapdelete", funcTag, 71}, + {"mapdelete_fast32", funcTag, 72}, + {"mapdelete_fast64", funcTag, 72}, + {"mapdelete_faststr", funcTag, 72}, + {"mapiternext", funcTag, 73}, + {"makechan64", funcTag, 75}, + {"makechan", funcTag, 76}, + {"chanrecv1", funcTag, 78}, + {"chanrecv2", funcTag, 79}, + {"chansend1", funcTag, 81}, {"closechan", funcTag, 23}, - {"writeBarrier", varTag, 79}, - {"writebarrierptr", funcTag, 80}, - {"typedmemmove", funcTag, 81}, - {"typedmemclr", funcTag, 82}, - {"typedslicecopy", funcTag, 83}, - {"selectnbsend", funcTag, 84}, - {"selectnbrecv", funcTag, 85}, - {"selectnbrecv2", funcTag, 87}, - {"newselect", funcTag, 88}, - {"selectsend", funcTag, 89}, - {"selectrecv", funcTag, 90}, - {"selectdefault", funcTag, 56}, - {"selectgo", funcTag, 91}, + {"writeBarrier", varTag, 83}, + {"writebarrierptr", funcTag, 84}, + {"typedmemmove", funcTag, 85}, + {"typedmemclr", funcTag, 86}, + {"typedslicecopy", funcTag, 87}, + {"selectnbsend", funcTag, 88}, + {"selectnbrecv", funcTag, 89}, + {"selectnbrecv2", funcTag, 91}, + {"newselect", funcTag, 92}, + {"selectsend", funcTag, 93}, + {"selectrecv", funcTag, 94}, + {"selectdefault", funcTag, 55}, + {"selectgo", funcTag, 95}, {"block", funcTag, 5}, - {"makeslice", funcTag, 93}, - {"makeslice64", funcTag, 94}, - {"growslice", funcTag, 95}, - {"memmove", funcTag, 96}, - {"memclrNoHeapPointers", funcTag, 97}, - {"memclrHasPointers", funcTag, 97}, - {"memequal", funcTag, 98}, - {"memequal8", funcTag, 99}, - {"memequal16", funcTag, 99}, - {"memequal32", funcTag, 99}, - {"memequal64", funcTag, 99}, - {"memequal128", funcTag, 99}, - {"int64div", funcTag, 100}, - {"uint64div", funcTag, 101}, - {"int64mod", funcTag, 100}, - {"uint64mod", 
funcTag, 101}, - {"float64toint64", funcTag, 102}, - {"float64touint64", funcTag, 103}, - {"float64touint32", funcTag, 105}, - {"int64tofloat64", funcTag, 106}, - {"uint64tofloat64", funcTag, 107}, - {"uint32tofloat64", funcTag, 108}, - {"complex128div", funcTag, 109}, - {"racefuncenter", funcTag, 110}, + {"makeslice", funcTag, 97}, + {"makeslice64", funcTag, 98}, + {"growslice", funcTag, 99}, + {"memmove", funcTag, 100}, + {"memclrNoHeapPointers", funcTag, 101}, + {"memclrHasPointers", funcTag, 101}, + {"memequal", funcTag, 102}, + {"memequal8", funcTag, 103}, + {"memequal16", funcTag, 103}, + {"memequal32", funcTag, 103}, + {"memequal64", funcTag, 103}, + {"memequal128", funcTag, 103}, + {"int64div", funcTag, 104}, + {"uint64div", funcTag, 105}, + {"int64mod", funcTag, 104}, + {"uint64mod", funcTag, 105}, + {"float64toint64", funcTag, 106}, + {"float64touint64", funcTag, 107}, + {"float64touint32", funcTag, 108}, + {"int64tofloat64", funcTag, 109}, + {"uint64tofloat64", funcTag, 110}, + {"uint32tofloat64", funcTag, 111}, + {"complex128div", funcTag, 112}, + {"racefuncenter", funcTag, 113}, {"racefuncexit", funcTag, 5}, - {"raceread", funcTag, 110}, - {"racewrite", funcTag, 110}, - {"racereadrange", funcTag, 111}, - {"racewriterange", funcTag, 111}, - {"msanread", funcTag, 111}, - {"msanwrite", funcTag, 111}, + {"raceread", funcTag, 113}, + {"racewrite", funcTag, 113}, + {"racereadrange", funcTag, 114}, + {"racewriterange", funcTag, 114}, + {"msanread", funcTag, 114}, + {"msanwrite", funcTag, 114}, {"support_popcnt", varTag, 11}, + {"support_sse41", varTag, 11}, } func runtimeTypes() []*types.Type { - var typs [112]*types.Type + var typs [115]*types.Type typs[0] = types.Bytetype typs[1] = types.NewPtr(typs[0]) typs[2] = types.Types[TANY] @@ -183,83 +189,86 @@ func runtimeTypes() []*types.Type { typs[31] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[30])}, []*Node{anonfield(typs[21])}) typs[32] = types.Types[TINT] typs[33] = functype(nil, 
[]*Node{anonfield(typs[21]), anonfield(typs[21])}, []*Node{anonfield(typs[32])}) - typs[34] = functype(nil, []*Node{anonfield(typs[21]), anonfield(typs[21])}, []*Node{anonfield(typs[11])}) - typs[35] = types.NewArray(typs[0], 4) - typs[36] = types.NewPtr(typs[35]) - typs[37] = functype(nil, []*Node{anonfield(typs[36]), anonfield(typs[15])}, []*Node{anonfield(typs[21])}) - typs[38] = types.NewSlice(typs[0]) - typs[39] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[38])}, []*Node{anonfield(typs[21])}) - typs[40] = functype(nil, []*Node{anonfield(typs[38])}, []*Node{anonfield(typs[21])}) - typs[41] = types.Runetype - typs[42] = types.NewSlice(typs[41]) - typs[43] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[42])}, []*Node{anonfield(typs[21])}) - typs[44] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[21])}, []*Node{anonfield(typs[38])}) - typs[45] = types.NewArray(typs[41], 32) - typs[46] = types.NewPtr(typs[45]) - typs[47] = functype(nil, []*Node{anonfield(typs[46]), anonfield(typs[21])}, []*Node{anonfield(typs[42])}) - typs[48] = functype(nil, []*Node{anonfield(typs[21]), anonfield(typs[32])}, []*Node{anonfield(typs[41]), anonfield(typs[32])}) - typs[49] = types.Types[TUINTPTR] - typs[50] = functype(nil, []*Node{anonfield(typs[2]), anonfield(typs[2]), anonfield(typs[49])}, []*Node{anonfield(typs[32])}) - typs[51] = functype(nil, []*Node{anonfield(typs[2]), anonfield(typs[2])}, []*Node{anonfield(typs[32])}) - typs[52] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])}) - typs[53] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])}) - typs[54] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[11])}) - typs[55] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil) - typs[56] = functype(nil, []*Node{anonfield(typs[1])}, nil) - typs[57] = 
types.NewPtr(typs[49]) - typs[58] = types.Types[TUNSAFEPTR] - typs[59] = functype(nil, []*Node{anonfield(typs[57]), anonfield(typs[58]), anonfield(typs[58])}, []*Node{anonfield(typs[11])}) - typs[60] = types.NewMap(typs[2], typs[2]) - typs[61] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[60])}) - typs[62] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[3])}, []*Node{anonfield(typs[3])}) - typs[63] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[2])}, []*Node{anonfield(typs[3])}) - typs[64] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])}) - typs[65] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[11])}) - typs[66] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[11])}) - typs[67] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[11])}) - typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[3])}, nil) - typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[60]), anonfield(typs[2])}, nil) - typs[70] = functype(nil, []*Node{anonfield(typs[3])}, nil) - typs[71] = types.NewChan(typs[2], types.Cboth) - typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[71])}) - typs[73] = types.NewChan(typs[2], types.Crecv) - typs[74] = functype(nil, []*Node{anonfield(typs[73]), anonfield(typs[3])}, nil) - typs[75] = functype(nil, []*Node{anonfield(typs[73]), anonfield(typs[3])}, []*Node{anonfield(typs[11])}) - typs[76] = types.NewChan(typs[2], types.Csend) - typs[77] = functype(nil, 
[]*Node{anonfield(typs[76]), anonfield(typs[3])}, nil) - typs[78] = types.NewArray(typs[0], 3) - typs[79] = tostruct([]*Node{namedfield("enabled", typs[11]), namedfield("pad", typs[78]), namedfield("needed", typs[11]), namedfield("cgo", typs[11]), namedfield("alignme", typs[17])}) - typs[80] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[2])}, nil) - typs[81] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil) - typs[82] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, nil) - typs[83] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2]), anonfield(typs[2])}, []*Node{anonfield(typs[32])}) - typs[84] = functype(nil, []*Node{anonfield(typs[76]), anonfield(typs[3])}, []*Node{anonfield(typs[11])}) - typs[85] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[73])}, []*Node{anonfield(typs[11])}) - typs[86] = types.NewPtr(typs[11]) - typs[87] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[86]), anonfield(typs[73])}, []*Node{anonfield(typs[11])}) - typs[88] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[8])}, nil) - typs[89] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[76]), anonfield(typs[3])}, nil) - typs[90] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[73]), anonfield(typs[3]), anonfield(typs[86])}, nil) - typs[91] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[32])}) - typs[92] = types.NewSlice(typs[2]) - typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[32]), anonfield(typs[32])}, []*Node{anonfield(typs[92])}) - typs[94] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[92])}) - typs[95] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[92]), anonfield(typs[32])}, []*Node{anonfield(typs[92])}) - typs[96] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[49])}, nil) - typs[97] = 
functype(nil, []*Node{anonfield(typs[58]), anonfield(typs[49])}, nil) - typs[98] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[49])}, []*Node{anonfield(typs[11])}) - typs[99] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[11])}) - typs[100] = functype(nil, []*Node{anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[15])}) - typs[101] = functype(nil, []*Node{anonfield(typs[17]), anonfield(typs[17])}, []*Node{anonfield(typs[17])}) - typs[102] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[15])}) - typs[103] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[17])}) - typs[104] = types.Types[TUINT32] - typs[105] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[104])}) - typs[106] = functype(nil, []*Node{anonfield(typs[15])}, []*Node{anonfield(typs[13])}) - typs[107] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[13])}) - typs[108] = functype(nil, []*Node{anonfield(typs[104])}, []*Node{anonfield(typs[13])}) - typs[109] = functype(nil, []*Node{anonfield(typs[19]), anonfield(typs[19])}, []*Node{anonfield(typs[19])}) - typs[110] = functype(nil, []*Node{anonfield(typs[49])}, nil) - typs[111] = functype(nil, []*Node{anonfield(typs[49]), anonfield(typs[49])}, nil) + typs[34] = types.NewArray(typs[0], 4) + typs[35] = types.NewPtr(typs[34]) + typs[36] = functype(nil, []*Node{anonfield(typs[35]), anonfield(typs[15])}, []*Node{anonfield(typs[21])}) + typs[37] = types.NewSlice(typs[0]) + typs[38] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[37])}, []*Node{anonfield(typs[21])}) + typs[39] = functype(nil, []*Node{anonfield(typs[37])}, []*Node{anonfield(typs[21])}) + typs[40] = types.Runetype + typs[41] = types.NewSlice(typs[40]) + typs[42] = functype(nil, []*Node{anonfield(typs[25]), anonfield(typs[41])}, []*Node{anonfield(typs[21])}) + typs[43] = functype(nil, []*Node{anonfield(typs[25]), 
anonfield(typs[21])}, []*Node{anonfield(typs[37])}) + typs[44] = types.NewArray(typs[40], 32) + typs[45] = types.NewPtr(typs[44]) + typs[46] = functype(nil, []*Node{anonfield(typs[45]), anonfield(typs[21])}, []*Node{anonfield(typs[41])}) + typs[47] = functype(nil, []*Node{anonfield(typs[21]), anonfield(typs[32])}, []*Node{anonfield(typs[40]), anonfield(typs[32])}) + typs[48] = types.Types[TUINTPTR] + typs[49] = functype(nil, []*Node{anonfield(typs[2]), anonfield(typs[2]), anonfield(typs[48])}, []*Node{anonfield(typs[32])}) + typs[50] = functype(nil, []*Node{anonfield(typs[2]), anonfield(typs[2])}, []*Node{anonfield(typs[32])}) + typs[51] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2])}) + typs[52] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3])}, []*Node{anonfield(typs[2])}) + typs[53] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2])}, []*Node{anonfield(typs[2]), anonfield(typs[11])}) + typs[54] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil) + typs[55] = functype(nil, []*Node{anonfield(typs[1])}, nil) + typs[56] = types.NewPtr(typs[48]) + typs[57] = types.Types[TUNSAFEPTR] + typs[58] = functype(nil, []*Node{anonfield(typs[56]), anonfield(typs[57]), anonfield(typs[57])}, []*Node{anonfield(typs[11])}) + typs[59] = types.Types[TUINT32] + typs[60] = functype(nil, nil, []*Node{anonfield(typs[59])}) + typs[61] = types.NewMap(typs[2], typs[2]) + typs[62] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*Node{anonfield(typs[61])}) + typs[63] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[32]), anonfield(typs[3])}, []*Node{anonfield(typs[61])}) + typs[64] = functype(nil, nil, []*Node{anonfield(typs[61])}) + typs[65] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[61]), anonfield(typs[3])}, []*Node{anonfield(typs[3])}) + typs[66] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[61]), 
anonfield(typs[2])}, []*Node{anonfield(typs[3])}) + typs[67] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[61]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3])}) + typs[68] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[61]), anonfield(typs[3])}, []*Node{anonfield(typs[3]), anonfield(typs[11])}) + typs[69] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[61]), anonfield(typs[2])}, []*Node{anonfield(typs[3]), anonfield(typs[11])}) + typs[70] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[61]), anonfield(typs[3]), anonfield(typs[1])}, []*Node{anonfield(typs[3]), anonfield(typs[11])}) + typs[71] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[61]), anonfield(typs[3])}, nil) + typs[72] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[61]), anonfield(typs[2])}, nil) + typs[73] = functype(nil, []*Node{anonfield(typs[3])}, nil) + typs[74] = types.NewChan(typs[2], types.Cboth) + typs[75] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15])}, []*Node{anonfield(typs[74])}) + typs[76] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[32])}, []*Node{anonfield(typs[74])}) + typs[77] = types.NewChan(typs[2], types.Crecv) + typs[78] = functype(nil, []*Node{anonfield(typs[77]), anonfield(typs[3])}, nil) + typs[79] = functype(nil, []*Node{anonfield(typs[77]), anonfield(typs[3])}, []*Node{anonfield(typs[11])}) + typs[80] = types.NewChan(typs[2], types.Csend) + typs[81] = functype(nil, []*Node{anonfield(typs[80]), anonfield(typs[3])}, nil) + typs[82] = types.NewArray(typs[0], 3) + typs[83] = tostruct([]*Node{namedfield("enabled", typs[11]), namedfield("pad", typs[82]), namedfield("needed", typs[11]), namedfield("cgo", typs[11]), namedfield("alignme", typs[17])}) + typs[84] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[2])}, nil) + typs[85] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil) + typs[86] = functype(nil, 
[]*Node{anonfield(typs[1]), anonfield(typs[3])}, nil) + typs[87] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[2]), anonfield(typs[2])}, []*Node{anonfield(typs[32])}) + typs[88] = functype(nil, []*Node{anonfield(typs[80]), anonfield(typs[3])}, []*Node{anonfield(typs[11])}) + typs[89] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[77])}, []*Node{anonfield(typs[11])}) + typs[90] = types.NewPtr(typs[11]) + typs[91] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[90]), anonfield(typs[77])}, []*Node{anonfield(typs[11])}) + typs[92] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[8])}, nil) + typs[93] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[80]), anonfield(typs[3])}, nil) + typs[94] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[77]), anonfield(typs[3]), anonfield(typs[90])}, nil) + typs[95] = functype(nil, []*Node{anonfield(typs[1])}, []*Node{anonfield(typs[32])}) + typs[96] = types.NewSlice(typs[2]) + typs[97] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[32]), anonfield(typs[32])}, []*Node{anonfield(typs[96])}) + typs[98] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[96])}) + typs[99] = functype(nil, []*Node{anonfield(typs[1]), anonfield(typs[96]), anonfield(typs[32])}, []*Node{anonfield(typs[96])}) + typs[100] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[48])}, nil) + typs[101] = functype(nil, []*Node{anonfield(typs[57]), anonfield(typs[48])}, nil) + typs[102] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[48])}, []*Node{anonfield(typs[11])}) + typs[103] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[11])}) + typs[104] = functype(nil, []*Node{anonfield(typs[15]), anonfield(typs[15])}, []*Node{anonfield(typs[15])}) + typs[105] = functype(nil, []*Node{anonfield(typs[17]), 
anonfield(typs[17])}, []*Node{anonfield(typs[17])}) + typs[106] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[15])}) + typs[107] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[17])}) + typs[108] = functype(nil, []*Node{anonfield(typs[13])}, []*Node{anonfield(typs[59])}) + typs[109] = functype(nil, []*Node{anonfield(typs[15])}, []*Node{anonfield(typs[13])}) + typs[110] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[13])}) + typs[111] = functype(nil, []*Node{anonfield(typs[59])}, []*Node{anonfield(typs[13])}) + typs[112] = functype(nil, []*Node{anonfield(typs[19]), anonfield(typs[19])}, []*Node{anonfield(typs[19])}) + typs[113] = functype(nil, []*Node{anonfield(typs[48])}, nil) + typs[114] = functype(nil, []*Node{anonfield(typs[48]), anonfield(typs[48])}, nil) return typs[:] } diff --git a/src/cmd/compile/internal/gc/builtin/runtime.go b/src/cmd/compile/internal/gc/builtin/runtime.go index 7f4846db9db..de17d51d8ab 100644 --- a/src/cmd/compile/internal/gc/builtin/runtime.go +++ b/src/cmd/compile/internal/gc/builtin/runtime.go @@ -48,7 +48,6 @@ func concatstring5(*[32]byte, string, string, string, string, string) string func concatstrings(*[32]byte, []string) string func cmpstring(string, string) int -func eqstring(string, string) bool func intstring(*[4]byte, int64) string func slicebytetostring(*[32]byte, []byte) string func slicebytetostringtmp([]byte) string @@ -78,7 +77,7 @@ func convT2Istring(tab *byte, elem *any) (ret any) func convT2Islice(tab *byte, elem *any) (ret any) func convT2Inoptr(tab *byte, elem *any) (ret any) -// interface type assertions x.(T) +// interface type assertions x.(T) func assertE2I(typ *byte, iface any) (ret any) func assertE2I2(typ *byte, iface any) (ret any, b bool) func assertI2I(typ *byte, iface any) (ret any) @@ -92,8 +91,12 @@ func panicnildottype(want *byte) func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool) func efaceeq(typ *uintptr, x, y unsafe.Pointer) 
(ret bool) +func fastrand() uint32 + // *byte is really *runtime.Type -func makemap(mapType *byte, hint int64, mapbuf *any, bucketbuf *any) (hmap map[any]any) +func makemap64(mapType *byte, hint int64, mapbuf *any) (hmap map[any]any) +func makemap(mapType *byte, hint int, mapbuf *any) (hmap map[any]any) +func makemap_small() (hmap map[any]any) func mapaccess1(mapType *byte, hmap map[any]any, key *any) (val *any) func mapaccess1_fast32(mapType *byte, hmap map[any]any, key any) (val *any) func mapaccess1_fast64(mapType *byte, hmap map[any]any, key any) (val *any) @@ -106,7 +109,9 @@ func mapaccess2_faststr(mapType *byte, hmap map[any]any, key any) (val *any, pre func mapaccess2_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any, pres bool) func mapassign(mapType *byte, hmap map[any]any, key *any) (val *any) func mapassign_fast32(mapType *byte, hmap map[any]any, key any) (val *any) +func mapassign_fast32ptr(mapType *byte, hmap map[any]any, key any) (val *any) func mapassign_fast64(mapType *byte, hmap map[any]any, key any) (val *any) +func mapassign_fast64ptr(mapType *byte, hmap map[any]any, key any) (val *any) func mapassign_faststr(mapType *byte, hmap map[any]any, key any) (val *any) func mapiterinit(mapType *byte, hmap map[any]any, hiter *any) func mapdelete(mapType *byte, hmap map[any]any, key *any) @@ -116,7 +121,8 @@ func mapdelete_faststr(mapType *byte, hmap map[any]any, key any) func mapiternext(hiter *any) // *byte is really *runtime.Type -func makechan(chanType *byte, hint int64) (hchan chan any) +func makechan64(chanType *byte, size int64) (hchan chan any) +func makechan(chanType *byte, size int) (hchan chan any) func chanrecv1(hchan <-chan any, elem *any) func chanrecv2(hchan <-chan any, elem *any) bool func chansend1(hchan chan<- any, elem *any) @@ -190,3 +196,4 @@ func msanwrite(addr, size uintptr) // architecture variants var support_popcnt bool +var support_sse41 bool diff --git a/src/cmd/compile/internal/gc/bv.go 
b/src/cmd/compile/internal/gc/bv.go index 72f29e82538..03c4b9d8297 100644 --- a/src/cmd/compile/internal/gc/bv.go +++ b/src/cmd/compile/internal/gc/bv.go @@ -5,9 +5,9 @@ package gc const ( - WORDBITS = 32 - WORDMASK = WORDBITS - 1 - WORDSHIFT = 5 + wordBits = 32 + wordMask = wordBits - 1 + wordShift = 5 ) // A bvec is a bit vector. @@ -17,7 +17,7 @@ type bvec struct { } func bvalloc(n int32) bvec { - nword := (n + WORDBITS - 1) / WORDBITS + nword := (n + wordBits - 1) / wordBits return bvec{n, make([]uint32, nword)} } @@ -28,7 +28,7 @@ type bulkBvec struct { } func bvbulkalloc(nbit int32, count int32) bulkBvec { - nword := (nbit + WORDBITS - 1) / WORDBITS + nword := (nbit + wordBits - 1) / wordBits size := int64(nword) * int64(count) if int64(int32(size*4)) != size*4 { Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size) @@ -66,24 +66,24 @@ func (bv bvec) Get(i int32) bool { if i < 0 || i >= bv.n { Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n) } - mask := uint32(1 << uint(i%WORDBITS)) - return bv.b[i>>WORDSHIFT]&mask != 0 + mask := uint32(1 << uint(i%wordBits)) + return bv.b[i>>wordShift]&mask != 0 } func (bv bvec) Set(i int32) { if i < 0 || i >= bv.n { Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n) } - mask := uint32(1 << uint(i%WORDBITS)) - bv.b[i/WORDBITS] |= mask + mask := uint32(1 << uint(i%wordBits)) + bv.b[i/wordBits] |= mask } func (bv bvec) Unset(i int32) { if i < 0 || i >= bv.n { Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n) } - mask := uint32(1 << uint(i%WORDBITS)) - bv.b[i/WORDBITS] &^= mask + mask := uint32(1 << uint(i%wordBits)) + bv.b[i/wordBits] &^= mask } // bvnext returns the smallest index >= i for which bvget(bv, i) == 1. @@ -94,11 +94,11 @@ func (bv bvec) Next(i int32) int32 { } // Jump i ahead to next word with bits. 
- if bv.b[i>>WORDSHIFT]>>uint(i&WORDMASK) == 0 { - i &^= WORDMASK - i += WORDBITS - for i < bv.n && bv.b[i>>WORDSHIFT] == 0 { - i += WORDBITS + if bv.b[i>>wordShift]>>uint(i&wordMask) == 0 { + i &^= wordMask + i += wordBits + for i < bv.n && bv.b[i>>wordShift] == 0 { + i += wordBits } } @@ -107,7 +107,7 @@ func (bv bvec) Next(i int32) int32 { } // Find 1 bit. - w := bv.b[i>>WORDSHIFT] >> uint(i&WORDMASK) + w := bv.b[i>>wordShift] >> uint(i&wordMask) for w&1 == 0 { w >>= 1 @@ -118,8 +118,8 @@ func (bv bvec) Next(i int32) int32 { } func (bv bvec) IsEmpty() bool { - for i := int32(0); i < bv.n; i += WORDBITS { - if bv.b[i>>WORDSHIFT] != 0 { + for i := int32(0); i < bv.n; i += wordBits { + if bv.b[i>>wordShift] != 0 { return false } } @@ -129,7 +129,7 @@ func (bv bvec) IsEmpty() bool { func (bv bvec) Not() { i := int32(0) w := int32(0) - for ; i < bv.n; i, w = i+WORDBITS, w+1 { + for ; i < bv.n; i, w = i+wordBits, w+1 { bv.b[w] = ^bv.b[w] } } diff --git a/src/cmd/compile/internal/gc/closure.go b/src/cmd/compile/internal/gc/closure.go index 143e1969c7d..d3af16e1764 100644 --- a/src/cmd/compile/internal/gc/closure.go +++ b/src/cmd/compile/internal/gc/closure.go @@ -19,7 +19,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) *Node { n.Func.Depth = funcdepth n.Func.Outerfunc = Curfn - old := p.funchdr(n, expr.Pos()) + old := p.funchdr(n) // steal ntype's argument names and // leave a fresh copy in their place. @@ -60,7 +60,7 @@ func (p *noder) funcLit(expr *syntax.FuncLit) *Node { n.Nbody.Set(body) n.Func.Endlineno = lineno - p.funcbody(n, expr.Body.Rbrace, old) + p.funcbody(old) // closure-specific variables are hanging off the // ordinary ones in the symbol table; see oldname. @@ -463,9 +463,8 @@ func walkclosure(func_ *Node, init *Nodes) *Node { Warnl(func_.Pos, "closure converted to global") } return func_.Func.Closure.Func.Nname - } else { - closuredebugruntimecheck(func_) } + closuredebugruntimecheck(func_) // Create closure in the form of a composite literal. 
// supposing the closure captures an int i and a string s @@ -481,28 +480,29 @@ func walkclosure(func_ *Node, init *Nodes) *Node { // the struct is unnamed so that closures in multiple packages with the // same struct type can share the descriptor. - typ := nod(OTSTRUCT, nil, nil) - - typ.List.Set1(namedfield(".F", types.Types[TUINTPTR])) + fields := []*Node{ + namedfield(".F", types.Types[TUINTPTR]), + } for _, v := range func_.Func.Cvars.Slice() { if v.Op == OXXX { continue } - typ1 := typenod(v.Type) + typ := v.Type if !v.Name.Byval() { - typ1 = nod(OIND, typ1, nil) + typ = types.NewPtr(typ) } - typ.List.Append(nod(ODCLFIELD, newname(v.Sym), typ1)) + fields = append(fields, symfield(v.Sym, typ)) } + typ := tostruct(fields) + typ.SetNoalg(true) - clos := nod(OCOMPLIT, nil, nod(OIND, typ, nil)) + clos := nod(OCOMPLIT, nil, nod(OIND, typenod(typ), nil)) clos.Esc = func_.Esc clos.Right.SetImplicit(true) clos.List.Set(append([]*Node{nod(OCFUNC, func_.Func.Closure.Func.Nname, nil)}, func_.Func.Enter.Slice()...)) // Force type conversion from *struct to the func type. 
clos = nod(OCONVNOP, clos, nil) - clos.Type = func_.Type clos = typecheck(clos, Erv) @@ -646,7 +646,7 @@ func makepartialcall(fn *Node, t0 *types.Type, meth *types.Sym) *Node { call := nod(OCALL, nodSym(OXDOT, ptr, meth), nil) call.List.Set(callargs) call.SetIsddd(ddd) - if t0.Results().NumFields() == 0 { + if t0.NumResults() == 0 { body = append(body, call) } else { n := nod(OAS2, nil, nil) @@ -683,11 +683,13 @@ func walkpartialcall(n *Node, init *Nodes) *Node { checknil(n.Left, init) } - typ := nod(OTSTRUCT, nil, nil) - typ.List.Set1(namedfield("F", types.Types[TUINTPTR])) - typ.List.Append(namedfield("R", n.Left.Type)) + typ := tostruct([]*Node{ + namedfield("F", types.Types[TUINTPTR]), + namedfield("R", n.Left.Type), + }) + typ.SetNoalg(true) - clos := nod(OCOMPLIT, nil, nod(OIND, typ, nil)) + clos := nod(OCOMPLIT, nil, nod(OIND, typenod(typ), nil)) clos.Esc = n.Esc clos.Right.SetImplicit(true) clos.List.Set1(nod(OCFUNC, n.Func.Nname, nil)) @@ -695,7 +697,6 @@ func walkpartialcall(n *Node, init *Nodes) *Node { // Force type conversion from *struct to the func type. clos = nod(OCONVNOP, clos, nil) - clos.Type = n.Type clos = typecheck(clos, Erv) diff --git a/src/cmd/compile/internal/gc/const.go b/src/cmd/compile/internal/gc/const.go index a465d4a7bb5..dcc16b6decd 100644 --- a/src/cmd/compile/internal/gc/const.go +++ b/src/cmd/compile/internal/gc/const.go @@ -12,7 +12,7 @@ import ( ) // Ctype describes the constant kind of an "ideal" (untyped) constant. -type Ctype int8 +type Ctype uint8 const ( CTxxx Ctype = iota @@ -247,7 +247,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, reuse canReuseNode) *Node { return n - // target is invalid type for a constant? leave alone. + // target is invalid type for a constant? leave alone. 
case OLITERAL: if !okforconst[t.Etype] && n.Type.Etype != TNIL { return defaultlitreuse(n, nil, reuse) @@ -297,7 +297,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, reuse canReuseNode) *Node { ct := consttype(n) var et types.EType - if ct < 0 { + if ct == 0 { goto bad } @@ -408,7 +408,7 @@ func convlit1(n *Node, t *types.Type, explicit bool, reuse canReuseNode) *Node { bad: if !n.Diag() { if !t.Broke() { - yyerror("cannot convert %v to type %v", n, t) + yyerror("cannot convert %L to type %v", n, t) } n.SetDiag(true) } @@ -591,7 +591,7 @@ func tostr(v Val) Val { func consttype(n *Node) Ctype { if n == nil || n.Op != OLITERAL { - return -1 + return 0 } return n.Val().Ctype() } @@ -693,7 +693,7 @@ func evconst(n *Node) { if nl == nil || nl.Type == nil { return } - if consttype(nl) < 0 { + if consttype(nl) == 0 { return } wl := nl.Type.Etype @@ -840,7 +840,7 @@ func evconst(n *Node) { if nr.Type == nil { return } - if consttype(nr) < 0 { + if consttype(nr) == 0 { return } wr = nr.Type.Etype @@ -1195,8 +1195,6 @@ func evconst(n *Node) { goto setfalse } - goto ret - ret: norig = saveorig(n) *n = *nl @@ -1375,7 +1373,8 @@ func defaultlitreuse(n *Node, t *types.Type, reuse canReuseNode) *Node { return convlit(n, t) } - if n.Val().Ctype() == CTNIL { + switch n.Val().Ctype() { + case CTNIL: lineno = lno if !n.Diag() { yyerror("use of untyped nil") @@ -1383,17 +1382,13 @@ func defaultlitreuse(n *Node, t *types.Type, reuse canReuseNode) *Node { } n.Type = nil - break - } - - if n.Val().Ctype() == CTSTR { + case CTSTR: t1 := types.Types[TSTRING] n = convlit1(n, t1, false, reuse) - break + default: + yyerror("defaultlit: unknown literal: %v", n) } - yyerror("defaultlit: unknown literal: %v", n) - case CTxxx: Fatalf("defaultlit: idealkind is CTxxx: %+v", n) diff --git a/src/cmd/compile/internal/gc/dcl.go b/src/cmd/compile/internal/gc/dcl.go index b8a5a90a036..2756707aef1 100644 --- a/src/cmd/compile/internal/gc/dcl.go +++ b/src/cmd/compile/internal/gc/dcl.go @@ -5,7 
+5,9 @@ package gc import ( + "bytes" "cmd/compile/internal/types" + "cmd/internal/obj" "cmd/internal/src" "fmt" "strings" @@ -83,12 +85,14 @@ func declare(n *Node, ctxt Class) { yyerror("cannot declare name %v", s) } - if ctxt == PEXTERN && s.Name == "init" { - yyerror("cannot declare init - must be func") - } - gen := 0 if ctxt == PEXTERN { + if s.Name == "init" { + yyerror("cannot declare init - must be func") + } + if s.Name == "main" && localpkg.Name == "main" { + yyerror("cannot declare main - must be func") + } externdcl = append(externdcl, n) } else { if Curfn == nil && ctxt == PAUTO { @@ -212,7 +216,13 @@ func newnoname(s *types.Sym) *Node { // newfuncname generates a new name node for a function or method. // TODO(rsc): Use an ODCLFUNC node instead. See comment in CL 7360. func newfuncname(s *types.Sym) *Node { - n := newname(s) + return newfuncnamel(lineno, s) +} + +// newfuncnamel generates a new name node for a function or method. +// TODO(rsc): Use an ODCLFUNC node instead. See comment in CL 7360. 
+func newfuncnamel(pos src.XPos, s *types.Sym) *Node { + n := newnamel(pos, s) n.Func = new(Func) n.Func.SetIsHiddenClosure(Curfn != nil) return n @@ -227,11 +237,15 @@ func dclname(s *types.Sym) *Node { } func typenod(t *types.Type) *Node { + return typenodl(src.NoXPos, t) +} + +func typenodl(pos src.XPos, t *types.Type) *Node { // if we copied another type with *t = *u // then t->nod might be out of date, so // check t->nod->type too if asNode(t.Nod) == nil || asNode(t.Nod).Type != t { - t.Nod = asTypesNode(nod(OTYPE, nil, nil)) + t.Nod = asTypesNode(nodl(pos, OTYPE, nil, nil)) asNode(t.Nod).Type = t asNode(t.Nod).Sym = t.Sym } @@ -244,7 +258,11 @@ func anonfield(typ *types.Type) *Node { } func namedfield(s string, typ *types.Type) *Node { - return nod(ODCLFIELD, newname(lookup(s)), typenod(typ)) + return symfield(lookup(s), typ) +} + +func symfield(s *types.Sym, typ *types.Type) *Node { + return nod(ODCLFIELD, newname(s), typenod(typ)) } // oldname returns the Node that declares symbol s in the current scope. @@ -519,7 +537,7 @@ func funcstart(n *Node) { // finish the body. // called in auto-declaration context. // returns in extern-declaration context. -func funcbody(n *Node) { +func funcbody() { // change the declaration context from auto to extern if dclcontext != PAUTO { Fatalf("funcbody: unexpected dclcontext %d", dclcontext) @@ -742,7 +760,7 @@ func tointerface(l []*Node) *types.Type { return t } -func tointerface0(t *types.Type, l []*Node) *types.Type { +func tointerface0(t *types.Type, l []*Node) { if t == nil || !t.IsInterface() { Fatalf("interface expected") } @@ -756,35 +774,6 @@ func tointerface0(t *types.Type, l []*Node) *types.Type { fields = append(fields, f) } t.SetInterface(fields) - - return t -} - -func embedded(s *types.Sym, pkg *types.Pkg) *Node { - const ( - CenterDot = 0xB7 - ) - // Names sometimes have disambiguation junk - // appended after a center dot. Discard it when - // making the name for the embedded struct field. 
- name := s.Name - - if i := strings.Index(s.Name, string(CenterDot)); i >= 0 { - name = s.Name[:i] - } - - var n *Node - if exportname(name) { - n = newname(lookup(name)) - } else if s.Pkg == builtinpkg { - // The name of embedded builtins belongs to pkg. - n = newname(pkg.Lookup(name)) - } else { - n = newname(s.Pkg.Lookup(name)) - } - n = nod(ODCLFIELD, n, oldname(s)) - n.SetEmbedded(true) - return n } func fakeRecv() *Node { @@ -949,7 +938,8 @@ func methodname(s *types.Sym, recv *types.Type) *types.Sym { // Add a method, declared as a function. // - msym is the method symbol // - t is function type (with receiver) -func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) { +// Returns a pointer to the existing or added Field. +func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field { if msym == nil { Fatalf("no method symbol") } @@ -958,7 +948,7 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) { rf := t.Recv() // ptr to this structure if rf == nil { yyerror("missing receiver") - return + return nil } mt := methtype(rf.Type) @@ -968,7 +958,7 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) { if t != nil && t.IsPtr() { if t.Sym != nil { yyerror("invalid receiver type %v (%v is a pointer type)", pa, t) - return + return nil } t = t.Elem() } @@ -987,23 +977,23 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) { // but just in case, fall back to generic error. 
yyerror("invalid receiver type %v (%L / %L)", pa, pa, t) } - return + return nil } - if local && !mt.Local() { + if local && mt.Sym.Pkg != localpkg { yyerror("cannot define new methods on non-local type %v", mt) - return + return nil } if msym.IsBlank() { - return + return nil } if mt.IsStruct() { for _, f := range mt.Fields().Slice() { if f.Sym == msym { yyerror("type %v has both field and method named %v", mt, msym) - return + return nil } } } @@ -1017,7 +1007,7 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) { if !eqtype(t, f.Type) || !eqtype(t.Recv().Type, f.Type.Recv().Type) { yyerror("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t) } - return + return f } f := types.NewField() @@ -1027,6 +1017,7 @@ func addmethod(msym *types.Sym, t *types.Type, local, nointerface bool) { f.SetNointerface(nointerface) mt.Methods().Append(f) + return f } func funccompile(n *Node) { @@ -1096,9 +1087,10 @@ func makefuncsym(s *types.Sym) { if s.IsBlank() { return } - if compiling_runtime && s.Name == "getg" { - // runtime.getg() is not a real function and so does - // not get a funcsym. + if compiling_runtime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") { + // runtime.getg(), getclosureptr(), getcallerpc(), and + // getcallersp() are not real functions and so do not + // get funcsyms. return } if _, existed := s.Pkg.LookupOK(funcsymname(s)); !existed { @@ -1122,123 +1114,175 @@ func dclfunc(sym *types.Sym, tfn *Node) *Node { } type nowritebarrierrecChecker struct { - curfn *Node - stable bool + // extraCalls contains extra function calls that may not be + // visible during later analysis. It maps from the ODCLFUNC of + // the caller to a list of callees. + extraCalls map[*Node][]nowritebarrierrecCall - // best maps from the ODCLFUNC of each visited function that - // recursively invokes a write barrier to the called function - // on the shortest path to a write barrier. 
- best map[*Node]nowritebarrierrecCall + // curfn is the current function during AST walks. + curfn *Node } type nowritebarrierrecCall struct { - target *Node - depth int - lineno src.XPos + target *Node // ODCLFUNC of caller or callee + lineno src.XPos // line of call } -func checknowritebarrierrec() { - c := nowritebarrierrecChecker{ - best: make(map[*Node]nowritebarrierrecCall), +type nowritebarrierrecCallSym struct { + target *obj.LSym // LSym of callee + lineno src.XPos // line of call +} + +// newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It +// must be called before transformclosure and walk. +func newNowritebarrierrecChecker() *nowritebarrierrecChecker { + c := &nowritebarrierrecChecker{ + extraCalls: make(map[*Node][]nowritebarrierrecCall), } - visitBottomUp(xtop, func(list []*Node, recursive bool) { - // Functions with write barriers have depth 0. - for _, n := range list { - if n.Func.WBPos.IsKnown() && n.Func.Pragma&Nowritebarrier != 0 { - yyerrorl(n.Func.WBPos, "write barrier prohibited") - } - if n.Func.WBPos.IsKnown() && n.Func.Pragma&Yeswritebarrierrec == 0 { - c.best[n] = nowritebarrierrecCall{target: nil, depth: 0, lineno: n.Func.WBPos} - } + + // Find all systemstack calls and record their targets. In + // general, flow analysis can't see into systemstack, but it's + // important to handle it for this check, so we model it + // directly. This has to happen before transformclosure since + // it's a lot harder to work out the argument after. + for _, n := range xtop { + if n.Op != ODCLFUNC { + continue } - - // Propagate write barrier depth up from callees. In - // the recursive case, we have to update this at most - // len(list) times and can stop when we an iteration - // that doesn't change anything. - for _ = range list { - c.stable = false - for _, n := range list { - if n.Func.Pragma&Yeswritebarrierrec != 0 { - // Don't propagate write - // barrier up to a - // yeswritebarrierrec function. 
- continue - } - if !n.Func.WBPos.IsKnown() { - c.curfn = n - c.visitcodelist(n.Nbody) - } - } - if c.stable { - break - } - } - - // Check nowritebarrierrec functions. - for _, n := range list { - if n.Func.Pragma&Nowritebarrierrec == 0 { - continue - } - call, hasWB := c.best[n] - if !hasWB { - continue - } - - // Build the error message in reverse. - err := "" - for call.target != nil { - err = fmt.Sprintf("\n\t%v: called by %v%s", linestr(call.lineno), n.Func.Nname, err) - n = call.target - call = c.best[n] - } - err = fmt.Sprintf("write barrier prohibited by caller; %v%s", n.Func.Nname, err) - yyerrorl(n.Func.WBPos, err) - } - }) + c.curfn = n + inspect(n, c.findExtraCalls) + } + c.curfn = nil + return c } -func (c *nowritebarrierrecChecker) visitcodelist(l Nodes) { - for _, n := range l.Slice() { - c.visitcode(n) +func (c *nowritebarrierrecChecker) findExtraCalls(n *Node) bool { + if n.Op != OCALLFUNC { + return true } -} - -func (c *nowritebarrierrecChecker) visitcode(n *Node) { - if n == nil { - return - } - - if n.Op == OCALLFUNC || n.Op == OCALLMETH { - c.visitcall(n) - } - - c.visitcodelist(n.Ninit) - c.visitcode(n.Left) - c.visitcode(n.Right) - c.visitcodelist(n.List) - c.visitcodelist(n.Nbody) - c.visitcodelist(n.Rlist) -} - -func (c *nowritebarrierrecChecker) visitcall(n *Node) { fn := n.Left - if n.Op == OCALLMETH { - fn = asNode(n.Left.Sym.Def) - } if fn == nil || fn.Op != ONAME || fn.Class() != PFUNC || fn.Name.Defn == nil { - return + return true + } + if !isRuntimePkg(fn.Sym.Pkg) || fn.Sym.Name != "systemstack" { + return true } - defn := fn.Name.Defn - fnbest, ok := c.best[defn] - if !ok { - return + var callee *Node + arg := n.List.First() + switch arg.Op { + case ONAME: + callee = arg.Name.Defn + case OCLOSURE: + callee = arg.Func.Closure + default: + Fatalf("expected ONAME or OCLOSURE node, got %+v", arg) } - best, ok := c.best[c.curfn] - if ok && fnbest.depth+1 >= best.depth { - return + if callee.Op != ODCLFUNC { + Fatalf("expected ODCLFUNC 
node, got %+v", callee) + } + c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos}) + return true +} + +// recordCall records a call from ODCLFUNC node "from", to function +// symbol "to" at position pos. +// +// This should be done as late as possible during compilation to +// capture precise call graphs. The target of the call is an LSym +// because that's all we know after we start SSA. +// +// This can be called concurrently for different from Nodes. +func (c *nowritebarrierrecChecker) recordCall(from *Node, to *obj.LSym, pos src.XPos) { + if from.Op != ODCLFUNC { + Fatalf("expected ODCLFUNC, got %v", from) + } + // We record this information on the *Func so this is + // concurrent-safe. + fn := from.Func + if fn.nwbrCalls == nil { + fn.nwbrCalls = new([]nowritebarrierrecCallSym) + } + *fn.nwbrCalls = append(*fn.nwbrCalls, nowritebarrierrecCallSym{to, pos}) +} + +func (c *nowritebarrierrecChecker) check() { + // We walk the call graph as late as possible so we can + // capture all calls created by lowering, but this means we + // only get to see the obj.LSyms of calls. symToFunc lets us + // get back to the ODCLFUNCs. + symToFunc := make(map[*obj.LSym]*Node) + // funcs records the back-edges of the BFS call graph walk. It + // maps from the ODCLFUNC of each function that must not have + // write barriers to the call that inhibits them. Functions + // that are directly marked go:nowritebarrierrec are in this + // map with a zero-valued nowritebarrierrecCall. This also + // acts as the set of marks for the BFS of the call graph. + funcs := make(map[*Node]nowritebarrierrecCall) + // q is the queue of ODCLFUNC Nodes to visit in BFS order. + var q nodeQueue + + for _, n := range xtop { + if n.Op != ODCLFUNC { + continue + } + + symToFunc[n.Func.lsym] = n + + // Make nowritebarrierrec functions BFS roots. 
+ if n.Func.Pragma&Nowritebarrierrec != 0 { + funcs[n] = nowritebarrierrecCall{} + q.pushRight(n) + } + // Check go:nowritebarrier functions. + if n.Func.Pragma&Nowritebarrier != 0 && n.Func.WBPos.IsKnown() { + yyerrorl(n.Func.WBPos, "write barrier prohibited") + } + } + + // Perform a BFS of the call graph from all + // go:nowritebarrierrec functions. + enqueue := func(src, target *Node, pos src.XPos) { + if target.Func.Pragma&Yeswritebarrierrec != 0 { + // Don't flow into this function. + return + } + if _, ok := funcs[target]; ok { + // Already found a path to target. + return + } + + // Record the path. + funcs[target] = nowritebarrierrecCall{target: src, lineno: pos} + q.pushRight(target) + } + for !q.empty() { + fn := q.popLeft() + + // Check fn. + if fn.Func.WBPos.IsKnown() { + var err bytes.Buffer + call := funcs[fn] + for call.target != nil { + fmt.Fprintf(&err, "\n\t%v: called by %v", linestr(call.lineno), call.target.Func.Nname) + call = funcs[call.target] + } + yyerrorl(fn.Func.WBPos, "write barrier prohibited by caller; %v%s", fn.Func.Nname, err.String()) + continue + } + + // Enqueue fn's calls. + for _, callee := range c.extraCalls[fn] { + enqueue(fn, callee.target, callee.lineno) + } + if fn.Func.nwbrCalls == nil { + continue + } + for _, callee := range *fn.Func.nwbrCalls { + target := symToFunc[callee.target] + if target != nil { + enqueue(fn, target, callee.lineno) + } + } } - c.best[c.curfn] = nowritebarrierrecCall{target: defn, depth: fnbest.depth + 1, lineno: n.Pos} - c.stable = false } diff --git a/src/cmd/compile/internal/gc/dwinl.go b/src/cmd/compile/internal/gc/dwinl.go new file mode 100644 index 00000000000..f76bacc5b91 --- /dev/null +++ b/src/cmd/compile/internal/gc/dwinl.go @@ -0,0 +1,317 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gc + +import ( + "cmd/internal/dwarf" + "cmd/internal/obj" + "cmd/internal/src" + "sort" + "strings" +) + +// To identify variables by original source position. +type varPos struct { + DeclFile string + DeclLine uint + DeclCol uint +} + +// This is the main entry point for collection of raw material to +// drive generation of DWARF "inlined subroutine" DIEs. See proposal +// 22080 for more details and background info. +func assembleInlines(fnsym *obj.LSym, fn *Node, dwVars []*dwarf.Var) dwarf.InlCalls { + var inlcalls dwarf.InlCalls + + if Debug_gendwarfinl != 0 { + Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name) + } + + // This maps inline index (from Ctxt.InlTree) to index in inlcalls.Calls + imap := make(map[int]int) + + // Walk progs to build up the InlCalls data structure + var prevpos src.XPos + for p := fnsym.Func.Text; p != nil; p = p.Link { + if p.Pos == prevpos { + continue + } + ii := posInlIndex(p.Pos) + if ii >= 0 { + insertInlCall(&inlcalls, ii, imap) + } + prevpos = p.Pos + } + + // This is used to partition DWARF vars by inline index. Vars not + // produced by the inliner will wind up in the vmap[0] entry. + vmap := make(map[int32][]*dwarf.Var) + + // Now walk the dwarf vars and partition them based on whether they + // were produced by the inliner (dwv.InlIndex > 0) or were original + // vars/params from the function (dwv.InlIndex == 0). + for _, dwv := range dwVars { + + vmap[dwv.InlIndex] = append(vmap[dwv.InlIndex], dwv) + + // Zero index => var was not produced by an inline + if dwv.InlIndex == 0 { + continue + } + + // Look up index in our map, then tack the var in question + // onto the vars list for the correct inlined call. + ii := int(dwv.InlIndex) - 1 + idx, ok := imap[ii] + if !ok { + // We can occasionally encounter a var produced by the + // inliner for which there is no remaining prog; add a new + // entry to the call list in this scenario. 
+ idx = insertInlCall(&inlcalls, ii, imap) + } + inlcalls.Calls[idx].InlVars = + append(inlcalls.Calls[idx].InlVars, dwv) + } + + // Post process the map above to assign child indices to vars. For + // variables that weren't produced by an inline, sort them + // according to class and name and assign indices that way. For + // vars produced by an inline, assign child index by looking up + // the var name in the origin pre-optimization dcl list for the + // inlined function. + for ii, sl := range vmap { + if ii == 0 { + sort.Sort(byClassThenName(sl)) + for j := 0; j < len(sl); j++ { + sl[j].ChildIndex = int32(j) + } + } else { + // Assign child index based on pre-inlined decls + ifnlsym := Ctxt.InlTree.InlinedFunction(int(ii - 1)) + dcl, _ := preInliningDcls(ifnlsym) + m := make(map[varPos]int) + for i := 0; i < len(dcl); i++ { + n := dcl[i] + pos := Ctxt.InnermostPos(n.Pos) + vp := varPos{ + DeclFile: pos.Base().SymFilename(), + DeclLine: pos.Line(), + DeclCol: pos.Col(), + } + m[vp] = i + } + for j := 0; j < len(sl); j++ { + vp := varPos{ + DeclFile: sl[j].DeclFile, + DeclLine: sl[j].DeclLine, + DeclCol: sl[j].DeclCol, + } + if idx, found := m[vp]; found { + sl[j].ChildIndex = int32(idx) + } else { + Fatalf("unexpected: can't find var %s in preInliningDcls for %v\n", sl[j].Name, Ctxt.InlTree.InlinedFunction(int(ii-1))) + } + } + } + } + + // Make a second pass through the progs to compute PC ranges + // for the various inlined calls. 
+ curii := -1 + var crange *dwarf.Range + var prevp *obj.Prog + for p := fnsym.Func.Text; p != nil; prevp, p = p, p.Link { + if prevp != nil && p.Pos == prevp.Pos { + continue + } + ii := posInlIndex(p.Pos) + if ii == curii { + continue + } else { + // Close out the current range + endRange(crange, prevp) + + // Begin new range + crange = beginRange(inlcalls.Calls, p, ii, imap) + curii = ii + } + } + if prevp != nil { + endRange(crange, prevp) + } + + // Debugging + if Debug_gendwarfinl != 0 { + dumpInlCalls(inlcalls) + dumpInlVars(dwVars) + } + + return inlcalls +} + +// Secondary hook for DWARF inlined subroutine generation. This is called +// late in the compilation when it is determined that we need an +// abstract function DIE for an inlined routine imported from a +// previously compiled package. +func genAbstractFunc(fn *obj.LSym) { + ifn := Ctxt.DwFixups.GetPrecursorFunc(fn) + if ifn == nil { + Ctxt.Diag("failed to locate precursor fn for %v", fn) + return + } + if Debug_gendwarfinl != 0 { + Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name) + } + Ctxt.DwarfAbstractFunc(ifn, fn, myimportpath) +} + +func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int { + callIdx, found := imap[inlIdx] + if found { + return callIdx + } + + // Haven't seen this inline yet. Visit parent of inline if there + // is one. We do this first so that parents appear before their + // children in the resulting table. 
+ parCallIdx := -1 + parInlIdx := Ctxt.InlTree.Parent(inlIdx) + if parInlIdx >= 0 { + parCallIdx = insertInlCall(dwcalls, parInlIdx, imap) + } + + // Create new entry for this inline + inlinedFn := Ctxt.InlTree.InlinedFunction(int(inlIdx)) + callXPos := Ctxt.InlTree.CallPos(int(inlIdx)) + absFnSym := Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn) + pb := Ctxt.PosTable.Pos(callXPos).Base() + callFileSym := Ctxt.Lookup(pb.SymFilename()) + ic := dwarf.InlCall{ + InlIndex: inlIdx, + CallFile: callFileSym, + CallLine: uint32(callXPos.Line()), + AbsFunSym: absFnSym, + Root: parCallIdx == -1, + } + dwcalls.Calls = append(dwcalls.Calls, ic) + callIdx = len(dwcalls.Calls) - 1 + imap[inlIdx] = callIdx + + if parCallIdx != -1 { + // Add this inline to parent's child list + dwcalls.Calls[parCallIdx].Children = append(dwcalls.Calls[parCallIdx].Children, callIdx) + } + + return callIdx +} + +// Given a src.XPos, return its associated inlining index if it +// corresponds to something created as a result of an inline, or -1 if +// there is no inline info. Note that the index returned will refer to +// the deepest call in the inlined stack, e.g. if you have "A calls B +// calls C calls D" and all three callees are inlined (B, C, and D), +// the index for a node from the inlined body of D will refer to the +// call to D from C. Whew. 
+func posInlIndex(xpos src.XPos) int { + pos := Ctxt.PosTable.Pos(xpos) + if b := pos.Base(); b != nil { + ii := b.InliningIndex() + if ii >= 0 { + return ii + } + } + return -1 +} + +func endRange(crange *dwarf.Range, p *obj.Prog) { + if crange == nil { + return + } + crange.End = p.Pc +} + +func beginRange(calls []dwarf.InlCall, p *obj.Prog, ii int, imap map[int]int) *dwarf.Range { + if ii == -1 { + return nil + } + callIdx, found := imap[ii] + if !found { + Fatalf("internal error: can't find inlIndex %d in imap for prog at %d\n", ii, p.Pc) + } + call := &calls[callIdx] + + // Set up range and append to correct inlined call + call.Ranges = append(call.Ranges, dwarf.Range{Start: p.Pc, End: -1}) + return &call.Ranges[len(call.Ranges)-1] +} + +func cmpDwarfVar(a, b *dwarf.Var) bool { + // named before artificial + aart := 0 + if strings.HasPrefix(a.Name, "~r") { + aart = 1 + } + bart := 0 + if strings.HasPrefix(b.Name, "~r") { + bart = 1 + } + if aart != bart { + return aart < bart + } + + // otherwise sort by name + return a.Name < b.Name +} + +// byClassThenName implements sort.Interface for []*dwarf.Var using cmpDwarfVar. 
+type byClassThenName []*dwarf.Var + +func (s byClassThenName) Len() int { return len(s) } +func (s byClassThenName) Less(i, j int) bool { return cmpDwarfVar(s[i], s[j]) } +func (s byClassThenName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func dumpInlCall(inlcalls dwarf.InlCalls, idx, ilevel int) { + for i := 0; i < ilevel; i += 1 { + Ctxt.Logf(" ") + } + ic := inlcalls.Calls[idx] + callee := Ctxt.InlTree.InlinedFunction(ic.InlIndex) + Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name) + for _, f := range ic.InlVars { + Ctxt.Logf(" %v", f.Name) + } + Ctxt.Logf(" ) C: (") + for _, k := range ic.Children { + Ctxt.Logf(" %v", k) + } + Ctxt.Logf(" ) R:") + for _, r := range ic.Ranges { + Ctxt.Logf(" [%d,%d)", r.Start, r.End) + } + Ctxt.Logf("\n") + for _, k := range ic.Children { + dumpInlCall(inlcalls, k, ilevel+1) + } + +} + +func dumpInlCalls(inlcalls dwarf.InlCalls) { + n := len(inlcalls.Calls) + for k := 0; k < n; k += 1 { + if inlcalls.Calls[k].Root { + dumpInlCall(inlcalls, k, 0) + } + } +} + +func dumpInlVars(dwvars []*dwarf.Var) { + for i, dwv := range dwvars { + typ := "local" + if dwv.Abbrev == dwarf.DW_ABRV_PARAM_LOCLIST || dwv.Abbrev == dwarf.DW_ABRV_PARAM { + typ = "param" + } + Ctxt.Logf("V%d: %s CI:%d II:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, typ) + } +} diff --git a/src/cmd/compile/internal/gc/esc.go b/src/cmd/compile/internal/gc/esc.go index 87a5b7f29f5..03c0adafd51 100644 --- a/src/cmd/compile/internal/gc/esc.go +++ b/src/cmd/compile/internal/gc/esc.go @@ -129,20 +129,17 @@ func (v *bottomUpVisitor) visitcode(n *Node, min uint32) uint32 { min = v.visitcodelist(n.Nbody, min) min = v.visitcodelist(n.Rlist, min) - if n.Op == OCALLFUNC || n.Op == OCALLMETH { - fn := n.Left - if n.Op == OCALLMETH { - fn = asNode(n.Left.Sym.Def) - } + switch n.Op { + case OCALLFUNC, OCALLMETH: + fn := asNode(n.Left.Type.Nname()) if fn != nil && fn.Op == ONAME && fn.Class() == PFUNC && fn.Name.Defn != nil { m := v.visit(fn.Name.Defn) if m 
< min { min = m } } - } - if n.Op == OCLOSURE { + case OCLOSURE: m := v.visit(n.Func.Closure) if m < min { min = m @@ -176,12 +173,6 @@ func (v *bottomUpVisitor) visitcode(n *Node, min uint32) uint32 { // then the value can stay on the stack. If the value new(T) does // not escape, then new(T) can be rewritten into a stack allocation. // The same is true of slice literals. -// -// If optimizations are disabled (-N), this code is not used. -// Instead, the compiler assumes that any value whose address -// is taken without being immediately dereferenced -// needs to be moved to the heap, and new(T) and slice -// literals are always real allocations. func escapes(all []*Node) { visitBottomUp(all, escAnalyze) @@ -205,9 +196,7 @@ const ( // allowed level when a loop is encountered. Using -2 suffices to // pass all the tests we have written so far, which we assume matches // the level of complexity we want the escape analysis code to handle. -const ( - MinLevel = -2 -) +const MinLevel = -2 // A Level encodes the reference state and context applied to // (stack, heap) allocated memory. 
@@ -679,7 +668,7 @@ func (e *EscState) esc(n *Node, parent *Node) { // Big stuff escapes unconditionally // "Big" conditions that were scattered around in walk have been gathered here if n.Esc != EscHeap && n.Type != nil && - (n.Type.Width > MaxStackVarSize || + (n.Type.Width > maxStackVarSize || (n.Op == ONEW || n.Op == OPTRLIT) && n.Type.Elem().Width >= 1<<16 || n.Op == OMAKESLICE && !isSmallMakeSlice(n)) { if Debug['m'] > 2 { @@ -691,7 +680,18 @@ func (e *EscState) esc(n *Node, parent *Node) { } e.esc(n.Left, n) + + if n.Op == ORANGE { + // ORANGE node's Right is evaluated before the loop + e.loopdepth-- + } + e.esc(n.Right, n) + + if n.Op == ORANGE { + e.loopdepth++ + } + e.esclist(n.Nbody, n) e.esclist(n.List, n) e.esclist(n.Rlist, n) @@ -848,7 +848,7 @@ func (e *EscState) esc(n *Node, parent *Node) { case ORETURN: retList := n.List - if retList.Len() == 1 && Curfn.Type.Results().NumFields() > 1 { + if retList.Len() == 1 && Curfn.Type.NumResults() > 1 { // OAS2FUNC in disguise // esccall already done on n.List.First() // tie e.nodeEscState(n.List.First()).Retval to Curfn.Func.Dcl PPARAMOUT's @@ -1279,16 +1279,14 @@ func parsetag(note string) uint16 { // to the second output (and if there are more than two outputs, there is no flow to those.) func describeEscape(em uint16) string { var s string - if em&EscMask == EscUnknown { + switch em & EscMask { + case EscUnknown: s = "EscUnknown" - } - if em&EscMask == EscNone { + case EscNone: s = "EscNone" - } - if em&EscMask == EscHeap { + case EscHeap: s = "EscHeap" - } - if em&EscMask == EscReturn { + case EscReturn: s = "EscReturn" } if em&EscContentEscapes != 0 { @@ -1554,20 +1552,20 @@ func (e *EscState) esccall(call *Node, parent *Node) { call.Right = arg } e.escassignWhyWhere(n, arg, "arg to recursive call", call) // TODO this message needs help. - if arg != args[0] { - // "..." arguments are untracked - for _, a := range args { - if Debug['m'] > 3 { - fmt.Printf("%v::esccall:: ... 
<- %S, untracked\n", linestr(lineno), a) - } - e.escassignSinkWhyWhere(arg, a, "... arg to recursive call", call) - } - // No more PPARAM processing, but keep - // going for PPARAMOUT. - args = nil + if arg == args[0] { + args = args[1:] continue } - args = args[1:] + // "..." arguments are untracked + for _, a := range args { + if Debug['m'] > 3 { + fmt.Printf("%v::esccall:: ... <- %S, untracked\n", linestr(lineno), a) + } + e.escassignSinkWhyWhere(arg, a, "... arg to recursive call", call) + } + // No more PPARAM processing, but keep + // going for PPARAMOUT. + args = nil case PPARAMOUT: cE.Retval.Append(n) diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go index a92a41c5ceb..c5d5c52205d 100644 --- a/src/cmd/compile/internal/gc/export.go +++ b/src/cmd/compile/internal/gc/export.go @@ -9,6 +9,7 @@ import ( "bytes" "cmd/compile/internal/types" "cmd/internal/bio" + "cmd/internal/src" "fmt" "unicode" "unicode/utf8" @@ -18,7 +19,7 @@ var ( Debug_export int // if set, print debugging information about export data ) -func exportf(format string, args ...interface{}) { +func exportf(bout *bio.Writer, format string, args ...interface{}) { fmt.Fprintf(bout, format, args...) if Debug_export != 0 { fmt.Printf(format, args...) 
@@ -82,7 +83,7 @@ func autoexport(n *Node, ctxt Class) { if (ctxt != PEXTERN && ctxt != PFUNC) || dclcontext != PEXTERN { return } - if n.Type != nil && n.Type.IsKind(TFUNC) && n.Type.Recv() != nil { // method + if n.Type != nil && n.Type.IsKind(TFUNC) && n.IsMethod() { return } @@ -111,10 +112,10 @@ func reexportdep(n *Node) { switch n.Op { case ONAME: switch n.Class() { - // methods will be printed along with their type - // nodes for T.Method expressions case PFUNC: - if n.Left != nil && n.Left.Op == OTYPE { + // methods will be printed along with their type + // nodes for T.Method expressions + if n.isMethodExpression() { break } @@ -221,14 +222,14 @@ func (x methodbyname) Len() int { return len(x) } func (x methodbyname) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x methodbyname) Less(i, j int) bool { return x[i].Sym.Name < x[j].Sym.Name } -func dumpexport() { +func dumpexport(bout *bio.Writer) { if buildid != "" { - exportf("build id %q\n", buildid) + exportf(bout, "build id %q\n", buildid) } size := 0 // size of export section without enclosing markers // The linker also looks for the $$ marker - use char after $$ to distinguish format. - exportf("\n$$B\n") // indicate binary export format + exportf(bout, "\n$$B\n") // indicate binary export format if debugFormat { // save a copy of the export data var copy bytes.Buffer @@ -252,7 +253,7 @@ func dumpexport() { } else { size = export(bout.Writer, Debug_export != 0) } - exportf("\n$$\n") + exportf(bout, "\n$$\n") if Debug_export != 0 { fmt.Printf("export data size = %d bytes\n", size) @@ -280,12 +281,12 @@ func importsym(pkg *types.Pkg, s *types.Sym, op Op) { // pkgtype returns the named type declared by symbol s. // If no such type has been declared yet, a forward declaration is returned. 
// pkg is the package being imported -func pkgtype(pkg *types.Pkg, s *types.Sym) *types.Type { +func pkgtype(pos src.XPos, pkg *types.Pkg, s *types.Sym) *types.Type { importsym(pkg, s, OTYPE) if asNode(s.Def) == nil || asNode(s.Def).Op != OTYPE { t := types.New(TFORW) t.Sym = s - s.Def = asTypesNode(typenod(t)) + s.Def = asTypesNode(typenodl(pos, t)) asNode(s.Def).Name = new(Name) } @@ -326,7 +327,7 @@ func importconst(pkg *types.Pkg, s *types.Sym, t *types.Type, n *Node) { // importvar declares symbol s as an imported variable with type t. // pkg is the package being imported -func importvar(pkg *types.Pkg, s *types.Sym, t *types.Type) { +func importvar(pos src.XPos, pkg *types.Pkg, s *types.Sym, t *types.Type) { importsym(pkg, s, ONAME) if asNode(s.Def) != nil && asNode(s.Def).Op == ONAME { if eqtype(t, asNode(s.Def).Type) { @@ -335,7 +336,7 @@ func importvar(pkg *types.Pkg, s *types.Sym, t *types.Type) { yyerror("inconsistent definition for var %v during import\n\t%v (in %q)\n\t%v (in %q)", s, asNode(s.Def).Type, s.Importdef.Path, t, pkg.Path) } - n := newname(s) + n := newnamel(pos, s) s.Importdef = pkg n.Type = t declare(n, PEXTERN) @@ -347,7 +348,7 @@ func importvar(pkg *types.Pkg, s *types.Sym, t *types.Type) { // importalias declares symbol s as an imported type alias with type t. 
// pkg is the package being imported -func importalias(pkg *types.Pkg, s *types.Sym, t *types.Type) { +func importalias(pos src.XPos, pkg *types.Pkg, s *types.Sym, t *types.Type) { importsym(pkg, s, OTYPE) if asNode(s.Def) != nil && asNode(s.Def).Op == OTYPE { if eqtype(t, asNode(s.Def).Type) { @@ -356,7 +357,7 @@ func importalias(pkg *types.Pkg, s *types.Sym, t *types.Type) { yyerror("inconsistent definition for type alias %v during import\n\t%v (in %q)\n\t%v (in %q)", s, asNode(s.Def).Type, s.Importdef.Path, t, pkg.Path) } - n := newname(s) + n := newnamel(pos, s) n.Op = OTYPE s.Importdef = pkg n.Type = t @@ -386,10 +387,10 @@ func dumpasmhdr() { if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() { break } - fmt.Fprintf(b, "#define %s__size %d\n", t.Sym.Name, int(t.Width)) - for _, t := range t.Fields().Slice() { - if !t.Sym.IsBlank() { - fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, t.Sym.Name, int(t.Offset)) + fmt.Fprintf(b, "#define %s__size %d\n", n.Sym.Name, int(t.Width)) + for _, f := range t.Fields().Slice() { + if !f.Sym.IsBlank() { + fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, f.Sym.Name, int(f.Offset)) } } } diff --git a/src/cmd/compile/internal/gc/float_test.go b/src/cmd/compile/internal/gc/float_test.go index f906f3a228c..4cb9532e556 100644 --- a/src/cmd/compile/internal/gc/float_test.go +++ b/src/cmd/compile/internal/gc/float_test.go @@ -4,7 +4,10 @@ package gc -import "testing" +import ( + "math" + "testing" +) // For GO386=387, make sure fucomi* opcodes are not used // for comparison operations. 
@@ -31,6 +34,107 @@ func TestFloatCompare(t *testing.T) { } } +func TestFloatCompareFolded(t *testing.T) { + // float64 comparisons + d1, d3, d5, d9 := float64(1), float64(3), float64(5), float64(9) + if d3 == d5 { + t.Errorf("d3 == d5 returned true") + } + if d3 != d3 { + t.Errorf("d3 != d3 returned true") + } + if d3 > d5 { + t.Errorf("d3 > d5 returned true") + } + if d3 >= d9 { + t.Errorf("d3 >= d9 returned true") + } + if d5 < d1 { + t.Errorf("d5 < d1 returned true") + } + if d9 <= d1 { + t.Errorf("d9 <= d1 returned true") + } + if math.NaN() == math.NaN() { + t.Errorf("math.NaN() == math.NaN() returned true") + } + if math.NaN() >= math.NaN() { + t.Errorf("math.NaN() >= math.NaN() returned true") + } + if math.NaN() <= math.NaN() { + t.Errorf("math.NaN() <= math.NaN() returned true") + } + if math.Copysign(math.NaN(), -1) < math.NaN() { + t.Errorf("math.Copysign(math.NaN(), -1) < math.NaN() returned true") + } + if math.Inf(1) != math.Inf(1) { + t.Errorf("math.Inf(1) != math.Inf(1) returned true") + } + if math.Inf(-1) != math.Inf(-1) { + t.Errorf("math.Inf(-1) != math.Inf(-1) returned true") + } + if math.Copysign(0, -1) != 0 { + t.Errorf("math.Copysign(0, -1) != 0 returned true") + } + if math.Copysign(0, -1) < 0 { + t.Errorf("math.Copysign(0, -1) < 0 returned true") + } + if 0 > math.Copysign(0, -1) { + t.Errorf("0 > math.Copysign(0, -1) returned true") + } + + // float32 comparisons + s1, s3, s5, s9 := float32(1), float32(3), float32(5), float32(9) + if s3 == s5 { + t.Errorf("s3 == s5 returned true") + } + if s3 != s3 { + t.Errorf("s3 != s3 returned true") + } + if s3 > s5 { + t.Errorf("s3 > s5 returned true") + } + if s3 >= s9 { + t.Errorf("s3 >= s9 returned true") + } + if s5 < s1 { + t.Errorf("s5 < s1 returned true") + } + if s9 <= s1 { + t.Errorf("s9 <= s1 returned true") + } + sPosNaN, sNegNaN := float32(math.NaN()), float32(math.Copysign(math.NaN(), -1)) + if sPosNaN == sPosNaN { + t.Errorf("sPosNaN == sPosNaN returned true") + } + if sPosNaN >= 
sPosNaN { + t.Errorf("sPosNaN >= sPosNaN returned true") + } + if sPosNaN <= sPosNaN { + t.Errorf("sPosNaN <= sPosNaN returned true") + } + if sNegNaN < sPosNaN { + t.Errorf("sNegNaN < sPosNaN returned true") + } + sPosInf, sNegInf := float32(math.Inf(1)), float32(math.Inf(-1)) + if sPosInf != sPosInf { + t.Errorf("sPosInf != sPosInf returned true") + } + if sNegInf != sNegInf { + t.Errorf("sNegInf != sNegInf returned true") + } + sNegZero := float32(math.Copysign(0, -1)) + if sNegZero != 0 { + t.Errorf("sNegZero != 0 returned true") + } + if sNegZero < 0 { + t.Errorf("sNegZero < 0 returned true") + } + if 0 > sNegZero { + t.Errorf("0 > sNegZero returned true") + } +} + // For GO386=387, make sure fucomi* opcodes are not used // for float->int conversions. @@ -95,6 +199,16 @@ func cvt12(a float32) uint { return uint(a) } +//go:noinline +func f2i64p(v float64) *int64 { + return ip64(int64(v / 0.1)) +} + +//go:noinline +func ip64(v int64) *int64 { + return &v +} + func TestFloatConvert(t *testing.T) { if got := cvt1(3.5); got != 3 { t.Errorf("cvt1 got %d, wanted 3", got) @@ -132,6 +246,120 @@ func TestFloatConvert(t *testing.T) { if got := cvt12(3.5); got != 3 { t.Errorf("cvt12 got %d, wanted 3", got) } + if got := *f2i64p(10); got != 100 { + t.Errorf("f2i64p got %d, wanted 100", got) + } +} + +func TestFloatConvertFolded(t *testing.T) { + // Assign constants to variables so that they are (hopefully) constant folded + // by the SSA backend rather than the frontend. 
+ u64, u32, u16, u8 := uint64(1<<63), uint32(1<<31), uint16(1<<15), uint8(1<<7) + i64, i32, i16, i8 := int64(-1<<63), int32(-1<<31), int16(-1<<15), int8(-1<<7) + du64, du32, du16, du8 := float64(1<<63), float64(1<<31), float64(1<<15), float64(1<<7) + di64, di32, di16, di8 := float64(-1<<63), float64(-1<<31), float64(-1<<15), float64(-1<<7) + su64, su32, su16, su8 := float32(1<<63), float32(1<<31), float32(1<<15), float32(1<<7) + si64, si32, si16, si8 := float32(-1<<63), float32(-1<<31), float32(-1<<15), float32(-1<<7) + + // integer to float + if float64(u64) != du64 { + t.Errorf("float64(u64) != du64") + } + if float64(u32) != du32 { + t.Errorf("float64(u32) != du32") + } + if float64(u16) != du16 { + t.Errorf("float64(u16) != du16") + } + if float64(u8) != du8 { + t.Errorf("float64(u8) != du8") + } + if float64(i64) != di64 { + t.Errorf("float64(i64) != di64") + } + if float64(i32) != di32 { + t.Errorf("float64(i32) != di32") + } + if float64(i16) != di16 { + t.Errorf("float64(i16) != di16") + } + if float64(i8) != di8 { + t.Errorf("float64(i8) != di8") + } + if float32(u64) != su64 { + t.Errorf("float32(u64) != su64") + } + if float32(u32) != su32 { + t.Errorf("float32(u32) != su32") + } + if float32(u16) != su16 { + t.Errorf("float32(u16) != su16") + } + if float32(u8) != su8 { + t.Errorf("float32(u8) != su8") + } + if float32(i64) != si64 { + t.Errorf("float32(i64) != si64") + } + if float32(i32) != si32 { + t.Errorf("float32(i32) != si32") + } + if float32(i16) != si16 { + t.Errorf("float32(i16) != si16") + } + if float32(i8) != si8 { + t.Errorf("float32(i8) != si8") + } + + // float to integer + if uint64(du64) != u64 { + t.Errorf("uint64(du64) != u64") + } + if uint32(du32) != u32 { + t.Errorf("uint32(du32) != u32") + } + if uint16(du16) != u16 { + t.Errorf("uint16(du16) != u16") + } + if uint8(du8) != u8 { + t.Errorf("uint8(du8) != u8") + } + if int64(di64) != i64 { + t.Errorf("int64(di64) != i64") + } + if int32(di32) != i32 { + t.Errorf("int32(di32) != 
i32") + } + if int16(di16) != i16 { + t.Errorf("int16(di16) != i16") + } + if int8(di8) != i8 { + t.Errorf("int8(di8) != i8") + } + if uint64(su64) != u64 { + t.Errorf("uint64(su64) != u64") + } + if uint32(su32) != u32 { + t.Errorf("uint32(su32) != u32") + } + if uint16(su16) != u16 { + t.Errorf("uint16(su16) != u16") + } + if uint8(su8) != u8 { + t.Errorf("uint8(su8) != u8") + } + if int64(si64) != i64 { + t.Errorf("int64(si64) != i64") + } + if int32(si32) != i32 { + t.Errorf("int32(si32) != i32") + } + if int16(si16) != i16 { + t.Errorf("int16(si16) != i16") + } + if int8(si8) != i8 { + t.Errorf("int8(si8) != i8") + } } var sinkFloat float64 diff --git a/src/cmd/compile/internal/gc/fmt.go b/src/cmd/compile/internal/gc/fmt.go index 2f56d8ab51c..4b2fdb0dca7 100644 --- a/src/cmd/compile/internal/gc/fmt.go +++ b/src/cmd/compile/internal/gc/fmt.go @@ -204,11 +204,6 @@ var goopnames = []string{ OSUB: "-", OSWITCH: "switch", OXOR: "^", - OXFALL: "fallthrough", -} - -func (o Op) String() string { - return fmt.Sprint(o) } func (o Op) GoString() string { @@ -227,28 +222,14 @@ func (o Op) format(s fmt.State, verb rune, mode fmtMode) { func (o Op) oconv(s fmt.State, flag FmtFlag, mode fmtMode) { if flag&FmtSharp != 0 || mode != FDbg { - if o >= 0 && int(o) < len(goopnames) && goopnames[o] != "" { + if int(o) < len(goopnames) && goopnames[o] != "" { fmt.Fprint(s, goopnames[o]) return } } - if o >= 0 && int(o) < len(opnames) && opnames[o] != "" { - fmt.Fprint(s, opnames[o]) - return - } - - fmt.Fprintf(s, "O-%d", int(o)) -} - -var classnames = []string{ - "Pxxx", - "PEXTERN", - "PAUTO", - "PAUTOHEAP", - "PPARAM", - "PPARAMOUT", - "PFUNC", + // 'o.String()' instead of just 'o' to avoid infinite recursion + fmt.Fprint(s, o.String()) } type ( @@ -448,11 +429,7 @@ func (n *Node) jconv(s fmt.State, flag FmtFlag) { } if n.Class() != 0 { - if int(n.Class()) < len(classnames) { - fmt.Fprintf(s, " class(%s)", classnames[n.Class()]) - } else { - fmt.Fprintf(s, " class(%d?)", 
n.Class()) - } + fmt.Fprintf(s, " class(%v)", n.Class()) } if n.Colas() { @@ -814,7 +791,7 @@ func typefmt(t *types.Type, flag FmtFlag, mode fmtMode, depth int) string { } buf = append(buf, tmodeString(t.Params(), mode, depth)...) - switch t.Results().NumFields() { + switch t.NumResults() { case 0: // nothing to do @@ -1080,11 +1057,7 @@ func (n *Node) stmtfmt(s fmt.State, mode fmtMode) { } mode.Fprintf(s, ": %v", n.Nbody) - case OBREAK, - OCONTINUE, - OGOTO, - OFALL, - OXFALL: + case OBREAK, OCONTINUE, OGOTO, OFALL: if n.Left != nil { mode.Fprintf(s, "%#v %v", n.Op, n.Left) } else { @@ -1219,7 +1192,6 @@ var opprec = []int{ OSELECT: -1, OSWITCH: -1, OXCASE: -1, - OXFALL: -1, OEND: 0, } @@ -1543,13 +1515,11 @@ func (n *Node) exprfmt(s fmt.State, prec int, mode fmtMode) { n.Right.exprfmt(s, nprec+1, mode) case OADDSTR: - i := 0 - for _, n1 := range n.List.Slice() { + for i, n1 := range n.List.Slice() { if i != 0 { fmt.Fprint(s, " + ") } n1.exprfmt(s, nprec, mode) - i++ } case OCMPSTR, OCMPIFACE: diff --git a/src/cmd/compile/internal/gc/gen.go b/src/cmd/compile/internal/gc/gen.go index 626d282c184..f9b4584cf6b 100644 --- a/src/cmd/compile/internal/gc/gen.go +++ b/src/cmd/compile/internal/gc/gen.go @@ -11,7 +11,7 @@ import ( "strconv" ) -func Sysfunc(name string) *obj.LSym { +func sysfunc(name string) *obj.LSym { return Runtimepkg.Lookup(name).Linksym() } @@ -39,7 +39,7 @@ func autotmpname(n int) string { } // make a new Node off the books -func tempnamel(pos src.XPos, curfn *Node, nn *Node, t *types.Type) { +func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node { if curfn == nil { Fatalf("no curfn for tempname") } @@ -61,23 +61,15 @@ func tempnamel(pos src.XPos, curfn *Node, nn *Node, t *types.Type) { n.SetClass(PAUTO) n.Esc = EscNever n.Name.Curfn = curfn + n.Name.SetUsed(true) n.Name.SetAutoTemp(true) curfn.Func.Dcl = append(curfn.Func.Dcl, n) dowidth(t) - *nn = *n + + return n.Orig } func temp(t *types.Type) *Node { - var n Node - tempnamel(lineno, Curfn, 
&n, t) - asNode(n.Sym.Def).Name.SetUsed(true) - return n.Orig -} - -func tempAt(pos src.XPos, curfn *Node, t *types.Type) *Node { - var n Node - tempnamel(pos, curfn, &n, t) - asNode(n.Sym.Def).Name.SetUsed(true) - return n.Orig + return tempAt(lineno, Curfn, t) } diff --git a/src/cmd/compile/internal/gc/go.go b/src/cmd/compile/internal/gc/go.go index b1ead93c346..dc94cf4f98b 100644 --- a/src/cmd/compile/internal/gc/go.go +++ b/src/cmd/compile/internal/gc/go.go @@ -7,7 +7,6 @@ package gc import ( "cmd/compile/internal/ssa" "cmd/compile/internal/types" - "cmd/internal/bio" "cmd/internal/obj" "cmd/internal/src" "sync" @@ -15,7 +14,7 @@ import ( const ( BADWIDTH = types.BADWIDTH - MaxStackVarSize = 10 * 1024 * 1024 + maxStackVarSize = 10 * 1024 * 1024 ) // isRuntimePkg reports whether p is package runtime. @@ -85,8 +84,6 @@ var outfile string var linkobj string var dolinkobj bool -var bout *bio.Writer - // nerrors is the number of compiler errors reported // since the last call to saveerrors. var nerrors int @@ -201,7 +198,7 @@ var compiling_runtime bool // Compiling the standard library var compiling_std bool -var compiling_wrappers int +var compiling_wrappers bool var use_writebarrier bool @@ -222,6 +219,11 @@ var instrumenting bool // Whether we are tracking lexical scopes for DWARF. var trackScopes bool +// Controls generation of DWARF inlined instance records. Zero +// disables, 1 emits inlined routines but suppresses var info, +// and 2 emits inlined routines with tracking of formals/locals. +var genDwarfInline int + var debuglive int var Ctxt *obj.Link @@ -241,9 +243,10 @@ var autogeneratedPos src.XPos type Arch struct { LinkArch *obj.LinkArch - REGSP int - MAXWIDTH int64 - Use387 bool // should 386 backend use 387 FP instructions instead of sse2. + REGSP int + MAXWIDTH int64 + Use387 bool // should 386 backend use 387 FP instructions instead of sse2. 
+ SoftFloat bool PadFrame func(int64) int64 ZeroRange func(*Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog @@ -290,6 +293,7 @@ var ( goschedguarded, writeBarrier, writebarrierptr, + gcWriteBarrier, typedmemmove, typedmemclr, Udiv *obj.LSym diff --git a/src/cmd/compile/internal/gc/gsubr.go b/src/cmd/compile/internal/gc/gsubr.go index b25709b9999..d074900d983 100644 --- a/src/cmd/compile/internal/gc/gsubr.go +++ b/src/cmd/compile/internal/gc/gsubr.go @@ -37,7 +37,7 @@ import ( "cmd/internal/src" ) -var sharedProgArray *[10000]obj.Prog = new([10000]obj.Prog) // *T instead of T to work around issue 19839 +var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839 // Progs accumulates Progs for a function and converts them into machine code. type Progs struct { @@ -70,13 +70,13 @@ func newProgs(fn *Node, worker int) *Progs { } func (pp *Progs) NewProg() *obj.Prog { + var p *obj.Prog if pp.cacheidx < len(pp.progcache) { - p := &pp.progcache[pp.cacheidx] - p.Ctxt = Ctxt + p = &pp.progcache[pp.cacheidx] pp.cacheidx++ - return p + } else { + p = new(obj.Prog) } - p := new(obj.Prog) p.Ctxt = Ctxt return p } @@ -84,7 +84,7 @@ func (pp *Progs) NewProg() *obj.Prog { // Flush converts from pp to machine code. func (pp *Progs) Flush() { plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.curfn} - obj.Flushplist(Ctxt, plist, pp.NewProg) + obj.Flushplist(Ctxt, plist, pp.NewProg, myimportpath) } // Free clears pp and any associated resources. 
diff --git a/src/cmd/compile/internal/gc/init.go b/src/cmd/compile/internal/gc/init.go index 93ae2410cd9..180cbcfda2b 100644 --- a/src/cmd/compile/internal/gc/init.go +++ b/src/cmd/compile/internal/gc/init.go @@ -198,7 +198,7 @@ func fninit(n []*Node) { exportsym(fn.Func.Nname) fn.Nbody.Set(r) - funcbody(fn) + funcbody() Curfn = fn fn = typecheck(fn, Etop) @@ -208,8 +208,7 @@ func fninit(n []*Node) { } func (n *Node) checkInitFuncSignature() { - ft := n.Type.FuncType() - if ft.Receiver.Fields().Len()+ft.Params.Fields().Len()+ft.Results.Fields().Len() > 0 { + if n.Type.NumRecvs()+n.Type.NumParams()+n.Type.NumResults() > 0 { Fatalf("init function cannot have receiver, params, or results: %v (%v)", n, n.Type) } } diff --git a/src/cmd/compile/internal/gc/inl.go b/src/cmd/compile/internal/gc/inl.go index dfa13e3c3b3..0e8ef196af7 100644 --- a/src/cmd/compile/internal/gc/inl.go +++ b/src/cmd/compile/internal/gc/inl.go @@ -8,29 +8,34 @@ // expand calls to inlinable functions. // // The debug['l'] flag controls the aggressiveness. Note that main() swaps level 0 and 1, -// making 1 the default and -l disable. -ll and more is useful to flush out bugs. -// These additional levels (beyond -l) may be buggy and are not supported. +// making 1 the default and -l disable. Additional levels (beyond -l) may be buggy and +// are not supported. // 0: disabled // 1: 80-nodes leaf functions, oneliners, lazy typechecking (default) -// 2: early typechecking of all imported bodies +// 2: (unassigned) // 3: allow variadic functions -// 4: allow non-leaf functions , (breaks runtime.Caller) +// 4: allow non-leaf functions // -// At some point this may get another default and become switch-offable with -N. +// At some point this may get another default and become switch-offable with -N. // -// The debug['m'] flag enables diagnostic output. a single -m is useful for verifying -// which calls get inlined or not, more is for debugging, and may go away at any point. 
+// The -d typcheckinl flag enables early typechecking of all imported bodies, +// which is useful to flush out bugs. +// +// The debug['m'] flag enables diagnostic output. a single -m is useful for verifying +// which calls get inlined or not, more is for debugging, and may go away at any point. // // TODO: // - inline functions with ... args -// - handle T.meth(f()) with func f() (t T, arg, arg, ) package gc import ( "cmd/compile/internal/types" + "cmd/internal/obj" "cmd/internal/src" "fmt" + "sort" + "strings" ) // Get the function's package. For ordinary functions it's on the ->sym, but for imported methods @@ -118,6 +123,15 @@ func caninl(fn *Node) { return } + // The nowritebarrierrec checker currently works at function + // granularity, so inlining yeswritebarrierrec functions can + // confuse it (#22342). As a workaround, disallow inlining + // them for now. + if fn.Func.Pragma&Yeswritebarrierrec != 0 { + reason = "marked go:yeswritebarrierrec" + return + } + // If fn has no body (is defined outside of Go), cannot inline it. if fn.Nbody.Len() == 0 { reason = "no function body" @@ -150,6 +164,12 @@ func caninl(fn *Node) { return } + n := fn.Func.Nname + if n.Func.InlinabilityChecked() { + return + } + defer n.Func.SetInlinabilityChecked(true) + const maxBudget = 80 visitor := hairyVisitor{budget: maxBudget} if visitor.visitList(fn.Nbody) { @@ -157,15 +177,13 @@ func caninl(fn *Node) { return } if visitor.budget < 0 { - reason = "function too complex" + reason = fmt.Sprintf("function too complex: cost %d exceeds budget %d", maxBudget-visitor.budget, maxBudget) return } savefn := Curfn Curfn = fn - n := fn.Func.Nname - n.Func.Inl.Set(fn.Nbody.Slice()) fn.Nbody.Set(inlcopylist(n.Func.Inl.Slice())) inldcl := inlcopylist(n.Name.Defn.Func.Dcl) @@ -185,6 +203,43 @@ func caninl(fn *Node) { Curfn = savefn } +// inlFlood marks n's inline body for export and recursively ensures +// all called functions are marked too. 
+func inlFlood(n *Node) { + if n == nil { + return + } + if n.Op != ONAME || n.Class() != PFUNC { + Fatalf("inlFlood: unexpected %v, %v, %v", n, n.Op, n.Class()) + } + if n.Func == nil { + // TODO(mdempsky): Should init have a Func too? + if n.Sym.Name == "init" { + return + } + Fatalf("inlFlood: missing Func on %v", n) + } + if n.Func.Inl.Len() == 0 { + return + } + + if n.Func.ExportInline() { + return + } + n.Func.SetExportInline(true) + + typecheckinl(n) + + // Recursively flood any functions called by this one. + inspectList(n.Func.Inl, func(n *Node) bool { + switch n.Op { + case OCALLFUNC, OCALLMETH: + inlFlood(asNode(n.Left.Type.Nname())) + } + return true + }) +} + // hairyVisitor visits a function body to determine its inlining // hairiness and whether or not it can be inlined. type hairyVisitor struct { @@ -228,8 +283,28 @@ func (v *hairyVisitor) visit(n *Node) bool { v.budget -= fn.InlCost break } + if n.Left.Op == OCLOSURE { + if fn := inlinableClosure(n.Left); fn != nil { + v.budget -= fn.Func.InlCost + break + } + } else if n.Left.Op == ONAME && n.Left.Name != nil && n.Left.Name.Defn != nil { + // NB: this case currently cannot trigger since closure definition + // prevents inlining + // NB: ideally we would also handle captured variables defined as + // closures in the outer scope this brings us back to the idea of + // function value propagation, which if available would both avoid + // the "reassigned" check and neatly handle multiple use cases in a + // single code path + if d := n.Left.Name.Defn; d.Op == OAS && d.Right.Op == OCLOSURE { + if fn := inlinableClosure(d.Right); fn != nil { + v.budget -= fn.Func.InlCost + break + } + } + } - if n.isMethodCalledAsFunction() { + if n.Left.isMethodExpression() { if d := asNode(n.Left.Sym.Def); d != nil && d.Func.Inl.Len() != 0 { v.budget -= d.Func.InlCost break @@ -279,6 +354,10 @@ func (v *hairyVisitor) visit(n *Node) bool { ORETJMP: v.reason = "unhandled op " + n.Op.String() return true + + case 
ODCLCONST, OEMPTY, OFALL, OLABEL: + // These nodes don't produce code; omit from inlining budget. + return false } v.budget-- @@ -293,8 +372,8 @@ func (v *hairyVisitor) visit(n *Node) bool { v.budget -= 2 } - if v.budget < 0 { - v.reason = "function too complex" + // When debugging, don't stop early, to get full cost of inlining this function + if v.budget < 0 && Debug['m'] < 2 { return true } @@ -517,8 +596,39 @@ func inlnode(n *Node) *Node { } if n.Left.Func != nil && n.Left.Func.Inl.Len() != 0 && !isIntrinsicCall(n) { // normal case n = mkinlcall(n, n.Left, n.Isddd()) - } else if n.isMethodCalledAsFunction() && asNode(n.Left.Sym.Def) != nil { + } else if n.Left.isMethodExpression() && asNode(n.Left.Sym.Def) != nil { n = mkinlcall(n, asNode(n.Left.Sym.Def), n.Isddd()) + } else if n.Left.Op == OCLOSURE { + if f := inlinableClosure(n.Left); f != nil { + n = mkinlcall(n, f, n.Isddd()) + } + } else if n.Left.Op == ONAME && n.Left.Name != nil && n.Left.Name.Defn != nil { + if d := n.Left.Name.Defn; d.Op == OAS && d.Right.Op == OCLOSURE { + if f := inlinableClosure(d.Right); f != nil { + // NB: this check is necessary to prevent indirect re-assignment of the variable + // having the address taken after the invocation or only used for reads is actually fine + // but we have no easy way to distinguish the safe cases + if d.Left.Addrtaken() { + if Debug['m'] > 1 { + fmt.Printf("%v: cannot inline escaping closure variable %v\n", n.Line(), n.Left) + } + break + } + + // ensure the variable is never re-assigned + if unsafe, a := reassigned(n.Left); unsafe { + if Debug['m'] > 1 { + if a != nil { + fmt.Printf("%v: cannot inline re-assigned closure variable at %v: %v\n", n.Line(), a.Line(), a) + } else { + fmt.Printf("%v: cannot inline global closure variable %v\n", n.Line(), n.Left) + } + } + break + } + n = mkinlcall(n, f, n.Isddd()) + } + } } case OCALLMETH: @@ -542,6 +652,98 @@ func inlnode(n *Node) *Node { return n } +// inlinableClosure takes an OCLOSURE node and follows 
linkage to the matching ONAME with +// the inlinable body. Returns nil if the function is not inlinable. +func inlinableClosure(n *Node) *Node { + c := n.Func.Closure + caninl(c) + f := c.Func.Nname + if f == nil || f.Func.Inl.Len() == 0 { + return nil + } + return f +} + +// reassigned takes an ONAME node, walks the function in which it is defined, and returns a boolean +// indicating whether the name has any assignments other than its declaration. +// The second return value is the first such assignment encountered in the walk, if any. It is mostly +// useful for -m output documenting the reason for inhibited optimizations. +// NB: global variables are always considered to be re-assigned. +// TODO: handle initial declaration not including an assignment and followed by a single assignment? +func reassigned(n *Node) (bool, *Node) { + if n.Op != ONAME { + Fatalf("reassigned %v", n) + } + // no way to reliably check for no-reassignment of globals, assume it can be + if n.Name.Curfn == nil { + return true, nil + } + f := n.Name.Curfn + // There just might be a good reason for this although this can be pretty surprising: + // local variables inside a closure have Curfn pointing to the OCLOSURE node instead + // of the corresponding ODCLFUNC. + // We need to walk the function body to check for reassignments so we follow the + // linkage to the ODCLFUNC node as that is where body is held. 
+ if f.Op == OCLOSURE { + f = f.Func.Closure + } + v := reassignVisitor{name: n} + a := v.visitList(f.Nbody) + return a != nil, a +} + +type reassignVisitor struct { + name *Node +} + +func (v *reassignVisitor) visit(n *Node) *Node { + if n == nil { + return nil + } + switch n.Op { + case OAS: + if n.Left == v.name && n != v.name.Name.Defn { + return n + } + return nil + case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE: + for _, p := range n.List.Slice() { + if p == v.name && n != v.name.Name.Defn { + return n + } + } + return nil + } + if a := v.visit(n.Left); a != nil { + return a + } + if a := v.visit(n.Right); a != nil { + return a + } + if a := v.visitList(n.List); a != nil { + return a + } + if a := v.visitList(n.Rlist); a != nil { + return a + } + if a := v.visitList(n.Ninit); a != nil { + return a + } + if a := v.visitList(n.Nbody); a != nil { + return a + } + return nil +} + +func (v *reassignVisitor) visitList(l Nodes) *Node { + for _, n := range l.Slice() { + if a := v.visit(n); a != nil { + return a + } + } + return nil +} + // The result of mkinlcall MUST be assigned back to n, e.g. // n.Left = mkinlcall(n.Left, fn, isddd) func mkinlcall(n *Node, fn *Node, isddd bool) *Node { @@ -580,7 +782,7 @@ var inlgen int // parameters. // The result of mkinlcall1 MUST be assigned back to n, e.g. // n.Left = mkinlcall1(n.Left, fn, isddd) -func mkinlcall1(n *Node, fn *Node, isddd bool) *Node { +func mkinlcall1(n, fn *Node, isddd bool) *Node { if fn.Func.Inl.Len() == 0 { // No inlinable body. return n @@ -591,7 +793,7 @@ func mkinlcall1(n *Node, fn *Node, isddd bool) *Node { return n } - if Debug['l'] < 2 { + if Debug_typecheckinl == 0 { typecheckinl(fn) } @@ -607,16 +809,56 @@ func mkinlcall1(n *Node, fn *Node, isddd bool) *Node { ninit := n.Ninit + // Make temp names to use instead of the originals. + inlvars := make(map[*Node]*Node) + + // record formals/locals for later post-processing + var inlfvars []*Node + // Find declarations corresponding to inlineable body. 
var dcl []*Node if fn.Name.Defn != nil { dcl = fn.Func.Inldcl.Slice() // local function + + // handle captured variables when inlining closures + if c := fn.Name.Defn.Func.Closure; c != nil { + for _, v := range c.Func.Cvars.Slice() { + if v.Op == OXXX { + continue + } + + o := v.Name.Param.Outer + // make sure the outer param matches the inlining location + // NB: if we enabled inlining of functions containing OCLOSURE or refined + // the reassigned check via some sort of copy propagation this would most + // likely need to be changed to a loop to walk up to the correct Param + if o == nil || (o.Name.Curfn != Curfn && o.Name.Curfn.Func.Closure != Curfn) { + Fatalf("%v: unresolvable capture %v %v\n", n.Line(), fn, v) + } + + if v.Name.Byval() { + iv := typecheck(inlvar(v), Erv) + ninit.Append(nod(ODCL, iv, nil)) + ninit.Append(typecheck(nod(OAS, iv, o), Etop)) + inlvars[v] = iv + } else { + addr := newname(lookup("&" + v.Sym.Name)) + addr.Type = types.NewPtr(v.Type) + ia := typecheck(inlvar(addr), Erv) + ninit.Append(nod(ODCL, ia, nil)) + ninit.Append(typecheck(nod(OAS, ia, nod(OADDR, o, nil)), Etop)) + inlvars[addr] = ia + + // When capturing by reference, all occurrence of the captured var + // must be substituted with dereference of the temporary address + inlvars[v] = typecheck(nod(OIND, ia, nil), Erv) + } + } + } } else { dcl = fn.Func.Dcl // imported function } - // Make temp names to use instead of the originals. - inlvars := make(map[*Node]*Node) for _, ln := range dcl { if ln.Op != ONAME { continue @@ -631,13 +873,25 @@ func mkinlcall1(n *Node, fn *Node, isddd bool) *Node { if ln.Class() == PPARAM || ln.Name.Param.Stackcopy != nil && ln.Name.Param.Stackcopy.Class() == PPARAM { ninit.Append(nod(ODCL, inlvars[ln], nil)) } + if genDwarfInline > 0 { + inlf := inlvars[ln] + if ln.Class() == PPARAM { + inlf.SetInlFormal(true) + } else { + inlf.SetInlLocal(true) + } + inlf.Pos = ln.Pos + inlfvars = append(inlfvars, inlf) + } } // temporaries for return values. 
var retvars []*Node for i, t := range fn.Type.Results().Fields().Slice() { var m *Node + var mpos src.XPos if t != nil && asNode(t.Nname) != nil && !isblank(asNode(t.Nname)) { + mpos = asNode(t.Nname).Pos m = inlvar(asNode(t.Nname)) m = typecheck(m, Erv) inlvars[asNode(t.Nname)] = m @@ -646,6 +900,17 @@ func mkinlcall1(n *Node, fn *Node, isddd bool) *Node { m = retvar(t, i) } + if genDwarfInline > 0 { + // Don't update the src.Pos on a return variable if it + // was manufactured by the inliner (e.g. "~r2"); such vars + // were not part of the original callee. + if !strings.HasPrefix(m.Sym.Name, "~r") { + m.SetInlFormal(true) + m.Pos = mpos + inlfvars = append(inlfvars, m) + } + } + ninit.Append(nod(ODCL, m, nil)) retvars = append(retvars, m) } @@ -736,10 +1001,26 @@ func mkinlcall1(n *Node, fn *Node, isddd bool) *Node { inlgen++ + parent := -1 + if b := Ctxt.PosTable.Pos(n.Pos).Base(); b != nil { + parent = b.InliningIndex() + } + sort.Sort(byNodeName(dcl)) + newIndex := Ctxt.InlTree.Add(parent, n.Pos, fn.Sym.Linksym()) + + if genDwarfInline > 0 { + if !fn.Sym.Linksym().WasInlined() { + Ctxt.DwFixups.SetPrecursorFunc(fn.Sym.Linksym(), fn) + fn.Sym.Linksym().Set(obj.AttrWasInlined, true) + } + } + subst := inlsubst{ - retlabel: retlabel, - retvars: retvars, - inlvars: inlvars, + retlabel: retlabel, + retvars: retvars, + inlvars: inlvars, + bases: make(map[*src.PosBase]*src.PosBase), + newInlIndex: newIndex, } body := subst.list(fn.Func.Inl) @@ -749,6 +1030,12 @@ func mkinlcall1(n *Node, fn *Node, isddd bool) *Node { typecheckslice(body, Etop) + if genDwarfInline > 0 { + for _, v := range inlfvars { + v.Pos = subst.updatedPos(v.Pos) + } + } + //dumplist("ninit post", ninit); call := nod(OINLCALL, nil, nil) @@ -758,51 +1045,24 @@ func mkinlcall1(n *Node, fn *Node, isddd bool) *Node { call.Type = n.Type call.SetTypecheck(1) - // Hide the args from setPos -- the parameters to the inlined - // call already have good line numbers that should be preserved. 
- args := as.Rlist - as.Rlist.Set(nil) - - // Rewrite the line information for the inlined AST. - parent := -1 - callBase := Ctxt.PosTable.Pos(n.Pos).Base() - if callBase != nil { - parent = callBase.InliningIndex() - } - newIndex := Ctxt.InlTree.Add(parent, n.Pos, fn.Sym.Linksym()) - setpos := &setPos{ - bases: make(map[*src.PosBase]*src.PosBase), - newInlIndex: newIndex, - } - setpos.node(call) - - as.Rlist.Set(args.Slice()) - - //dumplist("call body", body); - - n = call - // transitive inlining // might be nice to do this before exporting the body, // but can't emit the body with inlining expanded. // instead we emit the things that the body needs // and each use must redo the inlining. // luckily these are small. - body = fn.Func.Inl.Slice() - fn.Func.Inl.Set(nil) // prevent infinite recursion (shouldn't happen anyway) inlnodelist(call.Nbody) for _, n := range call.Nbody.Slice() { if n.Op == OINLCALL { inlconv2stmt(n) } } - fn.Func.Inl.Set(body) if Debug['m'] > 2 { - fmt.Printf("%v: After inlining %+v\n\n", n.Line(), n) + fmt.Printf("%v: After inlining %+v\n\n", call.Line(), call) } - return n + return call } // Every time we expand a function we generate a new set of tmpnames, @@ -857,6 +1117,14 @@ type inlsubst struct { retvars []*Node inlvars map[*Node]*Node + + // bases maps from original PosBase to PosBase with an extra + // inlined call frame. + bases map[*src.PosBase]*src.PosBase + + // newInlIndex is the index of the inlined call frame to + // insert for inlined nodes. + newInlIndex int } // list inlines a list of nodes. 
@@ -904,7 +1172,6 @@ func (subst *inlsubst) node(n *Node) *Node { // dump("Return before substitution", n); case ORETURN: m := nod(OGOTO, subst.retlabel, nil) - m.Ninit.Set(subst.list(n.Ninit)) if len(subst.retvars) != 0 && n.List.Len() != 0 { @@ -930,6 +1197,7 @@ func (subst *inlsubst) node(n *Node) *Node { case OGOTO, OLABEL: m := nod(OXXX, nil, nil) *m = *n + m.Pos = subst.updatedPos(m.Pos) m.Ninit.Set(nil) p := fmt.Sprintf("%s·%d", n.Left.Sym.Name, inlgen) m.Left = newname(lookup(p)) @@ -939,6 +1207,7 @@ func (subst *inlsubst) node(n *Node) *Node { m := nod(OXXX, nil, nil) *m = *n + m.Pos = subst.updatedPos(m.Pos) m.Ninit.Set(nil) if n.Op == OCLOSURE { @@ -955,55 +1224,39 @@ func (subst *inlsubst) node(n *Node) *Node { return m } -// setPos is a visitor to update position info with a new inlining index. -type setPos struct { - bases map[*src.PosBase]*src.PosBase - newInlIndex int -} - -func (s *setPos) nodelist(ll Nodes) { - for _, n := range ll.Slice() { - s.node(n) - } -} - -func (s *setPos) node(n *Node) { - if n == nil { - return - } - if n.Op == OLITERAL || n.Op == OTYPE { - if n.Sym != nil { - // This node is not a copy, so don't clobber position. 
- return - } - } - - // don't clobber names, unless they're freshly synthesized - if n.Op != ONAME || !n.Pos.IsKnown() { - n.Pos = s.updatedPos(n) - } - - s.node(n.Left) - s.node(n.Right) - s.nodelist(n.List) - s.nodelist(n.Rlist) - s.nodelist(n.Ninit) - s.nodelist(n.Nbody) -} - -func (s *setPos) updatedPos(n *Node) src.XPos { - pos := Ctxt.PosTable.Pos(n.Pos) +func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos { + pos := Ctxt.PosTable.Pos(xpos) oldbase := pos.Base() // can be nil - newbase := s.bases[oldbase] + newbase := subst.bases[oldbase] if newbase == nil { - newbase = src.NewInliningBase(oldbase, s.newInlIndex) - pos.SetBase(newbase) - s.bases[oldbase] = newbase + newbase = src.NewInliningBase(oldbase, subst.newInlIndex) + subst.bases[oldbase] = newbase } pos.SetBase(newbase) return Ctxt.PosTable.XPos(pos) } -func (n *Node) isMethodCalledAsFunction() bool { - return n.Left.Op == ONAME && n.Left.Left != nil && n.Left.Left.Op == OTYPE && n.Left.Right != nil && n.Left.Right.Op == ONAME +func cmpNodeName(a, b *Node) bool { + // named before artificial + aart := 0 + if strings.HasPrefix(a.Sym.Name, "~r") { + aart = 1 + } + bart := 0 + if strings.HasPrefix(b.Sym.Name, "~r") { + bart = 1 + } + if aart != bart { + return aart < bart + } + + // otherwise sort by name + return a.Sym.Name < b.Sym.Name } + +// byNodeName implements sort.Interface for []*Node using cmpNodeName. +type byNodeName []*Node + +func (s byNodeName) Len() int { return len(s) } +func (s byNodeName) Less(i, j int) bool { return cmpNodeName(s[i], s[j]) } +func (s byNodeName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } diff --git a/src/cmd/compile/internal/gc/inl_test.go b/src/cmd/compile/internal/gc/inl_test.go new file mode 100644 index 00000000000..3e6da2ed7bb --- /dev/null +++ b/src/cmd/compile/internal/gc/inl_test.go @@ -0,0 +1,215 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gc + +import ( + "bufio" + "internal/testenv" + "io" + "os/exec" + "regexp" + "runtime" + "strings" + "testing" +) + +// TestIntendedInlining tests that specific runtime functions are inlined. +// This allows refactoring for code clarity and re-use without fear that +// changes to the compiler will cause silent performance regressions. +func TestIntendedInlining(t *testing.T) { + if testing.Short() && testenv.Builder() == "" { + t.Skip("skipping in short mode") + } + testenv.MustHaveGoRun(t) + t.Parallel() + + // want is the list of function names (by package) that should + // be inlined. + want := map[string][]string{ + "runtime": { + // TODO(mvdan): enable these once mid-stack + // inlining is available + // "adjustctxt", + + "add", + "acquirem", + "add1", + "addb", + "adjustpanics", + "adjustpointer", + "bucketMask", + "bucketShift", + "chanbuf", + "deferArgs", + "deferclass", + "evacuated", + "fastlog2", + "fastrand", + "float64bits", + "funcPC", + "getm", + "isDirectIface", + "itabHashFunc", + "maxSliceCap", + "noescape", + "readUnaligned32", + "readUnaligned64", + "releasem", + "round", + "roundupsize", + "selectsize", + "stringStructOf", + "subtract1", + "subtractb", + "tophash", + "totaldefersize", + "(*bmap).keys", + "(*bmap).overflow", + "(*waitq).enqueue", + + // GC-related ones + "cgoInRange", + "gclinkptr.ptr", + "guintptr.ptr", + "heapBits.bits", + "heapBits.isPointer", + "heapBits.morePointers", + "heapBits.next", + "heapBitsForAddr", + "inheap", + "markBits.isMarked", + "muintptr.ptr", + "puintptr.ptr", + "spanOfUnchecked", + "(*gcWork).putFast", + "(*gcWork).tryGetFast", + "(*guintptr).set", + "(*markBits).advance", + "(*mspan).allocBitsForIndex", + "(*mspan).base", + "(*mspan).markBitsForBase", + "(*mspan).markBitsForIndex", + "(*muintptr).set", + "(*puintptr).set", + }, + "runtime/internal/sys": {}, + "bytes": { + 
"(*Buffer).Bytes", + "(*Buffer).Cap", + "(*Buffer).Len", + "(*Buffer).Next", + "(*Buffer).Read", + "(*Buffer).ReadByte", + "(*Buffer).Reset", + "(*Buffer).String", + "(*Buffer).UnreadByte", + "(*Buffer).tryGrowByReslice", + }, + "unicode/utf8": { + "FullRune", + "FullRuneInString", + "RuneLen", + "ValidRune", + }, + "reflect": { + "Value.CanAddr", + "Value.CanSet", + "Value.IsValid", + "add", + "align", + "flag.kind", + "flag.ro", + + // TODO: these use panic, need mid-stack + // inlining + // "Value.CanInterface", + // "Value.pointer", + // "flag.mustBe", + // "flag.mustBeAssignable", + // "flag.mustBeExported", + }, + "regexp": { + "(*bitState).push", + }, + } + + if runtime.GOARCH != "386" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" { + // nextFreeFast calls sys.Ctz64, which on 386 is implemented in asm and is not inlinable. + // We currently don't have midstack inlining so nextFreeFast is also not inlinable on 386. + // On MIPS64x, Ctz64 is not intrinsified and causes nextFreeFast too expensive to inline + // (Issue 22239). + want["runtime"] = append(want["runtime"], "nextFreeFast") + } + if runtime.GOARCH != "386" { + // As explained above, Ctz64 and Ctz32 are not Go code on 386. + // The same applies to Bswap32. + want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Ctz64") + want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Ctz32") + want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Bswap32") + } + switch runtime.GOARCH { + case "amd64", "amd64p32", "arm64", "mips64", "mips64le", "ppc64", "ppc64le", "s390x": + // rotl_31 is only defined on 64-bit architectures + want["runtime"] = append(want["runtime"], "rotl_31") + } + + notInlinedReason := make(map[string]string) + pkgs := make([]string, 0, len(want)) + for pname, fnames := range want { + pkgs = append(pkgs, pname) + for _, fname := range fnames { + fullName := pname + "." 
+ fname + if _, ok := notInlinedReason[fullName]; ok { + t.Errorf("duplicate func: %s", fullName) + } + notInlinedReason[fullName] = "unknown reason" + } + } + + args := append([]string{"build", "-a", "-gcflags=all=-m -m"}, pkgs...) + cmd := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), args...)) + pr, pw := io.Pipe() + cmd.Stdout = pw + cmd.Stderr = pw + cmdErr := make(chan error, 1) + go func() { + cmdErr <- cmd.Run() + pw.Close() + }() + scanner := bufio.NewScanner(pr) + curPkg := "" + canInline := regexp.MustCompile(`: can inline ([^ ]*)`) + cannotInline := regexp.MustCompile(`: cannot inline ([^ ]*): (.*)`) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "# ") { + curPkg = line[2:] + continue + } + if m := canInline.FindStringSubmatch(line); m != nil { + fname := m[1] + delete(notInlinedReason, curPkg+"."+fname) + continue + } + if m := cannotInline.FindStringSubmatch(line); m != nil { + fname, reason := m[1], m[2] + fullName := curPkg + "." + fname + if _, ok := notInlinedReason[fullName]; ok { + // cmd/compile gave us a reason why + notInlinedReason[fullName] = reason + } + continue + } + } + if err := <-cmdErr; err != nil { + t.Fatal(err) + } + if err := scanner.Err(); err != nil { + t.Fatal(err) + } + for fullName, reason := range notInlinedReason { + t.Errorf("%s was not inlined: %s", fullName, reason) + } +} diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index 2b61564ad8d..b651c9acb3a 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -11,6 +11,7 @@ import ( "bytes" "cmd/compile/internal/ssa" "cmd/compile/internal/types" + "cmd/internal/dwarf" "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/src" @@ -43,7 +44,12 @@ var ( Debug_slice int Debug_vlog bool Debug_wb int + Debug_eagerwb int Debug_pctab string + Debug_locationlist int + Debug_typecheckinl int + Debug_gendwarfinl int + Debug_softfloat int ) // Debug arguments. 
@@ -67,8 +73,13 @@ var debugtab = []struct { {"slice", "print information about slice compilation", &Debug_slice}, {"typeassert", "print information about type assertion inlining", &Debug_typeassert}, {"wb", "print information about write barriers", &Debug_wb}, + {"eagerwb", "use unbuffered write barrier", &Debug_eagerwb}, {"export", "print export data", &Debug_export}, {"pctab", "print named pc-value table", &Debug_pctab}, + {"locationlists", "print information about DWARF location list creation", &Debug_locationlist}, + {"typecheckinl", "eager typechecking of inline function bodies", &Debug_typecheckinl}, + {"dwarfinl", "print information about DWARF inlined function creation", &Debug_gendwarfinl}, + {"softfloat", "force compiler to emit soft-float code", &Debug_softfloat}, } const debugHelpHeader = `usage: -d arg[,arg]* and arg is [=] @@ -102,19 +113,6 @@ func hidePanic() { } } -func doversion() { - p := objabi.Expstring() - if p == objabi.DefaultExpstring() { - p = "" - } - sep := "" - if p != "" { - sep = " " - } - fmt.Printf("compile version %s%s%s\n", objabi.Version, sep, p) - os.Exit(0) -} - // supportsDynlink reports whether or not the code generator for the given // architecture supports the -shared and -dynlink flags. func supportsDynlink(arch *sys.Arch) bool { @@ -125,6 +123,8 @@ func supportsDynlink(arch *sys.Arch) bool { var timings Timings var benchfile string +var nowritebarrierrecCheck *nowritebarrierrecChecker + // Main parses flags and Go source files specified in the command-line // arguments, type-checks the parsed Go package, compiles functions to machine // code, and finally writes the compiled package definition to disk. 
@@ -137,6 +137,7 @@ func Main(archInit func(*Arch)) { Ctxt = obj.Linknew(thearch.LinkArch) Ctxt.DiagFunc = yyerror + Ctxt.DiagFlush = flusherrors Ctxt.Bso = bufio.NewWriter(os.Stdout) localpkg = types.NewPkg("", "") @@ -182,9 +183,10 @@ func Main(archInit func(*Arch)) { objabi.Flagcount("E", "debug symbol export", &Debug['E']) objabi.Flagfn1("I", "add `directory` to import search path", addidir) objabi.Flagcount("K", "debug missing line numbers", &Debug['K']) + objabi.Flagcount("L", "show full file names in error messages", &Debug['L']) objabi.Flagcount("N", "disable optimizations", &Debug['N']) flag.BoolVar(&Debug_asm, "S", false, "print assembly listing") - objabi.Flagfn0("V", "print compiler version", doversion) + objabi.AddVersionFlag() // -V objabi.Flagcount("W", "debug parse tree after type checking", &Debug['W']) flag.StringVar(&asmhdr, "asmhdr", "", "write assembly header to `file`") flag.StringVar(&buildid, "buildid", "", "record `id` as the build id in the export metadata") @@ -192,6 +194,8 @@ func Main(archInit func(*Arch)) { flag.BoolVar(&pure_go, "complete", false, "compiling complete package (no C or assembly)") flag.StringVar(&debugstr, "d", "", "print debug information about items in `list`; try -d help") flag.BoolVar(&flagDWARF, "dwarf", true, "generate DWARF symbols") + flag.BoolVar(&Ctxt.Flag_locationlists, "dwarflocationlists", false, "add location lists to DWARF in optimized mode") + flag.IntVar(&genDwarfInline, "gendwarfinl", 2, "generate DWARF inline info records") objabi.Flagcount("e", "no limit on number of errors reported", &Debug['e']) objabi.Flagcount("f", "debug stack frames", &Debug['f']) objabi.Flagcount("h", "halt on error", &Debug['h']) @@ -235,6 +239,11 @@ func Main(archInit func(*Arch)) { flag.StringVar(&benchfile, "bench", "", "append benchmark times to `file`") objabi.Flagparse(usage) + // Record flags that affect the build result. 
(And don't + // record flags that don't, since that would cause spurious + // changes in the binary.) + recordFlags("B", "N", "l", "msan", "race", "shared", "dynlink", "dwarflocationlists") + Ctxt.Flag_shared = flag_dynlink || flag_shared Ctxt.Flag_dynlink = flag_dynlink Ctxt.Flag_optimize = Debug['N'] == 0 @@ -243,6 +252,11 @@ func Main(archInit func(*Arch)) { Ctxt.Debugvlog = Debug_vlog if flagDWARF { Ctxt.DebugInfo = debuginfo + Ctxt.GenAbstractFunc = genAbstractFunc + Ctxt.DwFixups = obj.NewDwarfFixupTable(Ctxt) + } else { + // turn off inline generation if no dwarf at all + genDwarfInline = 0 } if flag.NArg() < 1 && debugstr != "help" && debugstr != "ssa/help" { @@ -298,6 +312,9 @@ func Main(archInit func(*Arch)) { if nBackendWorkers > 1 && !concurrentBackendAllowed() { log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args) } + if Ctxt.Flag_locationlists && len(Ctxt.Arch.DWARFRegisters) == 0 { + log.Fatalf("location lists requested but register mapping not available on %v", Ctxt.Arch.Name) + } // parse -d argument if debugstr != "" { @@ -374,16 +391,29 @@ func Main(archInit func(*Arch)) { // set via a -d flag Ctxt.Debugpcln = Debug_pctab + if flagDWARF { + dwarf.EnableLogging(Debug_gendwarfinl != 0) + } + + if Debug_softfloat != 0 { + thearch.SoftFloat = true + } // enable inlining. for now: // default: inlining on. (debug['l'] == 1) // -l: inlining off (debug['l'] == 0) - // -ll, -lll: inlining on again, with extra debugging (debug['l'] > 1) + // -l=2, -l=3: inlining on again, with extra debugging (debug['l'] > 1) if Debug['l'] <= 1 { Debug['l'] = 1 - Debug['l'] } - trackScopes = flagDWARF && Debug['l'] == 0 && Debug['N'] != 0 + // The buffered write barrier is only implemented on amd64 + // right now. 
+ if objabi.GOARCH != "amd64" { + Debug_eagerwb = 1 + } + + trackScopes = flagDWARF && ((Debug['l'] == 0 && Debug['N'] != 0) || Ctxt.Flag_locationlists) Widthptr = thearch.LinkArch.PtrSize Widthreg = thearch.LinkArch.RegSize @@ -488,6 +518,8 @@ func Main(archInit func(*Arch)) { fcount++ } } + // With all types ckecked, it's now safe to verify map keys. + checkMapKeys() timings.AddEvent(fcount, "funcs") // Phase 4: Decide how to capture closed variables. @@ -510,7 +542,7 @@ func Main(archInit func(*Arch)) { // Phase 5: Inlining timings.Start("fe", "inlining") - if Debug['l'] > 1 { + if Debug_typecheckinl != 0 { // Typecheck imported function bodies if debug['l'] > 1, // otherwise lazily when used or re-exported. for _, n := range importlist { @@ -553,6 +585,14 @@ func Main(archInit func(*Arch)) { escapes(xtop) if dolinkobj { + // Collect information for go:nowritebarrierrec + // checking. This must happen before transformclosure. + // We'll do the final check after write barriers are + // inserted. + if compiling_runtime { + nowritebarrierrecCheck = newNowritebarrierrecChecker() + } + // Phase 7: Transform closure bodies to properly reference captured variables. // This needs to happen before walk, because closures must be transformed // before walk reaches a call of a closure. @@ -601,8 +641,20 @@ func Main(archInit func(*Arch)) { // at least until this convoluted structure has been unwound. nBackendWorkers = 1 - if compiling_runtime { - checknowritebarrierrec() + if nowritebarrierrecCheck != nil { + // Write barriers are now known. Check the + // call graph. + nowritebarrierrecCheck.check() + nowritebarrierrecCheck = nil + } + + // Finalize DWARF inline routine DIEs, then explicitly turn off + // DWARF inlining gen so as to avoid problems with generated + // method wrappers. 
+ if Ctxt.DwFixups != nil { + Ctxt.DwFixups.Finalize(myimportpath, Debug_gendwarfinl != 0) + Ctxt.DwFixups = nil + genDwarfInline = 0 } // Check whether any of the functions we have compiled have gigantic stack frames. @@ -610,7 +662,7 @@ func Main(archInit func(*Arch)) { return largeStackFrames[i].Before(largeStackFrames[j]) }) for _, largePos := range largeStackFrames { - yyerrorl(largePos, "stack frame too large (>2GB)") + yyerrorl(largePos, "stack frame too large (>1GB)") } } @@ -763,7 +815,7 @@ func isDriveLetter(b byte) bool { return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z' } -// is this path a local name? begins with ./ or ../ or / +// is this path a local name? begins with ./ or ../ or / func islocalname(name string) bool { return strings.HasPrefix(name, "/") || runtime.GOOS == "windows" && len(name) >= 3 && isDriveLetter(name[0]) && name[1] == ':' && name[2] == '/' || @@ -868,7 +920,7 @@ func loadsys() { n.Type = typ declare(n, PFUNC) case varTag: - importvar(Runtimepkg, sym, typ) + importvar(lineno, Runtimepkg, sym, typ) default: Fatalf("unhandled declaration tag %v", d.tag) } @@ -1178,8 +1230,8 @@ func concurrentBackendAllowed() bool { if Debug_vlog || debugstr != "" || debuglive > 0 { return false } - // TODO: test and add builders for GOEXPERIMENT values, and enable - if os.Getenv("GOEXPERIMENT") != "" { + // TODO: Test and delete these conditions. + if objabi.Fieldtrack_enabled != 0 || objabi.Preemptibleloops_enabled != 0 || objabi.Clobberdead_enabled != 0 { return false } // TODO: fix races and enable the following flags @@ -1188,3 +1240,58 @@ func concurrentBackendAllowed() bool { } return true } + +// recordFlags records the specified command-line flags to be placed +// in the DWARF info. +func recordFlags(flags ...string) { + if myimportpath == "" { + // We can't record the flags if we don't know what the + // package name is. 
+ return + } + + type BoolFlag interface { + IsBoolFlag() bool + } + type CountFlag interface { + IsCountFlag() bool + } + var cmd bytes.Buffer + for _, name := range flags { + f := flag.Lookup(name) + if f == nil { + continue + } + getter := f.Value.(flag.Getter) + if getter.String() == f.DefValue { + // Flag has default value, so omit it. + continue + } + if bf, ok := f.Value.(BoolFlag); ok && bf.IsBoolFlag() { + val, ok := getter.Get().(bool) + if ok && val { + fmt.Fprintf(&cmd, " -%s", f.Name) + continue + } + } + if cf, ok := f.Value.(CountFlag); ok && cf.IsCountFlag() { + val, ok := getter.Get().(int) + if ok && val == 1 { + fmt.Fprintf(&cmd, " -%s", f.Name) + continue + } + } + fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get()) + } + + if cmd.Len() == 0 { + return + } + s := Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + myimportpath) + s.Type = objabi.SDWARFINFO + // Sometimes (for example when building tests) we can link + // together two package main archives. So allow dups. + s.Set(obj.AttrDuplicateOK, true) + Ctxt.Data = append(Ctxt.Data, s) + s.P = cmd.Bytes()[1:] +} diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 54c48434cc7..dcd5f20dfd2 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -19,7 +19,6 @@ import ( ) func parseFiles(filenames []string) uint { - var lines uint var noders []*noder // Limit the number of simultaneously open files. 
sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10) @@ -45,6 +44,7 @@ func parseFiles(filenames []string) uint { }(filename) } + var lines uint for _, p := range noders { for e := range p.err { yyerrorpos(e.Pos, "%s", e.Msg) @@ -87,15 +87,15 @@ type noder struct { scope ScopeID } -func (p *noder) funchdr(n *Node, pos src.Pos) ScopeID { +func (p *noder) funchdr(n *Node) ScopeID { old := p.scope p.scope = 0 funchdr(n) return old } -func (p *noder) funcbody(n *Node, pos src.Pos, old ScopeID) { - funcbody(n) +func (p *noder) funcbody(old ScopeID) { + funcbody() p.scope = old } @@ -224,15 +224,14 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) { pack.Sym = my pack.Name.Pkg = ipkg - if my.Name == "." { + switch my.Name { + case ".": importdot(ipkg, pack) return - } - if my.Name == "init" { + case "init": yyerrorl(pack.Pos, "cannot import package as init - init must be a func") return - } - if my.Name == "_" { + case "_": return } if my.Def != nil { @@ -322,7 +321,6 @@ func (p *noder) typeDecl(decl *syntax.TypeDecl) *Node { n := p.declName(decl.Name) n.Op = OTYPE declare(n, dclcontext) - n.SetLocal(true) // decl.Type may be nil but in that case we got a syntax error during parsing typ := p.typeExprOrNil(decl.Type) @@ -391,9 +389,8 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node { declare(f.Func.Nname, PFUNC) } - oldScope := p.funchdr(f, fun.Pos()) + oldScope := p.funchdr(f) - endPos := fun.Pos() if fun.Body != nil { if f.Noescape() { yyerrorl(f.Pos, "can only use //go:noescape with external func implementations") @@ -405,7 +402,6 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node { } f.Nbody.Set(body) - endPos = fun.Body.Rbrace lineno = Ctxt.PosTable.XPos(fun.Body.Rbrace) f.Func.Endlineno = lineno } else { @@ -414,7 +410,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node { } } - p.funcbody(f, endPos, oldScope) + p.funcbody(oldScope) return f } @@ -541,6 +537,9 @@ func (p *noder) expr(expr syntax.Expr) *Node { // ntype? 
Shrug, doesn't matter here. return p.nod(expr, ODOTTYPE, p.expr(expr.X), p.expr(expr.Type)) case *syntax.Operation: + if expr.Op == syntax.Add && expr.Y != nil { + return p.sum(expr) + } x := p.expr(expr.X) if expr.Y == nil { if expr.Op == syntax.And { @@ -601,6 +600,82 @@ func (p *noder) expr(expr syntax.Expr) *Node { panic("unhandled Expr") } +// sum efficiently handles very large summation expressions (such as +// in issue #16394). In particular, it avoids left recursion and +// collapses string literals. +func (p *noder) sum(x syntax.Expr) *Node { + // While we need to handle long sums with asymptotic + // efficiency, the vast majority of sums are very small: ~95% + // have only 2 or 3 operands, and ~99% of string literals are + // never concatenated. + + adds := make([]*syntax.Operation, 0, 2) + for { + add, ok := x.(*syntax.Operation) + if !ok || add.Op != syntax.Add || add.Y == nil { + break + } + adds = append(adds, add) + x = add.X + } + + // nstr is the current rightmost string literal in the + // summation (if any), and chunks holds its accumulated + // substrings. + // + // Consider the expression x + "a" + "b" + "c" + y. When we + // reach the string literal "a", we assign nstr to point to + // its corresponding Node and initialize chunks to {"a"}. + // Visiting the subsequent string literals "b" and "c", we + // simply append their values to chunks. Finally, when we + // reach the non-constant operand y, we'll join chunks to form + // "abc" and reassign the "a" string literal's value. + // + // N.B., we need to be careful about named string constants + // (indicated by Sym != nil) because 1) we can't modify their + // value, as doing so would affect other uses of the string + // constant, and 2) they may have types, which we need to + // handle correctly. For now, we avoid these problems by + // treating named string constants the same as non-constant + // operands. 
+ var nstr *Node + chunks := make([]string, 0, 1) + + n := p.expr(x) + if Isconst(n, CTSTR) && n.Sym == nil { + nstr = n + chunks = append(chunks, nstr.Val().U.(string)) + } + + for i := len(adds) - 1; i >= 0; i-- { + add := adds[i] + + r := p.expr(add.Y) + if Isconst(r, CTSTR) && r.Sym == nil { + if nstr != nil { + // Collapse r into nstr instead of adding to n. + chunks = append(chunks, r.Val().U.(string)) + continue + } + + nstr = r + chunks = append(chunks, nstr.Val().U.(string)) + } else { + if len(chunks) > 1 { + nstr.SetVal(Val{U: strings.Join(chunks, "")}) + } + nstr = nil + chunks = chunks[:0] + } + n = p.nod(add, OADD, n, r) + } + if len(chunks) > 1 { + nstr.SetVal(Val{U: strings.Join(chunks, "")}) + } + + return n +} + func (p *noder) typeExpr(typ syntax.Expr) *Node { // TODO(mdempsky): Be stricter? typecheck should handle errors anyway. return p.expr(typ) @@ -700,7 +775,11 @@ func (p *noder) embedded(typ syntax.Expr) *Node { } typ = op.X } - n := embedded(p.packname(typ), localpkg) + + sym := p.packname(typ) + n := nod(ODCLFIELD, newname(lookup(sym.Name)), oldname(sym)) + n.SetEmbedded(true) + if isStar { n.Right = p.nod(op, OIND, n.Right, nil) } @@ -708,9 +787,13 @@ func (p *noder) embedded(typ syntax.Expr) *Node { } func (p *noder) stmts(stmts []syntax.Stmt) []*Node { + return p.stmtsFall(stmts, false) +} + +func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []*Node { var nodes []*Node - for _, stmt := range stmts { - s := p.stmt(stmt) + for i, stmt := range stmts { + s := p.stmtFall(stmt, fallOK && i+1 == len(stmts)) if s == nil { } else if s.Op == OBLOCK && s.Ninit.Len() == 0 { nodes = append(nodes, s.List.Slice()...) 
@@ -722,12 +805,16 @@ func (p *noder) stmts(stmts []syntax.Stmt) []*Node { } func (p *noder) stmt(stmt syntax.Stmt) *Node { + return p.stmtFall(stmt, false) +} + +func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) *Node { p.lineno(stmt) switch stmt := stmt.(type) { case *syntax.EmptyStmt: return nil case *syntax.LabeledStmt: - return p.labeledStmt(stmt) + return p.labeledStmt(stmt, fallOK) case *syntax.BlockStmt: l := p.blockStmt(stmt) if len(l) == 0 { @@ -749,15 +836,10 @@ func (p *noder) stmt(stmt syntax.Stmt) *Node { return n } - lhs := p.exprList(stmt.Lhs) - rhs := p.exprList(stmt.Rhs) - n := p.nod(stmt, OAS, nil, nil) // assume common case - if stmt.Op == syntax.Def { - n.SetColas(true) - colasdefn(lhs, n) // modifies lhs, call before using lhs[0] in common case - } + rhs := p.exprList(stmt.Rhs) + lhs := p.assignList(stmt.Lhs, n, stmt.Op == syntax.Def) if len(lhs) == 1 && len(rhs) == 1 { // common case @@ -778,7 +860,10 @@ func (p *noder) stmt(stmt syntax.Stmt) *Node { case syntax.Continue: op = OCONTINUE case syntax.Fallthrough: - op = OXFALL + if !fallOK { + yyerror("fallthrough statement out of place") + } + op = OFALL case syntax.Goto: op = OGOTO default: @@ -788,9 +873,6 @@ func (p *noder) stmt(stmt syntax.Stmt) *Node { if stmt.Label != nil { n.Left = p.newname(stmt.Label) } - if op == OXFALL { - n.Xoffset = int64(types.Block) - } return n case *syntax.CallStmt: var op Op @@ -836,6 +918,66 @@ func (p *noder) stmt(stmt syntax.Stmt) *Node { panic("unhandled Stmt") } +func (p *noder) assignList(expr syntax.Expr, defn *Node, colas bool) []*Node { + if !colas { + return p.exprList(expr) + } + + defn.SetColas(true) + + var exprs []syntax.Expr + if list, ok := expr.(*syntax.ListExpr); ok { + exprs = list.ElemList + } else { + exprs = []syntax.Expr{expr} + } + + res := make([]*Node, len(exprs)) + seen := make(map[*types.Sym]bool, len(exprs)) + + newOrErr := false + for i, expr := range exprs { + p.lineno(expr) + res[i] = nblank + + name, ok := 
expr.(*syntax.Name) + if !ok { + yyerrorpos(expr.Pos(), "non-name %v on left side of :=", p.expr(expr)) + newOrErr = true + continue + } + + sym := p.name(name) + if sym.IsBlank() { + continue + } + + if seen[sym] { + yyerrorpos(expr.Pos(), "%v repeated on left side of :=", sym) + newOrErr = true + continue + } + seen[sym] = true + + if sym.Block == types.Block { + res[i] = oldname(sym) + continue + } + + newOrErr = true + n := newname(sym) + declare(n, dclcontext) + n.Name.Defn = defn + defn.Ninit.Append(nod(ODCL, n, nil)) + res[i] = n + } + + if !newOrErr { + yyerrorl(defn.Pos, "no new variables on left side of :=") + } + return res +} + func (p *noder) blockStmt(stmt *syntax.BlockStmt) []*Node { p.openScope(stmt.Pos()) nodes := p.stmts(stmt.List) @@ -875,12 +1017,7 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) *Node { n = p.nod(r, ORANGE, nil, p.expr(r.X)) if r.Lhs != nil { - lhs := p.exprList(r.Lhs) - n.List.Set(lhs) - if r.Def { - n.SetColas(true) - colasdefn(lhs, n) - } + n.List.Set(p.assignList(r.Lhs, n, r.Def)) } } else { n = p.nod(stmt, OFOR, nil, nil) @@ -910,7 +1047,7 @@ func (p *noder) switchStmt(stmt *syntax.SwitchStmt) *Node { } tswitch := n.Left - if tswitch != nil && (tswitch.Op != OTYPESW || tswitch.Left == nil) { + if tswitch != nil && tswitch.Op != OTYPESW { tswitch = nil } n.List.Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace)) @@ -932,15 +1069,35 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *Node, rbrace if clause.Cases != nil { n.List.Set(p.exprList(clause.Cases)) } - if tswitch != nil { + if tswitch != nil && tswitch.Left != nil { nn := newname(tswitch.Left.Sym) declare(nn, dclcontext) n.Rlist.Set1(nn) // keep track of the instances for reporting unused nn.Name.Defn = tswitch } - n.Xoffset = int64(types.Block) - n.Nbody.Set(p.stmts(clause.Body)) + + // Trim trailing empty statements. We omit them from + // the Node AST anyway, and it's easier to identify + // out-of-place fallthrough statements without them. 
+ body := clause.Body + for len(body) > 0 { + if _, ok := body[len(body)-1].(*syntax.EmptyStmt); !ok { + break + } + body = body[:len(body)-1] + } + + n.Nbody.Set(p.stmtsFall(body, true)) + if l := n.Nbody.Len(); l > 0 && n.Nbody.Index(l-1).Op == OFALL { + if tswitch != nil { + yyerror("cannot fallthrough in type switch") + } + if i+1 == len(clauses) { + yyerror("cannot fallthrough final case in switch") + } + } + nodes = append(nodes, n) } if len(clauses) > 0 { @@ -968,7 +1125,6 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace src.Pos) []*Nod if clause.Comm != nil { n.List.Set1(p.stmt(clause.Comm)) } - n.Xoffset = int64(types.Block) n.Nbody.Set(p.stmts(clause.Body)) nodes = append(nodes, n) } @@ -978,12 +1134,12 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace src.Pos) []*Nod return nodes } -func (p *noder) labeledStmt(label *syntax.LabeledStmt) *Node { +func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) *Node { lhs := p.nod(label, OLABEL, p.newname(label.Label), nil) var ls *Node if label.Stmt != nil { // TODO(mdempsky): Should always be present. 
- ls = p.stmt(label.Stmt) + ls = p.stmtFall(label.Stmt, fallOK) } lhs.Name.Defn = ls diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go index 83e64e728e9..874c59cb448 100644 --- a/src/cmd/compile/internal/gc/obj.go +++ b/src/cmd/compile/internal/gc/obj.go @@ -9,6 +9,7 @@ import ( "cmd/internal/bio" "cmd/internal/obj" "cmd/internal/objabi" + "cmd/internal/src" "crypto/sha256" "fmt" "io" @@ -16,9 +17,7 @@ import ( ) // architecture-independent object file output -const ( - ArhdrSize = 60 -) +const ArhdrSize = 60 func formathdr(arhdr []byte, name string, size int64) { copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size)) @@ -56,13 +55,13 @@ func dumpobj() { } func dumpobj1(outfile string, mode int) { - var err error - bout, err = bio.Create(outfile) + bout, err := bio.Create(outfile) if err != nil { flusherrors() fmt.Printf("can't create %s: %v\n", outfile, err) errorexit() } + defer bout.Close() startobj := int64(0) var arhdr [ArhdrSize]byte @@ -92,7 +91,7 @@ func dumpobj1(outfile string, mode int) { printheader() if mode&modeCompilerObj != 0 { - dumpexport() + dumpexport(bout) } if writearchive { @@ -109,7 +108,6 @@ func dumpobj1(outfile string, mode int) { } if mode&modeLinkerObj == 0 { - bout.Close() return } @@ -171,8 +169,6 @@ func dumpobj1(outfile string, mode int) { formathdr(arhdr[:], "_go_.o", size) bout.Write(arhdr[:]) } - - bout.Close() } func addptabs() { @@ -204,24 +200,68 @@ func addptabs() { } } +func dumpGlobal(n *Node) { + if n.Type == nil { + Fatalf("external %v nil type\n", n) + } + if n.Class() == PFUNC { + return + } + if n.Sym.Pkg != localpkg { + return + } + dowidth(n.Type) + ggloblnod(n) +} + +func dumpGlobalConst(n *Node) { + // only export typed constants + t := n.Type + if t == nil { + return + } + if n.Sym.Pkg != localpkg { + return + } + // only export integer constants for now + switch t.Etype { + case TINT8: + case TINT16: + case TINT32: + case TINT64: + case TINT: + 
case TUINT8: + case TUINT16: + case TUINT32: + case TUINT64: + case TUINT: + case TUINTPTR: + // ok + case TIDEAL: + if !Isconst(n, CTINT) { + return + } + x := n.Val().U.(*Mpint) + if x.Cmp(minintval[TINT]) < 0 || x.Cmp(maxintval[TINT]) > 0 { + return + } + // Ideal integers we export as int (if they fit). + t = types.Types[TINT] + default: + return + } + Ctxt.DwarfIntConst(myimportpath, n.Sym.Name, typesymname(t), n.Int64()) +} + func dumpglobls() { // add globals for _, n := range externdcl { - if n.Op != ONAME { - continue + switch n.Op { + case ONAME: + dumpGlobal(n) + case OLITERAL: + dumpGlobalConst(n) } - - if n.Type == nil { - Fatalf("external %v nil type\n", n) - } - if n.Class() == PFUNC { - continue - } - if n.Sym.Pkg != localpkg { - continue - } - dowidth(n.Type) - ggloblnod(n) } obj.SortSlice(funcsyms, func(i, j int) bool { @@ -291,7 +331,7 @@ func dbvec(s *obj.LSym, off int, bv bvec) int { return off } -func stringsym(s string) (data *obj.LSym) { +func stringsym(pos src.XPos, s string) (data *obj.LSym) { var symname string if len(s) > 100 { // Huge strings are hashed to avoid long names in object files. 
@@ -312,7 +352,7 @@ func stringsym(s string) (data *obj.LSym) { if !symdata.SeenGlobl() { // string data - off := dsname(symdata, 0, s) + off := dsname(symdata, 0, s, pos, "string") ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL) } @@ -328,7 +368,7 @@ func slicebytes(nam *Node, s string, len int) { sym.Def = asTypesNode(newname(sym)) lsym := sym.Linksym() - off := dsname(lsym, 0, s) + off := dsname(lsym, 0, s, nam.Pos, "slice") ggloblsym(lsym, int32(off), obj.NOPTR|obj.LOCAL) if nam.Op != ONAME { @@ -341,7 +381,15 @@ func slicebytes(nam *Node, s string, len int) { duintptr(nsym, off, uint64(len)) } -func dsname(s *obj.LSym, off int, t string) int { +func dsname(s *obj.LSym, off int, t string, pos src.XPos, what string) int { + // Objects that are too large will cause the data section to overflow right away, + // causing a cryptic error message by the linker. Check for oversize objects here + // and provide a useful error message instead. + if int64(len(t)) > 2e9 { + yyerrorl(pos, "%v with length %v is too big", what, len(t)) + return 0 + } + s.WriteString(Ctxt, int64(off), len(t), t) return off + len(t) } @@ -406,7 +454,7 @@ func gdata(nam *Node, nr *Node, wid int) { } case string: - symdata := stringsym(u) + symdata := stringsym(nam.Pos, u) s.WriteAddr(Ctxt, nam.Xoffset, Widthptr, symdata, 0) s.WriteInt(Ctxt, nam.Xoffset+int64(Widthptr), Widthptr, int64(len(u))) diff --git a/src/cmd/compile/internal/gc/op_string.go b/src/cmd/compile/internal/gc/op_string.go new file mode 100644 index 00000000000..2d4772780e5 --- /dev/null +++ b/src/cmd/compile/internal/gc/op_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=Op -trimprefix=O"; DO NOT EDIT. 
+ +package gc + +import "fmt" + +const _Op_name = "XXXNAMENONAMETYPEPACKLITERALADDSUBORXORADDSTRADDRANDANDAPPENDARRAYBYTESTRARRAYBYTESTRTMPARRAYRUNESTRSTRARRAYBYTESTRARRAYBYTETMPSTRARRAYRUNEASAS2AS2FUNCAS2RECVAS2MAPRAS2DOTTYPEASOPCALLCALLFUNCCALLMETHCALLINTERCALLPARTCAPCLOSECLOSURECMPIFACECMPSTRCOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVNOPCOPYDCLDCLFUNCDCLFIELDDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTINDINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMULDIVMODLSHRSHANDANDNOTNEWNOTCOMPLUSMINUSORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRRECOVERRECVRUNESTRSELRECVSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFBLOCKBREAKCASEXCASECONTINUEDEFEREMPTYFALLFORFORUNTILGOTOIFLABELPROCRANGERETURNSELECTSWITCHTYPESWTCHANTMAPTSTRUCTTINTERTFUNCTARRAYDDDDDDARGINLCALLEFACEITABIDATASPTRCLOSUREVARCFUNCCHECKNILVARKILLVARLIVEINDREGSPRETJMPGETGEND" + +var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 36, 39, 45, 49, 55, 61, 73, 88, 100, 112, 127, 139, 141, 144, 151, 158, 165, 175, 179, 183, 191, 199, 208, 216, 219, 224, 231, 239, 245, 252, 258, 267, 275, 283, 289, 293, 302, 309, 313, 316, 323, 331, 339, 346, 352, 355, 361, 368, 376, 380, 387, 395, 397, 399, 401, 403, 405, 407, 410, 415, 423, 426, 435, 438, 442, 450, 457, 466, 469, 472, 475, 478, 481, 484, 490, 493, 496, 499, 503, 508, 512, 517, 522, 528, 533, 537, 542, 550, 558, 564, 573, 580, 584, 591, 598, 606, 610, 614, 618, 625, 632, 640, 646, 651, 656, 660, 665, 673, 678, 683, 687, 690, 698, 702, 704, 709, 713, 718, 724, 730, 736, 742, 747, 751, 758, 764, 769, 775, 778, 784, 791, 796, 800, 805, 809, 819, 824, 832, 839, 846, 854, 860, 864, 867} + +func (i Op) String() string { + if i >= Op(len(_Op_index)-1) { + return fmt.Sprintf("Op(%d)", i) + } + return _Op_name[_Op_index[i]:_Op_index[i+1]] +} diff --git a/src/cmd/compile/internal/gc/opnames.go b/src/cmd/compile/internal/gc/opnames.go deleted file mode 100644 index 09442b595f4..00000000000 
--- a/src/cmd/compile/internal/gc/opnames.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gc - -// auto generated by go tool dist -var opnames = []string{ - OXXX: "XXX", - ONAME: "NAME", - ONONAME: "NONAME", - OTYPE: "TYPE", - OPACK: "PACK", - OLITERAL: "LITERAL", - OADD: "ADD", - OSUB: "SUB", - OOR: "OR", - OXOR: "XOR", - OADDSTR: "ADDSTR", - OADDR: "ADDR", - OANDAND: "ANDAND", - OAPPEND: "APPEND", - OARRAYBYTESTR: "ARRAYBYTESTR", - OARRAYBYTESTRTMP: "ARRAYBYTESTRTMP", - OARRAYRUNESTR: "ARRAYRUNESTR", - OSTRARRAYBYTE: "STRARRAYBYTE", - OSTRARRAYBYTETMP: "STRARRAYBYTETMP", - OSTRARRAYRUNE: "STRARRAYRUNE", - OAS: "AS", - OAS2: "AS2", - OAS2FUNC: "AS2FUNC", - OAS2RECV: "AS2RECV", - OAS2MAPR: "AS2MAPR", - OAS2DOTTYPE: "AS2DOTTYPE", - OASOP: "ASOP", - OCALL: "CALL", - OCALLFUNC: "CALLFUNC", - OCALLMETH: "CALLMETH", - OCALLINTER: "CALLINTER", - OCALLPART: "CALLPART", - OCAP: "CAP", - OCLOSE: "CLOSE", - OCLOSURE: "CLOSURE", - OCMPIFACE: "CMPIFACE", - OCMPSTR: "CMPSTR", - OCOMPLIT: "COMPLIT", - OMAPLIT: "MAPLIT", - OSTRUCTLIT: "STRUCTLIT", - OARRAYLIT: "ARRAYLIT", - OSLICELIT: "SLICELIT", - OPTRLIT: "PTRLIT", - OCONV: "CONV", - OCONVIFACE: "CONVIFACE", - OCONVNOP: "CONVNOP", - OCOPY: "COPY", - ODCL: "DCL", - ODCLFUNC: "DCLFUNC", - ODCLFIELD: "DCLFIELD", - ODCLCONST: "DCLCONST", - ODCLTYPE: "DCLTYPE", - ODELETE: "DELETE", - ODOT: "DOT", - ODOTPTR: "DOTPTR", - ODOTMETH: "DOTMETH", - ODOTINTER: "DOTINTER", - OXDOT: "XDOT", - ODOTTYPE: "DOTTYPE", - ODOTTYPE2: "DOTTYPE2", - OEQ: "EQ", - ONE: "NE", - OLT: "LT", - OLE: "LE", - OGE: "GE", - OGT: "GT", - OIND: "IND", - OINDEX: "INDEX", - OINDEXMAP: "INDEXMAP", - OKEY: "KEY", - OSTRUCTKEY: "STRUCTKEY", - OLEN: "LEN", - OMAKE: "MAKE", - OMAKECHAN: "MAKECHAN", - OMAKEMAP: "MAKEMAP", - OMAKESLICE: "MAKESLICE", - OMUL: "MUL", - ODIV: "DIV", - OMOD: "MOD", - OLSH: "LSH", - ORSH: 
"RSH", - OAND: "AND", - OANDNOT: "ANDNOT", - ONEW: "NEW", - ONOT: "NOT", - OCOM: "COM", - OPLUS: "PLUS", - OMINUS: "MINUS", - OOROR: "OROR", - OPANIC: "PANIC", - OPRINT: "PRINT", - OPRINTN: "PRINTN", - OPAREN: "PAREN", - OSEND: "SEND", - OSLICE: "SLICE", - OSLICEARR: "SLICEARR", - OSLICESTR: "SLICESTR", - OSLICE3: "SLICE3", - OSLICE3ARR: "SLICE3ARR", - ORECOVER: "RECOVER", - ORECV: "RECV", - ORUNESTR: "RUNESTR", - OSELRECV: "SELRECV", - OSELRECV2: "SELRECV2", - OIOTA: "IOTA", - OREAL: "REAL", - OIMAG: "IMAG", - OCOMPLEX: "COMPLEX", - OBLOCK: "BLOCK", - OBREAK: "BREAK", - OCASE: "CASE", - OXCASE: "XCASE", - OCONTINUE: "CONTINUE", - ODEFER: "DEFER", - OEMPTY: "EMPTY", - OFALL: "FALL", - OXFALL: "XFALL", - OFOR: "FOR", - OFORUNTIL: "FORUNTIL", - OGOTO: "GOTO", - OIF: "IF", - OLABEL: "LABEL", - OPROC: "PROC", - ORANGE: "RANGE", - ORETURN: "RETURN", - OSELECT: "SELECT", - OSWITCH: "SWITCH", - OTYPESW: "TYPESW", - OTCHAN: "TCHAN", - OTMAP: "TMAP", - OTSTRUCT: "TSTRUCT", - OTINTER: "TINTER", - OTFUNC: "TFUNC", - OTARRAY: "TARRAY", - ODDD: "DDD", - ODDDARG: "DDDARG", - OINLCALL: "INLCALL", - OEFACE: "EFACE", - OITAB: "ITAB", - OIDATA: "IDATA", - OSPTR: "SPTR", - OCLOSUREVAR: "CLOSUREVAR", - OCFUNC: "CFUNC", - OCHECKNIL: "CHECKNIL", - OVARKILL: "VARKILL", - OVARLIVE: "VARLIVE", - OINDREGSP: "INDREGSP", - ORETJMP: "RETJMP", - OGETG: "GETG", - OEND: "END", -} diff --git a/src/cmd/compile/internal/gc/order.go b/src/cmd/compile/internal/gc/order.go index cdda2f3486e..de89adf0e0e 100644 --- a/src/cmd/compile/internal/gc/order.go +++ b/src/cmd/compile/internal/gc/order.go @@ -187,7 +187,7 @@ func isaddrokay(n *Node) bool { // The result of orderaddrtemp MUST be assigned back to n, e.g. // n.Left = orderaddrtemp(n.Left, order) func orderaddrtemp(n *Node, order *Order) *Node { - if consttype(n) >= 0 { + if consttype(n) > 0 { // TODO: expand this to all static composite literal nodes? 
n = defaultlit(n, nil) dowidth(n.Type) @@ -235,18 +235,16 @@ func poptemp(mark ordermarker, order *Order) { // above the mark on the temporary stack, but it does not pop them // from the stack. func cleantempnopop(mark ordermarker, order *Order, out *[]*Node) { - var kill *Node - for i := len(order.temp) - 1; i >= int(mark); i-- { n := order.temp[i] if n.Name.Keepalive() { n.Name.SetKeepalive(false) n.SetAddrtaken(true) // ensure SSA keeps the n variable - kill = nod(OVARLIVE, n, nil) + kill := nod(OVARLIVE, n, nil) kill = typecheck(kill, Etop) *out = append(*out, kill) } - kill = nod(OVARKILL, n, nil) + kill := nod(OVARKILL, n, nil) kill = typecheck(kill, Etop) *out = append(*out, kill) } @@ -346,14 +344,14 @@ func ismulticall(l Nodes) bool { } // call must return multiple values - return n.Left.Type.Results().NumFields() > 1 + return n.Left.Type.NumResults() > 1 } // Copyret emits t1, t2, ... = n, where n is a function call, // and then returns the list t1, t2, .... func copyret(n *Node, order *Order) []*Node { if !n.Type.IsFuncArgStruct() { - Fatalf("copyret %v %d", n.Type, n.Left.Type.Results().NumFields()) + Fatalf("copyret %v %d", n.Type, n.Left.Type.NumResults()) } var l1 []*Node @@ -429,10 +427,10 @@ func ordercall(n *Node, order *Order) { // to make sure that all map assignments have the form m[k] = x. // (Note: orderexpr has already been called on n, so we know k is addressable.) // -// If n is the multiple assignment form ..., m[k], ... = ..., the rewrite is +// If n is the multiple assignment form ..., m[k], ... = ..., x, ..., the rewrite is // t1 = m // t2 = k -// ...., t3, ... = x +// ...., t3, ... = ..., x, ... // t1[t2] = t3 // // The temporaries t1, t2 are needed in case the ... being assigned @@ -446,30 +444,29 @@ func ordermapassign(n *Node, order *Order) { Fatalf("ordermapassign %v", n.Op) case OAS: + if n.Left.Op == OINDEXMAP { + // Make sure we evaluate the RHS before starting the map insert. + // We need to make sure the RHS won't panic. 
See issue 22881. + n.Right = ordercheapexpr(n.Right, order) + } order.out = append(order.out, n) case OAS2, OAS2DOTTYPE, OAS2MAPR, OAS2FUNC: var post []*Node - var m *Node - var a *Node - for i1, n1 := range n.List.Slice() { - if n1.Op == OINDEXMAP { - m = n1 + for i, m := range n.List.Slice() { + switch { + case m.Op == OINDEXMAP: if !m.Left.IsAutoTmp() { m.Left = ordercopyexpr(m.Left, m.Left.Type, order, 0) } if !m.Right.IsAutoTmp() { m.Right = ordercopyexpr(m.Right, m.Right.Type, order, 0) } - n.List.SetIndex(i1, ordertemp(m.Type, order, false)) - a = nod(OAS, m, n.List.Index(i1)) - a = typecheck(a, Etop) - post = append(post, a) - } else if instrumenting && n.Op == OAS2FUNC && !isblank(n.List.Index(i1)) { - m = n.List.Index(i1) + fallthrough + case instrumenting && n.Op == OAS2FUNC && !isblank(m): t := ordertemp(m.Type, order, false) - n.List.SetIndex(i1, t) - a = nod(OAS, m, t) + n.List.SetIndex(i, t) + a := nod(OAS, m, t) a = typecheck(a, Etop) post = append(post, a) } @@ -533,8 +530,9 @@ func orderstmt(n *Node, order *Order) { // out map read from map write when l is // a map index expression. t := marktemp(order) - n.Left = orderexpr(n.Left, order, nil) + n.Right = orderexpr(n.Right, order, nil) + n.Left = ordersafeexpr(n.Left, order) tmp1 := treecopy(n.Left, src.NoXPos) if tmp1.Op == OINDEXMAP { @@ -619,7 +617,6 @@ func orderstmt(n *Node, order *Order) { ODCLCONST, ODCLTYPE, OFALL, - OXFALL, OGOTO, OLABEL, ORETJMP: @@ -761,11 +758,12 @@ func orderstmt(n *Node, order *Order) { r := n.Right n.Right = ordercopyexpr(r, r.Type, order, 0) - // n->alloc is the temp for the iterator. - prealloc[n] = ordertemp(types.Types[TUINT8], order, true) + // prealloc[n] is the temp for the iterator. + // hiter contains pointers and needs to be zeroed. 
+ prealloc[n] = ordertemp(hiter(n.Type), order, true) } - for i := range n.List.Slice() { - n.List.SetIndex(i, orderexprinplace(n.List.Index(i), order)) + for i, n1 := range n.List.Slice() { + n.List.SetIndex(i, orderexprinplace(n1, order)) } orderblockNodes(&n.Nbody) order.out = append(order.out, n) @@ -787,14 +785,11 @@ func orderstmt(n *Node, order *Order) { case OSELECT: t := marktemp(order) - var tmp1 *Node - var tmp2 *Node - var r *Node for _, n2 := range n.List.Slice() { if n2.Op != OXCASE { Fatalf("order select case %v", n2.Op) } - r = n2.Left + r := n2.Left setlineno(n2) // Append any new body prologue to ninit. @@ -855,16 +850,16 @@ func orderstmt(n *Node, order *Order) { // use channel element type for temporary to avoid conversions, // such as in case interfacevalue = <-intchan. // the conversion happens in the OAS instead. - tmp1 = r.Left + tmp1 := r.Left if r.Colas() { - tmp2 = nod(ODCL, tmp1, nil) + tmp2 := nod(ODCL, tmp1, nil) tmp2 = typecheck(tmp2, Etop) n2.Ninit.Append(tmp2) } r.Left = ordertemp(r.Right.Left.Type.Elem(), order, types.Haspointers(r.Right.Left.Type.Elem())) - tmp2 = nod(OAS, tmp1, r.Left) + tmp2 := nod(OAS, tmp1, r.Left) tmp2 = typecheck(tmp2, Etop) n2.Ninit.Append(tmp2) } @@ -873,15 +868,15 @@ func orderstmt(n *Node, order *Order) { r.List.Set(nil) } if r.List.Len() != 0 { - tmp1 = r.List.First() + tmp1 := r.List.First() if r.Colas() { - tmp2 = nod(ODCL, tmp1, nil) + tmp2 := nod(ODCL, tmp1, nil) tmp2 = typecheck(tmp2, Etop) n2.Ninit.Append(tmp2) } r.List.Set1(ordertemp(types.Types[TBOOL], order, false)) - tmp2 = okas(tmp1, r.List.First()) + tmp2 := okas(tmp1, r.List.First()) tmp2 = typecheck(tmp2, Etop) n2.Ninit.Append(tmp2) } diff --git a/src/cmd/compile/internal/gc/pgen.go b/src/cmd/compile/internal/gc/pgen.go index 66e4a10ee88..cf99931bb5f 100644 --- a/src/cmd/compile/internal/gc/pgen.go +++ b/src/cmd/compile/internal/gc/pgen.go @@ -13,8 +13,10 @@ import ( "cmd/internal/src" "cmd/internal/sys" "fmt" + "math" "math/rand" "sort" + 
"strings" "sync" "time" ) @@ -36,26 +38,22 @@ func emitptrargsmap() { nptr := int(Curfn.Type.ArgWidth() / int64(Widthptr)) bv := bvalloc(int32(nptr) * 2) nbitmap := 1 - if Curfn.Type.Results().NumFields() > 0 { + if Curfn.Type.NumResults() > 0 { nbitmap = 2 } off := duint32(lsym, 0, uint32(nbitmap)) off = duint32(lsym, off, uint32(bv.n)) - var xoffset int64 + if Curfn.IsMethod() { - xoffset = 0 - onebitwalktype1(Curfn.Type.Recvs(), &xoffset, bv) + onebitwalktype1(Curfn.Type.Recvs(), 0, bv) } - - if Curfn.Type.Params().NumFields() > 0 { - xoffset = 0 - onebitwalktype1(Curfn.Type.Params(), &xoffset, bv) + if Curfn.Type.NumParams() > 0 { + onebitwalktype1(Curfn.Type.Params(), 0, bv) } - off = dbvec(lsym, off, bv) - if Curfn.Type.Results().NumFields() > 0 { - xoffset = 0 - onebitwalktype1(Curfn.Type.Results(), &xoffset, bv) + + if Curfn.Type.NumResults() > 0 { + onebitwalktype1(Curfn.Type.Results(), 0, bv) off = dbvec(lsym, off, bv) } @@ -132,20 +130,21 @@ func (s *ssafn) AllocFrame(f *ssa.Func) { scratchUsed := false for _, b := range f.Blocks { for _, v := range b.Values { - switch a := v.Aux.(type) { - case *ssa.ArgSymbol: - n := a.Node.(*Node) - // Don't modify nodfp; it is a global. - if n != nodfp { + if n, ok := v.Aux.(*Node); ok { + switch n.Class() { + case PPARAM, PPARAMOUT: + // Don't modify nodfp; it is a global. + if n != nodfp { + n.Name.SetUsed(true) + } + case PAUTO: n.Name.SetUsed(true) } - case *ssa.AutoSymbol: - a.Node.(*Node).Name.SetUsed(true) } - if !scratchUsed { scratchUsed = v.Op.UsesScratch() } + } } @@ -230,23 +229,23 @@ func compilenow() bool { return nBackendWorkers == 1 && Debug_compilelater == 0 } -const maxStackSize = 1 << 31 +const maxStackSize = 1 << 30 // compileSSA builds an SSA backend function, // uses it to generate a plist, // and flushes that plist to machine code. // worker indicates which of the backend workers is doing the processing. 
func compileSSA(fn *Node, worker int) { - ssafn := buildssa(fn, worker) - pp := newProgs(fn, worker) - genssa(ssafn, pp) - if pp.Text.To.Offset < maxStackSize { - pp.Flush() - } else { + f := buildssa(fn, worker) + if f.Frontend().(*ssafn).stksize >= maxStackSize { largeStackFramesMu.Lock() largeStackFrames = append(largeStackFrames, fn.Pos) largeStackFramesMu.Unlock() + return } + pp := newProgs(fn, worker) + genssa(f, pp) + pp.Flush() // fieldtrack must be called after pp.Flush. See issue 20014. fieldtrack(pp.Text.From.Sym, fn.Func.FieldTrack) pp.Free() @@ -281,6 +280,7 @@ func compileFunctions() { }) } var wg sync.WaitGroup + Ctxt.InParallel = true c := make(chan *Node, nBackendWorkers) for i := 0; i < nBackendWorkers; i++ { wg.Add(1) @@ -297,35 +297,101 @@ func compileFunctions() { close(c) compilequeue = nil wg.Wait() + Ctxt.InParallel = false sizeCalculationDisabled = false } } -func debuginfo(fnsym *obj.LSym, curfn interface{}) []dwarf.Scope { +func debuginfo(fnsym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) { fn := curfn.(*Node) - if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect { - Fatalf("unexpected fnsym: %v != %v", fnsym, expect) + debugInfo := fn.Func.DebugInfo + fn.Func.DebugInfo = nil + if fn.Func.Nname != nil { + if expect := fn.Func.Nname.Sym.Linksym(); fnsym != expect { + Fatalf("unexpected fnsym: %v != %v", fnsym, expect) + } } - var dwarfVars []*dwarf.Var - var varScopes []ScopeID - + var automDecls []*Node + // Populate Automs for fn. 
for _, n := range fn.Func.Dcl { if n.Op != ONAME { // might be OTYPE or OLITERAL continue } - var name obj.AddrName + switch n.Class() { + case PAUTO: + if !n.Name.Used() { + // Text == nil -> generating abstract function + if fnsym.Func.Text != nil { + Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)") + } + continue + } + name = obj.NAME_AUTO + case PPARAM, PPARAMOUT: + name = obj.NAME_PARAM + default: + continue + } + automDecls = append(automDecls, n) + gotype := ngotype(n).Linksym() + fnsym.Func.Autom = append(fnsym.Func.Autom, &obj.Auto{ + Asym: Ctxt.Lookup(n.Sym.Name), + Aoffset: int32(n.Xoffset), + Name: name, + Gotype: gotype, + }) + } + + decls, dwarfVars := createDwarfVars(fnsym, debugInfo, automDecls) + + var varScopes []ScopeID + for _, decl := range decls { + pos := decl.Pos + if decl.Name.Defn != nil && (decl.Name.Captured() || decl.Name.Byval()) { + // It's not clear which position is correct for captured variables here: + // * decl.Pos is the wrong position for captured variables, in the inner + // function, but it is the right position in the outer function. + // * decl.Name.Defn is nil for captured variables that were arguments + // on the outer function, however the decl.Pos for those seems to be + // correct. + // * decl.Name.Defn is the "wrong" thing for variables declared in the + // header of a type switch, it's their position in the header, rather + // than the position of the case statement. In principle this is the + // right thing, but here we prefer the latter because it makes each + // instance of the header variable local to the lexical block of its + // case statement. + // This code is probably wrong for type switch variables that are also + // captured. 
+ pos = decl.Name.Defn.Pos + } + varScopes = append(varScopes, findScope(fn.Func.Marks, pos)) + } + + scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes) + var inlcalls dwarf.InlCalls + if genDwarfInline > 0 { + inlcalls = assembleInlines(fnsym, fn, dwarfVars) + } + return scopes, inlcalls +} + +// createSimpleVars creates a DWARF entry for every variable declared in the +// function, claiming that they are permanently on the stack. +func createSimpleVars(automDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) { + var vars []*dwarf.Var + var decls []*Node + selected := make(map[*Node]bool) + for _, n := range automDecls { + if n.IsAutoTmp() { + continue + } var abbrev int offs := n.Xoffset switch n.Class() { case PAUTO: - if !n.Name.Used() { - Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)") - } - name = obj.NAME_AUTO - abbrev = dwarf.DW_ABRV_AUTO if Ctxt.FixedFrameSize() == 0 { offs -= int64(Widthptr) @@ -335,48 +401,490 @@ func debuginfo(fnsym *obj.LSym, curfn interface{}) []dwarf.Scope { } case PPARAM, PPARAMOUT: - name = obj.NAME_PARAM - abbrev = dwarf.DW_ABRV_PARAM offs += Ctxt.FixedFrameSize() - default: - continue + Fatalf("createSimpleVars unexpected type %v for node %v", n.Class(), n) } + selected[n] = true + typename := dwarf.InfoPrefix + typesymname(n.Type) + decls = append(decls, n) + inlIndex := 0 + if genDwarfInline > 1 { + if n.InlFormal() || n.InlLocal() { + inlIndex = posInlIndex(n.Pos) + 1 + } + } + declpos := Ctxt.InnermostPos(n.Pos) + vars = append(vars, &dwarf.Var{ + Name: n.Sym.Name, + IsReturnValue: n.Class() == PPARAMOUT, + IsInlFormal: n.InlFormal(), + Abbrev: abbrev, + StackOffset: int32(offs), + Type: Ctxt.Lookup(typename), + DeclFile: declpos.Base().SymFilename(), + DeclLine: declpos.Line(), + DeclCol: declpos.Col(), + InlIndex: int32(inlIndex), + ChildIndex: -1, + }) + } + return decls, vars, selected +} + +type varPart struct { + varOffset int64 + slot ssa.SlotID +} + +func createComplexVars(fnsym 
*obj.LSym, debugInfo *ssa.FuncDebug, automDecls []*Node) ([]*Node, []*dwarf.Var, map[*Node]bool) { + for _, blockDebug := range debugInfo.Blocks { + for _, locList := range blockDebug.Variables { + for _, loc := range locList.Locations { + if loc.StartProg != nil { + loc.StartPC = loc.StartProg.Pc + } + if loc.EndProg != nil { + loc.EndPC = loc.EndProg.Pc + } else { + loc.EndPC = fnsym.Size + } + if Debug_locationlist == 0 { + loc.EndProg = nil + loc.StartProg = nil + } + } + } + } + + // Group SSA variables by the user variable they were decomposed from. + varParts := map[*Node][]varPart{} + ssaVars := make(map[*Node]bool) + for slotID, slot := range debugInfo.VarSlots { + for slot.SplitOf != nil { + slot = slot.SplitOf + } + n := slot.N.(*Node) + ssaVars[n] = true + varParts[n] = append(varParts[n], varPart{varOffset(slot), ssa.SlotID(slotID)}) + } + + // Produce a DWARF variable entry for each user variable. + // Don't iterate over the map -- that's nondeterministic, and + // createComplexVar has side effects. Instead, go by slot. + var decls []*Node + var vars []*dwarf.Var + for _, slot := range debugInfo.VarSlots { + for slot.SplitOf != nil { + slot = slot.SplitOf + } + n := slot.N.(*Node) + parts := varParts[n] + if parts == nil { + continue + } + // Don't work on this variable again, no matter how many slots it has. + delete(varParts, n) + + // Get the order the parts need to be in to represent the memory + // of the decomposed user variable. + sort.Sort(partsByVarOffset(parts)) + + if dvar := createComplexVar(debugInfo, n, parts); dvar != nil { + decls = append(decls, n) + vars = append(vars, dvar) + } + } + + return decls, vars, ssaVars +} + +func createDwarfVars(fnsym *obj.LSym, debugInfo *ssa.FuncDebug, automDecls []*Node) ([]*Node, []*dwarf.Var) { + // Collect a raw list of DWARF vars. 
+ var vars []*dwarf.Var + var decls []*Node + var selected map[*Node]bool + if Ctxt.Flag_locationlists && Ctxt.Flag_optimize && debugInfo != nil { + decls, vars, selected = createComplexVars(fnsym, debugInfo, automDecls) + } else { + decls, vars, selected = createSimpleVars(automDecls) + } + + var dcl []*Node + var chopVersion bool + if fnsym.WasInlined() { + dcl, chopVersion = preInliningDcls(fnsym) + } else { + dcl = automDecls + } + + // If optimization is enabled, the list above will typically be + // missing some of the original pre-optimization variables in the + // function (they may have been promoted to registers, folded into + // constants, dead-coded away, etc). Here we add back in entries + // for selected missing vars. Note that the recipe below creates a + // conservative location. The idea here is that we want to + // communicate to the user that "yes, there is a variable named X + // in this function, but no, I don't have enough information to + // reliably report its contents." + for _, n := range dcl { + if _, found := selected[n]; found { + continue + } + c := n.Sym.Name[0] + if c == '~' || c == '.' 
|| n.Type.IsUntyped() { + continue + } + typename := dwarf.InfoPrefix + typesymname(n.Type) + decls = append(decls, n) + abbrev := dwarf.DW_ABRV_AUTO_LOCLIST + if n.Class() == PPARAM || n.Class() == PPARAMOUT { + abbrev = dwarf.DW_ABRV_PARAM_LOCLIST + } + inlIndex := 0 + if genDwarfInline > 1 { + if n.InlFormal() || n.InlLocal() { + inlIndex = posInlIndex(n.Pos) + 1 + } + } + declpos := Ctxt.InnermostPos(n.Pos) + vars = append(vars, &dwarf.Var{ + Name: n.Sym.Name, + IsReturnValue: n.Class() == PPARAMOUT, + Abbrev: abbrev, + StackOffset: int32(n.Xoffset), + Type: Ctxt.Lookup(typename), + DeclFile: declpos.Base().SymFilename(), + DeclLine: declpos.Line(), + DeclCol: declpos.Col(), + InlIndex: int32(inlIndex), + ChildIndex: -1, + }) + // Append a "deleted auto" entry to the autom list so as to + // insure that the type in question is picked up by the linker. + // See issue 22941. gotype := ngotype(n).Linksym() fnsym.Func.Autom = append(fnsym.Func.Autom, &obj.Auto{ Asym: Ctxt.Lookup(n.Sym.Name), - Aoffset: int32(n.Xoffset), - Name: name, + Aoffset: int32(-1), + Name: obj.NAME_DELETED_AUTO, Gotype: gotype, }) - if n.IsAutoTmp() { - continue - } - - typename := dwarf.InfoPrefix + gotype.Name[len("type."):] - dwarfVars = append(dwarfVars, &dwarf.Var{ - Name: n.Sym.Name, - Abbrev: abbrev, - Offset: int32(offs), - Type: Ctxt.Lookup(typename), - }) - - var scope ScopeID - if !n.Name.Captured() && !n.Name.Byval() { - // n.Pos of captured variables is their first - // use in the closure but they should always - // be assigned to scope 0 instead. - // TODO(mdempsky): Verify this. - scope = findScope(fn.Func.Marks, n.Pos) - } - - varScopes = append(varScopes, scope) } - return assembleScopes(fnsym, fn, dwarfVars, varScopes) + // Parameter and local variable names are given middle dot + // version numbers as part of the writing them out to export + // data (see issue 4326). 
If DWARF inlined routine generation + // is turned on, undo this versioning, since DWARF variables + // in question will be parented by the inlined routine and + // not the top-level caller. + if genDwarfInline > 1 && chopVersion { + for _, v := range vars { + if v.InlIndex != -1 { + if i := strings.Index(v.Name, "·"); i > 0 { + v.Name = v.Name[:i] // cut off Vargen + } + } + } + } + + return decls, vars +} + +// Given a function that was inlined at some point during the compilation, +// return a list of nodes corresponding to the autos/locals in that +// function prior to inlining. Untyped and compiler-synthesized vars are +// stripped out along the way. +func preInliningDcls(fnsym *obj.LSym) ([]*Node, bool) { + fn := Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*Node) + imported := false + var dcl, rdcl []*Node + if fn.Name.Defn != nil { + dcl = fn.Func.Inldcl.Slice() // local function + } else { + dcl = fn.Func.Dcl // imported function + imported = true + } + for _, n := range dcl { + c := n.Sym.Name[0] + if c == '~' || c == '.' || n.Type.IsUntyped() { + continue + } + rdcl = append(rdcl, n) + } + return rdcl, imported +} + +// varOffset returns the offset of slot within the user variable it was +// decomposed from. This has nothing to do with its stack offset. +func varOffset(slot *ssa.LocalSlot) int64 { + offset := slot.Off + for ; slot.SplitOf != nil; slot = slot.SplitOf { + offset += slot.SplitOffset + } + return offset +} + +type partsByVarOffset []varPart + +func (a partsByVarOffset) Len() int { return len(a) } +func (a partsByVarOffset) Less(i, j int) bool { return a[i].varOffset < a[j].varOffset } +func (a partsByVarOffset) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// stackOffset returns the stack location of a LocalSlot relative to the +// stack pointer, suitable for use in a DWARF location entry. This has nothing +// to do with its offset in the user variable. 
+func stackOffset(slot *ssa.LocalSlot) int32 { + n := slot.N.(*Node) + var base int64 + switch n.Class() { + case PAUTO: + if Ctxt.FixedFrameSize() == 0 { + base -= int64(Widthptr) + } + if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) { + base -= int64(Widthptr) + } + case PPARAM, PPARAMOUT: + base += Ctxt.FixedFrameSize() + } + return int32(base + n.Xoffset + slot.Off) +} + +// createComplexVar builds a DWARF variable entry and location list representing n. +func createComplexVar(debugInfo *ssa.FuncDebug, n *Node, parts []varPart) *dwarf.Var { + slots := debugInfo.Slots + var offs int64 // base stack offset for this kind of variable + var abbrev int + switch n.Class() { + case PAUTO: + abbrev = dwarf.DW_ABRV_AUTO_LOCLIST + if Ctxt.FixedFrameSize() == 0 { + offs -= int64(Widthptr) + } + if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) { + offs -= int64(Widthptr) + } + + case PPARAM, PPARAMOUT: + abbrev = dwarf.DW_ABRV_PARAM_LOCLIST + offs += Ctxt.FixedFrameSize() + default: + return nil + } + + gotype := ngotype(n).Linksym() + typename := dwarf.InfoPrefix + gotype.Name[len("type."):] + inlIndex := 0 + if genDwarfInline > 1 { + if n.InlFormal() || n.InlLocal() { + inlIndex = posInlIndex(n.Pos) + 1 + } + } + declpos := Ctxt.InnermostPos(n.Pos) + dvar := &dwarf.Var{ + Name: n.Sym.Name, + IsReturnValue: n.Class() == PPARAMOUT, + IsInlFormal: n.InlFormal(), + Abbrev: abbrev, + Type: Ctxt.Lookup(typename), + // The stack offset is used as a sorting key, so for decomposed + // variables just give it the lowest one. It's not used otherwise. + // This won't work well if the first slot hasn't been assigned a stack + // location, but it's not obvious how to do better. + StackOffset: int32(stackOffset(slots[parts[0].slot])), + DeclFile: declpos.Base().SymFilename(), + DeclLine: declpos.Line(), + DeclCol: declpos.Col(), + InlIndex: int32(inlIndex), + ChildIndex: -1, + } + + if Debug_locationlist != 0 { + Ctxt.Logf("Building location list for %+v. 
Parts:\n", n) + for _, part := range parts { + Ctxt.Logf("\t%v => %v\n", debugInfo.Slots[part.slot], debugInfo.SlotLocsString(part.slot)) + } + } + + // Given a variable that's been decomposed into multiple parts, + // its location list may need a new entry after the beginning or + // end of every location entry for each of its parts. For example: + // + // [variable] [pc range] + // string.ptr |----|-----| |----| + // string.len |------------| |--| + // ... needs a location list like: + // string |----|-----|-| |--|-| + // + // Note that location entries may or may not line up with each other, + // and some of the result will only have one or the other part. + // + // To build the resulting list: + // - keep a "current" pointer for each part + // - find the next transition point + // - advance the current pointer for each part up to that transition point + // - build the piece for the range between that transition point and the next + // - repeat + + type locID struct { + block int + loc int + } + findLoc := func(part varPart, id locID) *ssa.VarLoc { + if id.block >= len(debugInfo.Blocks) { + return nil + } + return debugInfo.Blocks[id.block].Variables[part.slot].Locations[id.loc] + } + nextLoc := func(part varPart, id locID) (locID, *ssa.VarLoc) { + // Check if there's another loc in this block + id.loc++ + if b := debugInfo.Blocks[id.block]; b != nil && id.loc < len(b.Variables[part.slot].Locations) { + return id, findLoc(part, id) + } + // Find the next block that has a loc for this part. + id.loc = 0 + id.block++ + for ; id.block < len(debugInfo.Blocks); id.block++ { + if b := debugInfo.Blocks[id.block]; b != nil && len(b.Variables[part.slot].Locations) != 0 { + return id, findLoc(part, id) + } + } + return id, nil + } + curLoc := make([]locID, len(slots)) + // Position each pointer at the first entry for its slot. 
+ for _, part := range parts { + if b := debugInfo.Blocks[0]; b != nil && len(b.Variables[part.slot].Locations) != 0 { + // Block 0 has an entry; no need to advance. + continue + } + curLoc[part.slot], _ = nextLoc(part, curLoc[part.slot]) + } + + // findBoundaryAfter finds the next beginning or end of a piece after currentPC. + findBoundaryAfter := func(currentPC int64) int64 { + min := int64(math.MaxInt64) + for _, part := range parts { + // For each part, find the first PC greater than current. Doesn't + // matter if it's a start or an end, since we're looking for any boundary. + // If it's the new winner, save it. + onePart: + for i, loc := curLoc[part.slot], findLoc(part, curLoc[part.slot]); loc != nil; i, loc = nextLoc(part, i) { + for _, pc := range [2]int64{loc.StartPC, loc.EndPC} { + if pc > currentPC { + if pc < min { + min = pc + } + break onePart + } + } + } + } + return min + } + var start int64 + end := findBoundaryAfter(0) + for { + // Advance to the next chunk. + start = end + end = findBoundaryAfter(start) + if end == math.MaxInt64 { + break + } + + dloc := dwarf.Location{StartPC: start, EndPC: end} + if Debug_locationlist != 0 { + Ctxt.Logf("Processing range %x -> %x\n", start, end) + } + + // Advance curLoc to the last location that starts before/at start. + // After this loop, if there's a location that covers [start, end), it will be current. + // Otherwise the current piece will be too early. + for _, part := range parts { + choice := locID{-1, -1} + for i, loc := curLoc[part.slot], findLoc(part, curLoc[part.slot]); loc != nil; i, loc = nextLoc(part, i) { + if loc.StartPC > start { + break //overshot + } + choice = i // best yet + } + if choice.block != -1 { + curLoc[part.slot] = choice + } + if Debug_locationlist != 0 { + Ctxt.Logf("\t %v => %v", slots[part.slot], curLoc[part.slot]) + } + } + if Debug_locationlist != 0 { + Ctxt.Logf("\n") + } + // Assemble the location list entry for this chunk. 
+ present := 0 + for _, part := range parts { + dpiece := dwarf.Piece{ + Length: slots[part.slot].Type.Size(), + } + loc := findLoc(part, curLoc[part.slot]) + if loc == nil || start >= loc.EndPC || end <= loc.StartPC { + if Debug_locationlist != 0 { + Ctxt.Logf("\t%v: missing", slots[part.slot]) + } + dpiece.Missing = true + dloc.Pieces = append(dloc.Pieces, dpiece) + continue + } + present++ + if Debug_locationlist != 0 { + Ctxt.Logf("\t%v: %v", slots[part.slot], debugInfo.Blocks[curLoc[part.slot].block].LocString(loc)) + } + if loc.OnStack { + dpiece.OnStack = true + dpiece.StackOffset = stackOffset(slots[loc.StackLocation]) + } else { + for reg := 0; reg < len(debugInfo.Registers); reg++ { + if loc.Registers&(1< totally missing\n") + } + continue + } + // Extend the previous entry if possible. + if len(dvar.LocationList) > 0 { + prev := &dvar.LocationList[len(dvar.LocationList)-1] + if prev.EndPC == dloc.StartPC && len(prev.Pieces) == len(dloc.Pieces) { + equal := true + for i := range prev.Pieces { + if prev.Pieces[i] != dloc.Pieces[i] { + equal = false + } + } + if equal { + prev.EndPC = end + if Debug_locationlist != 0 { + Ctxt.Logf("-> merged with previous, now %#v\n", prev) + } + continue + } + } + } + dvar.LocationList = append(dvar.LocationList, dloc) + if Debug_locationlist != 0 { + Ctxt.Logf("-> added: %#v\n", dloc) + } + } + return dvar } // fieldtrack adds R_USEFIELD relocations to fnsym to record any diff --git a/src/cmd/compile/internal/gc/phi.go b/src/cmd/compile/internal/gc/phi.go index 0ce7a4b11d7..b549f0ea6f2 100644 --- a/src/cmd/compile/internal/gc/phi.go +++ b/src/cmd/compile/internal/gc/phi.go @@ -233,24 +233,25 @@ func (s *phiState) insertVarPhis(n int, var_ *Node, defs []*ssa.Block, typ *type // a D-edge, or an edge whose target is in currentRoot's subtree. continue } - if !hasPhi.contains(c.ID) { - // Add a phi to block c for variable n. 
- hasPhi.add(c.ID) - v := c.NewValue0I(currentRoot.Pos, ssa.OpPhi, typ, int64(n)) // TODO: line number right? - // Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building. - s.s.addNamedValue(var_, v) - for i := 0; i < len(c.Preds); i++ { - v.AddArg(s.placeholder) // Actual args will be filled in by resolveFwdRefs. - } - if debugPhi { - fmt.Printf("new phi for var%d in %s: %s\n", n, c, v) - } - if !hasDef.contains(c.ID) { - // There's now a new definition of this variable in block c. - // Add it to the priority queue to explore. - heap.Push(priq, c) - hasDef.add(c.ID) - } + if hasPhi.contains(c.ID) { + continue + } + // Add a phi to block c for variable n. + hasPhi.add(c.ID) + v := c.NewValue0I(currentRoot.Pos, ssa.OpPhi, typ, int64(n)) // TODO: line number right? + // Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building. + s.s.addNamedValue(var_, v) + for i := 0; i < len(c.Preds); i++ { + v.AddArg(s.placeholder) // Actual args will be filled in by resolveFwdRefs. + } + if debugPhi { + fmt.Printf("new phi for var%d in %s: %s\n", n, c, v) + } + if !hasDef.contains(c.ID) { + // There's now a new definition of this variable in block c. + // Add it to the priority queue to explore. 
+ heap.Push(priq, c) + hasDef.add(c.ID) } } diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index ca449b72bdf..49d0229702b 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -306,12 +306,10 @@ func affectedNode(v *ssa.Value) (*Node, ssa.SymEffect) { var n *Node switch a := v.Aux.(type) { - case nil, *ssa.ExternSymbol: + case nil, *obj.LSym: // ok, but no node - case *ssa.ArgSymbol: - n = a.Node.(*Node) - case *ssa.AutoSymbol: - n = a.Node.(*Node) + case *Node: + n = a default: Fatalf("weird aux: %s", v.LongString()) } @@ -353,110 +351,85 @@ func (lv *Liveness) blockEffects(b *ssa.Block) *BlockEffects { return &lv.be[b.ID] } -// NOTE: The bitmap for a specific type t should be cached in t after the first run -// and then simply copied into bv at the correct offset on future calls with -// the same type t. On https://rsc.googlecode.com/hg/testdata/slow.go, onebitwalktype1 -// accounts for 40% of the 6g execution time. -func onebitwalktype1(t *types.Type, xoffset *int64, bv bvec) { - if t.Align > 0 && *xoffset&int64(t.Align-1) != 0 { +// NOTE: The bitmap for a specific type t could be cached in t after +// the first run and then simply copied into bv at the correct offset +// on future calls with the same type t. 
+func onebitwalktype1(t *types.Type, off int64, bv bvec) { + if t.Align > 0 && off&int64(t.Align-1) != 0 { Fatalf("onebitwalktype1: invalid initial alignment, %v", t) } switch t.Etype { - case TINT8, - TUINT8, - TINT16, - TUINT16, - TINT32, - TUINT32, - TINT64, - TUINT64, - TINT, - TUINT, - TUINTPTR, - TBOOL, - TFLOAT32, - TFLOAT64, - TCOMPLEX64, - TCOMPLEX128: - *xoffset += t.Width + case TINT8, TUINT8, TINT16, TUINT16, + TINT32, TUINT32, TINT64, TUINT64, + TINT, TUINT, TUINTPTR, TBOOL, + TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128: - case TPTR32, - TPTR64, - TUNSAFEPTR, - TFUNC, - TCHAN, - TMAP: - if *xoffset&int64(Widthptr-1) != 0 { + case TPTR32, TPTR64, TUNSAFEPTR, TFUNC, TCHAN, TMAP: + if off&int64(Widthptr-1) != 0 { Fatalf("onebitwalktype1: invalid alignment, %v", t) } - bv.Set(int32(*xoffset / int64(Widthptr))) // pointer - *xoffset += t.Width + bv.Set(int32(off / int64(Widthptr))) // pointer case TSTRING: // struct { byte *str; intgo len; } - if *xoffset&int64(Widthptr-1) != 0 { + if off&int64(Widthptr-1) != 0 { Fatalf("onebitwalktype1: invalid alignment, %v", t) } - bv.Set(int32(*xoffset / int64(Widthptr))) //pointer in first slot - *xoffset += t.Width + bv.Set(int32(off / int64(Widthptr))) //pointer in first slot case TINTER: // struct { Itab *tab; void *data; } // or, when isnilinter(t)==true: // struct { Type *type; void *data; } - if *xoffset&int64(Widthptr-1) != 0 { + if off&int64(Widthptr-1) != 0 { Fatalf("onebitwalktype1: invalid alignment, %v", t) } - bv.Set(int32(*xoffset / int64(Widthptr))) // pointer in first slot - bv.Set(int32(*xoffset/int64(Widthptr) + 1)) // pointer in second slot - *xoffset += t.Width + bv.Set(int32(off / int64(Widthptr))) // pointer in first slot + bv.Set(int32(off/int64(Widthptr) + 1)) // pointer in second slot case TSLICE: // struct { byte *array; uintgo len; uintgo cap; } - if *xoffset&int64(Widthptr-1) != 0 { + if off&int64(Widthptr-1) != 0 { Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t) } - 
bv.Set(int32(*xoffset / int64(Widthptr))) // pointer in first slot (BitsPointer) - *xoffset += t.Width + bv.Set(int32(off / int64(Widthptr))) // pointer in first slot (BitsPointer) case TARRAY: + elt := t.Elem() + if elt.Width == 0 { + // Short-circuit for #20739. + break + } for i := int64(0); i < t.NumElem(); i++ { - onebitwalktype1(t.Elem(), xoffset, bv) + onebitwalktype1(elt, off, bv) + off += elt.Width } case TSTRUCT: - var o int64 - for _, t1 := range t.Fields().Slice() { - fieldoffset := t1.Offset - *xoffset += fieldoffset - o - onebitwalktype1(t1.Type, xoffset, bv) - o = fieldoffset + t1.Type.Width + for _, f := range t.Fields().Slice() { + onebitwalktype1(f.Type, off+f.Offset, bv) } - *xoffset += t.Width - o - default: Fatalf("onebitwalktype1: unexpected type, %v", t) } } -// Returns the number of words of local variables. -func localswords(lv *Liveness) int32 { +// localWords returns the number of words of local variables. +func (lv *Liveness) localWords() int32 { return int32(lv.stkptrsize / int64(Widthptr)) } -// Returns the number of words of in and out arguments. -func argswords(lv *Liveness) int32 { +// argWords returns the number of words of in and out arguments. +func (lv *Liveness) argWords() int32 { return int32(lv.fn.Type.ArgWidth() / int64(Widthptr)) } // Generates live pointer value maps for arguments and local variables. The // this argument and the in arguments are always assumed live. The vars // argument is a slice of *Nodes. 
-func onebitlivepointermap(lv *Liveness, liveout bvec, vars []*Node, args bvec, locals bvec) { - var xoffset int64 - +func (lv *Liveness) pointerMap(liveout bvec, vars []*Node, args, locals bvec) { for i := int32(0); ; i++ { i = liveout.Next(i) if i < 0 { @@ -465,12 +438,10 @@ func onebitlivepointermap(lv *Liveness, liveout bvec, vars []*Node, args bvec, l node := vars[i] switch node.Class() { case PAUTO: - xoffset = node.Xoffset + lv.stkptrsize - onebitwalktype1(node.Type, &xoffset, locals) + onebitwalktype1(node.Type, node.Xoffset+lv.stkptrsize, locals) case PPARAM, PPARAMOUT: - xoffset = node.Xoffset - onebitwalktype1(node.Type, &xoffset, args) + onebitwalktype1(node.Type, node.Xoffset, args) } } } @@ -484,7 +455,7 @@ func issafepoint(v *ssa.Value) bool { // Initializes the sets for solving the live variables. Visits all the // instructions in each basic block to summarizes the information at each basic // block -func livenessprologue(lv *Liveness) { +func (lv *Liveness) prologue() { lv.initcache() for _, b := range lv.f.Blocks { @@ -518,7 +489,7 @@ func livenessprologue(lv *Liveness) { } // Solve the liveness dataflow equations. -func livenesssolve(lv *Liveness) { +func (lv *Liveness) solve() { // These temporary bitvectors exist to avoid successive allocations and // frees within the loop. newlivein := bvalloc(int32(len(lv.vars))) @@ -618,7 +589,7 @@ func livenesssolve(lv *Liveness) { // Visits all instructions in a basic block and computes a bit vector of live // variables at each safe point locations. 
-func livenessepilogue(lv *Liveness) { +func (lv *Liveness) epilogue() { nvars := int32(len(lv.vars)) liveout := bvalloc(nvars) any := bvalloc(nvars) @@ -721,7 +692,7 @@ func livenessepilogue(lv *Liveness) { for _, b := range lv.f.Blocks { be := lv.blockEffects(b) - // walk backward, emit pcdata and populate the maps + // walk backward, construct maps at each safe point index := int32(be.lastbitmapindex) if index < 0 { // the first block we encounter should have the ATEXT so @@ -924,13 +895,7 @@ func clobberWalk(b *ssa.Block, v *Node, offset int64, t *types.Type) { // clobberPtr generates a clobber of the pointer at offset offset in v. // The clobber instruction is added at the end of b. func clobberPtr(b *ssa.Block, v *Node, offset int64) { - var aux interface{} - if v.Class() == PAUTO { - aux = &ssa.AutoSymbol{Node: v} - } else { - aux = &ssa.ArgSymbol{Node: v} - } - b.NewValue0IA(src.NoXPos, ssa.OpClobber, types.TypeVoid, offset, aux) + b.NewValue0IA(src.NoXPos, ssa.OpClobber, types.TypeVoid, offset, v) } func (lv *Liveness) avarinitanyall(b *ssa.Block, any, all bvec) { @@ -988,7 +953,7 @@ func hashbitmap(h uint32, bv bvec) uint32 { // is actually a net loss: we save about 50k of argument bitmaps but the new // PCDATA tables cost about 100k. So for now we keep using a single index for // both bitmap lists. -func livenesscompact(lv *Liveness) { +func (lv *Liveness) compact() { // Linear probing hash table of bitmaps seen so far. // The hash table has 4n entries to keep the linear // scan short. An entry of -1 indicates an empty slot. @@ -1047,7 +1012,8 @@ Outer: } lv.livevars = lv.livevars[:uniq] - // Rewrite PCDATA instructions to use new numbering. + // Record compacted stack map indexes for each value. + // These will later become PCDATA instructions. 
lv.showlive(nil, lv.livevars[0]) pos := 1 lv.stackMapIndex = make(map[*ssa.Value]int) @@ -1138,7 +1104,7 @@ func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bo // Prints the computed liveness information and inputs, for debugging. // This format synthesizes the information used during the multiple passes // into a single presentation. -func livenessprintdebug(lv *Liveness) { +func (lv *Liveness) printDebug() { fmt.Printf("liveness: %s\n", lv.fn.funcname()) pcdata := 0 @@ -1250,12 +1216,12 @@ func livenessprintdebug(lv *Liveness) { // first word dumped is the total number of bitmaps. The second word is the // length of the bitmaps. All bitmaps are assumed to be of equal length. The // remaining bytes are the raw bitmaps. -func livenessemit(lv *Liveness, argssym, livesym *obj.LSym) { - args := bvalloc(argswords(lv)) +func (lv *Liveness) emit(argssym, livesym *obj.LSym) { + args := bvalloc(lv.argWords()) aoff := duint32(argssym, 0, uint32(len(lv.livevars))) // number of bitmaps aoff = duint32(argssym, aoff, uint32(args.n)) // number of bits in each bitmap - locals := bvalloc(localswords(lv)) + locals := bvalloc(lv.localWords()) loff := duint32(livesym, 0, uint32(len(lv.livevars))) // number of bitmaps loff = duint32(livesym, loff, uint32(locals.n)) // number of bits in each bitmap @@ -1263,7 +1229,7 @@ func livenessemit(lv *Liveness, argssym, livesym *obj.LSym) { args.Clear() locals.Clear() - onebitlivepointermap(lv, live, lv.vars, args, locals) + lv.pointerMap(live, lv.vars, args, locals) aoff = dbvec(argssym, aoff, args) loff = dbvec(livesym, loff, locals) @@ -1288,18 +1254,18 @@ func liveness(e *ssafn, f *ssa.Func) map[*ssa.Value]int { lv := newliveness(e.curfn, f, vars, idx, e.stkptrsize) // Run the dataflow framework. 
- livenessprologue(lv) - livenesssolve(lv) - livenessepilogue(lv) - livenesscompact(lv) + lv.prologue() + lv.solve() + lv.epilogue() + lv.compact() lv.clobber() if debuglive >= 2 { - livenessprintdebug(lv) + lv.printDebug() } // Emit the live pointer map data structures if ls := e.curfn.Func.lsym; ls != nil { - livenessemit(lv, &ls.Func.GCArgs, &ls.Func.GCLocals) + lv.emit(&ls.Func.GCArgs, &ls.Func.GCLocals) } return lv.stackMapIndex } diff --git a/src/cmd/compile/internal/gc/racewalk.go b/src/cmd/compile/internal/gc/racewalk.go index cfb803187cd..4b92ce9e0ed 100644 --- a/src/cmd/compile/internal/gc/racewalk.go +++ b/src/cmd/compile/internal/gc/racewalk.go @@ -70,11 +70,15 @@ func instrument(fn *Node) { nodpc := *nodfp nodpc.Type = types.Types[TUINTPTR] nodpc.Xoffset = int64(-Widthptr) + savedLineno := lineno + lineno = src.NoXPos nd := mkcall("racefuncenter", nil, nil, &nodpc) + fn.Func.Enter.Prepend(nd) nd = mkcall("racefuncexit", nil, nil) fn.Func.Exit.Append(nd) fn.Func.Dcl = append(fn.Func.Dcl, &nodpc) + lineno = savedLineno } if Debug['W'] != 0 { @@ -140,11 +144,9 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { case OAS, OAS2FUNC: instrumentnode(&n.Left, init, 1, 0) instrumentnode(&n.Right, init, 0, 0) - goto ret // can't matter case OCFUNC, OVARKILL, OVARLIVE: - goto ret case OBLOCK: ls := n.List.Slice() @@ -162,26 +164,25 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { instrumentnode(&ls[i], &ls[i].Ninit, 0, 0) afterCall = (op == OCALLFUNC || op == OCALLMETH || op == OCALLINTER) } - goto ret case ODEFER: instrumentnode(&n.Left, init, 0, 0) - goto ret case OPROC: instrumentnode(&n.Left, init, 0, 0) - goto ret case OCALLINTER: instrumentnode(&n.Left, init, 0, 0) - goto ret - // Instrument dst argument of runtime.writebarrier* calls - // as we do not instrument runtime code. - // typedslicecopy is instrumented in runtime. 
case OCALLFUNC: + // Note that runtime.typedslicecopy is the only + // assignment-like function call in the AST at this + // point (between walk and SSA); since we don't + // instrument it here, typedslicecopy is manually + // instrumented in runtime. Calls to the write barrier + // and typedmemmove are created later by SSA, so those + // still appear as OAS nodes at this point. instrumentnode(&n.Left, init, 0, 0) - goto ret case ONOT, OMINUS, @@ -190,28 +191,23 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { OIMAG, OCOM: instrumentnode(&n.Left, init, wr, 0) - goto ret case ODOTINTER: instrumentnode(&n.Left, init, 0, 0) - goto ret case ODOT: instrumentnode(&n.Left, init, 0, 1) callinstr(&n, init, wr, skip) - goto ret case ODOTPTR: // dst = (*x).f with implicit *; otherwise it's ODOT+OIND instrumentnode(&n.Left, init, 0, 0) callinstr(&n, init, wr, skip) - goto ret case OIND: // *p instrumentnode(&n.Left, init, 0, 0) callinstr(&n, init, wr, skip) - goto ret case OSPTR, OLEN, OCAP: instrumentnode(&n.Left, init, 0, 0) @@ -223,8 +219,6 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { callinstr(&n1, init, 0, skip) } - goto ret - case OLSH, ORSH, OAND, @@ -243,7 +237,6 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { OCOMPLEX: instrumentnode(&n.Left, init, wr, 0) instrumentnode(&n.Right, init, wr, 0) - goto ret case OANDAND, OOROR: instrumentnode(&n.Left, init, wr, 0) @@ -254,24 +247,18 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { // so instrumentation goes to n->right->ninit, not init. 
instrumentnode(&n.Right, &n.Right.Ninit, wr, 0) - goto ret - case ONAME: callinstr(&n, init, wr, skip) - goto ret case OCONV: instrumentnode(&n.Left, init, wr, 0) - goto ret case OCONVNOP: instrumentnode(&n.Left, init, wr, 0) - goto ret case ODIV, OMOD: instrumentnode(&n.Left, init, wr, 0) instrumentnode(&n.Right, init, wr, 0) - goto ret case OINDEX: if !n.Left.Type.IsArray() { @@ -281,14 +268,13 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { instrumentnode(&n.Left, init, wr, 0) instrumentnode(&n.Right, init, 0, 0) - goto ret + break } instrumentnode(&n.Right, init, 0, 0) if !n.Left.Type.IsString() { callinstr(&n, init, wr, skip) } - goto ret case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR: instrumentnode(&n.Left, init, 0, 0) @@ -297,34 +283,26 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { instrumentnode(&high, init, 0, 0) instrumentnode(&max, init, 0, 0) n.SetSliceBounds(low, high, max) - goto ret case OADDR: instrumentnode(&n.Left, init, 0, 1) - goto ret // n->left is Type* which is not interesting. 
case OEFACE: instrumentnode(&n.Right, init, 0, 0) - goto ret - case OITAB, OIDATA: instrumentnode(&n.Left, init, 0, 0) - goto ret case OSTRARRAYBYTETMP: instrumentnode(&n.Left, init, 0, 0) - goto ret case OAS2DOTTYPE: instrumentnode(&n.Left, init, 1, 0) instrumentnode(&n.Right, init, 0, 0) - goto ret case ODOTTYPE, ODOTTYPE2: instrumentnode(&n.Left, init, 0, 0) - goto ret // should not appear in AST by now case OSEND, @@ -332,7 +310,6 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { OCLOSE, ONEW, OXCASE, - OXFALL, OCASE, OPANIC, ORECOVER, @@ -377,13 +354,11 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { if n.Right != nil { instrumentnode(&n.Right, &n.Right.Ninit, 0, 0) } - goto ret case OIF, OSWITCH: if n.Left != nil { instrumentnode(&n.Left, &n.Left.Ninit, 0, 0) } - goto ret // just do generic traversal case OCALLMETH, @@ -396,7 +371,6 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { OFALL, OGOTO, OLABEL: - goto ret // does not require instrumentation case OPRINT, // don't bother instrumenting it @@ -412,10 +386,8 @@ func instrumentnode(np **Node, init *Nodes, wr int, skip int) { ONONAME, OLITERAL, OTYPESW: // ignored by code generation, do not instrument. - goto ret } -ret: if n.Op != OBLOCK { // OBLOCK is handled above in a special way. instrumentlist(n.List, init) } @@ -462,6 +434,15 @@ func callinstr(np **Node, init *Nodes, wr int, skip int) bool { return false } t := n.Type + // dowidth may not have been called for PEXTERN. + dowidth(t) + w := t.Width + if w == BADWIDTH { + Fatalf("instrument: %v badwidth", t) + } + if w == 0 { + return false // can't race on zero-sized things + } if isartificial(n) { return false } @@ -479,9 +460,15 @@ func callinstr(np **Node, init *Nodes, wr int, skip int) bool { // that has got a pointer inside. 
Whether it points to // the heap or not is impossible to know at compile time if class == PAUTOHEAP || class == PEXTERN || b.Op == OINDEX || b.Op == ODOTPTR || b.Op == OIND { - hascalls := 0 - foreach(n, hascallspred, &hascalls) - if hascalls != 0 { + hasCalls := false + inspect(n, func(n *Node) bool { + switch n.Op { + case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER: + hasCalls = true + } + return !hasCalls + }) + if hasCalls { n = detachexpr(n, init) *np = n } @@ -494,26 +481,19 @@ func callinstr(np **Node, init *Nodes, wr int, skip int) bool { if wr != 0 { name = "msanwrite" } - // dowidth may not have been called for PEXTERN. - dowidth(t) - w := t.Width - if w == BADWIDTH { - Fatalf("instrument: %v badwidth", t) - } f = mkcall(name, nil, init, uintptraddr(n), nodintconst(w)) - } else if flag_race && (t.IsStruct() || t.IsArray()) { + } else if flag_race && t.NumComponents() > 1 { + // for composite objects we have to write every address + // because a write might happen to any subobject. + // composites with only one element don't have subobjects, though. name := "racereadrange" if wr != 0 { name = "racewriterange" } - // dowidth may not have been called for PEXTERN. - dowidth(t) - w := t.Width - if w == BADWIDTH { - Fatalf("instrument: %v badwidth", t) - } f = mkcall(name, nil, init, uintptraddr(n), nodintconst(w)) } else if flag_race { + // for non-composite objects we can write just the start + // address, as any write must write the first byte. 
name := "raceread" if wr != 0 { name = "racewrite" @@ -552,10 +532,6 @@ func makeaddable(n *Node) { makeaddable(n.Left) // nothing to do - case ODOTPTR: - fallthrough - default: - break } } @@ -580,34 +556,6 @@ func detachexpr(n *Node, init *Nodes) *Node { return ind } -func foreachnode(n *Node, f func(*Node, interface{}), c interface{}) { - if n != nil { - f(n, c) - } -} - -func foreachlist(l Nodes, f func(*Node, interface{}), c interface{}) { - for _, n := range l.Slice() { - foreachnode(n, f, c) - } -} - -func foreach(n *Node, f func(*Node, interface{}), c interface{}) { - foreachlist(n.Ninit, f, c) - foreachnode(n.Left, f, c) - foreachnode(n.Right, f, c) - foreachlist(n.List, f, c) - foreachlist(n.Nbody, f, c) - foreachlist(n.Rlist, f, c) -} - -func hascallspred(n *Node, c interface{}) { - switch n.Op { - case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER: - (*c.(*int))++ - } -} - // appendinit is like addinit in subr.go // but appends rather than prepends. func appendinit(np **Node, init Nodes) { diff --git a/src/cmd/compile/internal/gc/range.go b/src/cmd/compile/internal/gc/range.go index 032601ca3df..db852e83a2b 100644 --- a/src/cmd/compile/internal/gc/range.go +++ b/src/cmd/compile/internal/gc/range.go @@ -7,19 +7,12 @@ package gc import ( "cmd/compile/internal/types" "cmd/internal/objabi" + "cmd/internal/sys" "unicode/utf8" ) // range func typecheckrange(n *Node) { - var toomany int - var why string - var t1 *types.Type - var t2 *types.Type - var v1 *Node - var v2 *Node - var ls []*Node - // Typechecking order is important here: // 0. first typecheck range expression (slice/map/chan), // it is evaluated only once and so logically it is not part of the loop. @@ -29,15 +22,31 @@ func typecheckrange(n *Node) { // 2. decldepth++ to denote loop body. // 3. typecheck body. // 4. decldepth--. 
+ typecheckrangeExpr(n) + // second half of dance, the first half being typecheckrangeExpr + n.SetTypecheck(1) + ls := n.List.Slice() + for i1, n1 := range ls { + if n1.Typecheck() == 0 { + ls[i1] = typecheck(ls[i1], Erv|Easgn) + } + } + + decldepth++ + typecheckslice(n.Nbody.Slice(), Etop) + decldepth-- +} + +func typecheckrangeExpr(n *Node) { n.Right = typecheck(n.Right, Erv) t := n.Right.Type if t == nil { - goto out + return } // delicate little dance. see typecheckas2 - ls = n.List.Slice() + ls := n.List.Slice() for i1, n1 := range ls { if n1.Name == nil || n1.Name.Defn != n { ls[i1] = typecheck(ls[i1], Erv|Easgn) @@ -49,11 +58,12 @@ func typecheckrange(n *Node) { } n.Type = t - toomany = 0 + var t1, t2 *types.Type + toomany := false switch t.Etype { default: - yyerror("cannot range over %L", n.Right) - goto out + yyerrorl(n.Pos, "cannot range over %L", n.Right) + return case TARRAY, TSLICE: t1 = types.Types[TINT] @@ -65,14 +75,14 @@ func typecheckrange(n *Node) { case TCHAN: if !t.ChanDir().CanRecv() { - yyerror("invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type) - goto out + yyerrorl(n.Pos, "invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type) + return } t1 = t.Elem() t2 = nil if n.List.Len() == 2 { - toomany = 1 + toomany = true } case TSTRING: @@ -80,15 +90,14 @@ func typecheckrange(n *Node) { t2 = types.Runetype } - if n.List.Len() > 2 || toomany != 0 { - yyerror("too many variables in range") + if n.List.Len() > 2 || toomany { + yyerrorl(n.Pos, "too many variables in range") } - v1 = nil + var v1, v2 *Node if n.List.Len() != 0 { v1 = n.List.First() } - v2 = nil if n.List.Len() > 1 { v2 = n.List.Second() } @@ -104,11 +113,12 @@ func typecheckrange(n *Node) { v2 = nil } + var why string if v1 != nil { if v1.Name != nil && v1.Name.Defn == n { v1.Type = t1 } else if v1.Type != nil && assignop(t1, v1.Type, &why) == 0 { - yyerror("cannot assign type %v to %L in range%s", t1, v1, why) + 
yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why) } checkassign(n, v1) } @@ -117,24 +127,26 @@ func typecheckrange(n *Node) { if v2.Name != nil && v2.Name.Defn == n { v2.Type = t2 } else if v2.Type != nil && assignop(t2, v2.Type, &why) == 0 { - yyerror("cannot assign type %v to %L in range%s", t2, v2, why) + yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why) } checkassign(n, v2) } +} - // second half of dance -out: - n.SetTypecheck(1) - ls = n.List.Slice() - for i1, n1 := range ls { - if n1.Typecheck() == 0 { - ls[i1] = typecheck(ls[i1], Erv|Easgn) +func cheapComputableIndex(width int64) bool { + switch thearch.LinkArch.Family { + // MIPS does not have R+R addressing + // Arm64 may lack ability to generate this code in our assembler, + // but the architecture supports it. + case sys.PPC64, sys.S390X: + return width == 1 + case sys.AMD64, sys.I386, sys.ARM64, sys.ARM: + switch width { + case 1, 2, 4, 8: + return true } } - - decldepth++ - typecheckslice(n.Nbody.Slice(), Etop) - decldepth-- + return false } // walkrange transforms various forms of ORANGE into @@ -155,27 +167,36 @@ func walkrange(n *Node) *Node { lno := setlineno(a) n.Right = nil - var v1 *Node - if n.List.Len() != 0 { + var v1, v2 *Node + l := n.List.Len() + if l > 0 { v1 = n.List.First() } - var v2 *Node - if n.List.Len() > 1 && !isblank(n.List.Second()) { + + if l > 1 { v2 = n.List.Second() } + if isblank(v2) { + v2 = nil + } + + if isblank(v1) && v2 == nil { + v1 = nil + } + if v1 == nil && v2 != nil { Fatalf("walkrange: v2 != nil while v1 == nil") } - var ifGuard *Node - - translatedLoopOp := OFOR - // n.List has no meaning anymore, clear it // to avoid erroneous processing by racewalk. 
n.List.Set(nil) + var ifGuard *Node + + translatedLoopOp := OFOR + var body []*Node var init []*Node switch t.Etype { @@ -193,65 +214,83 @@ func walkrange(n *Node) *Node { hv1 := temp(types.Types[TINT]) hn := temp(types.Types[TINT]) - var hp *Node init = append(init, nod(OAS, hv1, nil)) init = append(init, nod(OAS, hn, nod(OLEN, ha, nil))) - if v2 != nil { - hp = temp(types.NewPtr(n.Type.Elem())) - tmp := nod(OINDEX, ha, nodintconst(0)) - tmp.SetBounded(true) - init = append(init, nod(OAS, hp, nod(OADDR, tmp, nil))) - } - n.Left = nod(OLT, hv1, hn) n.Right = nod(OAS, hv1, nod(OADD, hv1, nodintconst(1))) - if v1 == nil { - body = nil - } else if v2 == nil { - body = []*Node{nod(OAS, v1, hv1)} - } else { // for i,a := range thing { body } - if objabi.Preemptibleloops_enabled != 0 { - // Doing this transformation makes a bounds check removal less trivial; see #20711 - // TODO enhance the preemption check insertion so that this transformation is not necessary. - ifGuard = nod(OIF, nil, nil) - ifGuard.Left = nod(OLT, hv1, hn) - translatedLoopOp = OFORUNTIL - } + // for range ha { body } + if v1 == nil { + break + } + + // for v1 := range ha { body } + if v2 == nil { + body = []*Node{nod(OAS, v1, hv1)} + break + } + + // for v1, v2 := range ha { body } + if cheapComputableIndex(n.Type.Elem().Width) { + // v1, v2 = hv1, ha[hv1] + tmp := nod(OINDEX, ha, hv1) + tmp.SetBounded(true) + // Use OAS2 to correctly handle assignments + // of the form "v1, a[v1] := range". a := nod(OAS2, nil, nil) a.List.Set2(v1, v2) - a.Rlist.Set2(hv1, nod(OIND, hp, nil)) + a.Rlist.Set2(hv1, tmp) body = []*Node{a} - - // Advance pointer as part of increment. - // We used to advance the pointer before executing the loop body, - // but doing so would make the pointer point past the end of the - // array during the final iteration, possibly causing another unrelated - // piece of memory not to be garbage collected until the loop finished. 
- // Advancing during the increment ensures that the pointer p only points - // pass the end of the array during the final "p++; i++; if(i >= len(x)) break;", - // after which p is dead, so it cannot confuse the collector. - tmp := nod(OADD, hp, nodintconst(t.Elem().Width)) - - tmp.Type = hp.Type - tmp.SetTypecheck(1) - tmp.Right.Type = types.Types[types.Tptr] - tmp.Right.SetTypecheck(1) - a = nod(OAS, hp, tmp) - a = typecheck(a, Etop) - n.Right.Ninit.Set1(a) + break } + if objabi.Preemptibleloops_enabled != 0 { + // Doing this transformation makes a bounds check removal less trivial; see #20711 + // TODO enhance the preemption check insertion so that this transformation is not necessary. + ifGuard = nod(OIF, nil, nil) + ifGuard.Left = nod(OLT, hv1, hn) + translatedLoopOp = OFORUNTIL + } + + hp := temp(types.NewPtr(n.Type.Elem())) + tmp := nod(OINDEX, ha, nodintconst(0)) + tmp.SetBounded(true) + init = append(init, nod(OAS, hp, nod(OADDR, tmp, nil))) + + // Use OAS2 to correctly handle assignments + // of the form "v1, a[v1] := range". + a := nod(OAS2, nil, nil) + a.List.Set2(v1, v2) + a.Rlist.Set2(hv1, nod(OIND, hp, nil)) + body = append(body, a) + + // Advance pointer as part of increment. + // We used to advance the pointer before executing the loop body, + // but doing so would make the pointer point past the end of the + // array during the final iteration, possibly causing another unrelated + // piece of memory not to be garbage collected until the loop finished. + // Advancing during the increment ensures that the pointer p only points + // pass the end of the array during the final "p++; i++; if(i >= len(x)) break;", + // after which p is dead, so it cannot confuse the collector. 
+ tmp = nod(OADD, hp, nodintconst(t.Elem().Width)) + + tmp.Type = hp.Type + tmp.SetTypecheck(1) + tmp.Right.Type = types.Types[types.Tptr] + tmp.Right.SetTypecheck(1) + a = nod(OAS, hp, tmp) + a = typecheck(a, Etop) + n.Right.Ninit.Set1(a) + case TMAP: // orderstmt allocated the iterator for us. // we only use a once, so no copy needed. ha := a - th := hiter(t) hit := prealloc[n] - hit.Type = th + th := hit.Type n.Left = nil keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter valsym := th.Field(1).Sym // ditto diff --git a/src/cmd/compile/internal/gc/reflect.go b/src/cmd/compile/internal/gc/reflect.go index 47ac5418228..bbb263ee8d3 100644 --- a/src/cmd/compile/internal/gc/reflect.go +++ b/src/cmd/compile/internal/gc/reflect.go @@ -81,6 +81,7 @@ const ( func structfieldSize() int { return 3 * Widthptr } // Sizeof(runtime.structfield{}) func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{}) + func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{}) if t.Sym == nil && len(methods(t)) == 0 { return 0 @@ -95,7 +96,8 @@ func makefield(name string, t *types.Type) *types.Field { return f } -func mapbucket(t *types.Type) *types.Type { +// bmap makes the map bucket type given the type of the map. 
+func bmap(t *types.Type) *types.Type { if t.MapType().Bucket != nil { return t.MapType().Bucket } @@ -120,11 +122,13 @@ func mapbucket(t *types.Type) *types.Type { arr = types.NewArray(keytype, BUCKETSIZE) arr.SetNoalg(true) - field = append(field, makefield("keys", arr)) + keys := makefield("keys", arr) + field = append(field, keys) arr = types.NewArray(valtype, BUCKETSIZE) arr.SetNoalg(true) - field = append(field, makefield("values", arr)) + values := makefield("values", arr) + field = append(field, values) // Make sure the overflow pointer is the last memory in the struct, // because the runtime assumes it can use size-ptrSize as the @@ -143,7 +147,7 @@ func mapbucket(t *types.Type) *types.Type { // so if the struct needs 64-bit padding (because a key or value does) // then it would end with an extra 32-bit padding field. // Preempt that by emitting the padding here. - if int(t.Val().Align) > Widthptr || int(t.Key().Align) > Widthptr { + if int(valtype.Align) > Widthptr || int(keytype.Align) > Widthptr { field = append(field, makefield("pad", types.Types[TUINTPTR])) } @@ -154,22 +158,65 @@ func mapbucket(t *types.Type) *types.Type { // the type of the overflow field to uintptr in this case. // See comment on hmap.overflow in ../../../../runtime/hashmap.go. otyp := types.NewPtr(bucket) - if !types.Haspointers(t.Val()) && !types.Haspointers(t.Key()) && t.Val().Width <= MAXVALSIZE && t.Key().Width <= MAXKEYSIZE { + if !types.Haspointers(valtype) && !types.Haspointers(keytype) { otyp = types.Types[TUINTPTR] } - ovf := makefield("overflow", otyp) - field = append(field, ovf) + overflow := makefield("overflow", otyp) + field = append(field, overflow) // link up fields bucket.SetNoalg(true) - bucket.SetLocal(t.Local()) bucket.SetFields(field[:]) dowidth(bucket) + // Check invariants that map code depends on. 
+ if !IsComparable(t.Key()) { + Fatalf("unsupported map key type for %v", t) + } + if BUCKETSIZE < 8 { + Fatalf("bucket size too small for proper alignment") + } + if keytype.Align > BUCKETSIZE { + Fatalf("key align too big for %v", t) + } + if valtype.Align > BUCKETSIZE { + Fatalf("value align too big for %v", t) + } + if keytype.Width > MAXKEYSIZE { + Fatalf("key size to large for %v", t) + } + if valtype.Width > MAXVALSIZE { + Fatalf("value size to large for %v", t) + } + if t.Key().Width > MAXKEYSIZE && !keytype.IsPtr() { + Fatalf("key indirect incorrect for %v", t) + } + if t.Val().Width > MAXVALSIZE && !valtype.IsPtr() { + Fatalf("value indirect incorrect for %v", t) + } + if keytype.Width%int64(keytype.Align) != 0 { + Fatalf("key size not a multiple of key align for %v", t) + } + if valtype.Width%int64(valtype.Align) != 0 { + Fatalf("value size not a multiple of value align for %v", t) + } + if bucket.Align%keytype.Align != 0 { + Fatalf("bucket align not multiple of key align %v", t) + } + if bucket.Align%valtype.Align != 0 { + Fatalf("bucket align not multiple of value align %v", t) + } + if keys.Offset%int64(keytype.Align) != 0 { + Fatalf("bad alignment of keys in bmap for %v", t) + } + if values.Offset%int64(valtype.Align) != 0 { + Fatalf("bad alignment of values in bmap for %v", t) + } + // Double-check that overflow field is final memory in struct, // with no padding at end. See comment above. - if ovf.Offset != bucket.Width-int64(Widthptr) { - Fatalf("bad math in mapbucket for %v", t) + if overflow.Offset != bucket.Width-int64(Widthptr) { + Fatalf("bad offset of overflow in bmap for %v", t) } t.MapType().Bucket = bucket @@ -178,82 +225,114 @@ func mapbucket(t *types.Type) *types.Type { return bucket } -// Builds a type representing a Hmap structure for the given map type. -// Make sure this stays in sync with ../../../../runtime/hashmap.go! +// hmap builds a type representing a Hmap structure for the given map type. 
+// Make sure this stays in sync with ../../../../runtime/hashmap.go. func hmap(t *types.Type) *types.Type { if t.MapType().Hmap != nil { return t.MapType().Hmap } - bucket := mapbucket(t) + bmap := bmap(t) + + // build a struct: + // type hmap struct { + // count int + // flags uint8 + // B uint8 + // noverflow uint16 + // hash0 uint32 + // buckets *bmap + // oldbuckets *bmap + // nevacuate uintptr + // extra unsafe.Pointer // *mapextra + // } + // must match ../../../../runtime/hashmap.go:hmap. fields := []*types.Field{ makefield("count", types.Types[TINT]), makefield("flags", types.Types[TUINT8]), makefield("B", types.Types[TUINT8]), makefield("noverflow", types.Types[TUINT16]), - makefield("hash0", types.Types[TUINT32]), - makefield("buckets", types.NewPtr(bucket)), - makefield("oldbuckets", types.NewPtr(bucket)), + makefield("hash0", types.Types[TUINT32]), // Used in walk.go for OMAKEMAP. + makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP. + makefield("oldbuckets", types.NewPtr(bmap)), makefield("nevacuate", types.Types[TUINTPTR]), - makefield("overflow", types.Types[TUNSAFEPTR]), + makefield("extra", types.Types[TUNSAFEPTR]), } - h := types.New(TSTRUCT) - h.SetNoalg(true) - h.SetLocal(t.Local()) - h.SetFields(fields) - dowidth(h) - t.MapType().Hmap = h - h.StructType().Map = t - return h + hmap := types.New(TSTRUCT) + hmap.SetNoalg(true) + hmap.SetFields(fields) + dowidth(hmap) + + // The size of hmap should be 48 bytes on 64 bit + // and 28 bytes on 32 bit platforms. + if size := int64(8 + 5*Widthptr); hmap.Width != size { + Fatalf("hmap size not correct: got %d, want %d", hmap.Width, size) + } + + t.MapType().Hmap = hmap + hmap.StructType().Map = t + return hmap } +// hiter builds a type representing an Hiter structure for the given map type. +// Make sure this stays in sync with ../../../../runtime/hashmap.go. 
func hiter(t *types.Type) *types.Type { if t.MapType().Hiter != nil { return t.MapType().Hiter } + hmap := hmap(t) + bmap := bmap(t) + // build a struct: - // hiter { - // key *Key - // val *Value - // t *MapType - // h *Hmap - // buckets *Bucket - // bptr *Bucket - // overflow0 unsafe.Pointer - // overflow1 unsafe.Pointer + // type hiter struct { + // key *Key + // val *Value + // t unsafe.Pointer // *MapType + // h *hmap + // buckets *bmap + // bptr *bmap + // overflow unsafe.Pointer // *[]*bmap + // oldoverflow unsafe.Pointer // *[]*bmap // startBucket uintptr - // stuff uintptr - // bucket uintptr + // offset uint8 + // wrapped bool + // B uint8 + // i uint8 + // bucket uintptr // checkBucket uintptr // } // must match ../../../../runtime/hashmap.go:hiter. - var field [12]*types.Field - field[0] = makefield("key", types.NewPtr(t.Key())) - field[1] = makefield("val", types.NewPtr(t.Val())) - field[2] = makefield("t", types.NewPtr(types.Types[TUINT8])) - field[3] = makefield("h", types.NewPtr(hmap(t))) - field[4] = makefield("buckets", types.NewPtr(mapbucket(t))) - field[5] = makefield("bptr", types.NewPtr(mapbucket(t))) - field[6] = makefield("overflow0", types.Types[TUNSAFEPTR]) - field[7] = makefield("overflow1", types.Types[TUNSAFEPTR]) - field[8] = makefield("startBucket", types.Types[TUINTPTR]) - field[9] = makefield("stuff", types.Types[TUINTPTR]) // offset+wrapped+B+I - field[10] = makefield("bucket", types.Types[TUINTPTR]) - field[11] = makefield("checkBucket", types.Types[TUINTPTR]) + fields := []*types.Field{ + makefield("key", types.NewPtr(t.Key())), // Used in range.go for TMAP. + makefield("val", types.NewPtr(t.Val())), // Used in range.go for TMAP. 
+ makefield("t", types.Types[TUNSAFEPTR]), + makefield("h", types.NewPtr(hmap)), + makefield("buckets", types.NewPtr(bmap)), + makefield("bptr", types.NewPtr(bmap)), + makefield("overflow", types.Types[TUNSAFEPTR]), + makefield("oldoverflow", types.Types[TUNSAFEPTR]), + makefield("startBucket", types.Types[TUINTPTR]), + makefield("offset", types.Types[TUINT8]), + makefield("wrapped", types.Types[TBOOL]), + makefield("B", types.Types[TUINT8]), + makefield("i", types.Types[TUINT8]), + makefield("bucket", types.Types[TUINTPTR]), + makefield("checkBucket", types.Types[TUINTPTR]), + } // build iterator struct holding the above fields - i := types.New(TSTRUCT) - i.SetNoalg(true) - i.SetFields(field[:]) - dowidth(i) - if i.Width != int64(12*Widthptr) { - Fatalf("hash_iter size not correct %d %d", i.Width, 12*Widthptr) + hiter := types.New(TSTRUCT) + hiter.SetNoalg(true) + hiter.SetFields(fields) + dowidth(hiter) + if hiter.Width != int64(12*Widthptr) { + Fatalf("hash_iter size not correct %d %d", hiter.Width, 12*Widthptr) } - t.MapType().Hiter = i - i.StructType().Map = t - return i + t.MapType().Hiter = hiter + hiter.StructType().Map = t + return hiter } // f is method type, with receiver. 
@@ -359,18 +438,18 @@ func methods(t *types.Type) []*Sig { if !sig.isym.Siggen() { sig.isym.SetSiggen(true) if !eqtype(this, it) || this.Width < int64(Widthptr) { - compiling_wrappers = 1 - genwrapper(it, f, sig.isym, 1) - compiling_wrappers = 0 + compiling_wrappers = true + genwrapper(it, f, sig.isym, true) + compiling_wrappers = false } } if !sig.tsym.Siggen() { sig.tsym.SetSiggen(true) if !eqtype(this, t) { - compiling_wrappers = 1 - genwrapper(t, f, sig.tsym, 0) - compiling_wrappers = 0 + compiling_wrappers = true + genwrapper(t, f, sig.tsym, false) + compiling_wrappers = false } } } @@ -421,7 +500,7 @@ func imethods(t *types.Type) []*Sig { isym := methodsym(method, t, false) if !isym.Siggen() { isym.SetSiggen(true) - genwrapper(t, f, isym, 0) + genwrapper(t, f, isym, false) } } @@ -492,32 +571,12 @@ func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int { return dsymptrOff(s, ot, pkg.Pathsym, 0) } -// isExportedField reports whether a struct field is exported. -// It also returns the package to use for PkgPath for an unexported field. -func isExportedField(ft *types.Field) (bool, *types.Pkg) { - if ft.Sym != nil && ft.Embedded == 0 { - return exportname(ft.Sym.Name), ft.Sym.Pkg - } else { - if ft.Type.Sym != nil && - (ft.Type.Sym.Pkg == builtinpkg || !exportname(ft.Type.Sym.Name)) { - return false, ft.Type.Sym.Pkg - } else { - return true, nil - } - } -} - // dnameField dumps a reflect.name for a struct field. 
func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int { - var name string - if ft.Sym != nil { - name = ft.Sym.Name + if !exportname(ft.Sym.Name) && ft.Sym.Pkg != spkg { + Fatalf("package mismatch for %v", ft.Sym) } - isExported, fpkg := isExportedField(ft) - if isExported || fpkg == spkg { - fpkg = nil - } - nsym := dname(name, ft.Note, fpkg, isExported) + nsym := dname(ft.Sym.Name, ft.Note, nil, exportname(ft.Sym.Name)) return dsymptr(lsym, ot, nsym, 0) } @@ -665,7 +724,7 @@ func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int { nsym := dname(a.name, "", pkg, exported) ot = dsymptrOff(lsym, ot, nsym, 0) - ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype).Linksym()) + ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype)) ot = dmethodptrOff(lsym, ot, a.isym.Linksym()) ot = dmethodptrOff(lsym, ot, a.tsym.Linksym()) } @@ -788,7 +847,7 @@ func dcommontype(lsym *obj.LSym, ot int, t *types.Type) int { sizeofAlg := 2 * Widthptr if algarray == nil { - algarray = Sysfunc("algarray") + algarray = sysfunc("algarray") } dowidth(t) alg := algtype(t) @@ -804,7 +863,7 @@ func dcommontype(lsym *obj.LSym, ot int, t *types.Type) int { if t.Sym != nil || methods(tptr) != nil { sptrWeak = false } - sptr = dtypesym(tptr).Linksym() + sptr = dtypesym(tptr) } gcsym, useGCProg, ptrdata := dgcsym(t) @@ -901,10 +960,17 @@ func dcommontype(lsym *obj.LSym, ot int, t *types.Type) int { return ot } +// typeHasNoAlg returns whether t does not have any associated hash/eq +// algorithms because t, or some component of t, is marked Noalg. +func typeHasNoAlg(t *types.Type) bool { + a, bad := algtype1(t) + return a == ANOEQ && bad.Noalg() +} + func typesymname(t *types.Type) string { name := t.ShortString() // Use a separate symbol name for Noalg types for #17752. - if a, bad := algtype1(t); a == ANOEQ && bad.Noalg() { + if typeHasNoAlg(t) { name = "noalg." 
+ name } return name @@ -1079,15 +1145,16 @@ func formalType(t *types.Type) *types.Type { return t } -func dtypesym(t *types.Type) *types.Sym { +func dtypesym(t *types.Type) *obj.LSym { t = formalType(t) if t.IsUntyped() { Fatalf("dtypesym %v", t) } s := typesym(t) + lsym := s.Linksym() if s.Siggen() { - return s + return lsym } s.SetSiggen(true) @@ -1104,21 +1171,18 @@ func dtypesym(t *types.Type) *types.Sym { dupok = obj.DUPOK } - if myimportpath == "runtime" && (tbase == types.Types[tbase.Etype] || tbase == types.Bytetype || tbase == types.Runetype || tbase == types.Errortype) { // int, float, etc - goto ok + if myimportpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc + // named types from other files are defined only by those files + if tbase.Sym != nil && tbase.Sym.Pkg != localpkg { + return lsym + } + // TODO(mdempsky): Investigate whether this can happen. + if isforw[tbase.Etype] { + return lsym + } } - // named types from other files are defined only by those files - if tbase.Sym != nil && !tbase.Local() { - return s - } - if isforw[tbase.Etype] { - return s - } - -ok: ot := 0 - lsym := s.Linksym() switch t.Etype { default: ot = dcommontype(lsym, ot, t) @@ -1130,8 +1194,8 @@ ok: t2 := types.NewSlice(t.Elem()) s2 := dtypesym(t2) ot = dcommontype(lsym, ot, t) - ot = dsymptr(lsym, ot, s1.Linksym(), 0) - ot = dsymptr(lsym, ot, s2.Linksym(), 0) + ot = dsymptr(lsym, ot, s1, 0) + ot = dsymptr(lsym, ot, s2, 0) ot = duintptr(lsym, ot, uint64(t.NumElem())) ot = dextratype(lsym, ot, t, 0) @@ -1139,14 +1203,14 @@ ok: // ../../../../runtime/type.go:/sliceType s1 := dtypesym(t.Elem()) ot = dcommontype(lsym, ot, t) - ot = dsymptr(lsym, ot, s1.Linksym(), 0) + ot = dsymptr(lsym, ot, s1, 0) ot = dextratype(lsym, ot, t, 0) case TCHAN: // ../../../../runtime/type.go:/chanType s1 := dtypesym(t.Elem()) ot = dcommontype(lsym, ot, t) - ot = dsymptr(lsym, ot, s1.Linksym(), 
0) + ot = dsymptr(lsym, ot, s1, 0) ot = duintptr(lsym, ot, uint64(t.ChanDir())) ot = dextratype(lsym, ot, t, 0) @@ -1164,8 +1228,8 @@ ok: } ot = dcommontype(lsym, ot, t) - inCount := t.Recvs().NumFields() + t.Params().NumFields() - outCount := t.Results().NumFields() + inCount := t.NumRecvs() + t.NumParams() + outCount := t.NumResults() if isddd { outCount |= 1 << 15 } @@ -1175,18 +1239,18 @@ ok: ot += 4 // align for *rtype } - dataAdd := (inCount + t.Results().NumFields()) * Widthptr + dataAdd := (inCount + t.NumResults()) * Widthptr ot = dextratype(lsym, ot, t, dataAdd) // Array of rtype pointers follows funcType. for _, t1 := range t.Recvs().Fields().Slice() { - ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0) + ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0) } for _, t1 := range t.Params().Fields().Slice() { - ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0) + ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0) } for _, t1 := range t.Results().Fields().Slice() { - ot = dsymptr(lsym, ot, dtypesym(t1.Type).Linksym(), 0) + ot = dsymptr(lsym, ot, dtypesym(t1.Type), 0) } case TINTER: @@ -1221,20 +1285,20 @@ ok: nsym := dname(a.name, "", pkg, exported) ot = dsymptrOff(lsym, ot, nsym, 0) - ot = dsymptrOff(lsym, ot, dtypesym(a.type_).Linksym(), 0) + ot = dsymptrOff(lsym, ot, dtypesym(a.type_), 0) } // ../../../../runtime/type.go:/mapType case TMAP: s1 := dtypesym(t.Key()) s2 := dtypesym(t.Val()) - s3 := dtypesym(mapbucket(t)) + s3 := dtypesym(bmap(t)) s4 := dtypesym(hmap(t)) ot = dcommontype(lsym, ot, t) - ot = dsymptr(lsym, ot, s1.Linksym(), 0) - ot = dsymptr(lsym, ot, s2.Linksym(), 0) - ot = dsymptr(lsym, ot, s3.Linksym(), 0) - ot = dsymptr(lsym, ot, s4.Linksym(), 0) + ot = dsymptr(lsym, ot, s1, 0) + ot = dsymptr(lsym, ot, s2, 0) + ot = dsymptr(lsym, ot, s3, 0) + ot = dsymptr(lsym, ot, s4, 0) if t.Key().Width > MAXKEYSIZE { ot = duint8(lsym, ot, uint8(Widthptr)) ot = duint8(lsym, ot, 1) // indirect @@ -1251,7 +1315,7 @@ ok: ot = duint8(lsym, ot, 0) // not indirect 
} - ot = duint16(lsym, ot, uint16(mapbucket(t).Width)) + ot = duint16(lsym, ot, uint16(bmap(t).Width)) ot = duint8(lsym, ot, uint8(obj.Bool2int(isreflexive(t.Key())))) ot = duint8(lsym, ot, uint8(obj.Bool2int(needkeyupdate(t.Key())))) ot = dextratype(lsym, ot, t, 0) @@ -1269,12 +1333,13 @@ ok: s1 := dtypesym(t.Elem()) ot = dcommontype(lsym, ot, t) - ot = dsymptr(lsym, ot, s1.Linksym(), 0) + ot = dsymptr(lsym, ot, s1, 0) ot = dextratype(lsym, ot, t, 0) // ../../../../runtime/type.go:/structType // for security, only the exported fields. case TSTRUCT: + fields := t.Fields().Slice() // omitFieldForAwfulBoringCryptoKludge reports whether // the field t should be omitted from the reflect data. @@ -1294,46 +1359,44 @@ ok: } return strings.HasPrefix(path, "crypto/") } - - n := 0 - - for _, t1 := range t.Fields().Slice() { - if omitFieldForAwfulBoringCryptoKludge(t1) { - continue + newFields := fields[:0:0] + for _, t1 := range fields { + if !omitFieldForAwfulBoringCryptoKludge(t1) { + newFields = append(newFields, t1) } + } + fields = newFields + + for _, t1 := range fields { dtypesym(t1.Type) - n++ } - ot = dcommontype(lsym, ot, t) - pkg := localpkg - if t.Sym != nil { - pkg = t.Sym.Pkg - } else { - // Unnamed type. Grab the package from the first field, if any. - for _, f := range t.Fields().Slice() { - if f.Embedded != 0 { - continue - } - pkg = f.Sym.Pkg + // All non-exported struct field names within a struct + // type must originate from a single package. By + // identifying and recording that package within the + // struct type descriptor, we can omit that + // information from the field descriptors. 
+ var spkg *types.Pkg + for _, f := range fields { + if !exportname(f.Sym.Name) { + spkg = f.Sym.Pkg break } } - ot = dgopkgpath(lsym, ot, pkg) - ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t)) - ot = duintptr(lsym, ot, uint64(n)) - ot = duintptr(lsym, ot, uint64(n)) - dataAdd := n * structfieldSize() + ot = dcommontype(lsym, ot, t) + ot = dgopkgpath(lsym, ot, spkg) + ot = dsymptr(lsym, ot, lsym, ot+3*Widthptr+uncommonSize(t)) + ot = duintptr(lsym, ot, uint64(len(fields))) + ot = duintptr(lsym, ot, uint64(len(fields))) + + dataAdd := len(fields) * structfieldSize() ot = dextratype(lsym, ot, t, dataAdd) - for _, f := range t.Fields().Slice() { - if omitFieldForAwfulBoringCryptoKludge(f) { - continue - } + for _, f := range fields { // ../../../../runtime/type.go:/structField - ot = dnameField(lsym, ot, pkg, f) - ot = dsymptr(lsym, ot, dtypesym(f.Type).Linksym(), 0) + ot = dnameField(lsym, ot, spkg, f) + ot = dsymptr(lsym, ot, dtypesym(f.Type), 0) offsetAnon := uint64(f.Offset) << 1 if offsetAnon>>1 != uint64(f.Offset) { Fatalf("%v: bad field offset for %s", t, f.Sym.Name) @@ -1365,9 +1428,13 @@ ok: keep = true } } + // Do not put Noalg types in typelinks. See issue #22605. 
+ if typeHasNoAlg(t) { + keep = false + } lsym.Set(obj.AttrMakeTypelink, keep) - return s + return lsym } // for each itabEntry, gather the methods on @@ -1434,7 +1501,7 @@ func itabsym(it *obj.LSym, offset int64) *obj.LSym { } // keep this arithmetic in sync with *itab layout - methodnum := int((offset - 3*int64(Widthptr) - 8) / int64(Widthptr)) + methodnum := int((offset - 2*int64(Widthptr) - 8) / int64(Widthptr)) if methodnum >= len(syms) { return nil } @@ -1483,23 +1550,19 @@ func dumptabs() { // type itab struct { // inter *interfacetype // _type *_type - // link *itab // hash uint32 - // bad bool - // inhash bool - // unused [2]byte + // _ [4]byte // fun [1]uintptr // variable sized // } - o := dsymptr(i.lsym, 0, dtypesym(i.itype).Linksym(), 0) - o = dsymptr(i.lsym, o, dtypesym(i.t).Linksym(), 0) - o += Widthptr // skip link field - o = duint32(i.lsym, o, typehash(i.t)) // copy of type hash - o += 4 // skip bad/inhash/unused fields - o += len(imethods(i.itype)) * Widthptr // skip fun method pointers - // at runtime the itab will contain pointers to types, other itabs and - // method functions. None are allocated on heap, so we can use obj.NOPTR. - ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.NOPTR)) - + o := dsymptr(i.lsym, 0, dtypesym(i.itype), 0) + o = dsymptr(i.lsym, o, dtypesym(i.t), 0) + o = duint32(i.lsym, o, typehash(i.t)) // copy of type hash + o += 4 // skip unused field + for _, fn := range genfun(i.t, i.itype) { + o = dsymptr(i.lsym, o, fn, 0) // method pointer for each method + } + // Nothing writes static itabs, so they are read only. 
+ ggloblsym(i.lsym, int32(o), int16(obj.DUPOK|obj.RODATA)) ilink := itablinkpkg.Lookup(i.t.ShortString() + "," + i.itype.ShortString()).Linksym() dsymptr(ilink, 0, i.lsym, 0) ggloblsym(ilink, int32(Widthptr), int16(obj.DUPOK|obj.RODATA)) @@ -1518,7 +1581,7 @@ func dumptabs() { // } nsym := dname(p.s.Name, "", nil, true) ot = dsymptrOff(s, ot, nsym, 0) - ot = dsymptrOff(s, ot, dtypesym(p.t).Linksym(), 0) + ot = dsymptrOff(s, ot, dtypesym(p.t), 0) } ggloblsym(s, int32(ot), int16(obj.RODATA)) @@ -1612,8 +1675,8 @@ func dalgsym(t *types.Type) *obj.LSym { s.SetAlgGen(true) if memhashvarlen == nil { - memhashvarlen = Sysfunc("memhash_varlen") - memequalvarlen = Sysfunc("memequal_varlen") + memhashvarlen = sysfunc("memhash_varlen") + memequalvarlen = sysfunc("memequal_varlen") } // make hash closure @@ -1743,8 +1806,7 @@ func fillptrmask(t *types.Type, ptrmask []byte) { } vec := bvalloc(8 * int32(len(ptrmask))) - xoffset := int64(0) - onebitwalktype1(t, &xoffset, vec) + onebitwalktype1(t, 0, vec) nptr := typeptrdata(t) / int64(Widthptr) for i := int64(0); i < nptr; i++ { diff --git a/src/cmd/compile/internal/gc/scope.go b/src/cmd/compile/internal/gc/scope.go index b0bc7f69086..ebdaa19994a 100644 --- a/src/cmd/compile/internal/gc/scope.go +++ b/src/cmd/compile/internal/gc/scope.go @@ -168,7 +168,7 @@ func (v varsByScopeAndOffset) Less(i, j int) bool { if v.scopes[i] != v.scopes[j] { return v.scopes[i] < v.scopes[j] } - return v.vars[i].Offset < v.vars[j].Offset + return v.vars[i].StackOffset < v.vars[j].StackOffset } func (v varsByScopeAndOffset) Swap(i, j int) { diff --git a/src/cmd/compile/internal/gc/scope_test.go b/src/cmd/compile/internal/gc/scope_test.go index 9113afe279b..5d44b7a4f4f 100644 --- a/src/cmd/compile/internal/gc/scope_test.go +++ b/src/cmd/compile/internal/gc/scope_test.go @@ -173,6 +173,18 @@ var testfile = []testline{ {line: " fi(p)", scopes: []int{1}}, {line: " }"}, {line: "}"}, + {line: "func TestCaptureVar(flag bool) func() int {"}, + {line: " a := 
1", vars: []string{"arg flag bool", "arg ~r1 func() int", "var a int"}}, + {line: " if flag {"}, + {line: " b := 2", scopes: []int{1}, vars: []string{"var b int", "var f func() int"}}, + {line: " f := func() int {", scopes: []int{1, 0}}, + {line: " return b + 1"}, + {line: " }"}, + {line: " return f", scopes: []int{1}}, + {line: " }"}, + {line: " f1(a)"}, + {line: " return nil"}, + {line: "}"}, {line: "func main() {"}, {line: " TestNestedFor()"}, {line: " TestOas2()"}, @@ -184,6 +196,7 @@ var testfile = []testline{ {line: " TestDiscontiguousRanges()"}, {line: " TestClosureScope()"}, {line: " TestEscape()"}, + {line: " TestCaptureVar(true)"}, {line: "}"}, } diff --git a/src/cmd/compile/internal/gc/select.go b/src/cmd/compile/internal/gc/select.go index 320cd9a47ef..38eaaccfd27 100644 --- a/src/cmd/compile/internal/gc/select.go +++ b/src/cmd/compile/internal/gc/select.go @@ -8,39 +8,32 @@ import "cmd/compile/internal/types" // select func typecheckselect(sel *Node) { - var ncase *Node - var n *Node - var def *Node lno := setlineno(sel) - count := 0 typecheckslice(sel.Ninit.Slice(), Etop) - for _, n1 := range sel.List.Slice() { - count++ - ncase = n1 - setlineno(ncase) + for _, ncase := range sel.List.Slice() { if ncase.Op != OXCASE { + setlineno(ncase) Fatalf("typecheckselect %v", ncase.Op) } if ncase.List.Len() == 0 { // default if def != nil { - yyerror("multiple defaults in select (first at %v)", def.Line()) + yyerrorl(ncase.Pos, "multiple defaults in select (first at %v)", def.Line()) } else { def = ncase } } else if ncase.List.Len() > 1 { - yyerror("select cases cannot be lists") + yyerrorl(ncase.Pos, "select cases cannot be lists") } else { ncase.List.SetFirst(typecheck(ncase.List.First(), Etop)) - n = ncase.List.First() + n := ncase.List.First() ncase.Left = n ncase.List.Set(nil) - setlineno(n) switch n.Op { default: - yyerror("select case must be receive, send or assign recv") + yyerrorl(n.Pos, "select case must be receive, send or assign recv") // convert x 
= <-c into OSELRECV(x, <-c). // remove implicit conversions; the eventual assignment @@ -51,7 +44,7 @@ func typecheckselect(sel *Node) { } if n.Right.Op != ORECV { - yyerror("select assignment must have receive on right hand side") + yyerrorl(n.Pos, "select assignment must have receive on right hand side") break } @@ -60,7 +53,7 @@ func typecheckselect(sel *Node) { // convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok case OAS2RECV: if n.Rlist.First().Op != ORECV { - yyerror("select assignment must have receive on right hand side") + yyerrorl(n.Pos, "select assignment must have receive on right hand side") break } @@ -72,7 +65,7 @@ func typecheckselect(sel *Node) { // convert <-c into OSELRECV(N, <-c) case ORECV: - n = nod(OSELRECV, nil, n) + n = nodl(n.Pos, OSELRECV, nil, n) n.SetTypecheck(1) ncase.Left = n @@ -85,35 +78,41 @@ func typecheckselect(sel *Node) { typecheckslice(ncase.Nbody.Slice(), Etop) } - sel.Xoffset = int64(count) lineno = lno } func walkselect(sel *Node) { - if sel.List.Len() == 0 && sel.Xoffset != 0 { - Fatalf("double walkselect") // already rewrote + lno := setlineno(sel) + if sel.Nbody.Len() != 0 { + Fatalf("double walkselect") } - lno := setlineno(sel) - i := sel.List.Len() + init := sel.Ninit.Slice() + sel.Ninit.Set(nil) + + init = append(init, walkselectcases(&sel.List)...) + sel.List.Set(nil) + + sel.Nbody.Set(init) + walkstmtlist(sel.Nbody.Slice()) + + lineno = lno +} + +func walkselectcases(cases *Nodes) []*Node { + n := cases.Len() + sellineno := lineno // optimization: zero-case select - var init []*Node - var r *Node - var n *Node - var var_ *Node - var selv *Node - var chosen *Node - if i == 0 { - sel.Nbody.Set1(mkcall("block", nil, nil)) - goto out + if n == 0 { + return []*Node{mkcall("block", nil, nil)} } // optimization: one-case select: single op. // TODO(rsc): Reenable optimization once order.go can handle it. // golang.org/issue/7672. 
- if i == 1 { - cas := sel.List.First() + if n == 1 { + cas := cases.First() setlineno(cas) l := cas.Ninit.Slice() if cas.Left != nil { // not default: @@ -163,21 +162,19 @@ func walkselect(sel *Node) { a.Nbody.Set1(mkcall("block", nil, &ln)) l = ln.Slice() a = typecheck(a, Etop) - l = append(l, a) - l = append(l, n) + l = append(l, a, n) } l = append(l, cas.Nbody.Slice()...) l = append(l, nod(OBREAK, nil, nil)) - sel.Nbody.Set(l) - goto out + return l } // convert case value arguments to addresses. // this rewrite is used by both the general code and the next optimization. - for _, cas := range sel.List.Slice() { + for _, cas := range cases.Slice() { setlineno(cas) - n = cas.Left + n := cas.Left if n == nil { continue } @@ -205,15 +202,15 @@ func walkselect(sel *Node) { } // optimization: two-case select but one is default: single non-blocking op. - if i == 2 && (sel.List.First().Left == nil || sel.List.Second().Left == nil) { + if n == 2 && (cases.First().Left == nil || cases.Second().Left == nil) { var cas *Node var dflt *Node - if sel.List.First().Left == nil { - cas = sel.List.Second() - dflt = sel.List.First() + if cases.First().Left == nil { + cas = cases.Second() + dflt = cases.First() } else { - dflt = sel.List.Second() - cas = sel.List.First() + dflt = cases.Second() + cas = cases.First() } n := cas.Left @@ -247,26 +244,24 @@ func walkselect(sel *Node) { r.Left = typecheck(r.Left, Erv) r.Nbody.Set(cas.Nbody.Slice()) r.Rlist.Set(append(dflt.Ninit.Slice(), dflt.Nbody.Slice()...)) - sel.Nbody.Set2(r, nod(OBREAK, nil, nil)) - goto out + return []*Node{r, nod(OBREAK, nil, nil)} } - init = sel.Ninit.Slice() - sel.Ninit.Set(nil) + var init []*Node // generate sel-struct - setlineno(sel) - selv = temp(selecttype(sel.Xoffset)) - r = nod(OAS, selv, nil) + lineno = sellineno + selv := temp(selecttype(int64(n))) + r := nod(OAS, selv, nil) r = typecheck(r, Etop) init = append(init, r) - var_ = conv(conv(nod(OADDR, selv, nil), types.Types[TUNSAFEPTR]), 
types.NewPtr(types.Types[TUINT8])) - r = mkcall("newselect", nil, nil, var_, nodintconst(selv.Type.Width), nodintconst(sel.Xoffset)) + var_ := conv(conv(nod(OADDR, selv, nil), types.Types[TUNSAFEPTR]), types.NewPtr(types.Types[TUINT8])) + r = mkcall("newselect", nil, nil, var_, nodintconst(selv.Type.Width), nodintconst(int64(n))) r = typecheck(r, Etop) init = append(init, r) // register cases - for _, cas := range sel.List.Slice() { + for _, cas := range cases.Slice() { setlineno(cas) init = append(init, cas.Ninit.Slice()...) @@ -298,8 +293,8 @@ func walkselect(sel *Node) { } // run the select - setlineno(sel) - chosen = temp(types.Types[TINT]) + lineno = sellineno + chosen := temp(types.Types[TINT]) r = nod(OAS, chosen, mkcall("selectgo", types.Types[TINT], nil, var_)) r = typecheck(r, Etop) init = append(init, r) @@ -308,7 +303,7 @@ func walkselect(sel *Node) { init = append(init, nod(OVARKILL, selv, nil)) // dispatch cases - for i, cas := range sel.List.Slice() { + for i, cas := range cases.Slice() { setlineno(cas) cond := nod(OEQ, chosen, nodintconst(int64(i))) @@ -320,12 +315,7 @@ func walkselect(sel *Node) { init = append(init, r) } - sel.Nbody.Set(init) - -out: - sel.List.Set(nil) - walkstmtlist(sel.Nbody.Slice()) - lineno = lno + return init } // Keep in sync with src/runtime/select.go. 
@@ -342,7 +332,6 @@ func selecttype(size int64) *types.Type { namedfield("releasetime", types.Types[TUINT64]), }) scase.SetNoalg(true) - scase.SetLocal(true) sel := tostruct([]*Node{ namedfield("tcase", types.Types[TUINT16]), @@ -354,7 +343,6 @@ func selecttype(size int64) *types.Type { namedfield("pollorderarr", types.NewArray(types.Types[TUINT16], size)), }) sel.SetNoalg(true) - sel.SetLocal(true) return sel } diff --git a/src/cmd/compile/internal/gc/sinit.go b/src/cmd/compile/internal/gc/sinit.go index 613cdf6e74f..3af2460a802 100644 --- a/src/cmd/compile/internal/gc/sinit.go +++ b/src/cmd/compile/internal/gc/sinit.go @@ -44,7 +44,7 @@ func init1(n *Node, out *[]*Node) { init1(n1, out) } - if n.Left != nil && n.Type != nil && n.Left.Op == OTYPE && n.Class() == PFUNC { + if n.isMethodExpression() { // Methods called as Type.Method(receiver, ...). // Definitions for method expressions are stored in type->nname. init1(asNode(n.Type.FuncType().Nname), out) @@ -157,7 +157,6 @@ func init1(n *Node, out *[]*Node) { initlist = initlist[:last] n.SetInitorder(InitDone) - return } // foundinitloop prints an init loop error and exits. @@ -214,10 +213,10 @@ func init2(n *Node, out *[]*Node) { init2list(n.Rlist, out) init2list(n.Nbody, out) - if n.Op == OCLOSURE { + switch n.Op { + case OCLOSURE: init2list(n.Func.Closure.Nbody, out) - } - if n.Op == ODOTMETH || n.Op == OCALLPART { + case ODOTMETH, OCALLPART: init2(asNode(n.Type.FuncType().Nname), out) } } @@ -229,8 +228,7 @@ func init2list(l Nodes, out *[]*Node) { } func initreorder(l []*Node, out *[]*Node) { - var n *Node - for _, n = range l { + for _, n := range l { switch n.Op { case ODCLFUNC, ODCLCONST, ODCLTYPE: continue @@ -480,9 +478,8 @@ func staticassign(l *Node, r *Node, out *[]*Node) bool { n := *l gdata(&n, r.Func.Closure.Func.Nname, Widthptr) return true - } else { - closuredebugruntimecheck(r) } + closuredebugruntimecheck(r) case OCONVIFACE: // This logic is mirrored in isStaticCompositeLiteral. 
@@ -885,11 +882,10 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) { // put dynamics into array (5) var index int64 - for _, r := range n.List.Slice() { - value := r - if r.Op == OKEY { - index = nonnegintconst(r.Left) - value = r.Right + for _, value := range n.List.Slice() { + if value.Op == OKEY { + index = nonnegintconst(value.Left) + value = value.Right } a := nod(OINDEX, vauto, nodintconst(index)) a.SetBounded(true) @@ -932,6 +928,7 @@ func slicelit(ctxt initContext, n *Node, var_ *Node, init *Nodes) { func maplit(n *Node, m *Node, init *Nodes) { // make the map var a := nod(OMAKE, nil, nil) + a.Esc = n.Esc a.List.Set2(typenod(n.Type), nodintconst(int64(n.List.Len()))) litas(m, a, init) @@ -941,7 +938,7 @@ func maplit(n *Node, m *Node, init *Nodes) { if r.Op != OKEY { Fatalf("maplit: rhs not OKEY: %v", r) } - if isliteral(r.Left) && isliteral(r.Right) { + if isStaticCompositeLiteral(r.Left) && isStaticCompositeLiteral(r.Right) { stat = append(stat, r) } else { dyn = append(dyn, r) @@ -966,24 +963,14 @@ func maplit(n *Node, m *Node, init *Nodes) { vstatv := staticname(tv) vstatv.Name.SetReadonly(true) - for i, r := range stat { - index := r.Left - value := r.Right - - // build vstatk[b] = index - setlineno(index) - lhs := nod(OINDEX, vstatk, nodintconst(int64(i))) - as := nod(OAS, lhs, index) - as = typecheck(as, Etop) - genAsStatic(as) - - // build vstatv[b] = value - setlineno(value) - lhs = nod(OINDEX, vstatv, nodintconst(int64(i))) - as = nod(OAS, lhs, value) - as = typecheck(as, Etop) - genAsStatic(as) + datak := nod(OARRAYLIT, nil, nil) + datav := nod(OARRAYLIT, nil, nil) + for _, r := range stat { + datak.List.Append(r.Left) + datav.List.Append(r.Right) } + fixedlit(inInitFunction, initKindStatic, datak, vstatk, init) + fixedlit(inInitFunction, initKindStatic, datav, vstatv, init) // loop adding structure elements to map // for i = 0; i < len(vstatk); i++ { diff --git a/src/cmd/compile/internal/gc/sizeof_test.go 
b/src/cmd/compile/internal/gc/sizeof_test.go index 1ca0a615353..48d357a0b03 100644 --- a/src/cmd/compile/internal/gc/sizeof_test.go +++ b/src/cmd/compile/internal/gc/sizeof_test.go @@ -22,7 +22,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {Func{}, 124, 216}, + {Func{}, 132, 232}, {Name{}, 36, 56}, {Param{}, 28, 56}, {Node{}, 76, 128}, diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 63e9622983c..fe062da4095 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -37,6 +37,7 @@ func initssaconfig() { Float32: types.Types[TFLOAT32], Float64: types.Types[TFLOAT64], Int: types.Types[TINT], + UInt: types.Types[TUINT], Uintptr: types.Types[TUINTPTR], String: types.Types[TSTRING], BytePtr: types.NewPtr(types.Types[TUINT8]), @@ -48,6 +49,11 @@ func initssaconfig() { Float64Ptr: types.NewPtr(types.Types[TFLOAT64]), BytePtrPtr: types.NewPtr(types.NewPtr(types.Types[TUINT8])), } + + if thearch.SoftFloat { + softfloatInit() + } + // Generate a few pointer types that are uncommon in the frontend but common in the backend. // Caching is disabled in the backend, so generating these here avoids allocations. _ = types.NewPtr(types.Types[TINTER]) // *interface{} @@ -67,35 +73,37 @@ func initssaconfig() { if thearch.LinkArch.Name == "386" { ssaConfig.Set387(thearch.Use387) } + ssaConfig.SoftFloat = thearch.SoftFloat ssaCaches = make([]ssa.Cache, nBackendWorkers) // Set up some runtime functions we'll need to call. 
- Newproc = Sysfunc("newproc") - Deferproc = Sysfunc("deferproc") - Deferreturn = Sysfunc("deferreturn") - Duffcopy = Sysfunc("duffcopy") - Duffzero = Sysfunc("duffzero") - panicindex = Sysfunc("panicindex") - panicslice = Sysfunc("panicslice") - panicdivide = Sysfunc("panicdivide") - growslice = Sysfunc("growslice") - panicdottypeE = Sysfunc("panicdottypeE") - panicdottypeI = Sysfunc("panicdottypeI") - panicnildottype = Sysfunc("panicnildottype") - assertE2I = Sysfunc("assertE2I") - assertE2I2 = Sysfunc("assertE2I2") - assertI2I = Sysfunc("assertI2I") - assertI2I2 = Sysfunc("assertI2I2") - goschedguarded = Sysfunc("goschedguarded") - writeBarrier = Sysfunc("writeBarrier") - writebarrierptr = Sysfunc("writebarrierptr") - typedmemmove = Sysfunc("typedmemmove") - typedmemclr = Sysfunc("typedmemclr") - Udiv = Sysfunc("udiv") + Newproc = sysfunc("newproc") + Deferproc = sysfunc("deferproc") + Deferreturn = sysfunc("deferreturn") + Duffcopy = sysfunc("duffcopy") + Duffzero = sysfunc("duffzero") + panicindex = sysfunc("panicindex") + panicslice = sysfunc("panicslice") + panicdivide = sysfunc("panicdivide") + growslice = sysfunc("growslice") + panicdottypeE = sysfunc("panicdottypeE") + panicdottypeI = sysfunc("panicdottypeI") + panicnildottype = sysfunc("panicnildottype") + assertE2I = sysfunc("assertE2I") + assertE2I2 = sysfunc("assertE2I2") + assertI2I = sysfunc("assertI2I") + assertI2I2 = sysfunc("assertI2I2") + goschedguarded = sysfunc("goschedguarded") + writeBarrier = sysfunc("writeBarrier") + writebarrierptr = sysfunc("writebarrierptr") + gcWriteBarrier = sysfunc("gcWriteBarrier") + typedmemmove = sysfunc("typedmemmove") + typedmemclr = sysfunc("typedmemclr") + Udiv = sysfunc("udiv") // GO386=387 runtime functions - ControlWord64trunc = Sysfunc("controlWord64trunc") - ControlWord32 = Sysfunc("controlWord32") + ControlWord64trunc = sysfunc("controlWord64trunc") + ControlWord32 = sysfunc("controlWord32") } // buildssa builds an SSA function for fn. 
@@ -135,13 +143,9 @@ func buildssa(fn *Node, worker int) *ssa.Func { if fn.Func.Pragma&Nosplit != 0 { s.f.NoSplit = true } - defer func() { - if s.f.WBPos.IsKnown() { - fn.Func.WBPos = s.f.WBPos - } - }() s.exitCode = fn.Func.Exit s.panics = map[funcLine]*ssa.Block{} + s.softFloat = s.config.SoftFloat if name == os.Getenv("GOSSAFUNC") { s.f.HTMLWriter = ssa.NewHTMLWriter("ssa.html", s.f.Frontend(), name) @@ -162,15 +166,12 @@ func buildssa(fn *Node, worker int) *ssa.Func { s.startBlock(s.f.Entry) s.vars[&memVar] = s.startmem - s.varsyms = map[*Node]interface{}{} - // Generate addresses of local declarations s.decladdrs = map[*Node]*ssa.Value{} for _, n := range fn.Func.Dcl { switch n.Class() { case PPARAM, PPARAMOUT: - aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n}) - s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), aux, s.sp) + s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), n, s.sp) if n.Class() == PPARAMOUT && s.canSSA(n) { // Save ssa-able PPARAMOUT variables so we can // store them back to the stack at the end of @@ -185,7 +186,7 @@ func buildssa(fn *Node, worker int) *ssa.Func { case PFUNC: // local function - already handled by frontend default: - s.Fatalf("local variable with class %s unimplemented", classnames[n.Class()]) + s.Fatalf("local variable with class %v unimplemented", n.Class()) } } @@ -207,6 +208,12 @@ func buildssa(fn *Node, worker int) *ssa.Func { s.popLine() } + for _, b := range s.f.Blocks { + if b.Pos != src.NoXPos { + s.updateUnsetPredPos(b) + } + } + s.insertPhis() // Don't carry reference this around longer than necessary @@ -217,6 +224,39 @@ func buildssa(fn *Node, worker int) *ssa.Func { return s.f } +// updateUnsetPredPos propagates the earliest-value position information for b +// towards all of b's predecessors that need a position, and recurs on that +// predecessor if its position is updated. B should have a non-empty position. 
+func (s *state) updateUnsetPredPos(b *ssa.Block) { + if b.Pos == src.NoXPos { + s.Fatalf("Block %s should have a position", b) + } + bestPos := src.NoXPos + for _, e := range b.Preds { + p := e.Block() + if !p.LackingPos() { + continue + } + if bestPos == src.NoXPos { + bestPos = b.Pos + for _, v := range b.Values { + if v.LackingPos() { + continue + } + if v.Pos != src.NoXPos { + // Assume values are still in roughly textual order; + // TODO: could also seek minimum position? + bestPos = v.Pos + break + } + } + } + p.Pos = bestPos + s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay. + } + return +} + type state struct { // configuration (arch) information config *ssa.Config @@ -258,9 +298,6 @@ type state struct { // addresses of PPARAM and PPARAMOUT variables. decladdrs map[*Node]*ssa.Value - // symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused. - varsyms map[*Node]interface{} - // starting values. Memory, stack pointer, and globals pointer startmem *ssa.Value sp *ssa.Value @@ -268,6 +305,8 @@ type state struct { // line number stack. The current line number is top of stack line []src.XPos + // the last line number processed; it may have been popped + lastPos src.XPos // list of panic calls by function name and line number. // Used to deduplicate panic calls. @@ -278,6 +317,7 @@ type state struct { cgoUnsafeArgs bool hasdefer bool // whether the function contains a defer statement + softFloat bool } type funcLine struct { @@ -349,7 +389,14 @@ func (s *state) endBlock() *ssa.Block { s.defvars[b.ID] = s.vars s.curBlock = nil s.vars = nil - b.Pos = s.peekPos() + if b.LackingPos() { + // Empty plain blocks get the line of their successor (handled after all blocks created), + // except for increment blocks in For statements (handled in ssa conversion of OFOR), + // and for blocks ending in GOTO/BREAK/CONTINUE. 
+ b.Pos = src.NoXPos + } else { + b.Pos = s.lastPos + } return b } @@ -362,7 +409,10 @@ func (s *state) pushLine(line src.XPos) { if Debug['K'] != 0 { Warn("buildssa: unknown position (line 0)") } + } else { + s.lastPos = line } + s.line = append(s.line, line) } @@ -443,27 +493,27 @@ func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value { // entryNewValue0A adds a new value with no arguments and an aux value to the entry block. func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value { - return s.f.Entry.NewValue0A(s.peekPos(), op, t, aux) + return s.f.Entry.NewValue0A(src.NoXPos, op, t, aux) } // entryNewValue1 adds a new value with one argument to the entry block. func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value { - return s.f.Entry.NewValue1(s.peekPos(), op, t, arg) + return s.f.Entry.NewValue1(src.NoXPos, op, t, arg) } // entryNewValue1 adds a new value with one argument and an auxint value to the entry block. func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value { - return s.f.Entry.NewValue1I(s.peekPos(), op, t, auxint, arg) + return s.f.Entry.NewValue1I(src.NoXPos, op, t, auxint, arg) } // entryNewValue1A adds a new value with one argument and an aux value to the entry block. func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value { - return s.f.Entry.NewValue1A(s.peekPos(), op, t, aux, arg) + return s.f.Entry.NewValue1A(src.NoXPos, op, t, aux, arg) } // entryNewValue2 adds a new value with two arguments to the entry block. func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value { - return s.f.Entry.NewValue2(s.peekPos(), op, t, arg0, arg1) + return s.f.Entry.NewValue2(src.NoXPos, op, t, arg0, arg1) } // const* routines add a new const value to the entry block. 
@@ -511,6 +561,25 @@ func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value { return s.f.ConstOffPtrSP(s.peekPos(), t, c, s.sp) } +// newValueOrSfCall* are wrappers around newValue*, which may create a call to a +// soft-float runtime function instead (when emitting soft-float code). +func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value { + if s.softFloat { + if c, ok := s.sfcall(op, arg); ok { + return c + } + } + return s.newValue1(op, t, arg) +} +func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value { + if s.softFloat { + if c, ok := s.sfcall(op, arg0, arg1); ok { + return c + } + } + return s.newValue2(op, t, arg0, arg1) +} + // stmtList converts the statement list n to SSA and adds it to s. func (s *state) stmtList(l Nodes) { for _, n := range l.Slice() { @@ -520,8 +589,11 @@ func (s *state) stmtList(l Nodes) { // stmt converts the statement n to SSA and adds it to s. func (s *state) stmt(n *Node) { - s.pushLine(n.Pos) - defer s.popLine() + if !(n.Op == OVARKILL || n.Op == OVARLIVE) { + // OVARKILL and OVARLIVE are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging. + s.pushLine(n.Pos) + defer s.popLine() + } // If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere), // then this code is dead. Stop here. @@ -634,6 +706,7 @@ func (s *state) stmt(n *Node) { } b := s.endBlock() + b.Pos = s.lastPos // Do this even if b is an empty block. b.AddEdgeTo(lab.target) case OAS: @@ -661,24 +734,26 @@ func (s *state) stmt(n *Node) { } rhs = nil case OAPPEND: - // If we're writing the result of an append back to the same slice, - // handle it specially to avoid write barriers on the fast (non-growth) path. + // Check whether we're writing the result of an append back to the same slice. + // If so, we handle it specially to avoid write barriers on the fast + // (non-growth) path. 
+ if !samesafeexpr(n.Left, rhs.List.First()) || Debug['N'] != 0 { + break + } // If the slice can be SSA'd, it'll be on the stack, // so there will be no write barriers, // so there's no need to attempt to prevent them. - if samesafeexpr(n.Left, rhs.List.First()) { - if !s.canSSA(n.Left) { - if Debug_append > 0 { - Warnl(n.Pos, "append: len-only update") - } - s.append(rhs, true) - return - } else { - if Debug_append > 0 { // replicating old diagnostic message - Warnl(n.Pos, "append: len-only update (in local slice)") - } + if s.canSSA(n.Left) { + if Debug_append > 0 { // replicating old diagnostic message + Warnl(n.Pos, "append: len-only update (in local slice)") } + break } + if Debug_append > 0 { + Warnl(n.Pos, "append: len-only update") + } + s.append(rhs, true) + return } } @@ -778,12 +853,14 @@ func (s *state) stmt(n *Node) { case ORETURN: s.stmtList(n.List) - s.exit() + b := s.exit() + b.Pos = s.lastPos + case ORETJMP: s.stmtList(n.List) b := s.exit() b.Kind = ssa.BlockRetJmp // override BlockRet - b.Aux = n.Left.Sym.Linksym() + b.Aux = n.Sym.Linksym() case OCONTINUE, OBREAK: var to *ssa.Block @@ -808,6 +885,7 @@ func (s *state) stmt(n *Node) { } b := s.endBlock() + b.Pos = s.lastPos // Do this even if b is an empty block. b.AddEdgeTo(to) case OFOR, OFORUNTIL: @@ -873,6 +951,11 @@ func (s *state) stmt(n *Node) { } if b := s.endBlock(); b != nil { b.AddEdgeTo(bCond) + // It can happen that bIncr ends in a block containing only VARKILL, + // and that muddles the debugging experience. 
+ if n.Op != OFORUNTIL && b.Pos == src.NoXPos { + b.Pos = bCond.Pos + } } if n.Op == OFORUNTIL { @@ -934,6 +1017,11 @@ func (s *state) stmt(n *Node) { if !n.Left.Addrtaken() { s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left) } + switch n.Left.Class() { + case PAUTO, PPARAM, PPARAMOUT: + default: + s.Fatalf("VARLIVE variable %v must be Auto or Arg", n.Left) + } s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem()) case OCHECKNIL: @@ -1408,14 +1496,13 @@ func (s *state) expr(n *Node) *ssa.Value { len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str) return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len) case OCFUNC: - aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: n.Left.Sym.Linksym()}) + aux := n.Left.Sym.Linksym() return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb) case ONAME: if n.Class() == PFUNC { // "value" of a function is the address of the function's closure sym := funcsym(n.Sym).Linksym() - aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: sym}) - return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), aux, s.sb) + return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), sym, s.sb) } if s.canSSA(n) { return s.variable(n, n.Type) @@ -1525,6 +1612,12 @@ func (s *state) expr(n *Node) *ssa.Value { return v } + // map <--> *hmap + if to.Etype == TMAP && from.IsPtr() && + to.MapType().Hmap == from.Elem() { + return v + } + dowidth(from) dowidth(to) if from.Width != to.Width { @@ -1623,18 +1716,18 @@ func (s *state) expr(n *Node) *ssa.Value { if ft.IsFloat() || tt.IsFloat() { conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}] - if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS { + if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS && !s.softFloat { if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 { conv = conv1 } } - if thearch.LinkArch.Family == sys.ARM64 { + if thearch.LinkArch.Family == sys.ARM64 || s.softFloat 
{ if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 { conv = conv1 } } - if thearch.LinkArch.Family == sys.MIPS { + if thearch.LinkArch.Family == sys.MIPS && !s.softFloat { if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() { // tt is float32 or float64, and ft is also unsigned if tt.Size() == 4 { @@ -1665,12 +1758,12 @@ func (s *state) expr(n *Node) *ssa.Value { if op2 == ssa.OpCopy { return x } - return s.newValue1(op2, n.Type, x) + return s.newValueOrSfCall1(op2, n.Type, x) } if op2 == ssa.OpCopy { - return s.newValue1(op1, n.Type, x) + return s.newValueOrSfCall1(op1, n.Type, x) } - return s.newValue1(op2, n.Type, s.newValue1(op1, types.Types[it], x)) + return s.newValueOrSfCall1(op2, n.Type, s.newValueOrSfCall1(op1, types.Types[it], x)) } // Tricky 64-bit unsigned cases. if ft.IsInteger() { @@ -1715,8 +1808,8 @@ func (s *state) expr(n *Node) *ssa.Value { ftp := floatForComplex(ft) ttp := floatForComplex(tt) return s.newValue2(ssa.OpComplexMake, tt, - s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)), - s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x))) + s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)), + s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x))) } s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype) @@ -1733,8 +1826,8 @@ func (s *state) expr(n *Node) *ssa.Value { if n.Left.Type.IsComplex() { pt := floatForComplex(n.Left.Type) op := s.ssaOp(OEQ, pt) - r := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)) - i := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)) + r := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)) + i := s.newValueOrSfCall2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), 
s.newValue1(ssa.OpComplexImag, pt, b)) c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i) switch n.Op { case OEQ: @@ -1745,6 +1838,9 @@ func (s *state) expr(n *Node) *ssa.Value { s.Fatalf("ordered complex compare %v", n.Op) } } + if n.Left.Type.IsFloat() { + return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b) + } return s.newValue2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b) case OMUL: a := s.expr(n.Left) @@ -1762,22 +1858,27 @@ func (s *state) expr(n *Node) *ssa.Value { bimag := s.newValue1(ssa.OpComplexImag, pt, b) if pt != wt { // Widen for calculation - areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) - breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) - aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) - bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) + areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal) + breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal) + aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag) + bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag) } - xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) - ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal)) + xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag)) + ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal)) if pt != wt { // Narrow to store back - xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) - ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) + xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal) + ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag) } return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) } + + if n.Type.IsFloat() { + return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b) + } + return s.newValue2(s.ssaOp(n.Op, 
n.Type), a.Type, a, b) case ODIV: @@ -1800,31 +1901,31 @@ func (s *state) expr(n *Node) *ssa.Value { bimag := s.newValue1(ssa.OpComplexImag, pt, b) if pt != wt { // Widen for calculation - areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal) - breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal) - aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag) - bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag) + areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal) + breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal) + aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag) + bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag) } - denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag)) - xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag)) - ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag)) + denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag)) + xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag)) + ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag)) // TODO not sure if this is best done in wide precision or narrow // Double-rounding might be an issue. // Note that the pre-SSA implementation does the entire calculation // in wide format, so wide is compatible. 
- xreal = s.newValue2(divop, wt, xreal, denom) - ximag = s.newValue2(divop, wt, ximag, denom) + xreal = s.newValueOrSfCall2(divop, wt, xreal, denom) + ximag = s.newValueOrSfCall2(divop, wt, ximag, denom) if pt != wt { // Narrow to store back - xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal) - ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag) + xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal) + ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag) } return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag) } if n.Type.IsFloat() { - return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) + return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b) } return s.intDivide(n, a, b) case OMOD: @@ -1838,8 +1939,11 @@ func (s *state) expr(n *Node) *ssa.Value { pt := floatForComplex(n.Type) op := s.ssaOp(n.Op, pt) return s.newValue2(ssa.OpComplexMake, n.Type, - s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)), - s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))) + s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)), + s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))) + } + if n.Type.IsFloat() { + return s.newValueOrSfCall2(s.ssaOp(n.Op, n.Type), a.Type, a, b) } return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b) case OAND, OOR, OXOR: @@ -2191,7 +2295,7 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl) if inplace { - if sn.Op == ONAME { + if sn.Op == ONAME && sn.Class() != PEXTERN { // Tell liveness we're about to build a new slice s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem()) } @@ -2270,7 +2374,8 @@ func (s *state) append(n *Node, inplace bool) *ssa.Value { // This function is intended to handle && and 
|| better than just calling // s.expr(cond) and branching on the result. func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) { - if cond.Op == OANDAND { + switch cond.Op { + case OANDAND: mid := s.f.NewBlock(ssa.BlockPlain) s.stmtList(cond.Ninit) s.condBranch(cond.Left, mid, no, max8(likely, 0)) @@ -2283,8 +2388,7 @@ func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) { // the likeliness of the first branch. // TODO: have the frontend give us branch prediction hints for // OANDAND and OOROR nodes (if it ever has such info). - } - if cond.Op == OOROR { + case OOROR: mid := s.f.NewBlock(ssa.BlockPlain) s.stmtList(cond.Ninit) s.condBranch(cond.Left, yes, mid, min8(likely, 0)) @@ -2294,8 +2398,7 @@ func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) { // Note: if likely==-1, then both recursive calls pass -1. // If likely==1, then we don't have enough info to decide // the likelihood of the first branch. - } - if cond.Op == ONOT { + case ONOT: s.stmtList(cond.Ninit) s.condBranch(cond.Left, no, yes, -likely) return @@ -2398,7 +2501,7 @@ func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) } // Left is not ssa-able. Compute its address. addr := s.addr(left, false) - if left.Op == ONAME && skip == 0 { + if left.Op == ONAME && left.Class() != PEXTERN && skip == 0 { s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, left, s.mem()) } if isReflectHeaderDataField(left) { @@ -2499,6 +2602,79 @@ const ( callGo ) +type sfRtCallDef struct { + rtfn *obj.LSym + rtype types.EType +} + +var softFloatOps map[ssa.Op]sfRtCallDef + +func softfloatInit() { + // Some of these operations get transformed by sfcall. 
+ softFloatOps = map[ssa.Op]sfRtCallDef{ + ssa.OpAdd32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32}, + ssa.OpAdd64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64}, + ssa.OpSub32F: sfRtCallDef{sysfunc("fadd32"), TFLOAT32}, + ssa.OpSub64F: sfRtCallDef{sysfunc("fadd64"), TFLOAT64}, + ssa.OpMul32F: sfRtCallDef{sysfunc("fmul32"), TFLOAT32}, + ssa.OpMul64F: sfRtCallDef{sysfunc("fmul64"), TFLOAT64}, + ssa.OpDiv32F: sfRtCallDef{sysfunc("fdiv32"), TFLOAT32}, + ssa.OpDiv64F: sfRtCallDef{sysfunc("fdiv64"), TFLOAT64}, + + ssa.OpEq64F: sfRtCallDef{sysfunc("feq64"), TBOOL}, + ssa.OpEq32F: sfRtCallDef{sysfunc("feq32"), TBOOL}, + ssa.OpNeq64F: sfRtCallDef{sysfunc("feq64"), TBOOL}, + ssa.OpNeq32F: sfRtCallDef{sysfunc("feq32"), TBOOL}, + ssa.OpLess64F: sfRtCallDef{sysfunc("fgt64"), TBOOL}, + ssa.OpLess32F: sfRtCallDef{sysfunc("fgt32"), TBOOL}, + ssa.OpGreater64F: sfRtCallDef{sysfunc("fgt64"), TBOOL}, + ssa.OpGreater32F: sfRtCallDef{sysfunc("fgt32"), TBOOL}, + ssa.OpLeq64F: sfRtCallDef{sysfunc("fge64"), TBOOL}, + ssa.OpLeq32F: sfRtCallDef{sysfunc("fge32"), TBOOL}, + ssa.OpGeq64F: sfRtCallDef{sysfunc("fge64"), TBOOL}, + ssa.OpGeq32F: sfRtCallDef{sysfunc("fge32"), TBOOL}, + + ssa.OpCvt32to32F: sfRtCallDef{sysfunc("fint32to32"), TFLOAT32}, + ssa.OpCvt32Fto32: sfRtCallDef{sysfunc("f32toint32"), TINT32}, + ssa.OpCvt64to32F: sfRtCallDef{sysfunc("fint64to32"), TFLOAT32}, + ssa.OpCvt32Fto64: sfRtCallDef{sysfunc("f32toint64"), TINT64}, + ssa.OpCvt64Uto32F: sfRtCallDef{sysfunc("fuint64to32"), TFLOAT32}, + ssa.OpCvt32Fto64U: sfRtCallDef{sysfunc("f32touint64"), TUINT64}, + ssa.OpCvt32to64F: sfRtCallDef{sysfunc("fint32to64"), TFLOAT64}, + ssa.OpCvt64Fto32: sfRtCallDef{sysfunc("f64toint32"), TINT32}, + ssa.OpCvt64to64F: sfRtCallDef{sysfunc("fint64to64"), TFLOAT64}, + ssa.OpCvt64Fto64: sfRtCallDef{sysfunc("f64toint64"), TINT64}, + ssa.OpCvt64Uto64F: sfRtCallDef{sysfunc("fuint64to64"), TFLOAT64}, + ssa.OpCvt64Fto64U: sfRtCallDef{sysfunc("f64touint64"), TUINT64}, + ssa.OpCvt32Fto64F: 
sfRtCallDef{sysfunc("f32to64"), TFLOAT64}, + ssa.OpCvt64Fto32F: sfRtCallDef{sysfunc("f64to32"), TFLOAT32}, + } +} + +// TODO: do not emit sfcall if operation can be optimized to constant in later +// opt phase +func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) { + if callDef, ok := softFloatOps[op]; ok { + switch op { + case ssa.OpLess32F, + ssa.OpLess64F, + ssa.OpLeq32F, + ssa.OpLeq64F: + args[0], args[1] = args[1], args[0] + case ssa.OpSub32F, + ssa.OpSub64F: + args[1] = s.newValue1(s.ssaOp(OMINUS, types.Types[callDef.rtype]), args[1].Type, args[1]) + } + + result := s.rtcall(callDef.rtfn, true, []*types.Type{types.Types[callDef.rtype]}, args...)[0] + if op == ssa.OpNeq32F || op == ssa.OpNeq64F { + result = s.newValue1(ssa.OpNot, result.Type, result) + } + return result, true + } + return nil, false +} + var intrinsics map[intrinsicKey]intrinsicBuilder // An intrinsicBuilder converts a call node n into an ssa value that @@ -2577,18 +2753,34 @@ func init() { return nil }, all...) + add("runtime", "getclosureptr", + func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr) + }, + all...) + + addF("runtime", "getcallerpc", + func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr) + }, sys.AMD64, sys.I386) + + add("runtime", "getcallersp", + func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr) + }, + all...) 
/******** runtime/internal/sys ********/ addF("runtime/internal/sys", "Ctz32", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0]) }, - sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) + sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) addF("runtime/internal/sys", "Ctz64", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0]) }, - sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS) + sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64) addF("runtime/internal/sys", "Bswap32", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0]) @@ -2607,41 +2799,40 @@ func init() { s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) }, - sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) - + sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) addF("runtime/internal/atomic", "Load64", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem()) s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) }, - sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) + sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64) addF("runtime/internal/atomic", "Loadp", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem()) s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v) }, - sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) + sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) addF("runtime/internal/atomic", "Store", func(s *state, n 
*Node, args []*ssa.Value) *ssa.Value { s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem()) return nil }, - sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) + sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) addF("runtime/internal/atomic", "Store64", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem()) return nil }, - sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) + sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64) addF("runtime/internal/atomic", "StorepNoWB", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem()) return nil }, - sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS) + sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64) addF("runtime/internal/atomic", "Xchg", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { @@ -2649,14 +2840,14 @@ func init() { s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) }, - sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) + sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) addF("runtime/internal/atomic", "Xchg64", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem()) s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) }, - sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) + sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64) addF("runtime/internal/atomic", "Xadd", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { @@ -2664,14 +2855,14 @@ func init() { s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v) }, - sys.AMD64, 
sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) + sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) addF("runtime/internal/atomic", "Xadd64", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem()) s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v) }, - sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) + sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64) addF("runtime/internal/atomic", "Cas", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { @@ -2679,14 +2870,14 @@ func init() { s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) }, - sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64) + sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64) addF("runtime/internal/atomic", "Cas64", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem()) s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v) return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v) }, - sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64) + sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64) addF("runtime/internal/atomic", "And8", func(s *state, n *Node, args []*ssa.Value) *ssa.Value { @@ -2724,6 +2915,85 @@ func init() { return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0]) }, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X) + addF("math", "Trunc", + func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpTrunc, types.Types[TFLOAT64], args[0]) + }, + sys.PPC64, sys.S390X) + addF("math", "Ceil", + func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpCeil, types.Types[TFLOAT64], args[0]) + }, + sys.PPC64, 
sys.S390X) + addF("math", "Floor", + func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpFloor, types.Types[TFLOAT64], args[0]) + }, + sys.PPC64, sys.S390X) + addF("math", "Round", + func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpRound, types.Types[TFLOAT64], args[0]) + }, + sys.S390X) + addF("math", "RoundToEven", + func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpRoundToEven, types.Types[TFLOAT64], args[0]) + }, + sys.S390X) + addF("math", "Abs", + func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + return s.newValue1(ssa.OpAbs, types.Types[TFLOAT64], args[0]) + }, + sys.PPC64) + addF("math", "Copysign", + func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + return s.newValue2(ssa.OpCopysign, types.Types[TFLOAT64], args[0], args[1]) + }, + sys.PPC64) + + makeRoundAMD64 := func(op ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + return func(s *state, n *Node, args []*ssa.Value) *ssa.Value { + aux := syslook("support_sse41").Sym.Linksym() + addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), aux, s.sb) + v := s.newValue2(ssa.OpLoad, types.Types[TBOOL], addr, s.mem()) + b := s.endBlock() + b.Kind = ssa.BlockIf + b.SetControl(v) + bTrue := s.f.NewBlock(ssa.BlockPlain) + bFalse := s.f.NewBlock(ssa.BlockPlain) + bEnd := s.f.NewBlock(ssa.BlockPlain) + b.AddEdgeTo(bTrue) + b.AddEdgeTo(bFalse) + b.Likely = ssa.BranchLikely // most machines have sse4.1 nowadays + + // We have the intrinsic - use it directly. + s.startBlock(bTrue) + s.vars[n] = s.newValue1(op, types.Types[TFLOAT64], args[0]) + s.endBlock().AddEdgeTo(bEnd) + + // Call the pure Go version. + s.startBlock(bFalse) + a := s.call(n, callNormal) + s.vars[n] = s.newValue2(ssa.OpLoad, types.Types[TFLOAT64], a, s.mem()) + s.endBlock().AddEdgeTo(bEnd) + + // Merge results. 
+ s.startBlock(bEnd) + return s.variable(n, types.Types[TFLOAT64]) + } + } + addF("math", "RoundToEven", + makeRoundAMD64(ssa.OpRoundToEven), + sys.AMD64) + addF("math", "Floor", + makeRoundAMD64(ssa.OpFloor), + sys.AMD64) + addF("math", "Ceil", + makeRoundAMD64(ssa.OpCeil), + sys.AMD64) + addF("math", "Trunc", + makeRoundAMD64(ssa.OpTrunc), + sys.AMD64) /******** math/bits ********/ addF("math/bits", "TrailingZeros64", @@ -2847,7 +3117,7 @@ func init() { sys.ARM64) makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value { return func(s *state, n *Node, args []*ssa.Value) *ssa.Value { - aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: syslook("support_popcnt").Sym.Linksym()}) + aux := syslook("support_popcnt").Sym.Linksym() addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), aux, s.sb) v := s.newValue2(ssa.OpLoad, types.Types[TBOOL], addr, s.mem()) b := s.endBlock() @@ -2975,6 +3245,12 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder { // We can't intrinsify them. 
return nil } + // Skip intrinsifying math functions (which may contain hard-float + // instructions) when soft-float + if thearch.SoftFloat && pkg == "math" { + return nil + } + fn := sym.Name return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}] } @@ -3026,7 +3302,7 @@ func (s *state) intrinsicArgs(n *Node) []*ssa.Value { temps := map[*Node]*ssa.Value{} for _, a := range n.List.Slice() { if a.Op != OAS { - s.Fatalf("non-assignment as a function argument %s", opnames[a.Op]) + s.Fatalf("non-assignment as a function argument %v", a.Op) } l, r := a.Left, a.Right switch l.Op { @@ -3046,7 +3322,7 @@ func (s *state) intrinsicArgs(n *Node) []*ssa.Value { } args = append(args, callArg{l.Xoffset, v}) default: - s.Fatalf("function argument assignment target not allowed: %s", opnames[l.Op]) + s.Fatalf("function argument assignment target not allowed: %v", l.Op) } } sort.Sort(byOffset(args)) @@ -3099,10 +3375,8 @@ func (s *state) call(n *Node, k callKind) *ssa.Value { } i := s.expr(fn.Left) itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i) - if k != callNormal { - s.nilCheck(itab) - } - itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab + s.nilCheck(itab) + itabidx := fn.Xoffset + 2*int64(Widthptr) + 8 // offset of fun field in runtime.itab itab = s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab) if k == callNormal { codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], itab, s.mem()) @@ -3199,24 +3473,6 @@ func etypesign(e types.EType) int8 { return 0 } -// lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node. -// This improves the effectiveness of cse by using the same Aux values for the -// same symbols. 
-func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} { - switch sym.(type) { - default: - s.Fatalf("sym %v is of unknown type %T", sym, sym) - case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol: - // these are the only valid types - } - - if lsym, ok := s.varsyms[n]; ok { - return lsym - } - s.varsyms[n] = sym - return sym -} - // addr converts the address of the expression n to SSA, adds it to s and returns the SSA result. // The value that the returned Value represents is guaranteed to be non-nil. // If bounded is true then this address does not require a nil check for its operand @@ -3228,8 +3484,7 @@ func (s *state) addr(n *Node, bounded bool) *ssa.Value { switch n.Class() { case PEXTERN: // global variable - aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: n.Sym.Linksym()}) - v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb) + v := s.entryNewValue1A(ssa.OpAddr, t, n.Sym.Linksym(), s.sb) // TODO: Make OpAddr use AuxInt as well as Aux. if n.Xoffset != 0 { v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v) @@ -3243,21 +3498,18 @@ func (s *state) addr(n *Node, bounded bool) *ssa.Value { } if n == nodfp { // Special arg that points to the frame pointer (Used by ORECOVER). - aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n}) - return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp) + return s.entryNewValue1A(ssa.OpAddr, t, n, s.sp) } s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs) return nil case PAUTO: - aux := s.lookupSymbol(n, &ssa.AutoSymbol{Node: n}) - return s.newValue1A(ssa.OpAddr, t, aux, s.sp) + return s.newValue1A(ssa.OpAddr, t, n, s.sp) case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early. 
// ensure that we reuse symbols for out parameters so // that cse works on their addresses - aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n}) - return s.newValue1A(ssa.OpAddr, t, aux, s.sp) + return s.newValue1A(ssa.OpAddr, t, n, s.sp) default: - s.Fatalf("variable address class %v not implemented", classnames[n.Class()]) + s.Fatalf("variable address class %v not implemented", n.Class()) return nil } case OINDREGSP: @@ -3342,7 +3594,7 @@ func (s *state) canSSA(n *Node) bool { return false case PPARAMOUT: if s.hasdefer { - // TODO: handle this case? Named return values must be + // TODO: handle this case? Named return values must be // in memory so that the deferred function can see them. // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false } // Or maybe not, see issue 18860. Even unnamed return values @@ -3610,8 +3862,9 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) { ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right) s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem()) case t.IsSlice(): - ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, right) - s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem()) + elType := types.NewPtr(t.Elem()) + ptr := s.newValue1(ssa.OpSlicePtr, elType, right) + s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, elType, left, ptr, s.mem()) case t.IsInterface(): // itab field is treated as a scalar. 
idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right) @@ -3743,7 +3996,7 @@ type u642fcvtTab struct { one func(*state, *types.Type, int64) *ssa.Value } -var u64_f64 u642fcvtTab = u642fcvtTab{ +var u64_f64 = u642fcvtTab{ geq: ssa.OpGeq64, cvt2F: ssa.OpCvt64to64F, and: ssa.OpAnd64, @@ -3753,7 +4006,7 @@ var u64_f64 u642fcvtTab = u642fcvtTab{ one: (*state).constInt64, } -var u64_f32 u642fcvtTab = u642fcvtTab{ +var u64_f32 = u642fcvtTab{ geq: ssa.OpGeq64, cvt2F: ssa.OpCvt64to32F, and: ssa.OpAnd64, @@ -3834,12 +4087,12 @@ type u322fcvtTab struct { cvtI2F, cvtF2F ssa.Op } -var u32_f64 u322fcvtTab = u322fcvtTab{ +var u32_f64 = u322fcvtTab{ cvtI2F: ssa.OpCvt32to64F, cvtF2F: ssa.OpCopy, } -var u32_f32 u322fcvtTab = u322fcvtTab{ +var u32_f32 = u322fcvtTab{ cvtI2F: ssa.OpCvt32to32F, cvtF2F: ssa.OpCvt64Fto32F, } @@ -3924,14 +4177,15 @@ func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value { b.AddEdgeTo(bElse) s.startBlock(bElse) - if n.Op == OLEN { + switch n.Op { + case OLEN: // length is stored in the first word for map/chan s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem()) - } else if n.Op == OCAP { + case OCAP: // capacity is stored in the second word for chan sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x) s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem()) - } else { + default: s.Fatalf("op must be OLEN or OCAP") } s.endBlock() @@ -3948,7 +4202,7 @@ type f2uCvtTab struct { cutoff uint64 } -var f32_u64 f2uCvtTab = f2uCvtTab{ +var f32_u64 = f2uCvtTab{ ltf: ssa.OpLess32F, cvt2U: ssa.OpCvt32Fto64, subf: ssa.OpSub32F, @@ -3958,7 +4212,7 @@ var f32_u64 f2uCvtTab = f2uCvtTab{ cutoff: 9223372036854775808, } -var f64_u64 f2uCvtTab = f2uCvtTab{ +var f64_u64 = f2uCvtTab{ ltf: ssa.OpLess64F, cvt2U: ssa.OpCvt64Fto64, subf: ssa.OpSub64F, @@ -3968,7 +4222,7 @@ var f64_u64 f2uCvtTab = f2uCvtTab{ cutoff: 9223372036854775808, } -var f32_u32 f2uCvtTab = f2uCvtTab{ +var f32_u32 = f2uCvtTab{ ltf: ssa.OpLess32F, cvt2U: 
ssa.OpCvt32Fto32, subf: ssa.OpSub32F, @@ -3978,7 +4232,7 @@ var f32_u32 f2uCvtTab = f2uCvtTab{ cutoff: 2147483648, } -var f64_u32 f2uCvtTab = f2uCvtTab{ +var f64_u32 = f2uCvtTab{ ltf: ssa.OpLess64F, cvt2U: ssa.OpCvt64Fto32, subf: ssa.OpSub64F, @@ -4377,23 +4631,20 @@ func genssa(f *ssa.Func, pp *Progs) { e := f.Frontend().(*ssafn) - // Generate GC bitmaps, except if the stack is too large, - // in which compilation will fail later anyway (issue 20529). - if e.stksize < maxStackSize { - s.stackMapIndex = liveness(e, f) - } + s.stackMapIndex = liveness(e, f) // Remember where each block starts. s.bstart = make([]*obj.Prog, f.NumBlocks()) s.pp = pp - var valueProgs map[*obj.Prog]*ssa.Value - var blockProgs map[*obj.Prog]*ssa.Block + var progToValue map[*obj.Prog]*ssa.Value + var progToBlock map[*obj.Prog]*ssa.Block + var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point. var logProgs = e.log if logProgs { - valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues()) - blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks()) + progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues()) + progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks()) f.Logf("genssa %s\n", f.Name) - blockProgs[s.pp.next] = f.Blocks[0] + progToBlock[s.pp.next] = f.Blocks[0] } if thearch.Use387 { @@ -4402,6 +4653,12 @@ func genssa(f *ssa.Func, pp *Progs) { s.ScratchFpMem = e.scratchFpMem + logLocationLists := Debug_locationlist != 0 + if Ctxt.Flag_locationlists { + e.curfn.Func.DebugInfo = ssa.BuildFuncDebug(f, logLocationLists) + valueToProgAfter = make([]*obj.Prog, f.NumValues()) + } + // Emit basic blocks for i, b := range f.Blocks { s.bstart[b.ID] = s.pp.next @@ -4442,15 +4699,19 @@ func genssa(f *ssa.Func, pp *Progs) { } case ssa.OpPhi: CheckLoweredPhi(v) - + case ssa.OpRegKill: + // nothing to do default: // let the backend handle it thearch.SSAGenValue(&s, v) } + if Ctxt.Flag_locationlists { + valueToProgAfter[v.ID] = 
s.pp.next + } if logProgs { for ; x != s.pp.next; x = x.Link { - valueProgs[x] = v + progToValue[x] = v } } } @@ -4468,7 +4729,47 @@ func genssa(f *ssa.Func, pp *Progs) { thearch.SSAGenBlock(&s, b, next) if logProgs { for ; x != s.pp.next; x = x.Link { - blockProgs[x] = b + progToBlock[x] = b + } + } + } + + if Ctxt.Flag_locationlists { + for i := range f.Blocks { + blockDebug := e.curfn.Func.DebugInfo.Blocks[i] + for _, locList := range blockDebug.Variables { + for _, loc := range locList.Locations { + if loc.Start == ssa.BlockStart { + loc.StartProg = s.bstart[f.Blocks[i].ID] + } else { + loc.StartProg = valueToProgAfter[loc.Start.ID] + } + if loc.End == nil { + Fatalf("empty loc %v compiling %v", loc, f.Name) + } + + if loc.End == ssa.BlockEnd { + // If this variable was live at the end of the block, it should be + // live over the control flow instructions. Extend it up to the + // beginning of the next block. + // If this is the last block, then there's no Prog to use for it, and + // EndProg is unset. + if i < len(f.Blocks)-1 { + loc.EndProg = s.bstart[f.Blocks[i+1].ID] + } + } else { + // Advance the "end" forward by one; the end-of-range doesn't take effect + // until the instruction actually executes. 
+ loc.EndProg = valueToProgAfter[loc.End.ID].Link + if loc.EndProg == nil { + Fatalf("nil loc.EndProg compiling %v, loc=%v", f.Name, loc) + } + } + if !logLocationLists { + loc.Start = nil + loc.End = nil + } + } } } } @@ -4479,16 +4780,22 @@ func genssa(f *ssa.Func, pp *Progs) { } if logProgs { + filename := "" for p := pp.Text; p != nil; p = p.Link { + if p.Pos.IsKnown() && p.InnermostFilename() != filename { + filename = p.InnermostFilename() + f.Logf("# %s\n", filename) + } + var s string - if v, ok := valueProgs[p]; ok { + if v, ok := progToValue[p]; ok { s = v.String() - } else if b, ok := blockProgs[p]; ok { + } else if b, ok := progToBlock[p]; ok { s = b.String() } else { s = " " // most value and branch strings are 2-3 characters long } - f.Logf("%s\t%s\n", s, p) + f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString()) } if f.HTMLWriter != nil { // LineHist is defunct now - this code won't do @@ -4499,22 +4806,31 @@ func genssa(f *ssa.Func, pp *Progs) { var buf bytes.Buffer buf.WriteString("") buf.WriteString("
    ") + filename := "" for p := pp.Text; p != nil; p = p.Link { + // Don't spam every line with the file name, which is often huge. + // Only print changes, and "unknown" is not a change. + if p.Pos.IsKnown() && p.InnermostFilename() != filename { + filename = p.InnermostFilename() + buf.WriteString("
    ") + buf.WriteString(html.EscapeString("# " + filename)) + buf.WriteString("
    ") + } + buf.WriteString("
    ") - if v, ok := valueProgs[p]; ok { + if v, ok := progToValue[p]; ok { buf.WriteString(v.HTML()) - } else if b, ok := blockProgs[p]; ok { - buf.WriteString(b.HTML()) + } else if b, ok := progToBlock[p]; ok { + buf.WriteString("" + b.HTML() + "") } buf.WriteString("
    ") buf.WriteString("
    ") - buf.WriteString(html.EscapeString(p.String())) + buf.WriteString(fmt.Sprintf("%.5d (%s) %s", p.Pc, p.InnermostLineNumber(), html.EscapeString(p.InstructionString()))) buf.WriteString("
    ") - buf.WriteString("") } buf.WriteString("
    ") buf.WriteString("
    ") - f.HTMLWriter.WriteColumn("genssa", buf.String()) + f.HTMLWriter.WriteColumn("genssa", "ssa-prog", buf.String()) // pp.Text.Ctxt.LineHist.PrintFilenameOnly = saved } } @@ -4589,6 +4905,7 @@ type FloatingEQNEJump struct { func (s *SSAGenState) oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump) { p := s.Prog(jumps.Jump) p.To.Type = obj.TYPE_BRANCH + p.Pos = b.Pos to := jumps.Index s.Branches = append(s.Branches, Branch{p, b.Succs[to].Block()}) } @@ -4605,6 +4922,7 @@ func (s *SSAGenState) FPJump(b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) s.oneFPJump(b, &jumps[1][0]) s.oneFPJump(b, &jumps[1][1]) q := s.Prog(obj.AJMP) + q.Pos = b.Pos q.To.Type = obj.TYPE_BRANCH s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()}) } @@ -4614,10 +4932,11 @@ func AuxOffset(v *ssa.Value) (offset int64) { if v.Aux == nil { return 0 } - switch sym := v.Aux.(type) { - - case *ssa.AutoSymbol: - n := sym.Node.(*Node) + n, ok := v.Aux.(*Node) + if !ok { + v.Fatalf("bad aux type in %s\n", v.LongString()) + } + if n.Class() == PAUTO { return n.Xoffset } return 0 @@ -4639,17 +4958,17 @@ func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) { return } // Add symbol's offset from its base register. - switch sym := v.Aux.(type) { - case *ssa.ExternSymbol: + switch n := v.Aux.(type) { + case *obj.LSym: a.Name = obj.NAME_EXTERN - a.Sym = sym.Sym - case *ssa.ArgSymbol: - n := sym.Node.(*Node) - a.Name = obj.NAME_PARAM - a.Sym = n.Orig.Sym.Linksym() - a.Offset += n.Xoffset - case *ssa.AutoSymbol: - n := sym.Node.(*Node) + a.Sym = n + case *Node: + if n.Class() == PPARAM || n.Class() == PPARAMOUT { + a.Name = obj.NAME_PARAM + a.Sym = n.Orig.Sym.Linksym() + a.Offset += n.Xoffset + break + } a.Name = obj.NAME_AUTO a.Sym = n.Sym.Linksym() a.Offset += n.Xoffset @@ -4725,7 +5044,7 @@ func CheckLoweredPhi(v *ssa.Value) { loc := f.RegAlloc[v.ID] for _, a := range v.Args { if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead? 
- v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func) + v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func) } } } @@ -4800,6 +5119,12 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog { p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = sym + + // Record call graph information for nowritebarrierrec + // analysis. + if nowritebarrierrecCheck != nil { + nowritebarrierrecCheck.recordCall(s.pp.curfn, sym, v.Pos) + } } else { // TODO(mdempsky): Can these differences be eliminated? switch thearch.LinkArch.Family { @@ -4863,10 +5188,9 @@ func (e *ssafn) StringData(s string) interface{} { if e.strings == nil { e.strings = make(map[string]interface{}) } - data := stringsym(s) - aux := &ssa.ExternSymbol{Sym: data} - e.strings[s] = aux - return aux + data := stringsym(e.curfn.Pos, s) + e.strings[s] = data + return data } func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode { @@ -4880,9 +5204,9 @@ func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { lenType := types.Types[TINT] if n.Class() == PAUTO && !n.Addrtaken() { // Split this string up into two separate variables. - p := e.namedAuto(n.Sym.Name+".ptr", ptrType, n.Pos) - l := e.namedAuto(n.Sym.Name+".len", lenType, n.Pos) - return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0} + p := e.splitSlot(&name, ".ptr", 0, ptrType) + l := e.splitSlot(&name, ".len", ptrType.Size(), lenType) + return p, l } // Return the two parts of the larger variable. 
return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)} @@ -4897,9 +5221,9 @@ func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot if n.Type.IsEmptyInterface() { f = ".type" } - c := e.namedAuto(n.Sym.Name+f, t, n.Pos) - d := e.namedAuto(n.Sym.Name+".data", t, n.Pos) - return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0} + c := e.splitSlot(&name, f, 0, t) + d := e.splitSlot(&name, ".data", t.Size(), t) + return c, d } // Return the two parts of the larger variable. return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)} @@ -4911,10 +5235,10 @@ func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ss lenType := types.Types[TINT] if n.Class() == PAUTO && !n.Addrtaken() { // Split this slice up into three separate variables. - p := e.namedAuto(n.Sym.Name+".ptr", ptrType, n.Pos) - l := e.namedAuto(n.Sym.Name+".len", lenType, n.Pos) - c := e.namedAuto(n.Sym.Name+".cap", lenType, n.Pos) - return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}, ssa.LocalSlot{N: c, Type: lenType, Off: 0} + p := e.splitSlot(&name, ".ptr", 0, ptrType) + l := e.splitSlot(&name, ".len", ptrType.Size(), lenType) + c := e.splitSlot(&name, ".cap", ptrType.Size()+lenType.Size(), lenType) + return p, l, c } // Return the three parts of the larger variable. return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, @@ -4933,9 +5257,9 @@ func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) } if n.Class() == PAUTO && !n.Addrtaken() { // Split this complex up into two separate variables. 
- c := e.namedAuto(n.Sym.Name+".real", t, n.Pos) - d := e.namedAuto(n.Sym.Name+".imag", t, n.Pos) - return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0} + r := e.splitSlot(&name, ".real", 0, t) + i := e.splitSlot(&name, ".imag", t.Size(), t) + return r, i } // Return the two parts of the larger variable. return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s} @@ -4951,9 +5275,10 @@ func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) { } if n.Class() == PAUTO && !n.Addrtaken() { // Split this int64 up into two separate variables. - h := e.namedAuto(n.Sym.Name+".hi", t, n.Pos) - l := e.namedAuto(n.Sym.Name+".lo", types.Types[TUINT32], n.Pos) - return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: types.Types[TUINT32], Off: 0} + if thearch.LinkArch.ByteOrder == binary.BigEndian { + return e.splitSlot(&name, ".hi", 0, t), e.splitSlot(&name, ".lo", t.Size(), types.Types[TUINT32]) + } + return e.splitSlot(&name, ".hi", t.Size(), t), e.splitSlot(&name, ".lo", 0, types.Types[TUINT32]) } // Return the two parts of the larger variable. if thearch.LinkArch.ByteOrder == binary.BigEndian { @@ -4966,12 +5291,15 @@ func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot { n := name.N.(*Node) st := name.Type ft := st.FieldType(i) + var offset int64 + for f := 0; f < i; f++ { + offset += st.FieldType(f).Size() + } if n.Class() == PAUTO && !n.Addrtaken() { // Note: the _ field may appear several times. But // have no fear, identically-named but distinct Autos are // ok, albeit maybe confusing for a debugger. 
- x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft, n.Pos) - return ssa.LocalSlot{N: x, Type: ft, Off: 0} + return e.splitSlot(&name, "."+st.FieldName(i), offset, ft) } return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)} } @@ -4984,8 +5312,7 @@ func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot { } et := at.ElemType() if n.Class() == PAUTO && !n.Addrtaken() { - x := e.namedAuto(n.Sym.Name+"[0]", et, n.Pos) - return ssa.LocalSlot{N: x, Type: et, Off: 0} + return e.splitSlot(&name, "[0]", 0, et) } return ssa.LocalSlot{N: n, Type: et, Off: name.Off} } @@ -4994,16 +5321,15 @@ func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym { return itabsym(it, offset) } -// namedAuto returns a new AUTO variable with the given name and type. -// These are exposed to the debugger. -func (e *ssafn) namedAuto(name string, typ *types.Type, pos src.XPos) ssa.GCNode { - t := typ - s := &types.Sym{Name: name, Pkg: localpkg} +// splitSlot returns a slot representing the data of parent starting at offset. 
+func (e *ssafn) splitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot { + s := &types.Sym{Name: parent.N.(*Node).Sym.Name + suffix, Pkg: localpkg} - n := new(Node) - n.Name = new(Name) - n.Op = ONAME - n.Pos = pos + n := &Node{ + Name: new(Name), + Op: ONAME, + Pos: parent.N.(*Node).Pos, + } n.Orig = n s.Def = asTypesNode(n) @@ -5016,7 +5342,7 @@ func (e *ssafn) namedAuto(name string, typ *types.Type, pos src.XPos) ssa.GCNode n.Name.Curfn = e.curfn e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n) dowidth(t) - return n + return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset} } func (e *ssafn) CanSSA(t *types.Type) bool { @@ -5054,8 +5380,8 @@ func (e *ssafn) Debug_checknil() bool { return Debug_checknil != 0 } -func (e *ssafn) Debug_wb() bool { - return Debug_wb != 0 +func (e *ssafn) Debug_eagerwb() bool { + return Debug_eagerwb != 0 } func (e *ssafn) UseWriteBarrier() bool { @@ -5070,6 +5396,8 @@ func (e *ssafn) Syslook(name string) *obj.LSym { return writeBarrier case "writebarrierptr": return writebarrierptr + case "gcWriteBarrier": + return gcWriteBarrier case "typedmemmove": return typedmemmove case "typedmemclr": @@ -5079,6 +5407,23 @@ func (e *ssafn) Syslook(name string) *obj.LSym { return nil } +func (e *ssafn) SetWBPos(pos src.XPos) { + e.curfn.Func.setWBPos(pos) +} + func (n *Node) Typ() *types.Type { return n.Type } +func (n *Node) StorageClass() ssa.StorageClass { + switch n.Class() { + case PPARAM: + return ssa.ClassParam + case PPARAMOUT: + return ssa.ClassParamOut + case PAUTO: + return ssa.ClassAuto + default: + Fatalf("untranslateable storage class for %v: %s", n, n.Class()) + return 0 + } +} diff --git a/src/cmd/compile/internal/gc/ssa_test.go b/src/cmd/compile/internal/gc/ssa_test.go index bb315b97e83..13fb98b2765 100644 --- a/src/cmd/compile/internal/gc/ssa_test.go +++ b/src/cmd/compile/internal/gc/ssa_test.go @@ -7,29 +7,42 @@ package gc import ( "bytes" "internal/testenv" + 
"io/ioutil" + "os" "os/exec" "path/filepath" + "runtime" "strings" "testing" ) // TODO: move all these tests elsewhere? // Perhaps teach test/run.go how to run them with a new action verb. -func runTest(t *testing.T, filename string) { +func runTest(t *testing.T, filename string, flags ...string) { t.Parallel() - doTest(t, filename, "run") + doTest(t, filename, "run", flags...) } -func buildTest(t *testing.T, filename string) { +func buildTest(t *testing.T, filename string, flags ...string) { t.Parallel() - doTest(t, filename, "build") + doTest(t, filename, "build", flags...) } -func doTest(t *testing.T, filename string, kind string) { +func doTest(t *testing.T, filename string, kind string, flags ...string) { testenv.MustHaveGoBuild(t) + gotool := testenv.GoToolPath(t) + var stdout, stderr bytes.Buffer - cmd := exec.Command(testenv.GoToolPath(t), kind, filepath.Join("testdata", filename)) + args := []string{kind} + if len(flags) == 0 { + args = append(args, "-gcflags=-d=ssa/check/on") + } else { + args = append(args, flags...) + } + args = append(args, filepath.Join("testdata", filename)) + cmd := exec.Command(gotool, args...) cmd.Stdout = &stdout cmd.Stderr = &stderr - if err := cmd.Run(); err != nil { + err := cmd.Run() + if err != nil { t.Fatalf("Failed: %v:\nOut: %s\nStderr: %s\n", err, &stdout, &stderr) } if s := stdout.String(); s != "" { @@ -40,6 +53,58 @@ func doTest(t *testing.T, filename string, kind string) { } } +// runGenTest runs a test-generator, then runs the generated test. +// Generated test can either fail in compilation or execution. +// The environment variable parameter(s) is passed to the run +// of the generated test. 
+func runGenTest(t *testing.T, filename, tmpname string, ev ...string) { + testenv.MustHaveGoRun(t) + gotool := testenv.GoToolPath(t) + var stdout, stderr bytes.Buffer + cmd := exec.Command(gotool, "run", filepath.Join("testdata", filename)) + cmd.Stdout = &stdout + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + t.Fatalf("Failed: %v:\nOut: %s\nStderr: %s\n", err, &stdout, &stderr) + } + // Write stdout into a temporary file + tmpdir, ok := ioutil.TempDir("", tmpname) + if ok != nil { + t.Fatalf("Failed to create temporary directory") + } + defer os.RemoveAll(tmpdir) + + rungo := filepath.Join(tmpdir, "run.go") + ok = ioutil.WriteFile(rungo, stdout.Bytes(), 0600) + if ok != nil { + t.Fatalf("Failed to create temporary file " + rungo) + } + + stdout.Reset() + stderr.Reset() + cmd = exec.Command("go", "run", "-gcflags=-d=ssa/check/on", rungo) + cmd.Stdout = &stdout + cmd.Stderr = &stderr + cmd.Env = append(cmd.Env, ev...) + err := cmd.Run() + if err != nil { + t.Fatalf("Failed: %v:\nOut: %s\nStderr: %s\n", err, &stdout, &stderr) + } + if s := stderr.String(); s != "" { + t.Errorf("Stderr = %s\nWant empty", s) + } + if s := stdout.String(); s != "" { + t.Errorf("Stdout = %s\nWant empty", s) + } +} + +func TestGenFlowGraph(t *testing.T) { + runGenTest(t, "flowgraph_generator1.go", "ssa_fg_tmp1") + if runtime.GOOS != "windows" { + runGenTest(t, "flowgraph_generator1.go", "ssa_fg_tmp2", "GO_SSA_PHI_LOC_CUTOFF=0") + } +} + // TestShortCircuit tests OANDAND and OOROR expressions and short circuiting. func TestShortCircuit(t *testing.T) { runTest(t, "short.go") } @@ -55,6 +120,10 @@ func TestArithmetic(t *testing.T) { runTest(t, "arith.go") } // TestFP tests that both backends have the same result for floating point expressions. func TestFP(t *testing.T) { runTest(t, "fp.go") } +func TestFPSoftFloat(t *testing.T) { + runTest(t, "fp.go", "-gcflags=-d=softfloat,ssa/check/on") +} + // TestArithmeticBoundary tests boundary results for arithmetic operations. 
func TestArithmeticBoundary(t *testing.T) { runTest(t, "arithBoundary.go") } diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index 047acee05f7..a45c15a44ec 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -93,7 +93,7 @@ func hcrash() { } func linestr(pos src.XPos) string { - return Ctxt.OutermostPos(pos).Format(Debug['C'] == 0) + return Ctxt.OutermostPos(pos).Format(Debug['C'] == 0, Debug['L'] == 1) } // lasterror keeps track of the most recently issued error. @@ -257,9 +257,6 @@ func restrictlookup(name string, pkg *types.Pkg) *types.Sym { // find all the exported symbols in package opkg // and make them available in the current package func importdot(opkg *types.Pkg, pack *Node) { - var s1 *types.Sym - var pkgerror string - n := 0 for _, s := range opkg.Syms { if s.Def == nil { @@ -268,9 +265,9 @@ func importdot(opkg *types.Pkg, pack *Node) { if !exportname(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot continue } - s1 = lookup(s.Name) + s1 := lookup(s.Name) if s1.Def != nil { - pkgerror = fmt.Sprintf("during import %q", opkg.Path) + pkgerror := fmt.Sprintf("during import %q", opkg.Path) redeclare(s1, pkgerror) continue } @@ -450,6 +447,10 @@ func nodbool(b bool) *Node { return c } +func nodstr(s string) *Node { + return nodlit(Val{s}) +} + // treecopy recursively copies n, with the exception of // ONAME, OLITERAL, OTYPE, and non-iota ONONAME leaves. // Copies of iota ONONAME nodes are assigned the current @@ -926,6 +927,14 @@ func convertop(src *types.Type, dst *types.Type, why *string) Op { return OCONVNOP } + // src is map and dst is a pointer to corresponding hmap. + // This rule is needed for the implementation detail that + // go gc maps are implemented as a pointer to a hmap struct. 
+ if src.Etype == TMAP && dst.IsPtr() && + src.MapType().Hmap == dst.Elem() { + return OCONVNOP + } + return 0 } @@ -1129,53 +1138,57 @@ func updateHasCall(n *Node) { if n == nil { return } + n.SetHasCall(calcHasCall(n)) +} - b := false +func calcHasCall(n *Node) bool { if n.Ninit.Len() != 0 { // TODO(mdempsky): This seems overly conservative. - b = true - goto out + return true } switch n.Op { case OLITERAL, ONAME, OTYPE: - if b || n.HasCall() { + if n.HasCall() { Fatalf("OLITERAL/ONAME/OTYPE should never have calls: %+v", n) } - return - case OAS: - if needwritebarrier(n.Left) { - b = true - goto out - } + return false case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER: - b = true - goto out + return true case OANDAND, OOROR: // hard with instrumented code if instrumenting { - b = true - goto out + return true } case OINDEX, OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR, OSLICESTR, OIND, ODOTPTR, ODOTTYPE, ODIV, OMOD: // These ops might panic, make sure they are done // before we start marshaling args for a call. See issue 16760. - b = true - goto out + return true + + // When using soft-float, these ops might be rewritten to function calls + // so we ensure they are evaluated first. 
+ case OADD, OSUB, OMINUS: + if thearch.SoftFloat && (isFloat[n.Type.Etype] || isComplex[n.Type.Etype]) { + return true + } + case OLT, OEQ, ONE, OLE, OGE, OGT: + if thearch.SoftFloat && (isFloat[n.Left.Type.Etype] || isComplex[n.Left.Type.Etype]) { + return true + } + case OCONV: + if thearch.SoftFloat && ((isFloat[n.Type.Etype] || isComplex[n.Type.Etype]) || (isFloat[n.Left.Type.Etype] || isComplex[n.Left.Type.Etype])) { + return true + } } if n.Left != nil && n.Left.HasCall() { - b = true - goto out + return true } if n.Right != nil && n.Right.HasCall() { - b = true - goto out + return true } - -out: - n.SetHasCall(b) + return false } func badtype(op Op, tl *types.Type, tr *types.Type) { @@ -1387,6 +1400,7 @@ func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase return } t.SetRecur(true) + defer t.SetRecur(false) var u *types.Type d-- @@ -1396,7 +1410,7 @@ func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase // below for embedded fields. c = lookdot0(s, t, save, ignorecase) if c != 0 { - goto out + return c, false } } @@ -1405,7 +1419,7 @@ func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase u = u.Elem() } if !u.IsStruct() && !u.IsInterface() { - goto out + return c, false } for _, f := range u.Fields().Slice() { @@ -1414,8 +1428,7 @@ func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase } if d < 0 { // Found an embedded field at target depth. 
- more = true - goto out + return c, true } a, more1 := adddot1(s, f.Type, d, save, ignorecase) if a != 0 && c == 0 { @@ -1427,8 +1440,6 @@ func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase } } -out: - t.SetRecur(false) return c, more } @@ -1557,21 +1568,18 @@ func expand1(t *types.Type, top, followptr bool) { u = u.Elem() } - if !u.IsStruct() && !u.IsInterface() { - goto out + if u.IsStruct() || u.IsInterface() { + for _, f := range u.Fields().Slice() { + if f.Embedded == 0 { + continue + } + if f.Sym == nil { + continue + } + expand1(f.Type, false, followptr) + } } - for _, f := range u.Fields().Slice() { - if f.Embedded == 0 { - continue - } - if f.Sym == nil { - continue - } - expand1(f.Type, false, followptr) - } - -out: t.SetRecur(false) } @@ -1669,11 +1677,17 @@ func structargs(tl *types.Type, mustname bool) []*Node { // rcvr - U // method - M func (t T)(), a TFIELD type struct // newnam - the eventual mangled name of this function -func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym, iface int) { +func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym, iface bool) { if false && Debug['r'] != 0 { fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam) } + // Only generate (*T).M wrappers for T.M in T's own package. + if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && + rcvr.Elem().Sym != nil && rcvr.Elem().Sym.Pkg != localpkg { + return + } + lineno = autogeneratedPos dclcontext = PEXTERN @@ -1686,7 +1700,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym, iface t := nod(OTFUNC, nil, nil) l := []*Node{this} - if iface != 0 && rcvr.Width < int64(Widthptr) { + if iface && rcvr.Width < int64(Widthptr) { // Building method for interface table and receiver // is smaller than the single pointer-sized word // that the interface call will pass in. 
@@ -1744,9 +1758,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym, iface as := nod(OAS, this.Left, nod(OCONVNOP, dot, nil)) as.Right.Type = rcvr fn.Nbody.Append(as) - n := nod(ORETJMP, nil, nil) - n.Left = newname(methodsym(method.Sym, methodrcvr, false)) - fn.Nbody.Append(n) + fn.Nbody.Append(nodSym(ORETJMP, nil, methodsym(method.Sym, methodrcvr, false))) // When tail-calling, we can't use a frame pointer. fn.Func.SetNoFramePointer(true) } else { @@ -1754,7 +1766,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym, iface call := nod(OCALL, dot, nil) call.List.Set(args) call.SetIsddd(isddd) - if method.Type.Results().NumFields() > 0 { + if method.Type.NumResults() > 0 { n := nod(ORETURN, nil, nil) n.List.Set1(call) call = n @@ -1767,7 +1779,7 @@ func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym, iface dumplist("genwrapper body", fn.Nbody) } - funcbody(fn) + funcbody() Curfn = fn types.Popdcl() if debug_dclstack != 0 { @@ -1803,35 +1815,32 @@ func hashmem(t *types.Type) *Node { return n } -func ifacelookdot(s *types.Sym, t *types.Type, followptr *bool, ignorecase bool) *types.Field { - *followptr = false - +func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) (m *types.Field, followptr bool) { if t == nil { - return nil + return nil, false } - var m *types.Field path, ambig := dotpath(s, t, &m, ignorecase) if path == nil { if ambig { yyerror("%v.%v is ambiguous", t, s) } - return nil + return nil, false } for _, d := range path { if d.field.Type.IsPtr() { - *followptr = true + followptr = true break } } if m.Type.Etype != TFUNC || m.Type.Recv() == nil { yyerror("%v.%v is a field, not a method", t, s) - return nil + return nil, followptr } - return m + return m, followptr } func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool { @@ -1845,11 +1854,12 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool // and then do one 
loop. if t.IsInterface() { + Outer: for _, im := range iface.Fields().Slice() { for _, tm := range t.Fields().Slice() { if tm.Sym == im.Sym { if eqtype(tm.Type, im.Type) { - goto found + continue Outer } *m = im *samename = tm @@ -1862,7 +1872,6 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool *samename = nil *ptr = 0 return false - found: } return true @@ -1876,11 +1885,10 @@ func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool if im.Broke() { continue } - var followptr bool - tm := ifacelookdot(im.Sym, t, &followptr, false) + tm, followptr := ifacelookdot(im.Sym, t, false) if tm == nil || tm.Nointerface() || !eqtype(tm.Type, im.Type) { if tm == nil { - tm = ifacelookdot(im.Sym, t, &followptr, true) + tm, followptr = ifacelookdot(im.Sym, t, true) } *m = im *samename = tm @@ -2029,6 +2037,10 @@ func checknil(x *Node, init *Nodes) { // Can this type be stored directly in an interface word? // Yes, if the representation is a single pointer. 
func isdirectiface(t *types.Type) bool { + if t.Broke() { + return false + } + switch t.Etype { case TPTR32, TPTR64, diff --git a/src/cmd/compile/internal/gc/swt.go b/src/cmd/compile/internal/gc/swt.go index 1b76650a7f9..8d425506d34 100644 --- a/src/cmd/compile/internal/gc/swt.go +++ b/src/cmd/compile/internal/gc/swt.go @@ -257,7 +257,7 @@ func (s *exprSwitch) walk(sw *Node) { var cas []*Node if s.kind == switchKindTrue || s.kind == switchKindFalse { s.exprname = nodbool(s.kind == switchKindTrue) - } else if consttype(cond) >= 0 { + } else if consttype(cond) > 0 { // leave constants to enable dead code elimination (issue 9608) s.exprname = cond } else { @@ -273,21 +273,15 @@ func (s *exprSwitch) walk(sw *Node) { // handle the cases in order for len(cc) > 0 { - // deal with expressions one at a time - if !okforcmp[t.Etype] || !cc[0].isconst { - a := s.walkCases(cc[:1]) - cas = append(cas, a) - cc = cc[1:] - continue + run := 1 + if okforcmp[t.Etype] && cc[0].isconst { + // do binary search on runs of constants + for ; run < len(cc) && cc[run].isconst; run++ { + } + // sort and compile constants + sort.Sort(caseClauseByConstVal(cc[:run])) } - // do binary search on runs of constants - var run int - for run = 1; run < len(cc) && cc[run].isconst; run++ { - } - - // sort and compile constants - sort.Sort(caseClauseByConstVal(cc[:run])) a := s.walkCases(cc[:run]) cas = append(cas, a) cc = cc[run:] @@ -380,7 +374,7 @@ func casebody(sw *Node, typeswvar *Node) { var def *Node // defaults br := nod(OBREAK, nil, nil) - for i, n := range sw.List.Slice() { + for _, n := range sw.List.Slice() { setlineno(n) if n.Op != OXCASE { Fatalf("casebody %v", n.Op) @@ -393,7 +387,7 @@ func casebody(sw *Node, typeswvar *Node) { case 0: // default if def != nil { - yyerror("more than one default case") + yyerrorl(n.Pos, "more than one default case") } // reuse original default case n.Right = jmp @@ -474,21 +468,7 @@ func casebody(sw *Node, typeswvar *Node) { fallIndex-- } last := 
stat[fallIndex] - - // botch - shouldn't fall through declaration - if last.Xoffset == n.Xoffset && last.Op == OXFALL { - if typeswvar != nil { - setlineno(last) - yyerror("cannot fallthrough in type switch") - } - - if i+1 >= sw.List.Len() { - setlineno(last) - yyerror("cannot fallthrough final case in switch") - } - - last.Op = OFALL - } else { + if last.Op != OFALL { stat = append(stat, br) } } @@ -588,7 +568,7 @@ Outer: if !ok { // First entry for this hash. nn = append(nn, c.node) - seen[c.hash] = nn[len(nn)-1 : len(nn):len(nn)] + seen[c.hash] = nn[len(nn)-1 : len(nn) : len(nn)] continue } for _, n := range prev { @@ -621,7 +601,7 @@ func checkDupExprCases(exprname *Node, clauses []*Node) { // case GOARCH == "arm" && GOARM == "5": // case GOARCH == "arm": // which would both evaluate to false for non-ARM compiles. - if ct := consttype(n); ct < 0 || ct == CTBOOL { + if ct := consttype(n); ct == 0 || ct == CTBOOL { continue } @@ -646,7 +626,7 @@ func checkDupExprCases(exprname *Node, clauses []*Node) { seen := make(map[typeVal]*Node) for _, ncase := range clauses { for _, n := range ncase.List.Slice() { - if ct := consttype(n); ct < 0 || ct == CTBOOL { + if ct := consttype(n); ct == 0 || ct == CTBOOL { continue } tv := typeVal{ @@ -687,14 +667,13 @@ func (s *typeSwitch) walk(sw *Node) { return } if cond.Right == nil { - setlineno(sw) - yyerror("type switch must have an assignment") + yyerrorl(sw.Pos, "type switch must have an assignment") return } cond.Right = walkexpr(cond.Right, &sw.Ninit) if !cond.Right.Type.IsInterface() { - yyerror("type switch must be on an interface") + yyerrorl(sw.Pos, "type switch must be on an interface") return } @@ -757,7 +736,7 @@ func (s *typeSwitch) walk(sw *Node) { if cond.Right.Type.IsEmptyInterface() { h.Xoffset = int64(2 * Widthptr) // offset of hash in runtime._type } else { - h.Xoffset = int64(3 * Widthptr) // offset of hash in runtime.itab + h.Xoffset = int64(2 * Widthptr) // offset of hash in runtime.itab } 
h.SetBounded(true) // guaranteed not to fault a = nod(OAS, s.hashname, h) diff --git a/src/cmd/compile/internal/gc/syntax.go b/src/cmd/compile/internal/gc/syntax.go index 0fd146bca26..5044ea0fe27 100644 --- a/src/cmd/compile/internal/gc/syntax.go +++ b/src/cmd/compile/internal/gc/syntax.go @@ -7,6 +7,7 @@ package gc import ( + "cmd/compile/internal/ssa" "cmd/compile/internal/syntax" "cmd/compile/internal/types" "cmd/internal/obj" @@ -44,7 +45,6 @@ type Node struct { // - ONAME nodes that refer to local variables use it to identify their stack frame position. // - ODOT, ODOTPTR, and OINDREGSP use it to indicate offset relative to their base address. // - OSTRUCTKEY uses it to store the named field's offset. - // - OXCASE and OXFALL use it to validate the use of fallthrough. // - Named OLITERALs use it to to store their ambient iota value. // Possibly still more uses. If you find any, document them. Xoffset int64 @@ -85,19 +85,20 @@ const ( _, nodeAssigned // is the variable ever assigned to _, nodeAddrtaken // address taken, even if not moved to heap _, nodeImplicit - _, nodeIsddd // is the argument variadic - _, nodeLocal // type created in this file (see also Type.Local) - _, nodeDiag // already printed error about this - _, nodeColas // OAS resulting from := - _, nodeNonNil // guaranteed to be non-nil - _, nodeNoescape // func arguments do not escape; TODO(rsc): move Noescape to Func struct (see CL 7360) - _, nodeBounded // bounds check unnecessary - _, nodeAddable // addressable - _, nodeHasCall // expression contains a function call - _, nodeLikely // if statement condition likely - _, nodeHasVal // node.E contains a Val - _, nodeHasOpt // node.E contains an Opt - _, nodeEmbedded // ODCLFIELD embedded type + _, nodeIsddd // is the argument variadic + _, nodeDiag // already printed error about this + _, nodeColas // OAS resulting from := + _, nodeNonNil // guaranteed to be non-nil + _, nodeNoescape // func arguments do not escape; TODO(rsc): move Noescape to 
Func struct (see CL 7360) + _, nodeBounded // bounds check unnecessary + _, nodeAddable // addressable + _, nodeHasCall // expression contains a function call + _, nodeLikely // if statement condition likely + _, nodeHasVal // node.E contains a Val + _, nodeHasOpt // node.E contains an Opt + _, nodeEmbedded // ODCLFIELD embedded type + _, nodeInlFormal // OPAUTO created by inliner, derived from callee formal + _, nodeInlLocal // OPAUTO created by inliner, derived from callee local ) func (n *Node) Class() Class { return Class(n.flags.get3(nodeClass)) } @@ -113,7 +114,6 @@ func (n *Node) Assigned() bool { return n.flags&nodeAssigned != 0 } func (n *Node) Addrtaken() bool { return n.flags&nodeAddrtaken != 0 } func (n *Node) Implicit() bool { return n.flags&nodeImplicit != 0 } func (n *Node) Isddd() bool { return n.flags&nodeIsddd != 0 } -func (n *Node) Local() bool { return n.flags&nodeLocal != 0 } func (n *Node) Diag() bool { return n.flags&nodeDiag != 0 } func (n *Node) Colas() bool { return n.flags&nodeColas != 0 } func (n *Node) NonNil() bool { return n.flags&nodeNonNil != 0 } @@ -125,6 +125,8 @@ func (n *Node) Likely() bool { return n.flags&nodeLikely != 0 } func (n *Node) HasVal() bool { return n.flags&nodeHasVal != 0 } func (n *Node) HasOpt() bool { return n.flags&nodeHasOpt != 0 } func (n *Node) Embedded() bool { return n.flags&nodeEmbedded != 0 } +func (n *Node) InlFormal() bool { return n.flags&nodeInlFormal != 0 } +func (n *Node) InlLocal() bool { return n.flags&nodeInlLocal != 0 } func (n *Node) SetClass(b Class) { n.flags.set3(nodeClass, uint8(b)) } func (n *Node) SetWalkdef(b uint8) { n.flags.set2(nodeWalkdef, b) } @@ -139,7 +141,6 @@ func (n *Node) SetAssigned(b bool) { n.flags.set(nodeAssigned, b) } func (n *Node) SetAddrtaken(b bool) { n.flags.set(nodeAddrtaken, b) } func (n *Node) SetImplicit(b bool) { n.flags.set(nodeImplicit, b) } func (n *Node) SetIsddd(b bool) { n.flags.set(nodeIsddd, b) } -func (n *Node) SetLocal(b bool) { 
n.flags.set(nodeLocal, b) } func (n *Node) SetDiag(b bool) { n.flags.set(nodeDiag, b) } func (n *Node) SetColas(b bool) { n.flags.set(nodeColas, b) } func (n *Node) SetNonNil(b bool) { n.flags.set(nodeNonNil, b) } @@ -151,6 +152,8 @@ func (n *Node) SetLikely(b bool) { n.flags.set(nodeLikely, b) } func (n *Node) SetHasVal(b bool) { n.flags.set(nodeHasVal, b) } func (n *Node) SetHasOpt(b bool) { n.flags.set(nodeHasOpt, b) } func (n *Node) SetEmbedded(b bool) { n.flags.set(nodeEmbedded, b) } +func (n *Node) SetInlFormal(b bool) { n.flags.set(nodeInlFormal, b) } +func (n *Node) SetInlLocal(b bool) { n.flags.set(nodeInlLocal, b) } // Val returns the Val for the node. func (n *Node) Val() Val { @@ -212,6 +215,11 @@ func (n *Node) mayBeShared() bool { return false } +// isMethodExpression reports whether n represents a method expression T.M. +func (n *Node) isMethodExpression() bool { + return n.Op == ONAME && n.Left != nil && n.Left.Op == OTYPE && n.Right != nil && n.Right.Op == ONAME +} + // funcname returns the name of the function n. func (n *Node) funcname() string { if n == nil || n.Func == nil || n.Func.Nname == nil { @@ -349,6 +357,48 @@ type Param struct { Alias bool // node is alias for Ntype (only used when type-checking ODCLTYPE) } +// Functions +// +// A simple function declaration is represented as an ODCLFUNC node f +// and an ONAME node n. They're linked to one another through +// f.Func.Nname == n and n.Name.Defn == f. When functions are +// referenced by name in an expression, the function's ONAME node is +// used directly. +// +// Function names have n.Class() == PFUNC. This distinguishes them +// from variables of function type. +// +// Confusingly, n.Func and f.Func both exist, but commonly point to +// different Funcs. (Exception: an OCALLPART's Func does point to its +// ODCLFUNC's Func.) 
+// +// A method declaration is represented like functions, except n.Sym +// will be the qualified method name (e.g., "T.m") and +// f.Func.Shortname is the bare method name (e.g., "m"). +// +// Method expressions are represented as ONAME/PFUNC nodes like +// function names, but their Left and Right fields still point to the +// type and method, respectively. They can be distinguished from +// normal functions with isMethodExpression. Also, unlike function +// name nodes, method expression nodes exist for each method +// expression. The declaration ONAME can be accessed with +// x.Type.Nname(), where x is the method expression ONAME node. +// +// Method values are represented by ODOTMETH/ODOTINTER when called +// immediately, and OCALLPART otherwise. They are like method +// expressions, except that for ODOTMETH/ODOTINTER the method name is +// stored in Sym instead of Right. +// +// Closures are represented by OCLOSURE node c. They link back and +// forth with the ODCLFUNC via Func.Closure; that is, c.Func.Closure +// == f and f.Func.Closure == c. +// +// Function bodies are stored in f.Nbody, and inline function bodies +// are stored in n.Func.Inl. Pragmas are stored in f.Func.Pragma. +// +// Imported functions skip the ODCLFUNC, so n.Name.Defn is nil. They +// also use Dcl instead of Inldcl. + // Func holds Node fields used only with function-like nodes. 
type Func struct { Shortname *types.Sym @@ -369,6 +419,7 @@ type Func struct { Closgen int Outerfunc *Node // outer function (for closure) FieldTrack map[*types.Sym]struct{} + DebugInfo *ssa.FuncDebug Ntype *Node // signature Top int // top context (Ecall, Eproc, etc) Closure *Node // OCLOSURE <-> ODCLFUNC @@ -382,11 +433,16 @@ type Func struct { Label int32 // largest auto-generated label in this function Endlineno src.XPos - WBPos src.XPos // position of first write barrier + WBPos src.XPos // position of first write barrier; see SetWBPos Pragma syntax.Pragma // go:xxx function annotations - flags bitset8 + flags bitset16 + + // nwbrCalls records the LSyms of functions called by this + // function for go:nowritebarrierrec analysis. Only filled in + // if nowritebarrierrecCheck != nil. + nwbrCalls *[]nowritebarrierrecCallSym } // A Mark represents a scope boundary. @@ -408,34 +464,51 @@ const ( funcNeedctxt // function uses context register (has closure variables) funcReflectMethod // function calls reflect.Type.Method or MethodByName funcIsHiddenClosure - funcNoFramePointer // Must not use a frame pointer for this function - funcHasDefer // contains a defer statement - funcNilCheckDisabled // disable nil checks when compiling this function + funcNoFramePointer // Must not use a frame pointer for this function + funcHasDefer // contains a defer statement + funcNilCheckDisabled // disable nil checks when compiling this function + funcInlinabilityChecked // inliner has already determined whether the function is inlinable + funcExportInline // include inline body in export data ) -func (f *Func) Dupok() bool { return f.flags&funcDupok != 0 } -func (f *Func) Wrapper() bool { return f.flags&funcWrapper != 0 } -func (f *Func) Needctxt() bool { return f.flags&funcNeedctxt != 0 } -func (f *Func) ReflectMethod() bool { return f.flags&funcReflectMethod != 0 } -func (f *Func) IsHiddenClosure() bool { return f.flags&funcIsHiddenClosure != 0 } -func (f *Func) NoFramePointer() 
bool { return f.flags&funcNoFramePointer != 0 } -func (f *Func) HasDefer() bool { return f.flags&funcHasDefer != 0 } -func (f *Func) NilCheckDisabled() bool { return f.flags&funcNilCheckDisabled != 0 } +func (f *Func) Dupok() bool { return f.flags&funcDupok != 0 } +func (f *Func) Wrapper() bool { return f.flags&funcWrapper != 0 } +func (f *Func) Needctxt() bool { return f.flags&funcNeedctxt != 0 } +func (f *Func) ReflectMethod() bool { return f.flags&funcReflectMethod != 0 } +func (f *Func) IsHiddenClosure() bool { return f.flags&funcIsHiddenClosure != 0 } +func (f *Func) NoFramePointer() bool { return f.flags&funcNoFramePointer != 0 } +func (f *Func) HasDefer() bool { return f.flags&funcHasDefer != 0 } +func (f *Func) NilCheckDisabled() bool { return f.flags&funcNilCheckDisabled != 0 } +func (f *Func) InlinabilityChecked() bool { return f.flags&funcInlinabilityChecked != 0 } +func (f *Func) ExportInline() bool { return f.flags&funcExportInline != 0 } -func (f *Func) SetDupok(b bool) { f.flags.set(funcDupok, b) } -func (f *Func) SetWrapper(b bool) { f.flags.set(funcWrapper, b) } -func (f *Func) SetNeedctxt(b bool) { f.flags.set(funcNeedctxt, b) } -func (f *Func) SetReflectMethod(b bool) { f.flags.set(funcReflectMethod, b) } -func (f *Func) SetIsHiddenClosure(b bool) { f.flags.set(funcIsHiddenClosure, b) } -func (f *Func) SetNoFramePointer(b bool) { f.flags.set(funcNoFramePointer, b) } -func (f *Func) SetHasDefer(b bool) { f.flags.set(funcHasDefer, b) } -func (f *Func) SetNilCheckDisabled(b bool) { f.flags.set(funcNilCheckDisabled, b) } +func (f *Func) SetDupok(b bool) { f.flags.set(funcDupok, b) } +func (f *Func) SetWrapper(b bool) { f.flags.set(funcWrapper, b) } +func (f *Func) SetNeedctxt(b bool) { f.flags.set(funcNeedctxt, b) } +func (f *Func) SetReflectMethod(b bool) { f.flags.set(funcReflectMethod, b) } +func (f *Func) SetIsHiddenClosure(b bool) { f.flags.set(funcIsHiddenClosure, b) } +func (f *Func) SetNoFramePointer(b bool) { f.flags.set(funcNoFramePointer, 
b) } +func (f *Func) SetHasDefer(b bool) { f.flags.set(funcHasDefer, b) } +func (f *Func) SetNilCheckDisabled(b bool) { f.flags.set(funcNilCheckDisabled, b) } +func (f *Func) SetInlinabilityChecked(b bool) { f.flags.set(funcInlinabilityChecked, b) } +func (f *Func) SetExportInline(b bool) { f.flags.set(funcExportInline, b) } + +func (f *Func) setWBPos(pos src.XPos) { + if Debug_wb != 0 { + Warnl(pos, "write barrier") + } + if !f.WBPos.IsKnown() { + f.WBPos = pos + } +} + +//go:generate stringer -type=Op -trimprefix=O type Op uint8 // Node ops. const ( - OXXX = Op(iota) + OXXX Op = iota // names ONAME // var, const or func name @@ -562,8 +635,7 @@ const ( OCONTINUE // continue ODEFER // defer Left (Left must be call) OEMPTY // no-op (empty statement) - OFALL // fallthrough (after processing) - OXFALL // fallthrough (before processing) + OFALL // fallthrough OFOR // for Ninit; Left; Right { Nbody } OFORUNTIL // for Ninit; Left; Right { Nbody } ; test applied after executing body, not before OGOTO // goto Left @@ -574,7 +646,7 @@ const ( ORETURN // return List OSELECT // select { List } (List is list of OXCASE or OCASE) OSWITCH // switch Ninit; Left { List } (List is a list of OXCASE or OCASE) - OTYPESW // List = Left.(type) (appears as .Left of OSWITCH) + OTYPESW // Left = Right.(type) (appears as .Left of OSWITCH) // types OTCHAN // chan int @@ -744,3 +816,69 @@ func (n *Nodes) AppendNodes(n2 *Nodes) { } n2.slice = nil } + +// inspect invokes f on each node in an AST in depth-first order. +// If f(n) returns false, inspect skips visiting n's children. +func inspect(n *Node, f func(*Node) bool) { + if n == nil || !f(n) { + return + } + inspectList(n.Ninit, f) + inspect(n.Left, f) + inspect(n.Right, f) + inspectList(n.List, f) + inspectList(n.Nbody, f) + inspectList(n.Rlist, f) +} + +func inspectList(l Nodes, f func(*Node) bool) { + for _, n := range l.Slice() { + inspect(n, f) + } +} + +// nodeQueue is a FIFO queue of *Node. 
The zero value of nodeQueue is +// a ready-to-use empty queue. +type nodeQueue struct { + ring []*Node + head, tail int +} + +// empty returns true if q contains no Nodes. +func (q *nodeQueue) empty() bool { + return q.head == q.tail +} + +// pushRight appends n to the right of the queue. +func (q *nodeQueue) pushRight(n *Node) { + if len(q.ring) == 0 { + q.ring = make([]*Node, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]*Node, len(q.ring)*2) + // Copy the old elements. + part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = n + q.tail++ +} + +// popLeft pops a node from the left of the queue. It panics if q is +// empty. +func (q *nodeQueue) popLeft() *Node { + if q.empty() { + panic("dequeue empty") + } + n := q.ring[q.head%len(q.ring)] + q.head++ + return n +} diff --git a/src/cmd/compile/internal/gc/testdata/arithConst.go b/src/cmd/compile/internal/gc/testdata/arithConst.go index ef42359c4b6..ef565bff481 100644 --- a/src/cmd/compile/internal/gc/testdata/arithConst.go +++ b/src/cmd/compile/internal/gc/testdata/arithConst.go @@ -345,6 +345,156 @@ func mod_18446744073709551615_uint64_ssa(a uint64) uint64 { return 18446744073709551615 % a } +//go:noinline +func and_uint64_0_ssa(a uint64) uint64 { + return a & 0 +} + +//go:noinline +func and_0_uint64_ssa(a uint64) uint64 { + return 0 & a +} + +//go:noinline +func and_uint64_1_ssa(a uint64) uint64 { + return a & 1 +} + +//go:noinline +func and_1_uint64_ssa(a uint64) uint64 { + return 1 & a +} + +//go:noinline +func and_uint64_4294967296_ssa(a uint64) uint64 { + return a & 4294967296 +} + +//go:noinline +func and_4294967296_uint64_ssa(a uint64) uint64 { + return 4294967296 & a +} + +//go:noinline +func 
and_uint64_9223372036854775808_ssa(a uint64) uint64 { + return a & 9223372036854775808 +} + +//go:noinline +func and_9223372036854775808_uint64_ssa(a uint64) uint64 { + return 9223372036854775808 & a +} + +//go:noinline +func and_uint64_18446744073709551615_ssa(a uint64) uint64 { + return a & 18446744073709551615 +} + +//go:noinline +func and_18446744073709551615_uint64_ssa(a uint64) uint64 { + return 18446744073709551615 & a +} + +//go:noinline +func or_uint64_0_ssa(a uint64) uint64 { + return a | 0 +} + +//go:noinline +func or_0_uint64_ssa(a uint64) uint64 { + return 0 | a +} + +//go:noinline +func or_uint64_1_ssa(a uint64) uint64 { + return a | 1 +} + +//go:noinline +func or_1_uint64_ssa(a uint64) uint64 { + return 1 | a +} + +//go:noinline +func or_uint64_4294967296_ssa(a uint64) uint64 { + return a | 4294967296 +} + +//go:noinline +func or_4294967296_uint64_ssa(a uint64) uint64 { + return 4294967296 | a +} + +//go:noinline +func or_uint64_9223372036854775808_ssa(a uint64) uint64 { + return a | 9223372036854775808 +} + +//go:noinline +func or_9223372036854775808_uint64_ssa(a uint64) uint64 { + return 9223372036854775808 | a +} + +//go:noinline +func or_uint64_18446744073709551615_ssa(a uint64) uint64 { + return a | 18446744073709551615 +} + +//go:noinline +func or_18446744073709551615_uint64_ssa(a uint64) uint64 { + return 18446744073709551615 | a +} + +//go:noinline +func xor_uint64_0_ssa(a uint64) uint64 { + return a ^ 0 +} + +//go:noinline +func xor_0_uint64_ssa(a uint64) uint64 { + return 0 ^ a +} + +//go:noinline +func xor_uint64_1_ssa(a uint64) uint64 { + return a ^ 1 +} + +//go:noinline +func xor_1_uint64_ssa(a uint64) uint64 { + return 1 ^ a +} + +//go:noinline +func xor_uint64_4294967296_ssa(a uint64) uint64 { + return a ^ 4294967296 +} + +//go:noinline +func xor_4294967296_uint64_ssa(a uint64) uint64 { + return 4294967296 ^ a +} + +//go:noinline +func xor_uint64_9223372036854775808_ssa(a uint64) uint64 { + return a ^ 9223372036854775808 +} + 
+//go:noinline +func xor_9223372036854775808_uint64_ssa(a uint64) uint64 { + return 9223372036854775808 ^ a +} + +//go:noinline +func xor_uint64_18446744073709551615_ssa(a uint64) uint64 { + return a ^ 18446744073709551615 +} + +//go:noinline +func xor_18446744073709551615_uint64_ssa(a uint64) uint64 { + return 18446744073709551615 ^ a +} + //go:noinline func add_int64_Neg9223372036854775808_ssa(a int64) int64 { return a + -9223372036854775808 @@ -785,6 +935,276 @@ func mod_9223372036854775807_int64_ssa(a int64) int64 { return 9223372036854775807 % a } +//go:noinline +func and_int64_Neg9223372036854775808_ssa(a int64) int64 { + return a & -9223372036854775808 +} + +//go:noinline +func and_Neg9223372036854775808_int64_ssa(a int64) int64 { + return -9223372036854775808 & a +} + +//go:noinline +func and_int64_Neg9223372036854775807_ssa(a int64) int64 { + return a & -9223372036854775807 +} + +//go:noinline +func and_Neg9223372036854775807_int64_ssa(a int64) int64 { + return -9223372036854775807 & a +} + +//go:noinline +func and_int64_Neg4294967296_ssa(a int64) int64 { + return a & -4294967296 +} + +//go:noinline +func and_Neg4294967296_int64_ssa(a int64) int64 { + return -4294967296 & a +} + +//go:noinline +func and_int64_Neg1_ssa(a int64) int64 { + return a & -1 +} + +//go:noinline +func and_Neg1_int64_ssa(a int64) int64 { + return -1 & a +} + +//go:noinline +func and_int64_0_ssa(a int64) int64 { + return a & 0 +} + +//go:noinline +func and_0_int64_ssa(a int64) int64 { + return 0 & a +} + +//go:noinline +func and_int64_1_ssa(a int64) int64 { + return a & 1 +} + +//go:noinline +func and_1_int64_ssa(a int64) int64 { + return 1 & a +} + +//go:noinline +func and_int64_4294967296_ssa(a int64) int64 { + return a & 4294967296 +} + +//go:noinline +func and_4294967296_int64_ssa(a int64) int64 { + return 4294967296 & a +} + +//go:noinline +func and_int64_9223372036854775806_ssa(a int64) int64 { + return a & 9223372036854775806 +} + +//go:noinline +func 
and_9223372036854775806_int64_ssa(a int64) int64 { + return 9223372036854775806 & a +} + +//go:noinline +func and_int64_9223372036854775807_ssa(a int64) int64 { + return a & 9223372036854775807 +} + +//go:noinline +func and_9223372036854775807_int64_ssa(a int64) int64 { + return 9223372036854775807 & a +} + +//go:noinline +func or_int64_Neg9223372036854775808_ssa(a int64) int64 { + return a | -9223372036854775808 +} + +//go:noinline +func or_Neg9223372036854775808_int64_ssa(a int64) int64 { + return -9223372036854775808 | a +} + +//go:noinline +func or_int64_Neg9223372036854775807_ssa(a int64) int64 { + return a | -9223372036854775807 +} + +//go:noinline +func or_Neg9223372036854775807_int64_ssa(a int64) int64 { + return -9223372036854775807 | a +} + +//go:noinline +func or_int64_Neg4294967296_ssa(a int64) int64 { + return a | -4294967296 +} + +//go:noinline +func or_Neg4294967296_int64_ssa(a int64) int64 { + return -4294967296 | a +} + +//go:noinline +func or_int64_Neg1_ssa(a int64) int64 { + return a | -1 +} + +//go:noinline +func or_Neg1_int64_ssa(a int64) int64 { + return -1 | a +} + +//go:noinline +func or_int64_0_ssa(a int64) int64 { + return a | 0 +} + +//go:noinline +func or_0_int64_ssa(a int64) int64 { + return 0 | a +} + +//go:noinline +func or_int64_1_ssa(a int64) int64 { + return a | 1 +} + +//go:noinline +func or_1_int64_ssa(a int64) int64 { + return 1 | a +} + +//go:noinline +func or_int64_4294967296_ssa(a int64) int64 { + return a | 4294967296 +} + +//go:noinline +func or_4294967296_int64_ssa(a int64) int64 { + return 4294967296 | a +} + +//go:noinline +func or_int64_9223372036854775806_ssa(a int64) int64 { + return a | 9223372036854775806 +} + +//go:noinline +func or_9223372036854775806_int64_ssa(a int64) int64 { + return 9223372036854775806 | a +} + +//go:noinline +func or_int64_9223372036854775807_ssa(a int64) int64 { + return a | 9223372036854775807 +} + +//go:noinline +func or_9223372036854775807_int64_ssa(a int64) int64 { + return 
9223372036854775807 | a +} + +//go:noinline +func xor_int64_Neg9223372036854775808_ssa(a int64) int64 { + return a ^ -9223372036854775808 +} + +//go:noinline +func xor_Neg9223372036854775808_int64_ssa(a int64) int64 { + return -9223372036854775808 ^ a +} + +//go:noinline +func xor_int64_Neg9223372036854775807_ssa(a int64) int64 { + return a ^ -9223372036854775807 +} + +//go:noinline +func xor_Neg9223372036854775807_int64_ssa(a int64) int64 { + return -9223372036854775807 ^ a +} + +//go:noinline +func xor_int64_Neg4294967296_ssa(a int64) int64 { + return a ^ -4294967296 +} + +//go:noinline +func xor_Neg4294967296_int64_ssa(a int64) int64 { + return -4294967296 ^ a +} + +//go:noinline +func xor_int64_Neg1_ssa(a int64) int64 { + return a ^ -1 +} + +//go:noinline +func xor_Neg1_int64_ssa(a int64) int64 { + return -1 ^ a +} + +//go:noinline +func xor_int64_0_ssa(a int64) int64 { + return a ^ 0 +} + +//go:noinline +func xor_0_int64_ssa(a int64) int64 { + return 0 ^ a +} + +//go:noinline +func xor_int64_1_ssa(a int64) int64 { + return a ^ 1 +} + +//go:noinline +func xor_1_int64_ssa(a int64) int64 { + return 1 ^ a +} + +//go:noinline +func xor_int64_4294967296_ssa(a int64) int64 { + return a ^ 4294967296 +} + +//go:noinline +func xor_4294967296_int64_ssa(a int64) int64 { + return 4294967296 ^ a +} + +//go:noinline +func xor_int64_9223372036854775806_ssa(a int64) int64 { + return a ^ 9223372036854775806 +} + +//go:noinline +func xor_9223372036854775806_int64_ssa(a int64) int64 { + return 9223372036854775806 ^ a +} + +//go:noinline +func xor_int64_9223372036854775807_ssa(a int64) int64 { + return a ^ 9223372036854775807 +} + +//go:noinline +func xor_9223372036854775807_int64_ssa(a int64) int64 { + return 9223372036854775807 ^ a +} + //go:noinline func add_uint32_0_ssa(a uint32) uint32 { return a + 0 @@ -985,6 +1405,96 @@ func mod_4294967295_uint32_ssa(a uint32) uint32 { return 4294967295 % a } +//go:noinline +func and_uint32_0_ssa(a uint32) uint32 { + return a & 0 +} + 
+//go:noinline +func and_0_uint32_ssa(a uint32) uint32 { + return 0 & a +} + +//go:noinline +func and_uint32_1_ssa(a uint32) uint32 { + return a & 1 +} + +//go:noinline +func and_1_uint32_ssa(a uint32) uint32 { + return 1 & a +} + +//go:noinline +func and_uint32_4294967295_ssa(a uint32) uint32 { + return a & 4294967295 +} + +//go:noinline +func and_4294967295_uint32_ssa(a uint32) uint32 { + return 4294967295 & a +} + +//go:noinline +func or_uint32_0_ssa(a uint32) uint32 { + return a | 0 +} + +//go:noinline +func or_0_uint32_ssa(a uint32) uint32 { + return 0 | a +} + +//go:noinline +func or_uint32_1_ssa(a uint32) uint32 { + return a | 1 +} + +//go:noinline +func or_1_uint32_ssa(a uint32) uint32 { + return 1 | a +} + +//go:noinline +func or_uint32_4294967295_ssa(a uint32) uint32 { + return a | 4294967295 +} + +//go:noinline +func or_4294967295_uint32_ssa(a uint32) uint32 { + return 4294967295 | a +} + +//go:noinline +func xor_uint32_0_ssa(a uint32) uint32 { + return a ^ 0 +} + +//go:noinline +func xor_0_uint32_ssa(a uint32) uint32 { + return 0 ^ a +} + +//go:noinline +func xor_uint32_1_ssa(a uint32) uint32 { + return a ^ 1 +} + +//go:noinline +func xor_1_uint32_ssa(a uint32) uint32 { + return 1 ^ a +} + +//go:noinline +func xor_uint32_4294967295_ssa(a uint32) uint32 { + return a ^ 4294967295 +} + +//go:noinline +func xor_4294967295_uint32_ssa(a uint32) uint32 { + return 4294967295 ^ a +} + //go:noinline func add_int32_Neg2147483648_ssa(a int32) int32 { return a + -2147483648 @@ -1275,6 +1785,186 @@ func mod_2147483647_int32_ssa(a int32) int32 { return 2147483647 % a } +//go:noinline +func and_int32_Neg2147483648_ssa(a int32) int32 { + return a & -2147483648 +} + +//go:noinline +func and_Neg2147483648_int32_ssa(a int32) int32 { + return -2147483648 & a +} + +//go:noinline +func and_int32_Neg2147483647_ssa(a int32) int32 { + return a & -2147483647 +} + +//go:noinline +func and_Neg2147483647_int32_ssa(a int32) int32 { + return -2147483647 & a +} + +//go:noinline +func 
and_int32_Neg1_ssa(a int32) int32 { + return a & -1 +} + +//go:noinline +func and_Neg1_int32_ssa(a int32) int32 { + return -1 & a +} + +//go:noinline +func and_int32_0_ssa(a int32) int32 { + return a & 0 +} + +//go:noinline +func and_0_int32_ssa(a int32) int32 { + return 0 & a +} + +//go:noinline +func and_int32_1_ssa(a int32) int32 { + return a & 1 +} + +//go:noinline +func and_1_int32_ssa(a int32) int32 { + return 1 & a +} + +//go:noinline +func and_int32_2147483647_ssa(a int32) int32 { + return a & 2147483647 +} + +//go:noinline +func and_2147483647_int32_ssa(a int32) int32 { + return 2147483647 & a +} + +//go:noinline +func or_int32_Neg2147483648_ssa(a int32) int32 { + return a | -2147483648 +} + +//go:noinline +func or_Neg2147483648_int32_ssa(a int32) int32 { + return -2147483648 | a +} + +//go:noinline +func or_int32_Neg2147483647_ssa(a int32) int32 { + return a | -2147483647 +} + +//go:noinline +func or_Neg2147483647_int32_ssa(a int32) int32 { + return -2147483647 | a +} + +//go:noinline +func or_int32_Neg1_ssa(a int32) int32 { + return a | -1 +} + +//go:noinline +func or_Neg1_int32_ssa(a int32) int32 { + return -1 | a +} + +//go:noinline +func or_int32_0_ssa(a int32) int32 { + return a | 0 +} + +//go:noinline +func or_0_int32_ssa(a int32) int32 { + return 0 | a +} + +//go:noinline +func or_int32_1_ssa(a int32) int32 { + return a | 1 +} + +//go:noinline +func or_1_int32_ssa(a int32) int32 { + return 1 | a +} + +//go:noinline +func or_int32_2147483647_ssa(a int32) int32 { + return a | 2147483647 +} + +//go:noinline +func or_2147483647_int32_ssa(a int32) int32 { + return 2147483647 | a +} + +//go:noinline +func xor_int32_Neg2147483648_ssa(a int32) int32 { + return a ^ -2147483648 +} + +//go:noinline +func xor_Neg2147483648_int32_ssa(a int32) int32 { + return -2147483648 ^ a +} + +//go:noinline +func xor_int32_Neg2147483647_ssa(a int32) int32 { + return a ^ -2147483647 +} + +//go:noinline +func xor_Neg2147483647_int32_ssa(a int32) int32 { + return -2147483647 ^ 
a +} + +//go:noinline +func xor_int32_Neg1_ssa(a int32) int32 { + return a ^ -1 +} + +//go:noinline +func xor_Neg1_int32_ssa(a int32) int32 { + return -1 ^ a +} + +//go:noinline +func xor_int32_0_ssa(a int32) int32 { + return a ^ 0 +} + +//go:noinline +func xor_0_int32_ssa(a int32) int32 { + return 0 ^ a +} + +//go:noinline +func xor_int32_1_ssa(a int32) int32 { + return a ^ 1 +} + +//go:noinline +func xor_1_int32_ssa(a int32) int32 { + return 1 ^ a +} + +//go:noinline +func xor_int32_2147483647_ssa(a int32) int32 { + return a ^ 2147483647 +} + +//go:noinline +func xor_2147483647_int32_ssa(a int32) int32 { + return 2147483647 ^ a +} + //go:noinline func add_uint16_0_ssa(a uint16) uint16 { return a + 0 @@ -1475,6 +2165,96 @@ func mod_65535_uint16_ssa(a uint16) uint16 { return 65535 % a } +//go:noinline +func and_uint16_0_ssa(a uint16) uint16 { + return a & 0 +} + +//go:noinline +func and_0_uint16_ssa(a uint16) uint16 { + return 0 & a +} + +//go:noinline +func and_uint16_1_ssa(a uint16) uint16 { + return a & 1 +} + +//go:noinline +func and_1_uint16_ssa(a uint16) uint16 { + return 1 & a +} + +//go:noinline +func and_uint16_65535_ssa(a uint16) uint16 { + return a & 65535 +} + +//go:noinline +func and_65535_uint16_ssa(a uint16) uint16 { + return 65535 & a +} + +//go:noinline +func or_uint16_0_ssa(a uint16) uint16 { + return a | 0 +} + +//go:noinline +func or_0_uint16_ssa(a uint16) uint16 { + return 0 | a +} + +//go:noinline +func or_uint16_1_ssa(a uint16) uint16 { + return a | 1 +} + +//go:noinline +func or_1_uint16_ssa(a uint16) uint16 { + return 1 | a +} + +//go:noinline +func or_uint16_65535_ssa(a uint16) uint16 { + return a | 65535 +} + +//go:noinline +func or_65535_uint16_ssa(a uint16) uint16 { + return 65535 | a +} + +//go:noinline +func xor_uint16_0_ssa(a uint16) uint16 { + return a ^ 0 +} + +//go:noinline +func xor_0_uint16_ssa(a uint16) uint16 { + return 0 ^ a +} + +//go:noinline +func xor_uint16_1_ssa(a uint16) uint16 { + return a ^ 1 +} + +//go:noinline +func 
xor_1_uint16_ssa(a uint16) uint16 { + return 1 ^ a +} + +//go:noinline +func xor_uint16_65535_ssa(a uint16) uint16 { + return a ^ 65535 +} + +//go:noinline +func xor_65535_uint16_ssa(a uint16) uint16 { + return 65535 ^ a +} + //go:noinline func add_int16_Neg32768_ssa(a int16) int16 { return a + -32768 @@ -1815,6 +2595,216 @@ func mod_32767_int16_ssa(a int16) int16 { return 32767 % a } +//go:noinline +func and_int16_Neg32768_ssa(a int16) int16 { + return a & -32768 +} + +//go:noinline +func and_Neg32768_int16_ssa(a int16) int16 { + return -32768 & a +} + +//go:noinline +func and_int16_Neg32767_ssa(a int16) int16 { + return a & -32767 +} + +//go:noinline +func and_Neg32767_int16_ssa(a int16) int16 { + return -32767 & a +} + +//go:noinline +func and_int16_Neg1_ssa(a int16) int16 { + return a & -1 +} + +//go:noinline +func and_Neg1_int16_ssa(a int16) int16 { + return -1 & a +} + +//go:noinline +func and_int16_0_ssa(a int16) int16 { + return a & 0 +} + +//go:noinline +func and_0_int16_ssa(a int16) int16 { + return 0 & a +} + +//go:noinline +func and_int16_1_ssa(a int16) int16 { + return a & 1 +} + +//go:noinline +func and_1_int16_ssa(a int16) int16 { + return 1 & a +} + +//go:noinline +func and_int16_32766_ssa(a int16) int16 { + return a & 32766 +} + +//go:noinline +func and_32766_int16_ssa(a int16) int16 { + return 32766 & a +} + +//go:noinline +func and_int16_32767_ssa(a int16) int16 { + return a & 32767 +} + +//go:noinline +func and_32767_int16_ssa(a int16) int16 { + return 32767 & a +} + +//go:noinline +func or_int16_Neg32768_ssa(a int16) int16 { + return a | -32768 +} + +//go:noinline +func or_Neg32768_int16_ssa(a int16) int16 { + return -32768 | a +} + +//go:noinline +func or_int16_Neg32767_ssa(a int16) int16 { + return a | -32767 +} + +//go:noinline +func or_Neg32767_int16_ssa(a int16) int16 { + return -32767 | a +} + +//go:noinline +func or_int16_Neg1_ssa(a int16) int16 { + return a | -1 +} + +//go:noinline +func or_Neg1_int16_ssa(a int16) int16 { + return -1 | 
a +} + +//go:noinline +func or_int16_0_ssa(a int16) int16 { + return a | 0 +} + +//go:noinline +func or_0_int16_ssa(a int16) int16 { + return 0 | a +} + +//go:noinline +func or_int16_1_ssa(a int16) int16 { + return a | 1 +} + +//go:noinline +func or_1_int16_ssa(a int16) int16 { + return 1 | a +} + +//go:noinline +func or_int16_32766_ssa(a int16) int16 { + return a | 32766 +} + +//go:noinline +func or_32766_int16_ssa(a int16) int16 { + return 32766 | a +} + +//go:noinline +func or_int16_32767_ssa(a int16) int16 { + return a | 32767 +} + +//go:noinline +func or_32767_int16_ssa(a int16) int16 { + return 32767 | a +} + +//go:noinline +func xor_int16_Neg32768_ssa(a int16) int16 { + return a ^ -32768 +} + +//go:noinline +func xor_Neg32768_int16_ssa(a int16) int16 { + return -32768 ^ a +} + +//go:noinline +func xor_int16_Neg32767_ssa(a int16) int16 { + return a ^ -32767 +} + +//go:noinline +func xor_Neg32767_int16_ssa(a int16) int16 { + return -32767 ^ a +} + +//go:noinline +func xor_int16_Neg1_ssa(a int16) int16 { + return a ^ -1 +} + +//go:noinline +func xor_Neg1_int16_ssa(a int16) int16 { + return -1 ^ a +} + +//go:noinline +func xor_int16_0_ssa(a int16) int16 { + return a ^ 0 +} + +//go:noinline +func xor_0_int16_ssa(a int16) int16 { + return 0 ^ a +} + +//go:noinline +func xor_int16_1_ssa(a int16) int16 { + return a ^ 1 +} + +//go:noinline +func xor_1_int16_ssa(a int16) int16 { + return 1 ^ a +} + +//go:noinline +func xor_int16_32766_ssa(a int16) int16 { + return a ^ 32766 +} + +//go:noinline +func xor_32766_int16_ssa(a int16) int16 { + return 32766 ^ a +} + +//go:noinline +func xor_int16_32767_ssa(a int16) int16 { + return a ^ 32767 +} + +//go:noinline +func xor_32767_int16_ssa(a int16) int16 { + return 32767 ^ a +} + //go:noinline func add_uint8_0_ssa(a uint8) uint8 { return a + 0 @@ -2015,6 +3005,96 @@ func mod_255_uint8_ssa(a uint8) uint8 { return 255 % a } +//go:noinline +func and_uint8_0_ssa(a uint8) uint8 { + return a & 0 +} + +//go:noinline +func 
and_0_uint8_ssa(a uint8) uint8 { + return 0 & a +} + +//go:noinline +func and_uint8_1_ssa(a uint8) uint8 { + return a & 1 +} + +//go:noinline +func and_1_uint8_ssa(a uint8) uint8 { + return 1 & a +} + +//go:noinline +func and_uint8_255_ssa(a uint8) uint8 { + return a & 255 +} + +//go:noinline +func and_255_uint8_ssa(a uint8) uint8 { + return 255 & a +} + +//go:noinline +func or_uint8_0_ssa(a uint8) uint8 { + return a | 0 +} + +//go:noinline +func or_0_uint8_ssa(a uint8) uint8 { + return 0 | a +} + +//go:noinline +func or_uint8_1_ssa(a uint8) uint8 { + return a | 1 +} + +//go:noinline +func or_1_uint8_ssa(a uint8) uint8 { + return 1 | a +} + +//go:noinline +func or_uint8_255_ssa(a uint8) uint8 { + return a | 255 +} + +//go:noinline +func or_255_uint8_ssa(a uint8) uint8 { + return 255 | a +} + +//go:noinline +func xor_uint8_0_ssa(a uint8) uint8 { + return a ^ 0 +} + +//go:noinline +func xor_0_uint8_ssa(a uint8) uint8 { + return 0 ^ a +} + +//go:noinline +func xor_uint8_1_ssa(a uint8) uint8 { + return a ^ 1 +} + +//go:noinline +func xor_1_uint8_ssa(a uint8) uint8 { + return 1 ^ a +} + +//go:noinline +func xor_uint8_255_ssa(a uint8) uint8 { + return a ^ 255 +} + +//go:noinline +func xor_255_uint8_ssa(a uint8) uint8 { + return 255 ^ a +} + //go:noinline func add_int8_Neg128_ssa(a int8) int8 { return a + -128 @@ -2355,6 +3435,216 @@ func mod_127_int8_ssa(a int8) int8 { return 127 % a } +//go:noinline +func and_int8_Neg128_ssa(a int8) int8 { + return a & -128 +} + +//go:noinline +func and_Neg128_int8_ssa(a int8) int8 { + return -128 & a +} + +//go:noinline +func and_int8_Neg127_ssa(a int8) int8 { + return a & -127 +} + +//go:noinline +func and_Neg127_int8_ssa(a int8) int8 { + return -127 & a +} + +//go:noinline +func and_int8_Neg1_ssa(a int8) int8 { + return a & -1 +} + +//go:noinline +func and_Neg1_int8_ssa(a int8) int8 { + return -1 & a +} + +//go:noinline +func and_int8_0_ssa(a int8) int8 { + return a & 0 +} + +//go:noinline +func and_0_int8_ssa(a int8) int8 { + return 
0 & a +} + +//go:noinline +func and_int8_1_ssa(a int8) int8 { + return a & 1 +} + +//go:noinline +func and_1_int8_ssa(a int8) int8 { + return 1 & a +} + +//go:noinline +func and_int8_126_ssa(a int8) int8 { + return a & 126 +} + +//go:noinline +func and_126_int8_ssa(a int8) int8 { + return 126 & a +} + +//go:noinline +func and_int8_127_ssa(a int8) int8 { + return a & 127 +} + +//go:noinline +func and_127_int8_ssa(a int8) int8 { + return 127 & a +} + +//go:noinline +func or_int8_Neg128_ssa(a int8) int8 { + return a | -128 +} + +//go:noinline +func or_Neg128_int8_ssa(a int8) int8 { + return -128 | a +} + +//go:noinline +func or_int8_Neg127_ssa(a int8) int8 { + return a | -127 +} + +//go:noinline +func or_Neg127_int8_ssa(a int8) int8 { + return -127 | a +} + +//go:noinline +func or_int8_Neg1_ssa(a int8) int8 { + return a | -1 +} + +//go:noinline +func or_Neg1_int8_ssa(a int8) int8 { + return -1 | a +} + +//go:noinline +func or_int8_0_ssa(a int8) int8 { + return a | 0 +} + +//go:noinline +func or_0_int8_ssa(a int8) int8 { + return 0 | a +} + +//go:noinline +func or_int8_1_ssa(a int8) int8 { + return a | 1 +} + +//go:noinline +func or_1_int8_ssa(a int8) int8 { + return 1 | a +} + +//go:noinline +func or_int8_126_ssa(a int8) int8 { + return a | 126 +} + +//go:noinline +func or_126_int8_ssa(a int8) int8 { + return 126 | a +} + +//go:noinline +func or_int8_127_ssa(a int8) int8 { + return a | 127 +} + +//go:noinline +func or_127_int8_ssa(a int8) int8 { + return 127 | a +} + +//go:noinline +func xor_int8_Neg128_ssa(a int8) int8 { + return a ^ -128 +} + +//go:noinline +func xor_Neg128_int8_ssa(a int8) int8 { + return -128 ^ a +} + +//go:noinline +func xor_int8_Neg127_ssa(a int8) int8 { + return a ^ -127 +} + +//go:noinline +func xor_Neg127_int8_ssa(a int8) int8 { + return -127 ^ a +} + +//go:noinline +func xor_int8_Neg1_ssa(a int8) int8 { + return a ^ -1 +} + +//go:noinline +func xor_Neg1_int8_ssa(a int8) int8 { + return -1 ^ a +} + +//go:noinline +func xor_int8_0_ssa(a int8) 
int8 { + return a ^ 0 +} + +//go:noinline +func xor_0_int8_ssa(a int8) int8 { + return 0 ^ a +} + +//go:noinline +func xor_int8_1_ssa(a int8) int8 { + return a ^ 1 +} + +//go:noinline +func xor_1_int8_ssa(a int8) int8 { + return 1 ^ a +} + +//go:noinline +func xor_int8_126_ssa(a int8) int8 { + return a ^ 126 +} + +//go:noinline +func xor_126_int8_ssa(a int8) int8 { + return 126 ^ a +} + +//go:noinline +func xor_int8_127_ssa(a int8) int8 { + return a ^ 127 +} + +//go:noinline +func xor_127_int8_ssa(a int8) int8 { + return 127 ^ a +} + var failed bool func main() { @@ -4009,6 +5299,756 @@ func main() { failed = true } + if got := and_0_uint64_ssa(0); got != 0 { + fmt.Printf("and_uint64 0%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint64_0_ssa(0); got != 0 { + fmt.Printf("and_uint64 0%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_uint64_ssa(1); got != 0 { + fmt.Printf("and_uint64 0%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint64_0_ssa(1); got != 0 { + fmt.Printf("and_uint64 1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_uint64_ssa(4294967296); got != 0 { + fmt.Printf("and_uint64 0%s4294967296 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint64_0_ssa(4294967296); got != 0 { + fmt.Printf("and_uint64 4294967296%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_uint64_ssa(9223372036854775808); got != 0 { + fmt.Printf("and_uint64 0%s9223372036854775808 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint64_0_ssa(9223372036854775808); got != 0 { + fmt.Printf("and_uint64 9223372036854775808%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_uint64_ssa(18446744073709551615); got != 0 { + fmt.Printf("and_uint64 0%s18446744073709551615 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint64_0_ssa(18446744073709551615); got != 0 { + fmt.Printf("and_uint64 
18446744073709551615%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_uint64_ssa(0); got != 0 { + fmt.Printf("and_uint64 1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint64_1_ssa(0); got != 0 { + fmt.Printf("and_uint64 0%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_uint64_ssa(1); got != 1 { + fmt.Printf("and_uint64 1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_uint64_1_ssa(1); got != 1 { + fmt.Printf("and_uint64 1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_1_uint64_ssa(4294967296); got != 0 { + fmt.Printf("and_uint64 1%s4294967296 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint64_1_ssa(4294967296); got != 0 { + fmt.Printf("and_uint64 4294967296%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_uint64_ssa(9223372036854775808); got != 0 { + fmt.Printf("and_uint64 1%s9223372036854775808 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint64_1_ssa(9223372036854775808); got != 0 { + fmt.Printf("and_uint64 9223372036854775808%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_uint64_ssa(18446744073709551615); got != 1 { + fmt.Printf("and_uint64 1%s18446744073709551615 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_uint64_1_ssa(18446744073709551615); got != 1 { + fmt.Printf("and_uint64 18446744073709551615%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_4294967296_uint64_ssa(0); got != 0 { + fmt.Printf("and_uint64 4294967296%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint64_4294967296_ssa(0); got != 0 { + fmt.Printf("and_uint64 0%s4294967296 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_4294967296_uint64_ssa(1); got != 0 { + fmt.Printf("and_uint64 4294967296%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint64_4294967296_ssa(1); got != 0 { + 
fmt.Printf("and_uint64 1%s4294967296 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_4294967296_uint64_ssa(4294967296); got != 4294967296 { + fmt.Printf("and_uint64 4294967296%s4294967296 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_uint64_4294967296_ssa(4294967296); got != 4294967296 { + fmt.Printf("and_uint64 4294967296%s4294967296 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_4294967296_uint64_ssa(9223372036854775808); got != 0 { + fmt.Printf("and_uint64 4294967296%s9223372036854775808 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint64_4294967296_ssa(9223372036854775808); got != 0 { + fmt.Printf("and_uint64 9223372036854775808%s4294967296 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_4294967296_uint64_ssa(18446744073709551615); got != 4294967296 { + fmt.Printf("and_uint64 4294967296%s18446744073709551615 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_uint64_4294967296_ssa(18446744073709551615); got != 4294967296 { + fmt.Printf("and_uint64 18446744073709551615%s4294967296 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_9223372036854775808_uint64_ssa(0); got != 0 { + fmt.Printf("and_uint64 9223372036854775808%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint64_9223372036854775808_ssa(0); got != 0 { + fmt.Printf("and_uint64 0%s9223372036854775808 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_9223372036854775808_uint64_ssa(1); got != 0 { + fmt.Printf("and_uint64 9223372036854775808%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint64_9223372036854775808_ssa(1); got != 0 { + fmt.Printf("and_uint64 1%s9223372036854775808 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_9223372036854775808_uint64_ssa(4294967296); got != 0 { + fmt.Printf("and_uint64 9223372036854775808%s4294967296 = %d, wanted 0\n", `&`, 
got) + failed = true + } + + if got := and_uint64_9223372036854775808_ssa(4294967296); got != 0 { + fmt.Printf("and_uint64 4294967296%s9223372036854775808 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_9223372036854775808_uint64_ssa(9223372036854775808); got != 9223372036854775808 { + fmt.Printf("and_uint64 9223372036854775808%s9223372036854775808 = %d, wanted 9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_uint64_9223372036854775808_ssa(9223372036854775808); got != 9223372036854775808 { + fmt.Printf("and_uint64 9223372036854775808%s9223372036854775808 = %d, wanted 9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_9223372036854775808_uint64_ssa(18446744073709551615); got != 9223372036854775808 { + fmt.Printf("and_uint64 9223372036854775808%s18446744073709551615 = %d, wanted 9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_uint64_9223372036854775808_ssa(18446744073709551615); got != 9223372036854775808 { + fmt.Printf("and_uint64 18446744073709551615%s9223372036854775808 = %d, wanted 9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_18446744073709551615_uint64_ssa(0); got != 0 { + fmt.Printf("and_uint64 18446744073709551615%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint64_18446744073709551615_ssa(0); got != 0 { + fmt.Printf("and_uint64 0%s18446744073709551615 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_18446744073709551615_uint64_ssa(1); got != 1 { + fmt.Printf("and_uint64 18446744073709551615%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_uint64_18446744073709551615_ssa(1); got != 1 { + fmt.Printf("and_uint64 1%s18446744073709551615 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_18446744073709551615_uint64_ssa(4294967296); got != 4294967296 { + fmt.Printf("and_uint64 18446744073709551615%s4294967296 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if 
got := and_uint64_18446744073709551615_ssa(4294967296); got != 4294967296 { + fmt.Printf("and_uint64 4294967296%s18446744073709551615 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_18446744073709551615_uint64_ssa(9223372036854775808); got != 9223372036854775808 { + fmt.Printf("and_uint64 18446744073709551615%s9223372036854775808 = %d, wanted 9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_uint64_18446744073709551615_ssa(9223372036854775808); got != 9223372036854775808 { + fmt.Printf("and_uint64 9223372036854775808%s18446744073709551615 = %d, wanted 9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_18446744073709551615_uint64_ssa(18446744073709551615); got != 18446744073709551615 { + fmt.Printf("and_uint64 18446744073709551615%s18446744073709551615 = %d, wanted 18446744073709551615\n", `&`, got) + failed = true + } + + if got := and_uint64_18446744073709551615_ssa(18446744073709551615); got != 18446744073709551615 { + fmt.Printf("and_uint64 18446744073709551615%s18446744073709551615 = %d, wanted 18446744073709551615\n", `&`, got) + failed = true + } + + if got := or_0_uint64_ssa(0); got != 0 { + fmt.Printf("or_uint64 0%s0 = %d, wanted 0\n", `|`, got) + failed = true + } + + if got := or_uint64_0_ssa(0); got != 0 { + fmt.Printf("or_uint64 0%s0 = %d, wanted 0\n", `|`, got) + failed = true + } + + if got := or_0_uint64_ssa(1); got != 1 { + fmt.Printf("or_uint64 0%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_uint64_0_ssa(1); got != 1 { + fmt.Printf("or_uint64 1%s0 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_0_uint64_ssa(4294967296); got != 4294967296 { + fmt.Printf("or_uint64 0%s4294967296 = %d, wanted 4294967296\n", `|`, got) + failed = true + } + + if got := or_uint64_0_ssa(4294967296); got != 4294967296 { + fmt.Printf("or_uint64 4294967296%s0 = %d, wanted 4294967296\n", `|`, got) + failed = true + } + + if got := 
or_0_uint64_ssa(9223372036854775808); got != 9223372036854775808 { + fmt.Printf("or_uint64 0%s9223372036854775808 = %d, wanted 9223372036854775808\n", `|`, got) + failed = true + } + + if got := or_uint64_0_ssa(9223372036854775808); got != 9223372036854775808 { + fmt.Printf("or_uint64 9223372036854775808%s0 = %d, wanted 9223372036854775808\n", `|`, got) + failed = true + } + + if got := or_0_uint64_ssa(18446744073709551615); got != 18446744073709551615 { + fmt.Printf("or_uint64 0%s18446744073709551615 = %d, wanted 18446744073709551615\n", `|`, got) + failed = true + } + + if got := or_uint64_0_ssa(18446744073709551615); got != 18446744073709551615 { + fmt.Printf("or_uint64 18446744073709551615%s0 = %d, wanted 18446744073709551615\n", `|`, got) + failed = true + } + + if got := or_1_uint64_ssa(0); got != 1 { + fmt.Printf("or_uint64 1%s0 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_uint64_1_ssa(0); got != 1 { + fmt.Printf("or_uint64 0%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_1_uint64_ssa(1); got != 1 { + fmt.Printf("or_uint64 1%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_uint64_1_ssa(1); got != 1 { + fmt.Printf("or_uint64 1%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_1_uint64_ssa(4294967296); got != 4294967297 { + fmt.Printf("or_uint64 1%s4294967296 = %d, wanted 4294967297\n", `|`, got) + failed = true + } + + if got := or_uint64_1_ssa(4294967296); got != 4294967297 { + fmt.Printf("or_uint64 4294967296%s1 = %d, wanted 4294967297\n", `|`, got) + failed = true + } + + if got := or_1_uint64_ssa(9223372036854775808); got != 9223372036854775809 { + fmt.Printf("or_uint64 1%s9223372036854775808 = %d, wanted 9223372036854775809\n", `|`, got) + failed = true + } + + if got := or_uint64_1_ssa(9223372036854775808); got != 9223372036854775809 { + fmt.Printf("or_uint64 9223372036854775808%s1 = %d, wanted 9223372036854775809\n", `|`, got) + failed = true + } + + if got := 
or_1_uint64_ssa(18446744073709551615); got != 18446744073709551615 { + fmt.Printf("or_uint64 1%s18446744073709551615 = %d, wanted 18446744073709551615\n", `|`, got) + failed = true + } + + if got := or_uint64_1_ssa(18446744073709551615); got != 18446744073709551615 { + fmt.Printf("or_uint64 18446744073709551615%s1 = %d, wanted 18446744073709551615\n", `|`, got) + failed = true + } + + if got := or_4294967296_uint64_ssa(0); got != 4294967296 { + fmt.Printf("or_uint64 4294967296%s0 = %d, wanted 4294967296\n", `|`, got) + failed = true + } + + if got := or_uint64_4294967296_ssa(0); got != 4294967296 { + fmt.Printf("or_uint64 0%s4294967296 = %d, wanted 4294967296\n", `|`, got) + failed = true + } + + if got := or_4294967296_uint64_ssa(1); got != 4294967297 { + fmt.Printf("or_uint64 4294967296%s1 = %d, wanted 4294967297\n", `|`, got) + failed = true + } + + if got := or_uint64_4294967296_ssa(1); got != 4294967297 { + fmt.Printf("or_uint64 1%s4294967296 = %d, wanted 4294967297\n", `|`, got) + failed = true + } + + if got := or_4294967296_uint64_ssa(4294967296); got != 4294967296 { + fmt.Printf("or_uint64 4294967296%s4294967296 = %d, wanted 4294967296\n", `|`, got) + failed = true + } + + if got := or_uint64_4294967296_ssa(4294967296); got != 4294967296 { + fmt.Printf("or_uint64 4294967296%s4294967296 = %d, wanted 4294967296\n", `|`, got) + failed = true + } + + if got := or_4294967296_uint64_ssa(9223372036854775808); got != 9223372041149743104 { + fmt.Printf("or_uint64 4294967296%s9223372036854775808 = %d, wanted 9223372041149743104\n", `|`, got) + failed = true + } + + if got := or_uint64_4294967296_ssa(9223372036854775808); got != 9223372041149743104 { + fmt.Printf("or_uint64 9223372036854775808%s4294967296 = %d, wanted 9223372041149743104\n", `|`, got) + failed = true + } + + if got := or_4294967296_uint64_ssa(18446744073709551615); got != 18446744073709551615 { + fmt.Printf("or_uint64 4294967296%s18446744073709551615 = %d, wanted 18446744073709551615\n", `|`, got) + 
failed = true + } + + if got := or_uint64_4294967296_ssa(18446744073709551615); got != 18446744073709551615 { + fmt.Printf("or_uint64 18446744073709551615%s4294967296 = %d, wanted 18446744073709551615\n", `|`, got) + failed = true + } + + if got := or_9223372036854775808_uint64_ssa(0); got != 9223372036854775808 { + fmt.Printf("or_uint64 9223372036854775808%s0 = %d, wanted 9223372036854775808\n", `|`, got) + failed = true + } + + if got := or_uint64_9223372036854775808_ssa(0); got != 9223372036854775808 { + fmt.Printf("or_uint64 0%s9223372036854775808 = %d, wanted 9223372036854775808\n", `|`, got) + failed = true + } + + if got := or_9223372036854775808_uint64_ssa(1); got != 9223372036854775809 { + fmt.Printf("or_uint64 9223372036854775808%s1 = %d, wanted 9223372036854775809\n", `|`, got) + failed = true + } + + if got := or_uint64_9223372036854775808_ssa(1); got != 9223372036854775809 { + fmt.Printf("or_uint64 1%s9223372036854775808 = %d, wanted 9223372036854775809\n", `|`, got) + failed = true + } + + if got := or_9223372036854775808_uint64_ssa(4294967296); got != 9223372041149743104 { + fmt.Printf("or_uint64 9223372036854775808%s4294967296 = %d, wanted 9223372041149743104\n", `|`, got) + failed = true + } + + if got := or_uint64_9223372036854775808_ssa(4294967296); got != 9223372041149743104 { + fmt.Printf("or_uint64 4294967296%s9223372036854775808 = %d, wanted 9223372041149743104\n", `|`, got) + failed = true + } + + if got := or_9223372036854775808_uint64_ssa(9223372036854775808); got != 9223372036854775808 { + fmt.Printf("or_uint64 9223372036854775808%s9223372036854775808 = %d, wanted 9223372036854775808\n", `|`, got) + failed = true + } + + if got := or_uint64_9223372036854775808_ssa(9223372036854775808); got != 9223372036854775808 { + fmt.Printf("or_uint64 9223372036854775808%s9223372036854775808 = %d, wanted 9223372036854775808\n", `|`, got) + failed = true + } + + if got := or_9223372036854775808_uint64_ssa(18446744073709551615); got != 
18446744073709551615 { + fmt.Printf("or_uint64 9223372036854775808%s18446744073709551615 = %d, wanted 18446744073709551615\n", `|`, got) + failed = true + } + + if got := or_uint64_9223372036854775808_ssa(18446744073709551615); got != 18446744073709551615 { + fmt.Printf("or_uint64 18446744073709551615%s9223372036854775808 = %d, wanted 18446744073709551615\n", `|`, got) + failed = true + } + + if got := or_18446744073709551615_uint64_ssa(0); got != 18446744073709551615 { + fmt.Printf("or_uint64 18446744073709551615%s0 = %d, wanted 18446744073709551615\n", `|`, got) + failed = true + } + + if got := or_uint64_18446744073709551615_ssa(0); got != 18446744073709551615 { + fmt.Printf("or_uint64 0%s18446744073709551615 = %d, wanted 18446744073709551615\n", `|`, got) + failed = true + } + + if got := or_18446744073709551615_uint64_ssa(1); got != 18446744073709551615 { + fmt.Printf("or_uint64 18446744073709551615%s1 = %d, wanted 18446744073709551615\n", `|`, got) + failed = true + } + + if got := or_uint64_18446744073709551615_ssa(1); got != 18446744073709551615 { + fmt.Printf("or_uint64 1%s18446744073709551615 = %d, wanted 18446744073709551615\n", `|`, got) + failed = true + } + + if got := or_18446744073709551615_uint64_ssa(4294967296); got != 18446744073709551615 { + fmt.Printf("or_uint64 18446744073709551615%s4294967296 = %d, wanted 18446744073709551615\n", `|`, got) + failed = true + } + + if got := or_uint64_18446744073709551615_ssa(4294967296); got != 18446744073709551615 { + fmt.Printf("or_uint64 4294967296%s18446744073709551615 = %d, wanted 18446744073709551615\n", `|`, got) + failed = true + } + + if got := or_18446744073709551615_uint64_ssa(9223372036854775808); got != 18446744073709551615 { + fmt.Printf("or_uint64 18446744073709551615%s9223372036854775808 = %d, wanted 18446744073709551615\n", `|`, got) + failed = true + } + + if got := or_uint64_18446744073709551615_ssa(9223372036854775808); got != 18446744073709551615 { + fmt.Printf("or_uint64 
9223372036854775808%s18446744073709551615 = %d, wanted 18446744073709551615\n", `|`, got) + failed = true + } + + if got := or_18446744073709551615_uint64_ssa(18446744073709551615); got != 18446744073709551615 { + fmt.Printf("or_uint64 18446744073709551615%s18446744073709551615 = %d, wanted 18446744073709551615\n", `|`, got) + failed = true + } + + if got := or_uint64_18446744073709551615_ssa(18446744073709551615); got != 18446744073709551615 { + fmt.Printf("or_uint64 18446744073709551615%s18446744073709551615 = %d, wanted 18446744073709551615\n", `|`, got) + failed = true + } + + if got := xor_0_uint64_ssa(0); got != 0 { + fmt.Printf("xor_uint64 0%s0 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_uint64_0_ssa(0); got != 0 { + fmt.Printf("xor_uint64 0%s0 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_0_uint64_ssa(1); got != 1 { + fmt.Printf("xor_uint64 0%s1 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_uint64_0_ssa(1); got != 1 { + fmt.Printf("xor_uint64 1%s0 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_0_uint64_ssa(4294967296); got != 4294967296 { + fmt.Printf("xor_uint64 0%s4294967296 = %d, wanted 4294967296\n", `^`, got) + failed = true + } + + if got := xor_uint64_0_ssa(4294967296); got != 4294967296 { + fmt.Printf("xor_uint64 4294967296%s0 = %d, wanted 4294967296\n", `^`, got) + failed = true + } + + if got := xor_0_uint64_ssa(9223372036854775808); got != 9223372036854775808 { + fmt.Printf("xor_uint64 0%s9223372036854775808 = %d, wanted 9223372036854775808\n", `^`, got) + failed = true + } + + if got := xor_uint64_0_ssa(9223372036854775808); got != 9223372036854775808 { + fmt.Printf("xor_uint64 9223372036854775808%s0 = %d, wanted 9223372036854775808\n", `^`, got) + failed = true + } + + if got := xor_0_uint64_ssa(18446744073709551615); got != 18446744073709551615 { + fmt.Printf("xor_uint64 0%s18446744073709551615 = %d, wanted 18446744073709551615\n", `^`, got) + failed = true + 
} + + if got := xor_uint64_0_ssa(18446744073709551615); got != 18446744073709551615 { + fmt.Printf("xor_uint64 18446744073709551615%s0 = %d, wanted 18446744073709551615\n", `^`, got) + failed = true + } + + if got := xor_1_uint64_ssa(0); got != 1 { + fmt.Printf("xor_uint64 1%s0 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_uint64_1_ssa(0); got != 1 { + fmt.Printf("xor_uint64 0%s1 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_1_uint64_ssa(1); got != 0 { + fmt.Printf("xor_uint64 1%s1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_uint64_1_ssa(1); got != 0 { + fmt.Printf("xor_uint64 1%s1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_1_uint64_ssa(4294967296); got != 4294967297 { + fmt.Printf("xor_uint64 1%s4294967296 = %d, wanted 4294967297\n", `^`, got) + failed = true + } + + if got := xor_uint64_1_ssa(4294967296); got != 4294967297 { + fmt.Printf("xor_uint64 4294967296%s1 = %d, wanted 4294967297\n", `^`, got) + failed = true + } + + if got := xor_1_uint64_ssa(9223372036854775808); got != 9223372036854775809 { + fmt.Printf("xor_uint64 1%s9223372036854775808 = %d, wanted 9223372036854775809\n", `^`, got) + failed = true + } + + if got := xor_uint64_1_ssa(9223372036854775808); got != 9223372036854775809 { + fmt.Printf("xor_uint64 9223372036854775808%s1 = %d, wanted 9223372036854775809\n", `^`, got) + failed = true + } + + if got := xor_1_uint64_ssa(18446744073709551615); got != 18446744073709551614 { + fmt.Printf("xor_uint64 1%s18446744073709551615 = %d, wanted 18446744073709551614\n", `^`, got) + failed = true + } + + if got := xor_uint64_1_ssa(18446744073709551615); got != 18446744073709551614 { + fmt.Printf("xor_uint64 18446744073709551615%s1 = %d, wanted 18446744073709551614\n", `^`, got) + failed = true + } + + if got := xor_4294967296_uint64_ssa(0); got != 4294967296 { + fmt.Printf("xor_uint64 4294967296%s0 = %d, wanted 4294967296\n", `^`, got) + failed = true + } + + if got := 
xor_uint64_4294967296_ssa(0); got != 4294967296 { + fmt.Printf("xor_uint64 0%s4294967296 = %d, wanted 4294967296\n", `^`, got) + failed = true + } + + if got := xor_4294967296_uint64_ssa(1); got != 4294967297 { + fmt.Printf("xor_uint64 4294967296%s1 = %d, wanted 4294967297\n", `^`, got) + failed = true + } + + if got := xor_uint64_4294967296_ssa(1); got != 4294967297 { + fmt.Printf("xor_uint64 1%s4294967296 = %d, wanted 4294967297\n", `^`, got) + failed = true + } + + if got := xor_4294967296_uint64_ssa(4294967296); got != 0 { + fmt.Printf("xor_uint64 4294967296%s4294967296 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_uint64_4294967296_ssa(4294967296); got != 0 { + fmt.Printf("xor_uint64 4294967296%s4294967296 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_4294967296_uint64_ssa(9223372036854775808); got != 9223372041149743104 { + fmt.Printf("xor_uint64 4294967296%s9223372036854775808 = %d, wanted 9223372041149743104\n", `^`, got) + failed = true + } + + if got := xor_uint64_4294967296_ssa(9223372036854775808); got != 9223372041149743104 { + fmt.Printf("xor_uint64 9223372036854775808%s4294967296 = %d, wanted 9223372041149743104\n", `^`, got) + failed = true + } + + if got := xor_4294967296_uint64_ssa(18446744073709551615); got != 18446744069414584319 { + fmt.Printf("xor_uint64 4294967296%s18446744073709551615 = %d, wanted 18446744069414584319\n", `^`, got) + failed = true + } + + if got := xor_uint64_4294967296_ssa(18446744073709551615); got != 18446744069414584319 { + fmt.Printf("xor_uint64 18446744073709551615%s4294967296 = %d, wanted 18446744069414584319\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775808_uint64_ssa(0); got != 9223372036854775808 { + fmt.Printf("xor_uint64 9223372036854775808%s0 = %d, wanted 9223372036854775808\n", `^`, got) + failed = true + } + + if got := xor_uint64_9223372036854775808_ssa(0); got != 9223372036854775808 { + fmt.Printf("xor_uint64 0%s9223372036854775808 = %d, wanted 
9223372036854775808\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775808_uint64_ssa(1); got != 9223372036854775809 { + fmt.Printf("xor_uint64 9223372036854775808%s1 = %d, wanted 9223372036854775809\n", `^`, got) + failed = true + } + + if got := xor_uint64_9223372036854775808_ssa(1); got != 9223372036854775809 { + fmt.Printf("xor_uint64 1%s9223372036854775808 = %d, wanted 9223372036854775809\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775808_uint64_ssa(4294967296); got != 9223372041149743104 { + fmt.Printf("xor_uint64 9223372036854775808%s4294967296 = %d, wanted 9223372041149743104\n", `^`, got) + failed = true + } + + if got := xor_uint64_9223372036854775808_ssa(4294967296); got != 9223372041149743104 { + fmt.Printf("xor_uint64 4294967296%s9223372036854775808 = %d, wanted 9223372041149743104\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775808_uint64_ssa(9223372036854775808); got != 0 { + fmt.Printf("xor_uint64 9223372036854775808%s9223372036854775808 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_uint64_9223372036854775808_ssa(9223372036854775808); got != 0 { + fmt.Printf("xor_uint64 9223372036854775808%s9223372036854775808 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775808_uint64_ssa(18446744073709551615); got != 9223372036854775807 { + fmt.Printf("xor_uint64 9223372036854775808%s18446744073709551615 = %d, wanted 9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_uint64_9223372036854775808_ssa(18446744073709551615); got != 9223372036854775807 { + fmt.Printf("xor_uint64 18446744073709551615%s9223372036854775808 = %d, wanted 9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_18446744073709551615_uint64_ssa(0); got != 18446744073709551615 { + fmt.Printf("xor_uint64 18446744073709551615%s0 = %d, wanted 18446744073709551615\n", `^`, got) + failed = true + } + + if got := xor_uint64_18446744073709551615_ssa(0); 
got != 18446744073709551615 { + fmt.Printf("xor_uint64 0%s18446744073709551615 = %d, wanted 18446744073709551615\n", `^`, got) + failed = true + } + + if got := xor_18446744073709551615_uint64_ssa(1); got != 18446744073709551614 { + fmt.Printf("xor_uint64 18446744073709551615%s1 = %d, wanted 18446744073709551614\n", `^`, got) + failed = true + } + + if got := xor_uint64_18446744073709551615_ssa(1); got != 18446744073709551614 { + fmt.Printf("xor_uint64 1%s18446744073709551615 = %d, wanted 18446744073709551614\n", `^`, got) + failed = true + } + + if got := xor_18446744073709551615_uint64_ssa(4294967296); got != 18446744069414584319 { + fmt.Printf("xor_uint64 18446744073709551615%s4294967296 = %d, wanted 18446744069414584319\n", `^`, got) + failed = true + } + + if got := xor_uint64_18446744073709551615_ssa(4294967296); got != 18446744069414584319 { + fmt.Printf("xor_uint64 4294967296%s18446744073709551615 = %d, wanted 18446744069414584319\n", `^`, got) + failed = true + } + + if got := xor_18446744073709551615_uint64_ssa(9223372036854775808); got != 9223372036854775807 { + fmt.Printf("xor_uint64 18446744073709551615%s9223372036854775808 = %d, wanted 9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_uint64_18446744073709551615_ssa(9223372036854775808); got != 9223372036854775807 { + fmt.Printf("xor_uint64 9223372036854775808%s18446744073709551615 = %d, wanted 9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_18446744073709551615_uint64_ssa(18446744073709551615); got != 0 { + fmt.Printf("xor_uint64 18446744073709551615%s18446744073709551615 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_uint64_18446744073709551615_ssa(18446744073709551615); got != 0 { + fmt.Printf("xor_uint64 18446744073709551615%s18446744073709551615 = %d, wanted 0\n", `^`, got) + failed = true + } + if got := add_Neg9223372036854775808_int64_ssa(-9223372036854775808); got != 0 { fmt.Printf("add_int64 
-9223372036854775808%s-9223372036854775808 = %d, wanted 0\n", `+`, got) failed = true @@ -7879,6 +9919,2436 @@ func main() { failed = true } + if got := and_Neg9223372036854775808_int64_ssa(-9223372036854775808); got != -9223372036854775808 { + fmt.Printf("and_int64 -9223372036854775808%s-9223372036854775808 = %d, wanted -9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_int64_Neg9223372036854775808_ssa(-9223372036854775808); got != -9223372036854775808 { + fmt.Printf("and_int64 -9223372036854775808%s-9223372036854775808 = %d, wanted -9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_Neg9223372036854775808_int64_ssa(-9223372036854775807); got != -9223372036854775808 { + fmt.Printf("and_int64 -9223372036854775808%s-9223372036854775807 = %d, wanted -9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_int64_Neg9223372036854775808_ssa(-9223372036854775807); got != -9223372036854775808 { + fmt.Printf("and_int64 -9223372036854775807%s-9223372036854775808 = %d, wanted -9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_Neg9223372036854775808_int64_ssa(-4294967296); got != -9223372036854775808 { + fmt.Printf("and_int64 -9223372036854775808%s-4294967296 = %d, wanted -9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_int64_Neg9223372036854775808_ssa(-4294967296); got != -9223372036854775808 { + fmt.Printf("and_int64 -4294967296%s-9223372036854775808 = %d, wanted -9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_Neg9223372036854775808_int64_ssa(-1); got != -9223372036854775808 { + fmt.Printf("and_int64 -9223372036854775808%s-1 = %d, wanted -9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_int64_Neg9223372036854775808_ssa(-1); got != -9223372036854775808 { + fmt.Printf("and_int64 -1%s-9223372036854775808 = %d, wanted -9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_Neg9223372036854775808_int64_ssa(0); 
got != 0 { + fmt.Printf("and_int64 -9223372036854775808%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_Neg9223372036854775808_ssa(0); got != 0 { + fmt.Printf("and_int64 0%s-9223372036854775808 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg9223372036854775808_int64_ssa(1); got != 0 { + fmt.Printf("and_int64 -9223372036854775808%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_Neg9223372036854775808_ssa(1); got != 0 { + fmt.Printf("and_int64 1%s-9223372036854775808 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg9223372036854775808_int64_ssa(4294967296); got != 0 { + fmt.Printf("and_int64 -9223372036854775808%s4294967296 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_Neg9223372036854775808_ssa(4294967296); got != 0 { + fmt.Printf("and_int64 4294967296%s-9223372036854775808 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg9223372036854775808_int64_ssa(9223372036854775806); got != 0 { + fmt.Printf("and_int64 -9223372036854775808%s9223372036854775806 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_Neg9223372036854775808_ssa(9223372036854775806); got != 0 { + fmt.Printf("and_int64 9223372036854775806%s-9223372036854775808 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg9223372036854775808_int64_ssa(9223372036854775807); got != 0 { + fmt.Printf("and_int64 -9223372036854775808%s9223372036854775807 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_Neg9223372036854775808_ssa(9223372036854775807); got != 0 { + fmt.Printf("and_int64 9223372036854775807%s-9223372036854775808 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg9223372036854775807_int64_ssa(-9223372036854775808); got != -9223372036854775808 { + fmt.Printf("and_int64 -9223372036854775807%s-9223372036854775808 = %d, wanted -9223372036854775808\n", `&`, got) + failed = true + } + + 
if got := and_int64_Neg9223372036854775807_ssa(-9223372036854775808); got != -9223372036854775808 { + fmt.Printf("and_int64 -9223372036854775808%s-9223372036854775807 = %d, wanted -9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_Neg9223372036854775807_int64_ssa(-9223372036854775807); got != -9223372036854775807 { + fmt.Printf("and_int64 -9223372036854775807%s-9223372036854775807 = %d, wanted -9223372036854775807\n", `&`, got) + failed = true + } + + if got := and_int64_Neg9223372036854775807_ssa(-9223372036854775807); got != -9223372036854775807 { + fmt.Printf("and_int64 -9223372036854775807%s-9223372036854775807 = %d, wanted -9223372036854775807\n", `&`, got) + failed = true + } + + if got := and_Neg9223372036854775807_int64_ssa(-4294967296); got != -9223372036854775808 { + fmt.Printf("and_int64 -9223372036854775807%s-4294967296 = %d, wanted -9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_int64_Neg9223372036854775807_ssa(-4294967296); got != -9223372036854775808 { + fmt.Printf("and_int64 -4294967296%s-9223372036854775807 = %d, wanted -9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_Neg9223372036854775807_int64_ssa(-1); got != -9223372036854775807 { + fmt.Printf("and_int64 -9223372036854775807%s-1 = %d, wanted -9223372036854775807\n", `&`, got) + failed = true + } + + if got := and_int64_Neg9223372036854775807_ssa(-1); got != -9223372036854775807 { + fmt.Printf("and_int64 -1%s-9223372036854775807 = %d, wanted -9223372036854775807\n", `&`, got) + failed = true + } + + if got := and_Neg9223372036854775807_int64_ssa(0); got != 0 { + fmt.Printf("and_int64 -9223372036854775807%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_Neg9223372036854775807_ssa(0); got != 0 { + fmt.Printf("and_int64 0%s-9223372036854775807 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg9223372036854775807_int64_ssa(1); got != 1 { + fmt.Printf("and_int64 
-9223372036854775807%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int64_Neg9223372036854775807_ssa(1); got != 1 { + fmt.Printf("and_int64 1%s-9223372036854775807 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_Neg9223372036854775807_int64_ssa(4294967296); got != 0 { + fmt.Printf("and_int64 -9223372036854775807%s4294967296 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_Neg9223372036854775807_ssa(4294967296); got != 0 { + fmt.Printf("and_int64 4294967296%s-9223372036854775807 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg9223372036854775807_int64_ssa(9223372036854775806); got != 0 { + fmt.Printf("and_int64 -9223372036854775807%s9223372036854775806 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_Neg9223372036854775807_ssa(9223372036854775806); got != 0 { + fmt.Printf("and_int64 9223372036854775806%s-9223372036854775807 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg9223372036854775807_int64_ssa(9223372036854775807); got != 1 { + fmt.Printf("and_int64 -9223372036854775807%s9223372036854775807 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int64_Neg9223372036854775807_ssa(9223372036854775807); got != 1 { + fmt.Printf("and_int64 9223372036854775807%s-9223372036854775807 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_Neg4294967296_int64_ssa(-9223372036854775808); got != -9223372036854775808 { + fmt.Printf("and_int64 -4294967296%s-9223372036854775808 = %d, wanted -9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_int64_Neg4294967296_ssa(-9223372036854775808); got != -9223372036854775808 { + fmt.Printf("and_int64 -9223372036854775808%s-4294967296 = %d, wanted -9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_Neg4294967296_int64_ssa(-9223372036854775807); got != -9223372036854775808 { + fmt.Printf("and_int64 -4294967296%s-9223372036854775807 = %d, 
wanted -9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_int64_Neg4294967296_ssa(-9223372036854775807); got != -9223372036854775808 { + fmt.Printf("and_int64 -9223372036854775807%s-4294967296 = %d, wanted -9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_Neg4294967296_int64_ssa(-4294967296); got != -4294967296 { + fmt.Printf("and_int64 -4294967296%s-4294967296 = %d, wanted -4294967296\n", `&`, got) + failed = true + } + + if got := and_int64_Neg4294967296_ssa(-4294967296); got != -4294967296 { + fmt.Printf("and_int64 -4294967296%s-4294967296 = %d, wanted -4294967296\n", `&`, got) + failed = true + } + + if got := and_Neg4294967296_int64_ssa(-1); got != -4294967296 { + fmt.Printf("and_int64 -4294967296%s-1 = %d, wanted -4294967296\n", `&`, got) + failed = true + } + + if got := and_int64_Neg4294967296_ssa(-1); got != -4294967296 { + fmt.Printf("and_int64 -1%s-4294967296 = %d, wanted -4294967296\n", `&`, got) + failed = true + } + + if got := and_Neg4294967296_int64_ssa(0); got != 0 { + fmt.Printf("and_int64 -4294967296%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_Neg4294967296_ssa(0); got != 0 { + fmt.Printf("and_int64 0%s-4294967296 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg4294967296_int64_ssa(1); got != 0 { + fmt.Printf("and_int64 -4294967296%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_Neg4294967296_ssa(1); got != 0 { + fmt.Printf("and_int64 1%s-4294967296 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg4294967296_int64_ssa(4294967296); got != 4294967296 { + fmt.Printf("and_int64 -4294967296%s4294967296 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_int64_Neg4294967296_ssa(4294967296); got != 4294967296 { + fmt.Printf("and_int64 4294967296%s-4294967296 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_Neg4294967296_int64_ssa(9223372036854775806); got != 
9223372032559808512 { + fmt.Printf("and_int64 -4294967296%s9223372036854775806 = %d, wanted 9223372032559808512\n", `&`, got) + failed = true + } + + if got := and_int64_Neg4294967296_ssa(9223372036854775806); got != 9223372032559808512 { + fmt.Printf("and_int64 9223372036854775806%s-4294967296 = %d, wanted 9223372032559808512\n", `&`, got) + failed = true + } + + if got := and_Neg4294967296_int64_ssa(9223372036854775807); got != 9223372032559808512 { + fmt.Printf("and_int64 -4294967296%s9223372036854775807 = %d, wanted 9223372032559808512\n", `&`, got) + failed = true + } + + if got := and_int64_Neg4294967296_ssa(9223372036854775807); got != 9223372032559808512 { + fmt.Printf("and_int64 9223372036854775807%s-4294967296 = %d, wanted 9223372032559808512\n", `&`, got) + failed = true + } + + if got := and_Neg1_int64_ssa(-9223372036854775808); got != -9223372036854775808 { + fmt.Printf("and_int64 -1%s-9223372036854775808 = %d, wanted -9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_int64_Neg1_ssa(-9223372036854775808); got != -9223372036854775808 { + fmt.Printf("and_int64 -9223372036854775808%s-1 = %d, wanted -9223372036854775808\n", `&`, got) + failed = true + } + + if got := and_Neg1_int64_ssa(-9223372036854775807); got != -9223372036854775807 { + fmt.Printf("and_int64 -1%s-9223372036854775807 = %d, wanted -9223372036854775807\n", `&`, got) + failed = true + } + + if got := and_int64_Neg1_ssa(-9223372036854775807); got != -9223372036854775807 { + fmt.Printf("and_int64 -9223372036854775807%s-1 = %d, wanted -9223372036854775807\n", `&`, got) + failed = true + } + + if got := and_Neg1_int64_ssa(-4294967296); got != -4294967296 { + fmt.Printf("and_int64 -1%s-4294967296 = %d, wanted -4294967296\n", `&`, got) + failed = true + } + + if got := and_int64_Neg1_ssa(-4294967296); got != -4294967296 { + fmt.Printf("and_int64 -4294967296%s-1 = %d, wanted -4294967296\n", `&`, got) + failed = true + } + + if got := and_Neg1_int64_ssa(-1); got != -1 { + 
fmt.Printf("and_int64 -1%s-1 = %d, wanted -1\n", `&`, got) + failed = true + } + + if got := and_int64_Neg1_ssa(-1); got != -1 { + fmt.Printf("and_int64 -1%s-1 = %d, wanted -1\n", `&`, got) + failed = true + } + + if got := and_Neg1_int64_ssa(0); got != 0 { + fmt.Printf("and_int64 -1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_Neg1_ssa(0); got != 0 { + fmt.Printf("and_int64 0%s-1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg1_int64_ssa(1); got != 1 { + fmt.Printf("and_int64 -1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int64_Neg1_ssa(1); got != 1 { + fmt.Printf("and_int64 1%s-1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_Neg1_int64_ssa(4294967296); got != 4294967296 { + fmt.Printf("and_int64 -1%s4294967296 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_int64_Neg1_ssa(4294967296); got != 4294967296 { + fmt.Printf("and_int64 4294967296%s-1 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_Neg1_int64_ssa(9223372036854775806); got != 9223372036854775806 { + fmt.Printf("and_int64 -1%s9223372036854775806 = %d, wanted 9223372036854775806\n", `&`, got) + failed = true + } + + if got := and_int64_Neg1_ssa(9223372036854775806); got != 9223372036854775806 { + fmt.Printf("and_int64 9223372036854775806%s-1 = %d, wanted 9223372036854775806\n", `&`, got) + failed = true + } + + if got := and_Neg1_int64_ssa(9223372036854775807); got != 9223372036854775807 { + fmt.Printf("and_int64 -1%s9223372036854775807 = %d, wanted 9223372036854775807\n", `&`, got) + failed = true + } + + if got := and_int64_Neg1_ssa(9223372036854775807); got != 9223372036854775807 { + fmt.Printf("and_int64 9223372036854775807%s-1 = %d, wanted 9223372036854775807\n", `&`, got) + failed = true + } + + if got := and_0_int64_ssa(-9223372036854775808); got != 0 { + fmt.Printf("and_int64 0%s-9223372036854775808 = %d, wanted 0\n", `&`, got) + failed = true + 
} + + if got := and_int64_0_ssa(-9223372036854775808); got != 0 { + fmt.Printf("and_int64 -9223372036854775808%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int64_ssa(-9223372036854775807); got != 0 { + fmt.Printf("and_int64 0%s-9223372036854775807 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_0_ssa(-9223372036854775807); got != 0 { + fmt.Printf("and_int64 -9223372036854775807%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int64_ssa(-4294967296); got != 0 { + fmt.Printf("and_int64 0%s-4294967296 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_0_ssa(-4294967296); got != 0 { + fmt.Printf("and_int64 -4294967296%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int64_ssa(-1); got != 0 { + fmt.Printf("and_int64 0%s-1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_0_ssa(-1); got != 0 { + fmt.Printf("and_int64 -1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int64_ssa(0); got != 0 { + fmt.Printf("and_int64 0%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_0_ssa(0); got != 0 { + fmt.Printf("and_int64 0%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int64_ssa(1); got != 0 { + fmt.Printf("and_int64 0%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_0_ssa(1); got != 0 { + fmt.Printf("and_int64 1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int64_ssa(4294967296); got != 0 { + fmt.Printf("and_int64 0%s4294967296 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_0_ssa(4294967296); got != 0 { + fmt.Printf("and_int64 4294967296%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int64_ssa(9223372036854775806); got != 0 { + fmt.Printf("and_int64 0%s9223372036854775806 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := 
and_int64_0_ssa(9223372036854775806); got != 0 { + fmt.Printf("and_int64 9223372036854775806%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int64_ssa(9223372036854775807); got != 0 { + fmt.Printf("and_int64 0%s9223372036854775807 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_0_ssa(9223372036854775807); got != 0 { + fmt.Printf("and_int64 9223372036854775807%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_int64_ssa(-9223372036854775808); got != 0 { + fmt.Printf("and_int64 1%s-9223372036854775808 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_1_ssa(-9223372036854775808); got != 0 { + fmt.Printf("and_int64 -9223372036854775808%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_int64_ssa(-9223372036854775807); got != 1 { + fmt.Printf("and_int64 1%s-9223372036854775807 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int64_1_ssa(-9223372036854775807); got != 1 { + fmt.Printf("and_int64 -9223372036854775807%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_1_int64_ssa(-4294967296); got != 0 { + fmt.Printf("and_int64 1%s-4294967296 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_1_ssa(-4294967296); got != 0 { + fmt.Printf("and_int64 -4294967296%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_int64_ssa(-1); got != 1 { + fmt.Printf("and_int64 1%s-1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int64_1_ssa(-1); got != 1 { + fmt.Printf("and_int64 -1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_1_int64_ssa(0); got != 0 { + fmt.Printf("and_int64 1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_1_ssa(0); got != 0 { + fmt.Printf("and_int64 0%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_int64_ssa(1); got != 1 { + fmt.Printf("and_int64 1%s1 = %d, wanted 1\n", `&`, got) + 
failed = true + } + + if got := and_int64_1_ssa(1); got != 1 { + fmt.Printf("and_int64 1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_1_int64_ssa(4294967296); got != 0 { + fmt.Printf("and_int64 1%s4294967296 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_1_ssa(4294967296); got != 0 { + fmt.Printf("and_int64 4294967296%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_int64_ssa(9223372036854775806); got != 0 { + fmt.Printf("and_int64 1%s9223372036854775806 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_1_ssa(9223372036854775806); got != 0 { + fmt.Printf("and_int64 9223372036854775806%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_int64_ssa(9223372036854775807); got != 1 { + fmt.Printf("and_int64 1%s9223372036854775807 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int64_1_ssa(9223372036854775807); got != 1 { + fmt.Printf("and_int64 9223372036854775807%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_4294967296_int64_ssa(-9223372036854775808); got != 0 { + fmt.Printf("and_int64 4294967296%s-9223372036854775808 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_4294967296_ssa(-9223372036854775808); got != 0 { + fmt.Printf("and_int64 -9223372036854775808%s4294967296 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_4294967296_int64_ssa(-9223372036854775807); got != 0 { + fmt.Printf("and_int64 4294967296%s-9223372036854775807 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_4294967296_ssa(-9223372036854775807); got != 0 { + fmt.Printf("and_int64 -9223372036854775807%s4294967296 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_4294967296_int64_ssa(-4294967296); got != 4294967296 { + fmt.Printf("and_int64 4294967296%s-4294967296 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := 
and_int64_4294967296_ssa(-4294967296); got != 4294967296 { + fmt.Printf("and_int64 -4294967296%s4294967296 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_4294967296_int64_ssa(-1); got != 4294967296 { + fmt.Printf("and_int64 4294967296%s-1 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_int64_4294967296_ssa(-1); got != 4294967296 { + fmt.Printf("and_int64 -1%s4294967296 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_4294967296_int64_ssa(0); got != 0 { + fmt.Printf("and_int64 4294967296%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_4294967296_ssa(0); got != 0 { + fmt.Printf("and_int64 0%s4294967296 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_4294967296_int64_ssa(1); got != 0 { + fmt.Printf("and_int64 4294967296%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_4294967296_ssa(1); got != 0 { + fmt.Printf("and_int64 1%s4294967296 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_4294967296_int64_ssa(4294967296); got != 4294967296 { + fmt.Printf("and_int64 4294967296%s4294967296 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_int64_4294967296_ssa(4294967296); got != 4294967296 { + fmt.Printf("and_int64 4294967296%s4294967296 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_4294967296_int64_ssa(9223372036854775806); got != 4294967296 { + fmt.Printf("and_int64 4294967296%s9223372036854775806 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_int64_4294967296_ssa(9223372036854775806); got != 4294967296 { + fmt.Printf("and_int64 9223372036854775806%s4294967296 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_4294967296_int64_ssa(9223372036854775807); got != 4294967296 { + fmt.Printf("and_int64 4294967296%s9223372036854775807 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if 
got := and_int64_4294967296_ssa(9223372036854775807); got != 4294967296 { + fmt.Printf("and_int64 9223372036854775807%s4294967296 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_9223372036854775806_int64_ssa(-9223372036854775808); got != 0 { + fmt.Printf("and_int64 9223372036854775806%s-9223372036854775808 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_9223372036854775806_ssa(-9223372036854775808); got != 0 { + fmt.Printf("and_int64 -9223372036854775808%s9223372036854775806 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_9223372036854775806_int64_ssa(-9223372036854775807); got != 0 { + fmt.Printf("and_int64 9223372036854775806%s-9223372036854775807 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_9223372036854775806_ssa(-9223372036854775807); got != 0 { + fmt.Printf("and_int64 -9223372036854775807%s9223372036854775806 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_9223372036854775806_int64_ssa(-4294967296); got != 9223372032559808512 { + fmt.Printf("and_int64 9223372036854775806%s-4294967296 = %d, wanted 9223372032559808512\n", `&`, got) + failed = true + } + + if got := and_int64_9223372036854775806_ssa(-4294967296); got != 9223372032559808512 { + fmt.Printf("and_int64 -4294967296%s9223372036854775806 = %d, wanted 9223372032559808512\n", `&`, got) + failed = true + } + + if got := and_9223372036854775806_int64_ssa(-1); got != 9223372036854775806 { + fmt.Printf("and_int64 9223372036854775806%s-1 = %d, wanted 9223372036854775806\n", `&`, got) + failed = true + } + + if got := and_int64_9223372036854775806_ssa(-1); got != 9223372036854775806 { + fmt.Printf("and_int64 -1%s9223372036854775806 = %d, wanted 9223372036854775806\n", `&`, got) + failed = true + } + + if got := and_9223372036854775806_int64_ssa(0); got != 0 { + fmt.Printf("and_int64 9223372036854775806%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := 
and_int64_9223372036854775806_ssa(0); got != 0 { + fmt.Printf("and_int64 0%s9223372036854775806 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_9223372036854775806_int64_ssa(1); got != 0 { + fmt.Printf("and_int64 9223372036854775806%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_9223372036854775806_ssa(1); got != 0 { + fmt.Printf("and_int64 1%s9223372036854775806 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_9223372036854775806_int64_ssa(4294967296); got != 4294967296 { + fmt.Printf("and_int64 9223372036854775806%s4294967296 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_int64_9223372036854775806_ssa(4294967296); got != 4294967296 { + fmt.Printf("and_int64 4294967296%s9223372036854775806 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_9223372036854775806_int64_ssa(9223372036854775806); got != 9223372036854775806 { + fmt.Printf("and_int64 9223372036854775806%s9223372036854775806 = %d, wanted 9223372036854775806\n", `&`, got) + failed = true + } + + if got := and_int64_9223372036854775806_ssa(9223372036854775806); got != 9223372036854775806 { + fmt.Printf("and_int64 9223372036854775806%s9223372036854775806 = %d, wanted 9223372036854775806\n", `&`, got) + failed = true + } + + if got := and_9223372036854775806_int64_ssa(9223372036854775807); got != 9223372036854775806 { + fmt.Printf("and_int64 9223372036854775806%s9223372036854775807 = %d, wanted 9223372036854775806\n", `&`, got) + failed = true + } + + if got := and_int64_9223372036854775806_ssa(9223372036854775807); got != 9223372036854775806 { + fmt.Printf("and_int64 9223372036854775807%s9223372036854775806 = %d, wanted 9223372036854775806\n", `&`, got) + failed = true + } + + if got := and_9223372036854775807_int64_ssa(-9223372036854775808); got != 0 { + fmt.Printf("and_int64 9223372036854775807%s-9223372036854775808 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := 
and_int64_9223372036854775807_ssa(-9223372036854775808); got != 0 { + fmt.Printf("and_int64 -9223372036854775808%s9223372036854775807 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_9223372036854775807_int64_ssa(-9223372036854775807); got != 1 { + fmt.Printf("and_int64 9223372036854775807%s-9223372036854775807 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int64_9223372036854775807_ssa(-9223372036854775807); got != 1 { + fmt.Printf("and_int64 -9223372036854775807%s9223372036854775807 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_9223372036854775807_int64_ssa(-4294967296); got != 9223372032559808512 { + fmt.Printf("and_int64 9223372036854775807%s-4294967296 = %d, wanted 9223372032559808512\n", `&`, got) + failed = true + } + + if got := and_int64_9223372036854775807_ssa(-4294967296); got != 9223372032559808512 { + fmt.Printf("and_int64 -4294967296%s9223372036854775807 = %d, wanted 9223372032559808512\n", `&`, got) + failed = true + } + + if got := and_9223372036854775807_int64_ssa(-1); got != 9223372036854775807 { + fmt.Printf("and_int64 9223372036854775807%s-1 = %d, wanted 9223372036854775807\n", `&`, got) + failed = true + } + + if got := and_int64_9223372036854775807_ssa(-1); got != 9223372036854775807 { + fmt.Printf("and_int64 -1%s9223372036854775807 = %d, wanted 9223372036854775807\n", `&`, got) + failed = true + } + + if got := and_9223372036854775807_int64_ssa(0); got != 0 { + fmt.Printf("and_int64 9223372036854775807%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int64_9223372036854775807_ssa(0); got != 0 { + fmt.Printf("and_int64 0%s9223372036854775807 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_9223372036854775807_int64_ssa(1); got != 1 { + fmt.Printf("and_int64 9223372036854775807%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int64_9223372036854775807_ssa(1); got != 1 { + fmt.Printf("and_int64 1%s9223372036854775807 = %d, 
wanted 1\n", `&`, got) + failed = true + } + + if got := and_9223372036854775807_int64_ssa(4294967296); got != 4294967296 { + fmt.Printf("and_int64 9223372036854775807%s4294967296 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_int64_9223372036854775807_ssa(4294967296); got != 4294967296 { + fmt.Printf("and_int64 4294967296%s9223372036854775807 = %d, wanted 4294967296\n", `&`, got) + failed = true + } + + if got := and_9223372036854775807_int64_ssa(9223372036854775806); got != 9223372036854775806 { + fmt.Printf("and_int64 9223372036854775807%s9223372036854775806 = %d, wanted 9223372036854775806\n", `&`, got) + failed = true + } + + if got := and_int64_9223372036854775807_ssa(9223372036854775806); got != 9223372036854775806 { + fmt.Printf("and_int64 9223372036854775806%s9223372036854775807 = %d, wanted 9223372036854775806\n", `&`, got) + failed = true + } + + if got := and_9223372036854775807_int64_ssa(9223372036854775807); got != 9223372036854775807 { + fmt.Printf("and_int64 9223372036854775807%s9223372036854775807 = %d, wanted 9223372036854775807\n", `&`, got) + failed = true + } + + if got := and_int64_9223372036854775807_ssa(9223372036854775807); got != 9223372036854775807 { + fmt.Printf("and_int64 9223372036854775807%s9223372036854775807 = %d, wanted 9223372036854775807\n", `&`, got) + failed = true + } + + if got := or_Neg9223372036854775808_int64_ssa(-9223372036854775808); got != -9223372036854775808 { + fmt.Printf("or_int64 -9223372036854775808%s-9223372036854775808 = %d, wanted -9223372036854775808\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775808_ssa(-9223372036854775808); got != -9223372036854775808 { + fmt.Printf("or_int64 -9223372036854775808%s-9223372036854775808 = %d, wanted -9223372036854775808\n", `|`, got) + failed = true + } + + if got := or_Neg9223372036854775808_int64_ssa(-9223372036854775807); got != -9223372036854775807 { + fmt.Printf("or_int64 -9223372036854775808%s-9223372036854775807 
= %d, wanted -9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775808_ssa(-9223372036854775807); got != -9223372036854775807 { + fmt.Printf("or_int64 -9223372036854775807%s-9223372036854775808 = %d, wanted -9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_Neg9223372036854775808_int64_ssa(-4294967296); got != -4294967296 { + fmt.Printf("or_int64 -9223372036854775808%s-4294967296 = %d, wanted -4294967296\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775808_ssa(-4294967296); got != -4294967296 { + fmt.Printf("or_int64 -4294967296%s-9223372036854775808 = %d, wanted -4294967296\n", `|`, got) + failed = true + } + + if got := or_Neg9223372036854775808_int64_ssa(-1); got != -1 { + fmt.Printf("or_int64 -9223372036854775808%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775808_ssa(-1); got != -1 { + fmt.Printf("or_int64 -1%s-9223372036854775808 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg9223372036854775808_int64_ssa(0); got != -9223372036854775808 { + fmt.Printf("or_int64 -9223372036854775808%s0 = %d, wanted -9223372036854775808\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775808_ssa(0); got != -9223372036854775808 { + fmt.Printf("or_int64 0%s-9223372036854775808 = %d, wanted -9223372036854775808\n", `|`, got) + failed = true + } + + if got := or_Neg9223372036854775808_int64_ssa(1); got != -9223372036854775807 { + fmt.Printf("or_int64 -9223372036854775808%s1 = %d, wanted -9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775808_ssa(1); got != -9223372036854775807 { + fmt.Printf("or_int64 1%s-9223372036854775808 = %d, wanted -9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_Neg9223372036854775808_int64_ssa(4294967296); got != -9223372032559808512 { + fmt.Printf("or_int64 -9223372036854775808%s4294967296 = %d, wanted 
-9223372032559808512\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775808_ssa(4294967296); got != -9223372032559808512 { + fmt.Printf("or_int64 4294967296%s-9223372036854775808 = %d, wanted -9223372032559808512\n", `|`, got) + failed = true + } + + if got := or_Neg9223372036854775808_int64_ssa(9223372036854775806); got != -2 { + fmt.Printf("or_int64 -9223372036854775808%s9223372036854775806 = %d, wanted -2\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775808_ssa(9223372036854775806); got != -2 { + fmt.Printf("or_int64 9223372036854775806%s-9223372036854775808 = %d, wanted -2\n", `|`, got) + failed = true + } + + if got := or_Neg9223372036854775808_int64_ssa(9223372036854775807); got != -1 { + fmt.Printf("or_int64 -9223372036854775808%s9223372036854775807 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775808_ssa(9223372036854775807); got != -1 { + fmt.Printf("or_int64 9223372036854775807%s-9223372036854775808 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg9223372036854775807_int64_ssa(-9223372036854775808); got != -9223372036854775807 { + fmt.Printf("or_int64 -9223372036854775807%s-9223372036854775808 = %d, wanted -9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775807_ssa(-9223372036854775808); got != -9223372036854775807 { + fmt.Printf("or_int64 -9223372036854775808%s-9223372036854775807 = %d, wanted -9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_Neg9223372036854775807_int64_ssa(-9223372036854775807); got != -9223372036854775807 { + fmt.Printf("or_int64 -9223372036854775807%s-9223372036854775807 = %d, wanted -9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775807_ssa(-9223372036854775807); got != -9223372036854775807 { + fmt.Printf("or_int64 -9223372036854775807%s-9223372036854775807 = %d, wanted -9223372036854775807\n", `|`, got) + 
failed = true + } + + if got := or_Neg9223372036854775807_int64_ssa(-4294967296); got != -4294967295 { + fmt.Printf("or_int64 -9223372036854775807%s-4294967296 = %d, wanted -4294967295\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775807_ssa(-4294967296); got != -4294967295 { + fmt.Printf("or_int64 -4294967296%s-9223372036854775807 = %d, wanted -4294967295\n", `|`, got) + failed = true + } + + if got := or_Neg9223372036854775807_int64_ssa(-1); got != -1 { + fmt.Printf("or_int64 -9223372036854775807%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775807_ssa(-1); got != -1 { + fmt.Printf("or_int64 -1%s-9223372036854775807 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg9223372036854775807_int64_ssa(0); got != -9223372036854775807 { + fmt.Printf("or_int64 -9223372036854775807%s0 = %d, wanted -9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775807_ssa(0); got != -9223372036854775807 { + fmt.Printf("or_int64 0%s-9223372036854775807 = %d, wanted -9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_Neg9223372036854775807_int64_ssa(1); got != -9223372036854775807 { + fmt.Printf("or_int64 -9223372036854775807%s1 = %d, wanted -9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775807_ssa(1); got != -9223372036854775807 { + fmt.Printf("or_int64 1%s-9223372036854775807 = %d, wanted -9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_Neg9223372036854775807_int64_ssa(4294967296); got != -9223372032559808511 { + fmt.Printf("or_int64 -9223372036854775807%s4294967296 = %d, wanted -9223372032559808511\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775807_ssa(4294967296); got != -9223372032559808511 { + fmt.Printf("or_int64 4294967296%s-9223372036854775807 = %d, wanted -9223372032559808511\n", `|`, got) + failed = true + } + + if got := 
or_Neg9223372036854775807_int64_ssa(9223372036854775806); got != -1 { + fmt.Printf("or_int64 -9223372036854775807%s9223372036854775806 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775807_ssa(9223372036854775806); got != -1 { + fmt.Printf("or_int64 9223372036854775806%s-9223372036854775807 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg9223372036854775807_int64_ssa(9223372036854775807); got != -1 { + fmt.Printf("or_int64 -9223372036854775807%s9223372036854775807 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_Neg9223372036854775807_ssa(9223372036854775807); got != -1 { + fmt.Printf("or_int64 9223372036854775807%s-9223372036854775807 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg4294967296_int64_ssa(-9223372036854775808); got != -4294967296 { + fmt.Printf("or_int64 -4294967296%s-9223372036854775808 = %d, wanted -4294967296\n", `|`, got) + failed = true + } + + if got := or_int64_Neg4294967296_ssa(-9223372036854775808); got != -4294967296 { + fmt.Printf("or_int64 -9223372036854775808%s-4294967296 = %d, wanted -4294967296\n", `|`, got) + failed = true + } + + if got := or_Neg4294967296_int64_ssa(-9223372036854775807); got != -4294967295 { + fmt.Printf("or_int64 -4294967296%s-9223372036854775807 = %d, wanted -4294967295\n", `|`, got) + failed = true + } + + if got := or_int64_Neg4294967296_ssa(-9223372036854775807); got != -4294967295 { + fmt.Printf("or_int64 -9223372036854775807%s-4294967296 = %d, wanted -4294967295\n", `|`, got) + failed = true + } + + if got := or_Neg4294967296_int64_ssa(-4294967296); got != -4294967296 { + fmt.Printf("or_int64 -4294967296%s-4294967296 = %d, wanted -4294967296\n", `|`, got) + failed = true + } + + if got := or_int64_Neg4294967296_ssa(-4294967296); got != -4294967296 { + fmt.Printf("or_int64 -4294967296%s-4294967296 = %d, wanted -4294967296\n", `|`, got) + failed = true + } + + if got := or_Neg4294967296_int64_ssa(-1); 
got != -1 { + fmt.Printf("or_int64 -4294967296%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_Neg4294967296_ssa(-1); got != -1 { + fmt.Printf("or_int64 -1%s-4294967296 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg4294967296_int64_ssa(0); got != -4294967296 { + fmt.Printf("or_int64 -4294967296%s0 = %d, wanted -4294967296\n", `|`, got) + failed = true + } + + if got := or_int64_Neg4294967296_ssa(0); got != -4294967296 { + fmt.Printf("or_int64 0%s-4294967296 = %d, wanted -4294967296\n", `|`, got) + failed = true + } + + if got := or_Neg4294967296_int64_ssa(1); got != -4294967295 { + fmt.Printf("or_int64 -4294967296%s1 = %d, wanted -4294967295\n", `|`, got) + failed = true + } + + if got := or_int64_Neg4294967296_ssa(1); got != -4294967295 { + fmt.Printf("or_int64 1%s-4294967296 = %d, wanted -4294967295\n", `|`, got) + failed = true + } + + if got := or_Neg4294967296_int64_ssa(4294967296); got != -4294967296 { + fmt.Printf("or_int64 -4294967296%s4294967296 = %d, wanted -4294967296\n", `|`, got) + failed = true + } + + if got := or_int64_Neg4294967296_ssa(4294967296); got != -4294967296 { + fmt.Printf("or_int64 4294967296%s-4294967296 = %d, wanted -4294967296\n", `|`, got) + failed = true + } + + if got := or_Neg4294967296_int64_ssa(9223372036854775806); got != -2 { + fmt.Printf("or_int64 -4294967296%s9223372036854775806 = %d, wanted -2\n", `|`, got) + failed = true + } + + if got := or_int64_Neg4294967296_ssa(9223372036854775806); got != -2 { + fmt.Printf("or_int64 9223372036854775806%s-4294967296 = %d, wanted -2\n", `|`, got) + failed = true + } + + if got := or_Neg4294967296_int64_ssa(9223372036854775807); got != -1 { + fmt.Printf("or_int64 -4294967296%s9223372036854775807 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_Neg4294967296_ssa(9223372036854775807); got != -1 { + fmt.Printf("or_int64 9223372036854775807%s-4294967296 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := 
or_Neg1_int64_ssa(-9223372036854775808); got != -1 { + fmt.Printf("or_int64 -1%s-9223372036854775808 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_Neg1_ssa(-9223372036854775808); got != -1 { + fmt.Printf("or_int64 -9223372036854775808%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int64_ssa(-9223372036854775807); got != -1 { + fmt.Printf("or_int64 -1%s-9223372036854775807 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_Neg1_ssa(-9223372036854775807); got != -1 { + fmt.Printf("or_int64 -9223372036854775807%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int64_ssa(-4294967296); got != -1 { + fmt.Printf("or_int64 -1%s-4294967296 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_Neg1_ssa(-4294967296); got != -1 { + fmt.Printf("or_int64 -4294967296%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int64_ssa(-1); got != -1 { + fmt.Printf("or_int64 -1%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_Neg1_ssa(-1); got != -1 { + fmt.Printf("or_int64 -1%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int64_ssa(0); got != -1 { + fmt.Printf("or_int64 -1%s0 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_Neg1_ssa(0); got != -1 { + fmt.Printf("or_int64 0%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int64_ssa(1); got != -1 { + fmt.Printf("or_int64 -1%s1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_Neg1_ssa(1); got != -1 { + fmt.Printf("or_int64 1%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int64_ssa(4294967296); got != -1 { + fmt.Printf("or_int64 -1%s4294967296 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_Neg1_ssa(4294967296); got != -1 { + fmt.Printf("or_int64 4294967296%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + 
if got := or_Neg1_int64_ssa(9223372036854775806); got != -1 { + fmt.Printf("or_int64 -1%s9223372036854775806 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_Neg1_ssa(9223372036854775806); got != -1 { + fmt.Printf("or_int64 9223372036854775806%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int64_ssa(9223372036854775807); got != -1 { + fmt.Printf("or_int64 -1%s9223372036854775807 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_Neg1_ssa(9223372036854775807); got != -1 { + fmt.Printf("or_int64 9223372036854775807%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_0_int64_ssa(-9223372036854775808); got != -9223372036854775808 { + fmt.Printf("or_int64 0%s-9223372036854775808 = %d, wanted -9223372036854775808\n", `|`, got) + failed = true + } + + if got := or_int64_0_ssa(-9223372036854775808); got != -9223372036854775808 { + fmt.Printf("or_int64 -9223372036854775808%s0 = %d, wanted -9223372036854775808\n", `|`, got) + failed = true + } + + if got := or_0_int64_ssa(-9223372036854775807); got != -9223372036854775807 { + fmt.Printf("or_int64 0%s-9223372036854775807 = %d, wanted -9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_0_ssa(-9223372036854775807); got != -9223372036854775807 { + fmt.Printf("or_int64 -9223372036854775807%s0 = %d, wanted -9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_0_int64_ssa(-4294967296); got != -4294967296 { + fmt.Printf("or_int64 0%s-4294967296 = %d, wanted -4294967296\n", `|`, got) + failed = true + } + + if got := or_int64_0_ssa(-4294967296); got != -4294967296 { + fmt.Printf("or_int64 -4294967296%s0 = %d, wanted -4294967296\n", `|`, got) + failed = true + } + + if got := or_0_int64_ssa(-1); got != -1 { + fmt.Printf("or_int64 0%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_0_ssa(-1); got != -1 { + fmt.Printf("or_int64 -1%s0 = %d, wanted -1\n", `|`, got) + failed = 
true + } + + if got := or_0_int64_ssa(0); got != 0 { + fmt.Printf("or_int64 0%s0 = %d, wanted 0\n", `|`, got) + failed = true + } + + if got := or_int64_0_ssa(0); got != 0 { + fmt.Printf("or_int64 0%s0 = %d, wanted 0\n", `|`, got) + failed = true + } + + if got := or_0_int64_ssa(1); got != 1 { + fmt.Printf("or_int64 0%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_int64_0_ssa(1); got != 1 { + fmt.Printf("or_int64 1%s0 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_0_int64_ssa(4294967296); got != 4294967296 { + fmt.Printf("or_int64 0%s4294967296 = %d, wanted 4294967296\n", `|`, got) + failed = true + } + + if got := or_int64_0_ssa(4294967296); got != 4294967296 { + fmt.Printf("or_int64 4294967296%s0 = %d, wanted 4294967296\n", `|`, got) + failed = true + } + + if got := or_0_int64_ssa(9223372036854775806); got != 9223372036854775806 { + fmt.Printf("or_int64 0%s9223372036854775806 = %d, wanted 9223372036854775806\n", `|`, got) + failed = true + } + + if got := or_int64_0_ssa(9223372036854775806); got != 9223372036854775806 { + fmt.Printf("or_int64 9223372036854775806%s0 = %d, wanted 9223372036854775806\n", `|`, got) + failed = true + } + + if got := or_0_int64_ssa(9223372036854775807); got != 9223372036854775807 { + fmt.Printf("or_int64 0%s9223372036854775807 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_0_ssa(9223372036854775807); got != 9223372036854775807 { + fmt.Printf("or_int64 9223372036854775807%s0 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_1_int64_ssa(-9223372036854775808); got != -9223372036854775807 { + fmt.Printf("or_int64 1%s-9223372036854775808 = %d, wanted -9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_1_ssa(-9223372036854775808); got != -9223372036854775807 { + fmt.Printf("or_int64 -9223372036854775808%s1 = %d, wanted -9223372036854775807\n", `|`, got) + failed = true + } + + if got := 
or_1_int64_ssa(-9223372036854775807); got != -9223372036854775807 { + fmt.Printf("or_int64 1%s-9223372036854775807 = %d, wanted -9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_1_ssa(-9223372036854775807); got != -9223372036854775807 { + fmt.Printf("or_int64 -9223372036854775807%s1 = %d, wanted -9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_1_int64_ssa(-4294967296); got != -4294967295 { + fmt.Printf("or_int64 1%s-4294967296 = %d, wanted -4294967295\n", `|`, got) + failed = true + } + + if got := or_int64_1_ssa(-4294967296); got != -4294967295 { + fmt.Printf("or_int64 -4294967296%s1 = %d, wanted -4294967295\n", `|`, got) + failed = true + } + + if got := or_1_int64_ssa(-1); got != -1 { + fmt.Printf("or_int64 1%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_1_ssa(-1); got != -1 { + fmt.Printf("or_int64 -1%s1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_1_int64_ssa(0); got != 1 { + fmt.Printf("or_int64 1%s0 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_int64_1_ssa(0); got != 1 { + fmt.Printf("or_int64 0%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_1_int64_ssa(1); got != 1 { + fmt.Printf("or_int64 1%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_int64_1_ssa(1); got != 1 { + fmt.Printf("or_int64 1%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_1_int64_ssa(4294967296); got != 4294967297 { + fmt.Printf("or_int64 1%s4294967296 = %d, wanted 4294967297\n", `|`, got) + failed = true + } + + if got := or_int64_1_ssa(4294967296); got != 4294967297 { + fmt.Printf("or_int64 4294967296%s1 = %d, wanted 4294967297\n", `|`, got) + failed = true + } + + if got := or_1_int64_ssa(9223372036854775806); got != 9223372036854775807 { + fmt.Printf("or_int64 1%s9223372036854775806 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_1_ssa(9223372036854775806); got 
!= 9223372036854775807 { + fmt.Printf("or_int64 9223372036854775806%s1 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_1_int64_ssa(9223372036854775807); got != 9223372036854775807 { + fmt.Printf("or_int64 1%s9223372036854775807 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_1_ssa(9223372036854775807); got != 9223372036854775807 { + fmt.Printf("or_int64 9223372036854775807%s1 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_4294967296_int64_ssa(-9223372036854775808); got != -9223372032559808512 { + fmt.Printf("or_int64 4294967296%s-9223372036854775808 = %d, wanted -9223372032559808512\n", `|`, got) + failed = true + } + + if got := or_int64_4294967296_ssa(-9223372036854775808); got != -9223372032559808512 { + fmt.Printf("or_int64 -9223372036854775808%s4294967296 = %d, wanted -9223372032559808512\n", `|`, got) + failed = true + } + + if got := or_4294967296_int64_ssa(-9223372036854775807); got != -9223372032559808511 { + fmt.Printf("or_int64 4294967296%s-9223372036854775807 = %d, wanted -9223372032559808511\n", `|`, got) + failed = true + } + + if got := or_int64_4294967296_ssa(-9223372036854775807); got != -9223372032559808511 { + fmt.Printf("or_int64 -9223372036854775807%s4294967296 = %d, wanted -9223372032559808511\n", `|`, got) + failed = true + } + + if got := or_4294967296_int64_ssa(-4294967296); got != -4294967296 { + fmt.Printf("or_int64 4294967296%s-4294967296 = %d, wanted -4294967296\n", `|`, got) + failed = true + } + + if got := or_int64_4294967296_ssa(-4294967296); got != -4294967296 { + fmt.Printf("or_int64 -4294967296%s4294967296 = %d, wanted -4294967296\n", `|`, got) + failed = true + } + + if got := or_4294967296_int64_ssa(-1); got != -1 { + fmt.Printf("or_int64 4294967296%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_4294967296_ssa(-1); got != -1 { + fmt.Printf("or_int64 -1%s4294967296 = %d, wanted -1\n", 
`|`, got) + failed = true + } + + if got := or_4294967296_int64_ssa(0); got != 4294967296 { + fmt.Printf("or_int64 4294967296%s0 = %d, wanted 4294967296\n", `|`, got) + failed = true + } + + if got := or_int64_4294967296_ssa(0); got != 4294967296 { + fmt.Printf("or_int64 0%s4294967296 = %d, wanted 4294967296\n", `|`, got) + failed = true + } + + if got := or_4294967296_int64_ssa(1); got != 4294967297 { + fmt.Printf("or_int64 4294967296%s1 = %d, wanted 4294967297\n", `|`, got) + failed = true + } + + if got := or_int64_4294967296_ssa(1); got != 4294967297 { + fmt.Printf("or_int64 1%s4294967296 = %d, wanted 4294967297\n", `|`, got) + failed = true + } + + if got := or_4294967296_int64_ssa(4294967296); got != 4294967296 { + fmt.Printf("or_int64 4294967296%s4294967296 = %d, wanted 4294967296\n", `|`, got) + failed = true + } + + if got := or_int64_4294967296_ssa(4294967296); got != 4294967296 { + fmt.Printf("or_int64 4294967296%s4294967296 = %d, wanted 4294967296\n", `|`, got) + failed = true + } + + if got := or_4294967296_int64_ssa(9223372036854775806); got != 9223372036854775806 { + fmt.Printf("or_int64 4294967296%s9223372036854775806 = %d, wanted 9223372036854775806\n", `|`, got) + failed = true + } + + if got := or_int64_4294967296_ssa(9223372036854775806); got != 9223372036854775806 { + fmt.Printf("or_int64 9223372036854775806%s4294967296 = %d, wanted 9223372036854775806\n", `|`, got) + failed = true + } + + if got := or_4294967296_int64_ssa(9223372036854775807); got != 9223372036854775807 { + fmt.Printf("or_int64 4294967296%s9223372036854775807 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_4294967296_ssa(9223372036854775807); got != 9223372036854775807 { + fmt.Printf("or_int64 9223372036854775807%s4294967296 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_9223372036854775806_int64_ssa(-9223372036854775808); got != -2 { + fmt.Printf("or_int64 9223372036854775806%s-9223372036854775808 
= %d, wanted -2\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775806_ssa(-9223372036854775808); got != -2 { + fmt.Printf("or_int64 -9223372036854775808%s9223372036854775806 = %d, wanted -2\n", `|`, got) + failed = true + } + + if got := or_9223372036854775806_int64_ssa(-9223372036854775807); got != -1 { + fmt.Printf("or_int64 9223372036854775806%s-9223372036854775807 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775806_ssa(-9223372036854775807); got != -1 { + fmt.Printf("or_int64 -9223372036854775807%s9223372036854775806 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_9223372036854775806_int64_ssa(-4294967296); got != -2 { + fmt.Printf("or_int64 9223372036854775806%s-4294967296 = %d, wanted -2\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775806_ssa(-4294967296); got != -2 { + fmt.Printf("or_int64 -4294967296%s9223372036854775806 = %d, wanted -2\n", `|`, got) + failed = true + } + + if got := or_9223372036854775806_int64_ssa(-1); got != -1 { + fmt.Printf("or_int64 9223372036854775806%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775806_ssa(-1); got != -1 { + fmt.Printf("or_int64 -1%s9223372036854775806 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_9223372036854775806_int64_ssa(0); got != 9223372036854775806 { + fmt.Printf("or_int64 9223372036854775806%s0 = %d, wanted 9223372036854775806\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775806_ssa(0); got != 9223372036854775806 { + fmt.Printf("or_int64 0%s9223372036854775806 = %d, wanted 9223372036854775806\n", `|`, got) + failed = true + } + + if got := or_9223372036854775806_int64_ssa(1); got != 9223372036854775807 { + fmt.Printf("or_int64 9223372036854775806%s1 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775806_ssa(1); got != 9223372036854775807 { + 
fmt.Printf("or_int64 1%s9223372036854775806 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_9223372036854775806_int64_ssa(4294967296); got != 9223372036854775806 { + fmt.Printf("or_int64 9223372036854775806%s4294967296 = %d, wanted 9223372036854775806\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775806_ssa(4294967296); got != 9223372036854775806 { + fmt.Printf("or_int64 4294967296%s9223372036854775806 = %d, wanted 9223372036854775806\n", `|`, got) + failed = true + } + + if got := or_9223372036854775806_int64_ssa(9223372036854775806); got != 9223372036854775806 { + fmt.Printf("or_int64 9223372036854775806%s9223372036854775806 = %d, wanted 9223372036854775806\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775806_ssa(9223372036854775806); got != 9223372036854775806 { + fmt.Printf("or_int64 9223372036854775806%s9223372036854775806 = %d, wanted 9223372036854775806\n", `|`, got) + failed = true + } + + if got := or_9223372036854775806_int64_ssa(9223372036854775807); got != 9223372036854775807 { + fmt.Printf("or_int64 9223372036854775806%s9223372036854775807 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775806_ssa(9223372036854775807); got != 9223372036854775807 { + fmt.Printf("or_int64 9223372036854775807%s9223372036854775806 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_9223372036854775807_int64_ssa(-9223372036854775808); got != -1 { + fmt.Printf("or_int64 9223372036854775807%s-9223372036854775808 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775807_ssa(-9223372036854775808); got != -1 { + fmt.Printf("or_int64 -9223372036854775808%s9223372036854775807 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_9223372036854775807_int64_ssa(-9223372036854775807); got != -1 { + fmt.Printf("or_int64 9223372036854775807%s-9223372036854775807 = %d, 
wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775807_ssa(-9223372036854775807); got != -1 { + fmt.Printf("or_int64 -9223372036854775807%s9223372036854775807 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_9223372036854775807_int64_ssa(-4294967296); got != -1 { + fmt.Printf("or_int64 9223372036854775807%s-4294967296 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775807_ssa(-4294967296); got != -1 { + fmt.Printf("or_int64 -4294967296%s9223372036854775807 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_9223372036854775807_int64_ssa(-1); got != -1 { + fmt.Printf("or_int64 9223372036854775807%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775807_ssa(-1); got != -1 { + fmt.Printf("or_int64 -1%s9223372036854775807 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_9223372036854775807_int64_ssa(0); got != 9223372036854775807 { + fmt.Printf("or_int64 9223372036854775807%s0 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775807_ssa(0); got != 9223372036854775807 { + fmt.Printf("or_int64 0%s9223372036854775807 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_9223372036854775807_int64_ssa(1); got != 9223372036854775807 { + fmt.Printf("or_int64 9223372036854775807%s1 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775807_ssa(1); got != 9223372036854775807 { + fmt.Printf("or_int64 1%s9223372036854775807 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_9223372036854775807_int64_ssa(4294967296); got != 9223372036854775807 { + fmt.Printf("or_int64 9223372036854775807%s4294967296 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775807_ssa(4294967296); got != 9223372036854775807 { + 
fmt.Printf("or_int64 4294967296%s9223372036854775807 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_9223372036854775807_int64_ssa(9223372036854775806); got != 9223372036854775807 { + fmt.Printf("or_int64 9223372036854775807%s9223372036854775806 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775807_ssa(9223372036854775806); got != 9223372036854775807 { + fmt.Printf("or_int64 9223372036854775806%s9223372036854775807 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_9223372036854775807_int64_ssa(9223372036854775807); got != 9223372036854775807 { + fmt.Printf("or_int64 9223372036854775807%s9223372036854775807 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := or_int64_9223372036854775807_ssa(9223372036854775807); got != 9223372036854775807 { + fmt.Printf("or_int64 9223372036854775807%s9223372036854775807 = %d, wanted 9223372036854775807\n", `|`, got) + failed = true + } + + if got := xor_Neg9223372036854775808_int64_ssa(-9223372036854775808); got != 0 { + fmt.Printf("xor_int64 -9223372036854775808%s-9223372036854775808 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775808_ssa(-9223372036854775808); got != 0 { + fmt.Printf("xor_int64 -9223372036854775808%s-9223372036854775808 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_Neg9223372036854775808_int64_ssa(-9223372036854775807); got != 1 { + fmt.Printf("xor_int64 -9223372036854775808%s-9223372036854775807 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775808_ssa(-9223372036854775807); got != 1 { + fmt.Printf("xor_int64 -9223372036854775807%s-9223372036854775808 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_Neg9223372036854775808_int64_ssa(-4294967296); got != 9223372032559808512 { + fmt.Printf("xor_int64 -9223372036854775808%s-4294967296 = %d, 
wanted 9223372032559808512\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775808_ssa(-4294967296); got != 9223372032559808512 { + fmt.Printf("xor_int64 -4294967296%s-9223372036854775808 = %d, wanted 9223372032559808512\n", `^`, got) + failed = true + } + + if got := xor_Neg9223372036854775808_int64_ssa(-1); got != 9223372036854775807 { + fmt.Printf("xor_int64 -9223372036854775808%s-1 = %d, wanted 9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775808_ssa(-1); got != 9223372036854775807 { + fmt.Printf("xor_int64 -1%s-9223372036854775808 = %d, wanted 9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_Neg9223372036854775808_int64_ssa(0); got != -9223372036854775808 { + fmt.Printf("xor_int64 -9223372036854775808%s0 = %d, wanted -9223372036854775808\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775808_ssa(0); got != -9223372036854775808 { + fmt.Printf("xor_int64 0%s-9223372036854775808 = %d, wanted -9223372036854775808\n", `^`, got) + failed = true + } + + if got := xor_Neg9223372036854775808_int64_ssa(1); got != -9223372036854775807 { + fmt.Printf("xor_int64 -9223372036854775808%s1 = %d, wanted -9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775808_ssa(1); got != -9223372036854775807 { + fmt.Printf("xor_int64 1%s-9223372036854775808 = %d, wanted -9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_Neg9223372036854775808_int64_ssa(4294967296); got != -9223372032559808512 { + fmt.Printf("xor_int64 -9223372036854775808%s4294967296 = %d, wanted -9223372032559808512\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775808_ssa(4294967296); got != -9223372032559808512 { + fmt.Printf("xor_int64 4294967296%s-9223372036854775808 = %d, wanted -9223372032559808512\n", `^`, got) + failed = true + } + + if got := xor_Neg9223372036854775808_int64_ssa(9223372036854775806); 
got != -2 { + fmt.Printf("xor_int64 -9223372036854775808%s9223372036854775806 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775808_ssa(9223372036854775806); got != -2 { + fmt.Printf("xor_int64 9223372036854775806%s-9223372036854775808 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_Neg9223372036854775808_int64_ssa(9223372036854775807); got != -1 { + fmt.Printf("xor_int64 -9223372036854775808%s9223372036854775807 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775808_ssa(9223372036854775807); got != -1 { + fmt.Printf("xor_int64 9223372036854775807%s-9223372036854775808 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_Neg9223372036854775807_int64_ssa(-9223372036854775808); got != 1 { + fmt.Printf("xor_int64 -9223372036854775807%s-9223372036854775808 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775807_ssa(-9223372036854775808); got != 1 { + fmt.Printf("xor_int64 -9223372036854775808%s-9223372036854775807 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_Neg9223372036854775807_int64_ssa(-9223372036854775807); got != 0 { + fmt.Printf("xor_int64 -9223372036854775807%s-9223372036854775807 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775807_ssa(-9223372036854775807); got != 0 { + fmt.Printf("xor_int64 -9223372036854775807%s-9223372036854775807 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_Neg9223372036854775807_int64_ssa(-4294967296); got != 9223372032559808513 { + fmt.Printf("xor_int64 -9223372036854775807%s-4294967296 = %d, wanted 9223372032559808513\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775807_ssa(-4294967296); got != 9223372032559808513 { + fmt.Printf("xor_int64 -4294967296%s-9223372036854775807 = %d, wanted 9223372032559808513\n", `^`, got) + failed = true + } + + if got := 
xor_Neg9223372036854775807_int64_ssa(-1); got != 9223372036854775806 { + fmt.Printf("xor_int64 -9223372036854775807%s-1 = %d, wanted 9223372036854775806\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775807_ssa(-1); got != 9223372036854775806 { + fmt.Printf("xor_int64 -1%s-9223372036854775807 = %d, wanted 9223372036854775806\n", `^`, got) + failed = true + } + + if got := xor_Neg9223372036854775807_int64_ssa(0); got != -9223372036854775807 { + fmt.Printf("xor_int64 -9223372036854775807%s0 = %d, wanted -9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775807_ssa(0); got != -9223372036854775807 { + fmt.Printf("xor_int64 0%s-9223372036854775807 = %d, wanted -9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_Neg9223372036854775807_int64_ssa(1); got != -9223372036854775808 { + fmt.Printf("xor_int64 -9223372036854775807%s1 = %d, wanted -9223372036854775808\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775807_ssa(1); got != -9223372036854775808 { + fmt.Printf("xor_int64 1%s-9223372036854775807 = %d, wanted -9223372036854775808\n", `^`, got) + failed = true + } + + if got := xor_Neg9223372036854775807_int64_ssa(4294967296); got != -9223372032559808511 { + fmt.Printf("xor_int64 -9223372036854775807%s4294967296 = %d, wanted -9223372032559808511\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775807_ssa(4294967296); got != -9223372032559808511 { + fmt.Printf("xor_int64 4294967296%s-9223372036854775807 = %d, wanted -9223372032559808511\n", `^`, got) + failed = true + } + + if got := xor_Neg9223372036854775807_int64_ssa(9223372036854775806); got != -1 { + fmt.Printf("xor_int64 -9223372036854775807%s9223372036854775806 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775807_ssa(9223372036854775806); got != -1 { + fmt.Printf("xor_int64 9223372036854775806%s-9223372036854775807 = %d, wanted 
-1\n", `^`, got) + failed = true + } + + if got := xor_Neg9223372036854775807_int64_ssa(9223372036854775807); got != -2 { + fmt.Printf("xor_int64 -9223372036854775807%s9223372036854775807 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg9223372036854775807_ssa(9223372036854775807); got != -2 { + fmt.Printf("xor_int64 9223372036854775807%s-9223372036854775807 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_Neg4294967296_int64_ssa(-9223372036854775808); got != 9223372032559808512 { + fmt.Printf("xor_int64 -4294967296%s-9223372036854775808 = %d, wanted 9223372032559808512\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg4294967296_ssa(-9223372036854775808); got != 9223372032559808512 { + fmt.Printf("xor_int64 -9223372036854775808%s-4294967296 = %d, wanted 9223372032559808512\n", `^`, got) + failed = true + } + + if got := xor_Neg4294967296_int64_ssa(-9223372036854775807); got != 9223372032559808513 { + fmt.Printf("xor_int64 -4294967296%s-9223372036854775807 = %d, wanted 9223372032559808513\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg4294967296_ssa(-9223372036854775807); got != 9223372032559808513 { + fmt.Printf("xor_int64 -9223372036854775807%s-4294967296 = %d, wanted 9223372032559808513\n", `^`, got) + failed = true + } + + if got := xor_Neg4294967296_int64_ssa(-4294967296); got != 0 { + fmt.Printf("xor_int64 -4294967296%s-4294967296 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg4294967296_ssa(-4294967296); got != 0 { + fmt.Printf("xor_int64 -4294967296%s-4294967296 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_Neg4294967296_int64_ssa(-1); got != 4294967295 { + fmt.Printf("xor_int64 -4294967296%s-1 = %d, wanted 4294967295\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg4294967296_ssa(-1); got != 4294967295 { + fmt.Printf("xor_int64 -1%s-4294967296 = %d, wanted 4294967295\n", `^`, got) + failed = true + } + + if got := 
xor_Neg4294967296_int64_ssa(0); got != -4294967296 { + fmt.Printf("xor_int64 -4294967296%s0 = %d, wanted -4294967296\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg4294967296_ssa(0); got != -4294967296 { + fmt.Printf("xor_int64 0%s-4294967296 = %d, wanted -4294967296\n", `^`, got) + failed = true + } + + if got := xor_Neg4294967296_int64_ssa(1); got != -4294967295 { + fmt.Printf("xor_int64 -4294967296%s1 = %d, wanted -4294967295\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg4294967296_ssa(1); got != -4294967295 { + fmt.Printf("xor_int64 1%s-4294967296 = %d, wanted -4294967295\n", `^`, got) + failed = true + } + + if got := xor_Neg4294967296_int64_ssa(4294967296); got != -8589934592 { + fmt.Printf("xor_int64 -4294967296%s4294967296 = %d, wanted -8589934592\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg4294967296_ssa(4294967296); got != -8589934592 { + fmt.Printf("xor_int64 4294967296%s-4294967296 = %d, wanted -8589934592\n", `^`, got) + failed = true + } + + if got := xor_Neg4294967296_int64_ssa(9223372036854775806); got != -9223372032559808514 { + fmt.Printf("xor_int64 -4294967296%s9223372036854775806 = %d, wanted -9223372032559808514\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg4294967296_ssa(9223372036854775806); got != -9223372032559808514 { + fmt.Printf("xor_int64 9223372036854775806%s-4294967296 = %d, wanted -9223372032559808514\n", `^`, got) + failed = true + } + + if got := xor_Neg4294967296_int64_ssa(9223372036854775807); got != -9223372032559808513 { + fmt.Printf("xor_int64 -4294967296%s9223372036854775807 = %d, wanted -9223372032559808513\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg4294967296_ssa(9223372036854775807); got != -9223372032559808513 { + fmt.Printf("xor_int64 9223372036854775807%s-4294967296 = %d, wanted -9223372032559808513\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int64_ssa(-9223372036854775808); got != 9223372036854775807 { + fmt.Printf("xor_int64 
-1%s-9223372036854775808 = %d, wanted 9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg1_ssa(-9223372036854775808); got != 9223372036854775807 { + fmt.Printf("xor_int64 -9223372036854775808%s-1 = %d, wanted 9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int64_ssa(-9223372036854775807); got != 9223372036854775806 { + fmt.Printf("xor_int64 -1%s-9223372036854775807 = %d, wanted 9223372036854775806\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg1_ssa(-9223372036854775807); got != 9223372036854775806 { + fmt.Printf("xor_int64 -9223372036854775807%s-1 = %d, wanted 9223372036854775806\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int64_ssa(-4294967296); got != 4294967295 { + fmt.Printf("xor_int64 -1%s-4294967296 = %d, wanted 4294967295\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg1_ssa(-4294967296); got != 4294967295 { + fmt.Printf("xor_int64 -4294967296%s-1 = %d, wanted 4294967295\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int64_ssa(-1); got != 0 { + fmt.Printf("xor_int64 -1%s-1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg1_ssa(-1); got != 0 { + fmt.Printf("xor_int64 -1%s-1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int64_ssa(0); got != -1 { + fmt.Printf("xor_int64 -1%s0 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg1_ssa(0); got != -1 { + fmt.Printf("xor_int64 0%s-1 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int64_ssa(1); got != -2 { + fmt.Printf("xor_int64 -1%s1 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg1_ssa(1); got != -2 { + fmt.Printf("xor_int64 1%s-1 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int64_ssa(4294967296); got != -4294967297 { + fmt.Printf("xor_int64 -1%s4294967296 = %d, wanted -4294967297\n", `^`, got) + failed = true + } + + if got := 
xor_int64_Neg1_ssa(4294967296); got != -4294967297 { + fmt.Printf("xor_int64 4294967296%s-1 = %d, wanted -4294967297\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int64_ssa(9223372036854775806); got != -9223372036854775807 { + fmt.Printf("xor_int64 -1%s9223372036854775806 = %d, wanted -9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg1_ssa(9223372036854775806); got != -9223372036854775807 { + fmt.Printf("xor_int64 9223372036854775806%s-1 = %d, wanted -9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int64_ssa(9223372036854775807); got != -9223372036854775808 { + fmt.Printf("xor_int64 -1%s9223372036854775807 = %d, wanted -9223372036854775808\n", `^`, got) + failed = true + } + + if got := xor_int64_Neg1_ssa(9223372036854775807); got != -9223372036854775808 { + fmt.Printf("xor_int64 9223372036854775807%s-1 = %d, wanted -9223372036854775808\n", `^`, got) + failed = true + } + + if got := xor_0_int64_ssa(-9223372036854775808); got != -9223372036854775808 { + fmt.Printf("xor_int64 0%s-9223372036854775808 = %d, wanted -9223372036854775808\n", `^`, got) + failed = true + } + + if got := xor_int64_0_ssa(-9223372036854775808); got != -9223372036854775808 { + fmt.Printf("xor_int64 -9223372036854775808%s0 = %d, wanted -9223372036854775808\n", `^`, got) + failed = true + } + + if got := xor_0_int64_ssa(-9223372036854775807); got != -9223372036854775807 { + fmt.Printf("xor_int64 0%s-9223372036854775807 = %d, wanted -9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_int64_0_ssa(-9223372036854775807); got != -9223372036854775807 { + fmt.Printf("xor_int64 -9223372036854775807%s0 = %d, wanted -9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_0_int64_ssa(-4294967296); got != -4294967296 { + fmt.Printf("xor_int64 0%s-4294967296 = %d, wanted -4294967296\n", `^`, got) + failed = true + } + + if got := xor_int64_0_ssa(-4294967296); got != -4294967296 { + 
fmt.Printf("xor_int64 -4294967296%s0 = %d, wanted -4294967296\n", `^`, got) + failed = true + } + + if got := xor_0_int64_ssa(-1); got != -1 { + fmt.Printf("xor_int64 0%s-1 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int64_0_ssa(-1); got != -1 { + fmt.Printf("xor_int64 -1%s0 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_0_int64_ssa(0); got != 0 { + fmt.Printf("xor_int64 0%s0 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int64_0_ssa(0); got != 0 { + fmt.Printf("xor_int64 0%s0 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_0_int64_ssa(1); got != 1 { + fmt.Printf("xor_int64 0%s1 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int64_0_ssa(1); got != 1 { + fmt.Printf("xor_int64 1%s0 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_0_int64_ssa(4294967296); got != 4294967296 { + fmt.Printf("xor_int64 0%s4294967296 = %d, wanted 4294967296\n", `^`, got) + failed = true + } + + if got := xor_int64_0_ssa(4294967296); got != 4294967296 { + fmt.Printf("xor_int64 4294967296%s0 = %d, wanted 4294967296\n", `^`, got) + failed = true + } + + if got := xor_0_int64_ssa(9223372036854775806); got != 9223372036854775806 { + fmt.Printf("xor_int64 0%s9223372036854775806 = %d, wanted 9223372036854775806\n", `^`, got) + failed = true + } + + if got := xor_int64_0_ssa(9223372036854775806); got != 9223372036854775806 { + fmt.Printf("xor_int64 9223372036854775806%s0 = %d, wanted 9223372036854775806\n", `^`, got) + failed = true + } + + if got := xor_0_int64_ssa(9223372036854775807); got != 9223372036854775807 { + fmt.Printf("xor_int64 0%s9223372036854775807 = %d, wanted 9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_int64_0_ssa(9223372036854775807); got != 9223372036854775807 { + fmt.Printf("xor_int64 9223372036854775807%s0 = %d, wanted 9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_1_int64_ssa(-9223372036854775808); got != 
-9223372036854775807 { + fmt.Printf("xor_int64 1%s-9223372036854775808 = %d, wanted -9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_int64_1_ssa(-9223372036854775808); got != -9223372036854775807 { + fmt.Printf("xor_int64 -9223372036854775808%s1 = %d, wanted -9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_1_int64_ssa(-9223372036854775807); got != -9223372036854775808 { + fmt.Printf("xor_int64 1%s-9223372036854775807 = %d, wanted -9223372036854775808\n", `^`, got) + failed = true + } + + if got := xor_int64_1_ssa(-9223372036854775807); got != -9223372036854775808 { + fmt.Printf("xor_int64 -9223372036854775807%s1 = %d, wanted -9223372036854775808\n", `^`, got) + failed = true + } + + if got := xor_1_int64_ssa(-4294967296); got != -4294967295 { + fmt.Printf("xor_int64 1%s-4294967296 = %d, wanted -4294967295\n", `^`, got) + failed = true + } + + if got := xor_int64_1_ssa(-4294967296); got != -4294967295 { + fmt.Printf("xor_int64 -4294967296%s1 = %d, wanted -4294967295\n", `^`, got) + failed = true + } + + if got := xor_1_int64_ssa(-1); got != -2 { + fmt.Printf("xor_int64 1%s-1 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int64_1_ssa(-1); got != -2 { + fmt.Printf("xor_int64 -1%s1 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_1_int64_ssa(0); got != 1 { + fmt.Printf("xor_int64 1%s0 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int64_1_ssa(0); got != 1 { + fmt.Printf("xor_int64 0%s1 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_1_int64_ssa(1); got != 0 { + fmt.Printf("xor_int64 1%s1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int64_1_ssa(1); got != 0 { + fmt.Printf("xor_int64 1%s1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_1_int64_ssa(4294967296); got != 4294967297 { + fmt.Printf("xor_int64 1%s4294967296 = %d, wanted 4294967297\n", `^`, got) + failed = true + } + + if got := 
xor_int64_1_ssa(4294967296); got != 4294967297 { + fmt.Printf("xor_int64 4294967296%s1 = %d, wanted 4294967297\n", `^`, got) + failed = true + } + + if got := xor_1_int64_ssa(9223372036854775806); got != 9223372036854775807 { + fmt.Printf("xor_int64 1%s9223372036854775806 = %d, wanted 9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_int64_1_ssa(9223372036854775806); got != 9223372036854775807 { + fmt.Printf("xor_int64 9223372036854775806%s1 = %d, wanted 9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_1_int64_ssa(9223372036854775807); got != 9223372036854775806 { + fmt.Printf("xor_int64 1%s9223372036854775807 = %d, wanted 9223372036854775806\n", `^`, got) + failed = true + } + + if got := xor_int64_1_ssa(9223372036854775807); got != 9223372036854775806 { + fmt.Printf("xor_int64 9223372036854775807%s1 = %d, wanted 9223372036854775806\n", `^`, got) + failed = true + } + + if got := xor_4294967296_int64_ssa(-9223372036854775808); got != -9223372032559808512 { + fmt.Printf("xor_int64 4294967296%s-9223372036854775808 = %d, wanted -9223372032559808512\n", `^`, got) + failed = true + } + + if got := xor_int64_4294967296_ssa(-9223372036854775808); got != -9223372032559808512 { + fmt.Printf("xor_int64 -9223372036854775808%s4294967296 = %d, wanted -9223372032559808512\n", `^`, got) + failed = true + } + + if got := xor_4294967296_int64_ssa(-9223372036854775807); got != -9223372032559808511 { + fmt.Printf("xor_int64 4294967296%s-9223372036854775807 = %d, wanted -9223372032559808511\n", `^`, got) + failed = true + } + + if got := xor_int64_4294967296_ssa(-9223372036854775807); got != -9223372032559808511 { + fmt.Printf("xor_int64 -9223372036854775807%s4294967296 = %d, wanted -9223372032559808511\n", `^`, got) + failed = true + } + + if got := xor_4294967296_int64_ssa(-4294967296); got != -8589934592 { + fmt.Printf("xor_int64 4294967296%s-4294967296 = %d, wanted -8589934592\n", `^`, got) + failed = true + } + + if got := 
xor_int64_4294967296_ssa(-4294967296); got != -8589934592 { + fmt.Printf("xor_int64 -4294967296%s4294967296 = %d, wanted -8589934592\n", `^`, got) + failed = true + } + + if got := xor_4294967296_int64_ssa(-1); got != -4294967297 { + fmt.Printf("xor_int64 4294967296%s-1 = %d, wanted -4294967297\n", `^`, got) + failed = true + } + + if got := xor_int64_4294967296_ssa(-1); got != -4294967297 { + fmt.Printf("xor_int64 -1%s4294967296 = %d, wanted -4294967297\n", `^`, got) + failed = true + } + + if got := xor_4294967296_int64_ssa(0); got != 4294967296 { + fmt.Printf("xor_int64 4294967296%s0 = %d, wanted 4294967296\n", `^`, got) + failed = true + } + + if got := xor_int64_4294967296_ssa(0); got != 4294967296 { + fmt.Printf("xor_int64 0%s4294967296 = %d, wanted 4294967296\n", `^`, got) + failed = true + } + + if got := xor_4294967296_int64_ssa(1); got != 4294967297 { + fmt.Printf("xor_int64 4294967296%s1 = %d, wanted 4294967297\n", `^`, got) + failed = true + } + + if got := xor_int64_4294967296_ssa(1); got != 4294967297 { + fmt.Printf("xor_int64 1%s4294967296 = %d, wanted 4294967297\n", `^`, got) + failed = true + } + + if got := xor_4294967296_int64_ssa(4294967296); got != 0 { + fmt.Printf("xor_int64 4294967296%s4294967296 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int64_4294967296_ssa(4294967296); got != 0 { + fmt.Printf("xor_int64 4294967296%s4294967296 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_4294967296_int64_ssa(9223372036854775806); got != 9223372032559808510 { + fmt.Printf("xor_int64 4294967296%s9223372036854775806 = %d, wanted 9223372032559808510\n", `^`, got) + failed = true + } + + if got := xor_int64_4294967296_ssa(9223372036854775806); got != 9223372032559808510 { + fmt.Printf("xor_int64 9223372036854775806%s4294967296 = %d, wanted 9223372032559808510\n", `^`, got) + failed = true + } + + if got := xor_4294967296_int64_ssa(9223372036854775807); got != 9223372032559808511 { + fmt.Printf("xor_int64 
4294967296%s9223372036854775807 = %d, wanted 9223372032559808511\n", `^`, got) + failed = true + } + + if got := xor_int64_4294967296_ssa(9223372036854775807); got != 9223372032559808511 { + fmt.Printf("xor_int64 9223372036854775807%s4294967296 = %d, wanted 9223372032559808511\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775806_int64_ssa(-9223372036854775808); got != -2 { + fmt.Printf("xor_int64 9223372036854775806%s-9223372036854775808 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775806_ssa(-9223372036854775808); got != -2 { + fmt.Printf("xor_int64 -9223372036854775808%s9223372036854775806 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775806_int64_ssa(-9223372036854775807); got != -1 { + fmt.Printf("xor_int64 9223372036854775806%s-9223372036854775807 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775806_ssa(-9223372036854775807); got != -1 { + fmt.Printf("xor_int64 -9223372036854775807%s9223372036854775806 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775806_int64_ssa(-4294967296); got != -9223372032559808514 { + fmt.Printf("xor_int64 9223372036854775806%s-4294967296 = %d, wanted -9223372032559808514\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775806_ssa(-4294967296); got != -9223372032559808514 { + fmt.Printf("xor_int64 -4294967296%s9223372036854775806 = %d, wanted -9223372032559808514\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775806_int64_ssa(-1); got != -9223372036854775807 { + fmt.Printf("xor_int64 9223372036854775806%s-1 = %d, wanted -9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775806_ssa(-1); got != -9223372036854775807 { + fmt.Printf("xor_int64 -1%s9223372036854775806 = %d, wanted -9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775806_int64_ssa(0); got != 
9223372036854775806 { + fmt.Printf("xor_int64 9223372036854775806%s0 = %d, wanted 9223372036854775806\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775806_ssa(0); got != 9223372036854775806 { + fmt.Printf("xor_int64 0%s9223372036854775806 = %d, wanted 9223372036854775806\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775806_int64_ssa(1); got != 9223372036854775807 { + fmt.Printf("xor_int64 9223372036854775806%s1 = %d, wanted 9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775806_ssa(1); got != 9223372036854775807 { + fmt.Printf("xor_int64 1%s9223372036854775806 = %d, wanted 9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775806_int64_ssa(4294967296); got != 9223372032559808510 { + fmt.Printf("xor_int64 9223372036854775806%s4294967296 = %d, wanted 9223372032559808510\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775806_ssa(4294967296); got != 9223372032559808510 { + fmt.Printf("xor_int64 4294967296%s9223372036854775806 = %d, wanted 9223372032559808510\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775806_int64_ssa(9223372036854775806); got != 0 { + fmt.Printf("xor_int64 9223372036854775806%s9223372036854775806 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775806_ssa(9223372036854775806); got != 0 { + fmt.Printf("xor_int64 9223372036854775806%s9223372036854775806 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775806_int64_ssa(9223372036854775807); got != 1 { + fmt.Printf("xor_int64 9223372036854775806%s9223372036854775807 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775806_ssa(9223372036854775807); got != 1 { + fmt.Printf("xor_int64 9223372036854775807%s9223372036854775806 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775807_int64_ssa(-9223372036854775808); 
got != -1 { + fmt.Printf("xor_int64 9223372036854775807%s-9223372036854775808 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775807_ssa(-9223372036854775808); got != -1 { + fmt.Printf("xor_int64 -9223372036854775808%s9223372036854775807 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775807_int64_ssa(-9223372036854775807); got != -2 { + fmt.Printf("xor_int64 9223372036854775807%s-9223372036854775807 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775807_ssa(-9223372036854775807); got != -2 { + fmt.Printf("xor_int64 -9223372036854775807%s9223372036854775807 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775807_int64_ssa(-4294967296); got != -9223372032559808513 { + fmt.Printf("xor_int64 9223372036854775807%s-4294967296 = %d, wanted -9223372032559808513\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775807_ssa(-4294967296); got != -9223372032559808513 { + fmt.Printf("xor_int64 -4294967296%s9223372036854775807 = %d, wanted -9223372032559808513\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775807_int64_ssa(-1); got != -9223372036854775808 { + fmt.Printf("xor_int64 9223372036854775807%s-1 = %d, wanted -9223372036854775808\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775807_ssa(-1); got != -9223372036854775808 { + fmt.Printf("xor_int64 -1%s9223372036854775807 = %d, wanted -9223372036854775808\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775807_int64_ssa(0); got != 9223372036854775807 { + fmt.Printf("xor_int64 9223372036854775807%s0 = %d, wanted 9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775807_ssa(0); got != 9223372036854775807 { + fmt.Printf("xor_int64 0%s9223372036854775807 = %d, wanted 9223372036854775807\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775807_int64_ssa(1); 
got != 9223372036854775806 { + fmt.Printf("xor_int64 9223372036854775807%s1 = %d, wanted 9223372036854775806\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775807_ssa(1); got != 9223372036854775806 { + fmt.Printf("xor_int64 1%s9223372036854775807 = %d, wanted 9223372036854775806\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775807_int64_ssa(4294967296); got != 9223372032559808511 { + fmt.Printf("xor_int64 9223372036854775807%s4294967296 = %d, wanted 9223372032559808511\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775807_ssa(4294967296); got != 9223372032559808511 { + fmt.Printf("xor_int64 4294967296%s9223372036854775807 = %d, wanted 9223372032559808511\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775807_int64_ssa(9223372036854775806); got != 1 { + fmt.Printf("xor_int64 9223372036854775807%s9223372036854775806 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775807_ssa(9223372036854775806); got != 1 { + fmt.Printf("xor_int64 9223372036854775806%s9223372036854775807 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_9223372036854775807_int64_ssa(9223372036854775807); got != 0 { + fmt.Printf("xor_int64 9223372036854775807%s9223372036854775807 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int64_9223372036854775807_ssa(9223372036854775807); got != 0 { + fmt.Printf("xor_int64 9223372036854775807%s9223372036854775807 = %d, wanted 0\n", `^`, got) + failed = true + } + if got := add_0_uint32_ssa(0); got != 0 { fmt.Printf("add_uint32 0%s0 = %d, wanted 0\n", `+`, got) failed = true @@ -8449,6 +12919,276 @@ func main() { failed = true } + if got := and_0_uint32_ssa(0); got != 0 { + fmt.Printf("and_uint32 0%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint32_0_ssa(0); got != 0 { + fmt.Printf("and_uint32 0%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_uint32_ssa(1); 
got != 0 { + fmt.Printf("and_uint32 0%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint32_0_ssa(1); got != 0 { + fmt.Printf("and_uint32 1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_uint32_ssa(4294967295); got != 0 { + fmt.Printf("and_uint32 0%s4294967295 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint32_0_ssa(4294967295); got != 0 { + fmt.Printf("and_uint32 4294967295%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_uint32_ssa(0); got != 0 { + fmt.Printf("and_uint32 1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint32_1_ssa(0); got != 0 { + fmt.Printf("and_uint32 0%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_uint32_ssa(1); got != 1 { + fmt.Printf("and_uint32 1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_uint32_1_ssa(1); got != 1 { + fmt.Printf("and_uint32 1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_1_uint32_ssa(4294967295); got != 1 { + fmt.Printf("and_uint32 1%s4294967295 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_uint32_1_ssa(4294967295); got != 1 { + fmt.Printf("and_uint32 4294967295%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_4294967295_uint32_ssa(0); got != 0 { + fmt.Printf("and_uint32 4294967295%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint32_4294967295_ssa(0); got != 0 { + fmt.Printf("and_uint32 0%s4294967295 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_4294967295_uint32_ssa(1); got != 1 { + fmt.Printf("and_uint32 4294967295%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_uint32_4294967295_ssa(1); got != 1 { + fmt.Printf("and_uint32 1%s4294967295 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_4294967295_uint32_ssa(4294967295); got != 4294967295 { + fmt.Printf("and_uint32 4294967295%s4294967295 = %d, wanted 
4294967295\n", `&`, got) + failed = true + } + + if got := and_uint32_4294967295_ssa(4294967295); got != 4294967295 { + fmt.Printf("and_uint32 4294967295%s4294967295 = %d, wanted 4294967295\n", `&`, got) + failed = true + } + + if got := or_0_uint32_ssa(0); got != 0 { + fmt.Printf("or_uint32 0%s0 = %d, wanted 0\n", `|`, got) + failed = true + } + + if got := or_uint32_0_ssa(0); got != 0 { + fmt.Printf("or_uint32 0%s0 = %d, wanted 0\n", `|`, got) + failed = true + } + + if got := or_0_uint32_ssa(1); got != 1 { + fmt.Printf("or_uint32 0%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_uint32_0_ssa(1); got != 1 { + fmt.Printf("or_uint32 1%s0 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_0_uint32_ssa(4294967295); got != 4294967295 { + fmt.Printf("or_uint32 0%s4294967295 = %d, wanted 4294967295\n", `|`, got) + failed = true + } + + if got := or_uint32_0_ssa(4294967295); got != 4294967295 { + fmt.Printf("or_uint32 4294967295%s0 = %d, wanted 4294967295\n", `|`, got) + failed = true + } + + if got := or_1_uint32_ssa(0); got != 1 { + fmt.Printf("or_uint32 1%s0 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_uint32_1_ssa(0); got != 1 { + fmt.Printf("or_uint32 0%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_1_uint32_ssa(1); got != 1 { + fmt.Printf("or_uint32 1%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_uint32_1_ssa(1); got != 1 { + fmt.Printf("or_uint32 1%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_1_uint32_ssa(4294967295); got != 4294967295 { + fmt.Printf("or_uint32 1%s4294967295 = %d, wanted 4294967295\n", `|`, got) + failed = true + } + + if got := or_uint32_1_ssa(4294967295); got != 4294967295 { + fmt.Printf("or_uint32 4294967295%s1 = %d, wanted 4294967295\n", `|`, got) + failed = true + } + + if got := or_4294967295_uint32_ssa(0); got != 4294967295 { + fmt.Printf("or_uint32 4294967295%s0 = %d, wanted 4294967295\n", `|`, got) + failed = true 
+ } + + if got := or_uint32_4294967295_ssa(0); got != 4294967295 { + fmt.Printf("or_uint32 0%s4294967295 = %d, wanted 4294967295\n", `|`, got) + failed = true + } + + if got := or_4294967295_uint32_ssa(1); got != 4294967295 { + fmt.Printf("or_uint32 4294967295%s1 = %d, wanted 4294967295\n", `|`, got) + failed = true + } + + if got := or_uint32_4294967295_ssa(1); got != 4294967295 { + fmt.Printf("or_uint32 1%s4294967295 = %d, wanted 4294967295\n", `|`, got) + failed = true + } + + if got := or_4294967295_uint32_ssa(4294967295); got != 4294967295 { + fmt.Printf("or_uint32 4294967295%s4294967295 = %d, wanted 4294967295\n", `|`, got) + failed = true + } + + if got := or_uint32_4294967295_ssa(4294967295); got != 4294967295 { + fmt.Printf("or_uint32 4294967295%s4294967295 = %d, wanted 4294967295\n", `|`, got) + failed = true + } + + if got := xor_0_uint32_ssa(0); got != 0 { + fmt.Printf("xor_uint32 0%s0 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_uint32_0_ssa(0); got != 0 { + fmt.Printf("xor_uint32 0%s0 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_0_uint32_ssa(1); got != 1 { + fmt.Printf("xor_uint32 0%s1 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_uint32_0_ssa(1); got != 1 { + fmt.Printf("xor_uint32 1%s0 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_0_uint32_ssa(4294967295); got != 4294967295 { + fmt.Printf("xor_uint32 0%s4294967295 = %d, wanted 4294967295\n", `^`, got) + failed = true + } + + if got := xor_uint32_0_ssa(4294967295); got != 4294967295 { + fmt.Printf("xor_uint32 4294967295%s0 = %d, wanted 4294967295\n", `^`, got) + failed = true + } + + if got := xor_1_uint32_ssa(0); got != 1 { + fmt.Printf("xor_uint32 1%s0 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_uint32_1_ssa(0); got != 1 { + fmt.Printf("xor_uint32 0%s1 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_1_uint32_ssa(1); got != 0 { + fmt.Printf("xor_uint32 1%s1 = %d, wanted 
0\n", `^`, got) + failed = true + } + + if got := xor_uint32_1_ssa(1); got != 0 { + fmt.Printf("xor_uint32 1%s1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_1_uint32_ssa(4294967295); got != 4294967294 { + fmt.Printf("xor_uint32 1%s4294967295 = %d, wanted 4294967294\n", `^`, got) + failed = true + } + + if got := xor_uint32_1_ssa(4294967295); got != 4294967294 { + fmt.Printf("xor_uint32 4294967295%s1 = %d, wanted 4294967294\n", `^`, got) + failed = true + } + + if got := xor_4294967295_uint32_ssa(0); got != 4294967295 { + fmt.Printf("xor_uint32 4294967295%s0 = %d, wanted 4294967295\n", `^`, got) + failed = true + } + + if got := xor_uint32_4294967295_ssa(0); got != 4294967295 { + fmt.Printf("xor_uint32 0%s4294967295 = %d, wanted 4294967295\n", `^`, got) + failed = true + } + + if got := xor_4294967295_uint32_ssa(1); got != 4294967294 { + fmt.Printf("xor_uint32 4294967295%s1 = %d, wanted 4294967294\n", `^`, got) + failed = true + } + + if got := xor_uint32_4294967295_ssa(1); got != 4294967294 { + fmt.Printf("xor_uint32 1%s4294967295 = %d, wanted 4294967294\n", `^`, got) + failed = true + } + + if got := xor_4294967295_uint32_ssa(4294967295); got != 0 { + fmt.Printf("xor_uint32 4294967295%s4294967295 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_uint32_4294967295_ssa(4294967295); got != 0 { + fmt.Printf("xor_uint32 4294967295%s4294967295 = %d, wanted 0\n", `^`, got) + failed = true + } + if got := add_Neg2147483648_int32_ssa(-2147483648); got != 0 { fmt.Printf("add_int32 -2147483648%s-2147483648 = %d, wanted 0\n", `+`, got) failed = true @@ -10129,6 +14869,1086 @@ func main() { failed = true } + if got := and_Neg2147483648_int32_ssa(-2147483648); got != -2147483648 { + fmt.Printf("and_int32 -2147483648%s-2147483648 = %d, wanted -2147483648\n", `&`, got) + failed = true + } + + if got := and_int32_Neg2147483648_ssa(-2147483648); got != -2147483648 { + fmt.Printf("and_int32 -2147483648%s-2147483648 = %d, wanted -2147483648\n", 
`&`, got) + failed = true + } + + if got := and_Neg2147483648_int32_ssa(-2147483647); got != -2147483648 { + fmt.Printf("and_int32 -2147483648%s-2147483647 = %d, wanted -2147483648\n", `&`, got) + failed = true + } + + if got := and_int32_Neg2147483648_ssa(-2147483647); got != -2147483648 { + fmt.Printf("and_int32 -2147483647%s-2147483648 = %d, wanted -2147483648\n", `&`, got) + failed = true + } + + if got := and_Neg2147483648_int32_ssa(-1); got != -2147483648 { + fmt.Printf("and_int32 -2147483648%s-1 = %d, wanted -2147483648\n", `&`, got) + failed = true + } + + if got := and_int32_Neg2147483648_ssa(-1); got != -2147483648 { + fmt.Printf("and_int32 -1%s-2147483648 = %d, wanted -2147483648\n", `&`, got) + failed = true + } + + if got := and_Neg2147483648_int32_ssa(0); got != 0 { + fmt.Printf("and_int32 -2147483648%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int32_Neg2147483648_ssa(0); got != 0 { + fmt.Printf("and_int32 0%s-2147483648 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg2147483648_int32_ssa(1); got != 0 { + fmt.Printf("and_int32 -2147483648%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int32_Neg2147483648_ssa(1); got != 0 { + fmt.Printf("and_int32 1%s-2147483648 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg2147483648_int32_ssa(2147483647); got != 0 { + fmt.Printf("and_int32 -2147483648%s2147483647 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int32_Neg2147483648_ssa(2147483647); got != 0 { + fmt.Printf("and_int32 2147483647%s-2147483648 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg2147483647_int32_ssa(-2147483648); got != -2147483648 { + fmt.Printf("and_int32 -2147483647%s-2147483648 = %d, wanted -2147483648\n", `&`, got) + failed = true + } + + if got := and_int32_Neg2147483647_ssa(-2147483648); got != -2147483648 { + fmt.Printf("and_int32 -2147483648%s-2147483647 = %d, wanted -2147483648\n", `&`, got) + failed 
= true + } + + if got := and_Neg2147483647_int32_ssa(-2147483647); got != -2147483647 { + fmt.Printf("and_int32 -2147483647%s-2147483647 = %d, wanted -2147483647\n", `&`, got) + failed = true + } + + if got := and_int32_Neg2147483647_ssa(-2147483647); got != -2147483647 { + fmt.Printf("and_int32 -2147483647%s-2147483647 = %d, wanted -2147483647\n", `&`, got) + failed = true + } + + if got := and_Neg2147483647_int32_ssa(-1); got != -2147483647 { + fmt.Printf("and_int32 -2147483647%s-1 = %d, wanted -2147483647\n", `&`, got) + failed = true + } + + if got := and_int32_Neg2147483647_ssa(-1); got != -2147483647 { + fmt.Printf("and_int32 -1%s-2147483647 = %d, wanted -2147483647\n", `&`, got) + failed = true + } + + if got := and_Neg2147483647_int32_ssa(0); got != 0 { + fmt.Printf("and_int32 -2147483647%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int32_Neg2147483647_ssa(0); got != 0 { + fmt.Printf("and_int32 0%s-2147483647 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg2147483647_int32_ssa(1); got != 1 { + fmt.Printf("and_int32 -2147483647%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int32_Neg2147483647_ssa(1); got != 1 { + fmt.Printf("and_int32 1%s-2147483647 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_Neg2147483647_int32_ssa(2147483647); got != 1 { + fmt.Printf("and_int32 -2147483647%s2147483647 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int32_Neg2147483647_ssa(2147483647); got != 1 { + fmt.Printf("and_int32 2147483647%s-2147483647 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_Neg1_int32_ssa(-2147483648); got != -2147483648 { + fmt.Printf("and_int32 -1%s-2147483648 = %d, wanted -2147483648\n", `&`, got) + failed = true + } + + if got := and_int32_Neg1_ssa(-2147483648); got != -2147483648 { + fmt.Printf("and_int32 -2147483648%s-1 = %d, wanted -2147483648\n", `&`, got) + failed = true + } + + if got := 
and_Neg1_int32_ssa(-2147483647); got != -2147483647 { + fmt.Printf("and_int32 -1%s-2147483647 = %d, wanted -2147483647\n", `&`, got) + failed = true + } + + if got := and_int32_Neg1_ssa(-2147483647); got != -2147483647 { + fmt.Printf("and_int32 -2147483647%s-1 = %d, wanted -2147483647\n", `&`, got) + failed = true + } + + if got := and_Neg1_int32_ssa(-1); got != -1 { + fmt.Printf("and_int32 -1%s-1 = %d, wanted -1\n", `&`, got) + failed = true + } + + if got := and_int32_Neg1_ssa(-1); got != -1 { + fmt.Printf("and_int32 -1%s-1 = %d, wanted -1\n", `&`, got) + failed = true + } + + if got := and_Neg1_int32_ssa(0); got != 0 { + fmt.Printf("and_int32 -1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int32_Neg1_ssa(0); got != 0 { + fmt.Printf("and_int32 0%s-1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg1_int32_ssa(1); got != 1 { + fmt.Printf("and_int32 -1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int32_Neg1_ssa(1); got != 1 { + fmt.Printf("and_int32 1%s-1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_Neg1_int32_ssa(2147483647); got != 2147483647 { + fmt.Printf("and_int32 -1%s2147483647 = %d, wanted 2147483647\n", `&`, got) + failed = true + } + + if got := and_int32_Neg1_ssa(2147483647); got != 2147483647 { + fmt.Printf("and_int32 2147483647%s-1 = %d, wanted 2147483647\n", `&`, got) + failed = true + } + + if got := and_0_int32_ssa(-2147483648); got != 0 { + fmt.Printf("and_int32 0%s-2147483648 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int32_0_ssa(-2147483648); got != 0 { + fmt.Printf("and_int32 -2147483648%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int32_ssa(-2147483647); got != 0 { + fmt.Printf("and_int32 0%s-2147483647 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int32_0_ssa(-2147483647); got != 0 { + fmt.Printf("and_int32 -2147483647%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got 
:= and_0_int32_ssa(-1); got != 0 { + fmt.Printf("and_int32 0%s-1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int32_0_ssa(-1); got != 0 { + fmt.Printf("and_int32 -1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int32_ssa(0); got != 0 { + fmt.Printf("and_int32 0%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int32_0_ssa(0); got != 0 { + fmt.Printf("and_int32 0%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int32_ssa(1); got != 0 { + fmt.Printf("and_int32 0%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int32_0_ssa(1); got != 0 { + fmt.Printf("and_int32 1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int32_ssa(2147483647); got != 0 { + fmt.Printf("and_int32 0%s2147483647 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int32_0_ssa(2147483647); got != 0 { + fmt.Printf("and_int32 2147483647%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_int32_ssa(-2147483648); got != 0 { + fmt.Printf("and_int32 1%s-2147483648 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int32_1_ssa(-2147483648); got != 0 { + fmt.Printf("and_int32 -2147483648%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_int32_ssa(-2147483647); got != 1 { + fmt.Printf("and_int32 1%s-2147483647 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int32_1_ssa(-2147483647); got != 1 { + fmt.Printf("and_int32 -2147483647%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_1_int32_ssa(-1); got != 1 { + fmt.Printf("and_int32 1%s-1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int32_1_ssa(-1); got != 1 { + fmt.Printf("and_int32 -1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_1_int32_ssa(0); got != 0 { + fmt.Printf("and_int32 1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int32_1_ssa(0); got != 
0 { + fmt.Printf("and_int32 0%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_int32_ssa(1); got != 1 { + fmt.Printf("and_int32 1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int32_1_ssa(1); got != 1 { + fmt.Printf("and_int32 1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_1_int32_ssa(2147483647); got != 1 { + fmt.Printf("and_int32 1%s2147483647 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int32_1_ssa(2147483647); got != 1 { + fmt.Printf("and_int32 2147483647%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_2147483647_int32_ssa(-2147483648); got != 0 { + fmt.Printf("and_int32 2147483647%s-2147483648 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int32_2147483647_ssa(-2147483648); got != 0 { + fmt.Printf("and_int32 -2147483648%s2147483647 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_2147483647_int32_ssa(-2147483647); got != 1 { + fmt.Printf("and_int32 2147483647%s-2147483647 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int32_2147483647_ssa(-2147483647); got != 1 { + fmt.Printf("and_int32 -2147483647%s2147483647 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_2147483647_int32_ssa(-1); got != 2147483647 { + fmt.Printf("and_int32 2147483647%s-1 = %d, wanted 2147483647\n", `&`, got) + failed = true + } + + if got := and_int32_2147483647_ssa(-1); got != 2147483647 { + fmt.Printf("and_int32 -1%s2147483647 = %d, wanted 2147483647\n", `&`, got) + failed = true + } + + if got := and_2147483647_int32_ssa(0); got != 0 { + fmt.Printf("and_int32 2147483647%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int32_2147483647_ssa(0); got != 0 { + fmt.Printf("and_int32 0%s2147483647 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_2147483647_int32_ssa(1); got != 1 { + fmt.Printf("and_int32 2147483647%s1 = %d, wanted 1\n", `&`, got) + failed = true + } 
+ + if got := and_int32_2147483647_ssa(1); got != 1 { + fmt.Printf("and_int32 1%s2147483647 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_2147483647_int32_ssa(2147483647); got != 2147483647 { + fmt.Printf("and_int32 2147483647%s2147483647 = %d, wanted 2147483647\n", `&`, got) + failed = true + } + + if got := and_int32_2147483647_ssa(2147483647); got != 2147483647 { + fmt.Printf("and_int32 2147483647%s2147483647 = %d, wanted 2147483647\n", `&`, got) + failed = true + } + + if got := or_Neg2147483648_int32_ssa(-2147483648); got != -2147483648 { + fmt.Printf("or_int32 -2147483648%s-2147483648 = %d, wanted -2147483648\n", `|`, got) + failed = true + } + + if got := or_int32_Neg2147483648_ssa(-2147483648); got != -2147483648 { + fmt.Printf("or_int32 -2147483648%s-2147483648 = %d, wanted -2147483648\n", `|`, got) + failed = true + } + + if got := or_Neg2147483648_int32_ssa(-2147483647); got != -2147483647 { + fmt.Printf("or_int32 -2147483648%s-2147483647 = %d, wanted -2147483647\n", `|`, got) + failed = true + } + + if got := or_int32_Neg2147483648_ssa(-2147483647); got != -2147483647 { + fmt.Printf("or_int32 -2147483647%s-2147483648 = %d, wanted -2147483647\n", `|`, got) + failed = true + } + + if got := or_Neg2147483648_int32_ssa(-1); got != -1 { + fmt.Printf("or_int32 -2147483648%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int32_Neg2147483648_ssa(-1); got != -1 { + fmt.Printf("or_int32 -1%s-2147483648 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg2147483648_int32_ssa(0); got != -2147483648 { + fmt.Printf("or_int32 -2147483648%s0 = %d, wanted -2147483648\n", `|`, got) + failed = true + } + + if got := or_int32_Neg2147483648_ssa(0); got != -2147483648 { + fmt.Printf("or_int32 0%s-2147483648 = %d, wanted -2147483648\n", `|`, got) + failed = true + } + + if got := or_Neg2147483648_int32_ssa(1); got != -2147483647 { + fmt.Printf("or_int32 -2147483648%s1 = %d, wanted -2147483647\n", `|`, got) + failed 
= true + } + + if got := or_int32_Neg2147483648_ssa(1); got != -2147483647 { + fmt.Printf("or_int32 1%s-2147483648 = %d, wanted -2147483647\n", `|`, got) + failed = true + } + + if got := or_Neg2147483648_int32_ssa(2147483647); got != -1 { + fmt.Printf("or_int32 -2147483648%s2147483647 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int32_Neg2147483648_ssa(2147483647); got != -1 { + fmt.Printf("or_int32 2147483647%s-2147483648 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg2147483647_int32_ssa(-2147483648); got != -2147483647 { + fmt.Printf("or_int32 -2147483647%s-2147483648 = %d, wanted -2147483647\n", `|`, got) + failed = true + } + + if got := or_int32_Neg2147483647_ssa(-2147483648); got != -2147483647 { + fmt.Printf("or_int32 -2147483648%s-2147483647 = %d, wanted -2147483647\n", `|`, got) + failed = true + } + + if got := or_Neg2147483647_int32_ssa(-2147483647); got != -2147483647 { + fmt.Printf("or_int32 -2147483647%s-2147483647 = %d, wanted -2147483647\n", `|`, got) + failed = true + } + + if got := or_int32_Neg2147483647_ssa(-2147483647); got != -2147483647 { + fmt.Printf("or_int32 -2147483647%s-2147483647 = %d, wanted -2147483647\n", `|`, got) + failed = true + } + + if got := or_Neg2147483647_int32_ssa(-1); got != -1 { + fmt.Printf("or_int32 -2147483647%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int32_Neg2147483647_ssa(-1); got != -1 { + fmt.Printf("or_int32 -1%s-2147483647 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg2147483647_int32_ssa(0); got != -2147483647 { + fmt.Printf("or_int32 -2147483647%s0 = %d, wanted -2147483647\n", `|`, got) + failed = true + } + + if got := or_int32_Neg2147483647_ssa(0); got != -2147483647 { + fmt.Printf("or_int32 0%s-2147483647 = %d, wanted -2147483647\n", `|`, got) + failed = true + } + + if got := or_Neg2147483647_int32_ssa(1); got != -2147483647 { + fmt.Printf("or_int32 -2147483647%s1 = %d, wanted -2147483647\n", `|`, got) + 
failed = true + } + + if got := or_int32_Neg2147483647_ssa(1); got != -2147483647 { + fmt.Printf("or_int32 1%s-2147483647 = %d, wanted -2147483647\n", `|`, got) + failed = true + } + + if got := or_Neg2147483647_int32_ssa(2147483647); got != -1 { + fmt.Printf("or_int32 -2147483647%s2147483647 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int32_Neg2147483647_ssa(2147483647); got != -1 { + fmt.Printf("or_int32 2147483647%s-2147483647 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int32_ssa(-2147483648); got != -1 { + fmt.Printf("or_int32 -1%s-2147483648 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int32_Neg1_ssa(-2147483648); got != -1 { + fmt.Printf("or_int32 -2147483648%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int32_ssa(-2147483647); got != -1 { + fmt.Printf("or_int32 -1%s-2147483647 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int32_Neg1_ssa(-2147483647); got != -1 { + fmt.Printf("or_int32 -2147483647%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int32_ssa(-1); got != -1 { + fmt.Printf("or_int32 -1%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int32_Neg1_ssa(-1); got != -1 { + fmt.Printf("or_int32 -1%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int32_ssa(0); got != -1 { + fmt.Printf("or_int32 -1%s0 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int32_Neg1_ssa(0); got != -1 { + fmt.Printf("or_int32 0%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int32_ssa(1); got != -1 { + fmt.Printf("or_int32 -1%s1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int32_Neg1_ssa(1); got != -1 { + fmt.Printf("or_int32 1%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int32_ssa(2147483647); got != -1 { + fmt.Printf("or_int32 -1%s2147483647 = %d, wanted -1\n", `|`, got) + failed = 
true + } + + if got := or_int32_Neg1_ssa(2147483647); got != -1 { + fmt.Printf("or_int32 2147483647%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_0_int32_ssa(-2147483648); got != -2147483648 { + fmt.Printf("or_int32 0%s-2147483648 = %d, wanted -2147483648\n", `|`, got) + failed = true + } + + if got := or_int32_0_ssa(-2147483648); got != -2147483648 { + fmt.Printf("or_int32 -2147483648%s0 = %d, wanted -2147483648\n", `|`, got) + failed = true + } + + if got := or_0_int32_ssa(-2147483647); got != -2147483647 { + fmt.Printf("or_int32 0%s-2147483647 = %d, wanted -2147483647\n", `|`, got) + failed = true + } + + if got := or_int32_0_ssa(-2147483647); got != -2147483647 { + fmt.Printf("or_int32 -2147483647%s0 = %d, wanted -2147483647\n", `|`, got) + failed = true + } + + if got := or_0_int32_ssa(-1); got != -1 { + fmt.Printf("or_int32 0%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int32_0_ssa(-1); got != -1 { + fmt.Printf("or_int32 -1%s0 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_0_int32_ssa(0); got != 0 { + fmt.Printf("or_int32 0%s0 = %d, wanted 0\n", `|`, got) + failed = true + } + + if got := or_int32_0_ssa(0); got != 0 { + fmt.Printf("or_int32 0%s0 = %d, wanted 0\n", `|`, got) + failed = true + } + + if got := or_0_int32_ssa(1); got != 1 { + fmt.Printf("or_int32 0%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_int32_0_ssa(1); got != 1 { + fmt.Printf("or_int32 1%s0 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_0_int32_ssa(2147483647); got != 2147483647 { + fmt.Printf("or_int32 0%s2147483647 = %d, wanted 2147483647\n", `|`, got) + failed = true + } + + if got := or_int32_0_ssa(2147483647); got != 2147483647 { + fmt.Printf("or_int32 2147483647%s0 = %d, wanted 2147483647\n", `|`, got) + failed = true + } + + if got := or_1_int32_ssa(-2147483648); got != -2147483647 { + fmt.Printf("or_int32 1%s-2147483648 = %d, wanted -2147483647\n", `|`, got) + failed = 
true + } + + if got := or_int32_1_ssa(-2147483648); got != -2147483647 { + fmt.Printf("or_int32 -2147483648%s1 = %d, wanted -2147483647\n", `|`, got) + failed = true + } + + if got := or_1_int32_ssa(-2147483647); got != -2147483647 { + fmt.Printf("or_int32 1%s-2147483647 = %d, wanted -2147483647\n", `|`, got) + failed = true + } + + if got := or_int32_1_ssa(-2147483647); got != -2147483647 { + fmt.Printf("or_int32 -2147483647%s1 = %d, wanted -2147483647\n", `|`, got) + failed = true + } + + if got := or_1_int32_ssa(-1); got != -1 { + fmt.Printf("or_int32 1%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int32_1_ssa(-1); got != -1 { + fmt.Printf("or_int32 -1%s1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_1_int32_ssa(0); got != 1 { + fmt.Printf("or_int32 1%s0 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_int32_1_ssa(0); got != 1 { + fmt.Printf("or_int32 0%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_1_int32_ssa(1); got != 1 { + fmt.Printf("or_int32 1%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_int32_1_ssa(1); got != 1 { + fmt.Printf("or_int32 1%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_1_int32_ssa(2147483647); got != 2147483647 { + fmt.Printf("or_int32 1%s2147483647 = %d, wanted 2147483647\n", `|`, got) + failed = true + } + + if got := or_int32_1_ssa(2147483647); got != 2147483647 { + fmt.Printf("or_int32 2147483647%s1 = %d, wanted 2147483647\n", `|`, got) + failed = true + } + + if got := or_2147483647_int32_ssa(-2147483648); got != -1 { + fmt.Printf("or_int32 2147483647%s-2147483648 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int32_2147483647_ssa(-2147483648); got != -1 { + fmt.Printf("or_int32 -2147483648%s2147483647 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_2147483647_int32_ssa(-2147483647); got != -1 { + fmt.Printf("or_int32 2147483647%s-2147483647 = %d, wanted -1\n", `|`, 
got) + failed = true + } + + if got := or_int32_2147483647_ssa(-2147483647); got != -1 { + fmt.Printf("or_int32 -2147483647%s2147483647 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_2147483647_int32_ssa(-1); got != -1 { + fmt.Printf("or_int32 2147483647%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int32_2147483647_ssa(-1); got != -1 { + fmt.Printf("or_int32 -1%s2147483647 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_2147483647_int32_ssa(0); got != 2147483647 { + fmt.Printf("or_int32 2147483647%s0 = %d, wanted 2147483647\n", `|`, got) + failed = true + } + + if got := or_int32_2147483647_ssa(0); got != 2147483647 { + fmt.Printf("or_int32 0%s2147483647 = %d, wanted 2147483647\n", `|`, got) + failed = true + } + + if got := or_2147483647_int32_ssa(1); got != 2147483647 { + fmt.Printf("or_int32 2147483647%s1 = %d, wanted 2147483647\n", `|`, got) + failed = true + } + + if got := or_int32_2147483647_ssa(1); got != 2147483647 { + fmt.Printf("or_int32 1%s2147483647 = %d, wanted 2147483647\n", `|`, got) + failed = true + } + + if got := or_2147483647_int32_ssa(2147483647); got != 2147483647 { + fmt.Printf("or_int32 2147483647%s2147483647 = %d, wanted 2147483647\n", `|`, got) + failed = true + } + + if got := or_int32_2147483647_ssa(2147483647); got != 2147483647 { + fmt.Printf("or_int32 2147483647%s2147483647 = %d, wanted 2147483647\n", `|`, got) + failed = true + } + + if got := xor_Neg2147483648_int32_ssa(-2147483648); got != 0 { + fmt.Printf("xor_int32 -2147483648%s-2147483648 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int32_Neg2147483648_ssa(-2147483648); got != 0 { + fmt.Printf("xor_int32 -2147483648%s-2147483648 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_Neg2147483648_int32_ssa(-2147483647); got != 1 { + fmt.Printf("xor_int32 -2147483648%s-2147483647 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := 
xor_int32_Neg2147483648_ssa(-2147483647); got != 1 { + fmt.Printf("xor_int32 -2147483647%s-2147483648 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_Neg2147483648_int32_ssa(-1); got != 2147483647 { + fmt.Printf("xor_int32 -2147483648%s-1 = %d, wanted 2147483647\n", `^`, got) + failed = true + } + + if got := xor_int32_Neg2147483648_ssa(-1); got != 2147483647 { + fmt.Printf("xor_int32 -1%s-2147483648 = %d, wanted 2147483647\n", `^`, got) + failed = true + } + + if got := xor_Neg2147483648_int32_ssa(0); got != -2147483648 { + fmt.Printf("xor_int32 -2147483648%s0 = %d, wanted -2147483648\n", `^`, got) + failed = true + } + + if got := xor_int32_Neg2147483648_ssa(0); got != -2147483648 { + fmt.Printf("xor_int32 0%s-2147483648 = %d, wanted -2147483648\n", `^`, got) + failed = true + } + + if got := xor_Neg2147483648_int32_ssa(1); got != -2147483647 { + fmt.Printf("xor_int32 -2147483648%s1 = %d, wanted -2147483647\n", `^`, got) + failed = true + } + + if got := xor_int32_Neg2147483648_ssa(1); got != -2147483647 { + fmt.Printf("xor_int32 1%s-2147483648 = %d, wanted -2147483647\n", `^`, got) + failed = true + } + + if got := xor_Neg2147483648_int32_ssa(2147483647); got != -1 { + fmt.Printf("xor_int32 -2147483648%s2147483647 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int32_Neg2147483648_ssa(2147483647); got != -1 { + fmt.Printf("xor_int32 2147483647%s-2147483648 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_Neg2147483647_int32_ssa(-2147483648); got != 1 { + fmt.Printf("xor_int32 -2147483647%s-2147483648 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int32_Neg2147483647_ssa(-2147483648); got != 1 { + fmt.Printf("xor_int32 -2147483648%s-2147483647 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_Neg2147483647_int32_ssa(-2147483647); got != 0 { + fmt.Printf("xor_int32 -2147483647%s-2147483647 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := 
xor_int32_Neg2147483647_ssa(-2147483647); got != 0 { + fmt.Printf("xor_int32 -2147483647%s-2147483647 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_Neg2147483647_int32_ssa(-1); got != 2147483646 { + fmt.Printf("xor_int32 -2147483647%s-1 = %d, wanted 2147483646\n", `^`, got) + failed = true + } + + if got := xor_int32_Neg2147483647_ssa(-1); got != 2147483646 { + fmt.Printf("xor_int32 -1%s-2147483647 = %d, wanted 2147483646\n", `^`, got) + failed = true + } + + if got := xor_Neg2147483647_int32_ssa(0); got != -2147483647 { + fmt.Printf("xor_int32 -2147483647%s0 = %d, wanted -2147483647\n", `^`, got) + failed = true + } + + if got := xor_int32_Neg2147483647_ssa(0); got != -2147483647 { + fmt.Printf("xor_int32 0%s-2147483647 = %d, wanted -2147483647\n", `^`, got) + failed = true + } + + if got := xor_Neg2147483647_int32_ssa(1); got != -2147483648 { + fmt.Printf("xor_int32 -2147483647%s1 = %d, wanted -2147483648\n", `^`, got) + failed = true + } + + if got := xor_int32_Neg2147483647_ssa(1); got != -2147483648 { + fmt.Printf("xor_int32 1%s-2147483647 = %d, wanted -2147483648\n", `^`, got) + failed = true + } + + if got := xor_Neg2147483647_int32_ssa(2147483647); got != -2 { + fmt.Printf("xor_int32 -2147483647%s2147483647 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int32_Neg2147483647_ssa(2147483647); got != -2 { + fmt.Printf("xor_int32 2147483647%s-2147483647 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int32_ssa(-2147483648); got != 2147483647 { + fmt.Printf("xor_int32 -1%s-2147483648 = %d, wanted 2147483647\n", `^`, got) + failed = true + } + + if got := xor_int32_Neg1_ssa(-2147483648); got != 2147483647 { + fmt.Printf("xor_int32 -2147483648%s-1 = %d, wanted 2147483647\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int32_ssa(-2147483647); got != 2147483646 { + fmt.Printf("xor_int32 -1%s-2147483647 = %d, wanted 2147483646\n", `^`, got) + failed = true + } + + if got := 
xor_int32_Neg1_ssa(-2147483647); got != 2147483646 { + fmt.Printf("xor_int32 -2147483647%s-1 = %d, wanted 2147483646\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int32_ssa(-1); got != 0 { + fmt.Printf("xor_int32 -1%s-1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int32_Neg1_ssa(-1); got != 0 { + fmt.Printf("xor_int32 -1%s-1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int32_ssa(0); got != -1 { + fmt.Printf("xor_int32 -1%s0 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int32_Neg1_ssa(0); got != -1 { + fmt.Printf("xor_int32 0%s-1 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int32_ssa(1); got != -2 { + fmt.Printf("xor_int32 -1%s1 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int32_Neg1_ssa(1); got != -2 { + fmt.Printf("xor_int32 1%s-1 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int32_ssa(2147483647); got != -2147483648 { + fmt.Printf("xor_int32 -1%s2147483647 = %d, wanted -2147483648\n", `^`, got) + failed = true + } + + if got := xor_int32_Neg1_ssa(2147483647); got != -2147483648 { + fmt.Printf("xor_int32 2147483647%s-1 = %d, wanted -2147483648\n", `^`, got) + failed = true + } + + if got := xor_0_int32_ssa(-2147483648); got != -2147483648 { + fmt.Printf("xor_int32 0%s-2147483648 = %d, wanted -2147483648\n", `^`, got) + failed = true + } + + if got := xor_int32_0_ssa(-2147483648); got != -2147483648 { + fmt.Printf("xor_int32 -2147483648%s0 = %d, wanted -2147483648\n", `^`, got) + failed = true + } + + if got := xor_0_int32_ssa(-2147483647); got != -2147483647 { + fmt.Printf("xor_int32 0%s-2147483647 = %d, wanted -2147483647\n", `^`, got) + failed = true + } + + if got := xor_int32_0_ssa(-2147483647); got != -2147483647 { + fmt.Printf("xor_int32 -2147483647%s0 = %d, wanted -2147483647\n", `^`, got) + failed = true + } + + if got := xor_0_int32_ssa(-1); got != -1 { + fmt.Printf("xor_int32 0%s-1 = %d, wanted 
-1\n", `^`, got) + failed = true + } + + if got := xor_int32_0_ssa(-1); got != -1 { + fmt.Printf("xor_int32 -1%s0 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_0_int32_ssa(0); got != 0 { + fmt.Printf("xor_int32 0%s0 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int32_0_ssa(0); got != 0 { + fmt.Printf("xor_int32 0%s0 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_0_int32_ssa(1); got != 1 { + fmt.Printf("xor_int32 0%s1 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int32_0_ssa(1); got != 1 { + fmt.Printf("xor_int32 1%s0 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_0_int32_ssa(2147483647); got != 2147483647 { + fmt.Printf("xor_int32 0%s2147483647 = %d, wanted 2147483647\n", `^`, got) + failed = true + } + + if got := xor_int32_0_ssa(2147483647); got != 2147483647 { + fmt.Printf("xor_int32 2147483647%s0 = %d, wanted 2147483647\n", `^`, got) + failed = true + } + + if got := xor_1_int32_ssa(-2147483648); got != -2147483647 { + fmt.Printf("xor_int32 1%s-2147483648 = %d, wanted -2147483647\n", `^`, got) + failed = true + } + + if got := xor_int32_1_ssa(-2147483648); got != -2147483647 { + fmt.Printf("xor_int32 -2147483648%s1 = %d, wanted -2147483647\n", `^`, got) + failed = true + } + + if got := xor_1_int32_ssa(-2147483647); got != -2147483648 { + fmt.Printf("xor_int32 1%s-2147483647 = %d, wanted -2147483648\n", `^`, got) + failed = true + } + + if got := xor_int32_1_ssa(-2147483647); got != -2147483648 { + fmt.Printf("xor_int32 -2147483647%s1 = %d, wanted -2147483648\n", `^`, got) + failed = true + } + + if got := xor_1_int32_ssa(-1); got != -2 { + fmt.Printf("xor_int32 1%s-1 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int32_1_ssa(-1); got != -2 { + fmt.Printf("xor_int32 -1%s1 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_1_int32_ssa(0); got != 1 { + fmt.Printf("xor_int32 1%s0 = %d, wanted 1\n", `^`, got) + failed = true 
+ } + + if got := xor_int32_1_ssa(0); got != 1 { + fmt.Printf("xor_int32 0%s1 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_1_int32_ssa(1); got != 0 { + fmt.Printf("xor_int32 1%s1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int32_1_ssa(1); got != 0 { + fmt.Printf("xor_int32 1%s1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_1_int32_ssa(2147483647); got != 2147483646 { + fmt.Printf("xor_int32 1%s2147483647 = %d, wanted 2147483646\n", `^`, got) + failed = true + } + + if got := xor_int32_1_ssa(2147483647); got != 2147483646 { + fmt.Printf("xor_int32 2147483647%s1 = %d, wanted 2147483646\n", `^`, got) + failed = true + } + + if got := xor_2147483647_int32_ssa(-2147483648); got != -1 { + fmt.Printf("xor_int32 2147483647%s-2147483648 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int32_2147483647_ssa(-2147483648); got != -1 { + fmt.Printf("xor_int32 -2147483648%s2147483647 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_2147483647_int32_ssa(-2147483647); got != -2 { + fmt.Printf("xor_int32 2147483647%s-2147483647 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int32_2147483647_ssa(-2147483647); got != -2 { + fmt.Printf("xor_int32 -2147483647%s2147483647 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_2147483647_int32_ssa(-1); got != -2147483648 { + fmt.Printf("xor_int32 2147483647%s-1 = %d, wanted -2147483648\n", `^`, got) + failed = true + } + + if got := xor_int32_2147483647_ssa(-1); got != -2147483648 { + fmt.Printf("xor_int32 -1%s2147483647 = %d, wanted -2147483648\n", `^`, got) + failed = true + } + + if got := xor_2147483647_int32_ssa(0); got != 2147483647 { + fmt.Printf("xor_int32 2147483647%s0 = %d, wanted 2147483647\n", `^`, got) + failed = true + } + + if got := xor_int32_2147483647_ssa(0); got != 2147483647 { + fmt.Printf("xor_int32 0%s2147483647 = %d, wanted 2147483647\n", `^`, got) + failed = true + } + + if got 
:= xor_2147483647_int32_ssa(1); got != 2147483646 { + fmt.Printf("xor_int32 2147483647%s1 = %d, wanted 2147483646\n", `^`, got) + failed = true + } + + if got := xor_int32_2147483647_ssa(1); got != 2147483646 { + fmt.Printf("xor_int32 1%s2147483647 = %d, wanted 2147483646\n", `^`, got) + failed = true + } + + if got := xor_2147483647_int32_ssa(2147483647); got != 0 { + fmt.Printf("xor_int32 2147483647%s2147483647 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int32_2147483647_ssa(2147483647); got != 0 { + fmt.Printf("xor_int32 2147483647%s2147483647 = %d, wanted 0\n", `^`, got) + failed = true + } + if got := add_0_uint16_ssa(0); got != 0 { fmt.Printf("add_uint16 0%s0 = %d, wanted 0\n", `+`, got) failed = true @@ -10699,6 +16519,276 @@ func main() { failed = true } + if got := and_0_uint16_ssa(0); got != 0 { + fmt.Printf("and_uint16 0%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint16_0_ssa(0); got != 0 { + fmt.Printf("and_uint16 0%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_uint16_ssa(1); got != 0 { + fmt.Printf("and_uint16 0%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint16_0_ssa(1); got != 0 { + fmt.Printf("and_uint16 1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_uint16_ssa(65535); got != 0 { + fmt.Printf("and_uint16 0%s65535 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint16_0_ssa(65535); got != 0 { + fmt.Printf("and_uint16 65535%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_uint16_ssa(0); got != 0 { + fmt.Printf("and_uint16 1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint16_1_ssa(0); got != 0 { + fmt.Printf("and_uint16 0%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_uint16_ssa(1); got != 1 { + fmt.Printf("and_uint16 1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_uint16_1_ssa(1); got != 1 { + 
fmt.Printf("and_uint16 1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_1_uint16_ssa(65535); got != 1 { + fmt.Printf("and_uint16 1%s65535 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_uint16_1_ssa(65535); got != 1 { + fmt.Printf("and_uint16 65535%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_65535_uint16_ssa(0); got != 0 { + fmt.Printf("and_uint16 65535%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint16_65535_ssa(0); got != 0 { + fmt.Printf("and_uint16 0%s65535 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_65535_uint16_ssa(1); got != 1 { + fmt.Printf("and_uint16 65535%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_uint16_65535_ssa(1); got != 1 { + fmt.Printf("and_uint16 1%s65535 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_65535_uint16_ssa(65535); got != 65535 { + fmt.Printf("and_uint16 65535%s65535 = %d, wanted 65535\n", `&`, got) + failed = true + } + + if got := and_uint16_65535_ssa(65535); got != 65535 { + fmt.Printf("and_uint16 65535%s65535 = %d, wanted 65535\n", `&`, got) + failed = true + } + + if got := or_0_uint16_ssa(0); got != 0 { + fmt.Printf("or_uint16 0%s0 = %d, wanted 0\n", `|`, got) + failed = true + } + + if got := or_uint16_0_ssa(0); got != 0 { + fmt.Printf("or_uint16 0%s0 = %d, wanted 0\n", `|`, got) + failed = true + } + + if got := or_0_uint16_ssa(1); got != 1 { + fmt.Printf("or_uint16 0%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_uint16_0_ssa(1); got != 1 { + fmt.Printf("or_uint16 1%s0 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_0_uint16_ssa(65535); got != 65535 { + fmt.Printf("or_uint16 0%s65535 = %d, wanted 65535\n", `|`, got) + failed = true + } + + if got := or_uint16_0_ssa(65535); got != 65535 { + fmt.Printf("or_uint16 65535%s0 = %d, wanted 65535\n", `|`, got) + failed = true + } + + if got := or_1_uint16_ssa(0); got != 1 { + 
fmt.Printf("or_uint16 1%s0 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_uint16_1_ssa(0); got != 1 { + fmt.Printf("or_uint16 0%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_1_uint16_ssa(1); got != 1 { + fmt.Printf("or_uint16 1%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_uint16_1_ssa(1); got != 1 { + fmt.Printf("or_uint16 1%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_1_uint16_ssa(65535); got != 65535 { + fmt.Printf("or_uint16 1%s65535 = %d, wanted 65535\n", `|`, got) + failed = true + } + + if got := or_uint16_1_ssa(65535); got != 65535 { + fmt.Printf("or_uint16 65535%s1 = %d, wanted 65535\n", `|`, got) + failed = true + } + + if got := or_65535_uint16_ssa(0); got != 65535 { + fmt.Printf("or_uint16 65535%s0 = %d, wanted 65535\n", `|`, got) + failed = true + } + + if got := or_uint16_65535_ssa(0); got != 65535 { + fmt.Printf("or_uint16 0%s65535 = %d, wanted 65535\n", `|`, got) + failed = true + } + + if got := or_65535_uint16_ssa(1); got != 65535 { + fmt.Printf("or_uint16 65535%s1 = %d, wanted 65535\n", `|`, got) + failed = true + } + + if got := or_uint16_65535_ssa(1); got != 65535 { + fmt.Printf("or_uint16 1%s65535 = %d, wanted 65535\n", `|`, got) + failed = true + } + + if got := or_65535_uint16_ssa(65535); got != 65535 { + fmt.Printf("or_uint16 65535%s65535 = %d, wanted 65535\n", `|`, got) + failed = true + } + + if got := or_uint16_65535_ssa(65535); got != 65535 { + fmt.Printf("or_uint16 65535%s65535 = %d, wanted 65535\n", `|`, got) + failed = true + } + + if got := xor_0_uint16_ssa(0); got != 0 { + fmt.Printf("xor_uint16 0%s0 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_uint16_0_ssa(0); got != 0 { + fmt.Printf("xor_uint16 0%s0 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_0_uint16_ssa(1); got != 1 { + fmt.Printf("xor_uint16 0%s1 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_uint16_0_ssa(1); got != 1 { + 
fmt.Printf("xor_uint16 1%s0 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_0_uint16_ssa(65535); got != 65535 { + fmt.Printf("xor_uint16 0%s65535 = %d, wanted 65535\n", `^`, got) + failed = true + } + + if got := xor_uint16_0_ssa(65535); got != 65535 { + fmt.Printf("xor_uint16 65535%s0 = %d, wanted 65535\n", `^`, got) + failed = true + } + + if got := xor_1_uint16_ssa(0); got != 1 { + fmt.Printf("xor_uint16 1%s0 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_uint16_1_ssa(0); got != 1 { + fmt.Printf("xor_uint16 0%s1 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_1_uint16_ssa(1); got != 0 { + fmt.Printf("xor_uint16 1%s1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_uint16_1_ssa(1); got != 0 { + fmt.Printf("xor_uint16 1%s1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_1_uint16_ssa(65535); got != 65534 { + fmt.Printf("xor_uint16 1%s65535 = %d, wanted 65534\n", `^`, got) + failed = true + } + + if got := xor_uint16_1_ssa(65535); got != 65534 { + fmt.Printf("xor_uint16 65535%s1 = %d, wanted 65534\n", `^`, got) + failed = true + } + + if got := xor_65535_uint16_ssa(0); got != 65535 { + fmt.Printf("xor_uint16 65535%s0 = %d, wanted 65535\n", `^`, got) + failed = true + } + + if got := xor_uint16_65535_ssa(0); got != 65535 { + fmt.Printf("xor_uint16 0%s65535 = %d, wanted 65535\n", `^`, got) + failed = true + } + + if got := xor_65535_uint16_ssa(1); got != 65534 { + fmt.Printf("xor_uint16 65535%s1 = %d, wanted 65534\n", `^`, got) + failed = true + } + + if got := xor_uint16_65535_ssa(1); got != 65534 { + fmt.Printf("xor_uint16 1%s65535 = %d, wanted 65534\n", `^`, got) + failed = true + } + + if got := xor_65535_uint16_ssa(65535); got != 0 { + fmt.Printf("xor_uint16 65535%s65535 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_uint16_65535_ssa(65535); got != 0 { + fmt.Printf("xor_uint16 65535%s65535 = %d, wanted 0\n", `^`, got) + failed = true + } + if got := 
add_Neg32768_int16_ssa(-32768); got != 0 { fmt.Printf("add_int16 -32768%s-32768 = %d, wanted 0\n", `+`, got) failed = true @@ -13009,6 +19099,1476 @@ func main() { failed = true } + if got := and_Neg32768_int16_ssa(-32768); got != -32768 { + fmt.Printf("and_int16 -32768%s-32768 = %d, wanted -32768\n", `&`, got) + failed = true + } + + if got := and_int16_Neg32768_ssa(-32768); got != -32768 { + fmt.Printf("and_int16 -32768%s-32768 = %d, wanted -32768\n", `&`, got) + failed = true + } + + if got := and_Neg32768_int16_ssa(-32767); got != -32768 { + fmt.Printf("and_int16 -32768%s-32767 = %d, wanted -32768\n", `&`, got) + failed = true + } + + if got := and_int16_Neg32768_ssa(-32767); got != -32768 { + fmt.Printf("and_int16 -32767%s-32768 = %d, wanted -32768\n", `&`, got) + failed = true + } + + if got := and_Neg32768_int16_ssa(-1); got != -32768 { + fmt.Printf("and_int16 -32768%s-1 = %d, wanted -32768\n", `&`, got) + failed = true + } + + if got := and_int16_Neg32768_ssa(-1); got != -32768 { + fmt.Printf("and_int16 -1%s-32768 = %d, wanted -32768\n", `&`, got) + failed = true + } + + if got := and_Neg32768_int16_ssa(0); got != 0 { + fmt.Printf("and_int16 -32768%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_Neg32768_ssa(0); got != 0 { + fmt.Printf("and_int16 0%s-32768 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg32768_int16_ssa(1); got != 0 { + fmt.Printf("and_int16 -32768%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_Neg32768_ssa(1); got != 0 { + fmt.Printf("and_int16 1%s-32768 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg32768_int16_ssa(32766); got != 0 { + fmt.Printf("and_int16 -32768%s32766 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_Neg32768_ssa(32766); got != 0 { + fmt.Printf("and_int16 32766%s-32768 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg32768_int16_ssa(32767); got != 0 { + fmt.Printf("and_int16 
-32768%s32767 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_Neg32768_ssa(32767); got != 0 { + fmt.Printf("and_int16 32767%s-32768 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg32767_int16_ssa(-32768); got != -32768 { + fmt.Printf("and_int16 -32767%s-32768 = %d, wanted -32768\n", `&`, got) + failed = true + } + + if got := and_int16_Neg32767_ssa(-32768); got != -32768 { + fmt.Printf("and_int16 -32768%s-32767 = %d, wanted -32768\n", `&`, got) + failed = true + } + + if got := and_Neg32767_int16_ssa(-32767); got != -32767 { + fmt.Printf("and_int16 -32767%s-32767 = %d, wanted -32767\n", `&`, got) + failed = true + } + + if got := and_int16_Neg32767_ssa(-32767); got != -32767 { + fmt.Printf("and_int16 -32767%s-32767 = %d, wanted -32767\n", `&`, got) + failed = true + } + + if got := and_Neg32767_int16_ssa(-1); got != -32767 { + fmt.Printf("and_int16 -32767%s-1 = %d, wanted -32767\n", `&`, got) + failed = true + } + + if got := and_int16_Neg32767_ssa(-1); got != -32767 { + fmt.Printf("and_int16 -1%s-32767 = %d, wanted -32767\n", `&`, got) + failed = true + } + + if got := and_Neg32767_int16_ssa(0); got != 0 { + fmt.Printf("and_int16 -32767%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_Neg32767_ssa(0); got != 0 { + fmt.Printf("and_int16 0%s-32767 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg32767_int16_ssa(1); got != 1 { + fmt.Printf("and_int16 -32767%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int16_Neg32767_ssa(1); got != 1 { + fmt.Printf("and_int16 1%s-32767 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_Neg32767_int16_ssa(32766); got != 0 { + fmt.Printf("and_int16 -32767%s32766 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_Neg32767_ssa(32766); got != 0 { + fmt.Printf("and_int16 32766%s-32767 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg32767_int16_ssa(32767); got != 1 
{ + fmt.Printf("and_int16 -32767%s32767 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int16_Neg32767_ssa(32767); got != 1 { + fmt.Printf("and_int16 32767%s-32767 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_Neg1_int16_ssa(-32768); got != -32768 { + fmt.Printf("and_int16 -1%s-32768 = %d, wanted -32768\n", `&`, got) + failed = true + } + + if got := and_int16_Neg1_ssa(-32768); got != -32768 { + fmt.Printf("and_int16 -32768%s-1 = %d, wanted -32768\n", `&`, got) + failed = true + } + + if got := and_Neg1_int16_ssa(-32767); got != -32767 { + fmt.Printf("and_int16 -1%s-32767 = %d, wanted -32767\n", `&`, got) + failed = true + } + + if got := and_int16_Neg1_ssa(-32767); got != -32767 { + fmt.Printf("and_int16 -32767%s-1 = %d, wanted -32767\n", `&`, got) + failed = true + } + + if got := and_Neg1_int16_ssa(-1); got != -1 { + fmt.Printf("and_int16 -1%s-1 = %d, wanted -1\n", `&`, got) + failed = true + } + + if got := and_int16_Neg1_ssa(-1); got != -1 { + fmt.Printf("and_int16 -1%s-1 = %d, wanted -1\n", `&`, got) + failed = true + } + + if got := and_Neg1_int16_ssa(0); got != 0 { + fmt.Printf("and_int16 -1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_Neg1_ssa(0); got != 0 { + fmt.Printf("and_int16 0%s-1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg1_int16_ssa(1); got != 1 { + fmt.Printf("and_int16 -1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int16_Neg1_ssa(1); got != 1 { + fmt.Printf("and_int16 1%s-1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_Neg1_int16_ssa(32766); got != 32766 { + fmt.Printf("and_int16 -1%s32766 = %d, wanted 32766\n", `&`, got) + failed = true + } + + if got := and_int16_Neg1_ssa(32766); got != 32766 { + fmt.Printf("and_int16 32766%s-1 = %d, wanted 32766\n", `&`, got) + failed = true + } + + if got := and_Neg1_int16_ssa(32767); got != 32767 { + fmt.Printf("and_int16 -1%s32767 = %d, wanted 32767\n", `&`, got) 
+ failed = true + } + + if got := and_int16_Neg1_ssa(32767); got != 32767 { + fmt.Printf("and_int16 32767%s-1 = %d, wanted 32767\n", `&`, got) + failed = true + } + + if got := and_0_int16_ssa(-32768); got != 0 { + fmt.Printf("and_int16 0%s-32768 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_0_ssa(-32768); got != 0 { + fmt.Printf("and_int16 -32768%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int16_ssa(-32767); got != 0 { + fmt.Printf("and_int16 0%s-32767 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_0_ssa(-32767); got != 0 { + fmt.Printf("and_int16 -32767%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int16_ssa(-1); got != 0 { + fmt.Printf("and_int16 0%s-1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_0_ssa(-1); got != 0 { + fmt.Printf("and_int16 -1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int16_ssa(0); got != 0 { + fmt.Printf("and_int16 0%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_0_ssa(0); got != 0 { + fmt.Printf("and_int16 0%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int16_ssa(1); got != 0 { + fmt.Printf("and_int16 0%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_0_ssa(1); got != 0 { + fmt.Printf("and_int16 1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int16_ssa(32766); got != 0 { + fmt.Printf("and_int16 0%s32766 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_0_ssa(32766); got != 0 { + fmt.Printf("and_int16 32766%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int16_ssa(32767); got != 0 { + fmt.Printf("and_int16 0%s32767 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_0_ssa(32767); got != 0 { + fmt.Printf("and_int16 32767%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_int16_ssa(-32768); 
got != 0 { + fmt.Printf("and_int16 1%s-32768 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_1_ssa(-32768); got != 0 { + fmt.Printf("and_int16 -32768%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_int16_ssa(-32767); got != 1 { + fmt.Printf("and_int16 1%s-32767 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int16_1_ssa(-32767); got != 1 { + fmt.Printf("and_int16 -32767%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_1_int16_ssa(-1); got != 1 { + fmt.Printf("and_int16 1%s-1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int16_1_ssa(-1); got != 1 { + fmt.Printf("and_int16 -1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_1_int16_ssa(0); got != 0 { + fmt.Printf("and_int16 1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_1_ssa(0); got != 0 { + fmt.Printf("and_int16 0%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_int16_ssa(1); got != 1 { + fmt.Printf("and_int16 1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int16_1_ssa(1); got != 1 { + fmt.Printf("and_int16 1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_1_int16_ssa(32766); got != 0 { + fmt.Printf("and_int16 1%s32766 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_1_ssa(32766); got != 0 { + fmt.Printf("and_int16 32766%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_int16_ssa(32767); got != 1 { + fmt.Printf("and_int16 1%s32767 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int16_1_ssa(32767); got != 1 { + fmt.Printf("and_int16 32767%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_32766_int16_ssa(-32768); got != 0 { + fmt.Printf("and_int16 32766%s-32768 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_32766_ssa(-32768); got != 0 { + fmt.Printf("and_int16 -32768%s32766 = %d, 
wanted 0\n", `&`, got) + failed = true + } + + if got := and_32766_int16_ssa(-32767); got != 0 { + fmt.Printf("and_int16 32766%s-32767 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_32766_ssa(-32767); got != 0 { + fmt.Printf("and_int16 -32767%s32766 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_32766_int16_ssa(-1); got != 32766 { + fmt.Printf("and_int16 32766%s-1 = %d, wanted 32766\n", `&`, got) + failed = true + } + + if got := and_int16_32766_ssa(-1); got != 32766 { + fmt.Printf("and_int16 -1%s32766 = %d, wanted 32766\n", `&`, got) + failed = true + } + + if got := and_32766_int16_ssa(0); got != 0 { + fmt.Printf("and_int16 32766%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_32766_ssa(0); got != 0 { + fmt.Printf("and_int16 0%s32766 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_32766_int16_ssa(1); got != 0 { + fmt.Printf("and_int16 32766%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_32766_ssa(1); got != 0 { + fmt.Printf("and_int16 1%s32766 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_32766_int16_ssa(32766); got != 32766 { + fmt.Printf("and_int16 32766%s32766 = %d, wanted 32766\n", `&`, got) + failed = true + } + + if got := and_int16_32766_ssa(32766); got != 32766 { + fmt.Printf("and_int16 32766%s32766 = %d, wanted 32766\n", `&`, got) + failed = true + } + + if got := and_32766_int16_ssa(32767); got != 32766 { + fmt.Printf("and_int16 32766%s32767 = %d, wanted 32766\n", `&`, got) + failed = true + } + + if got := and_int16_32766_ssa(32767); got != 32766 { + fmt.Printf("and_int16 32767%s32766 = %d, wanted 32766\n", `&`, got) + failed = true + } + + if got := and_32767_int16_ssa(-32768); got != 0 { + fmt.Printf("and_int16 32767%s-32768 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_32767_ssa(-32768); got != 0 { + fmt.Printf("and_int16 -32768%s32767 = %d, wanted 0\n", `&`, got) + failed = true + 
} + + if got := and_32767_int16_ssa(-32767); got != 1 { + fmt.Printf("and_int16 32767%s-32767 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int16_32767_ssa(-32767); got != 1 { + fmt.Printf("and_int16 -32767%s32767 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_32767_int16_ssa(-1); got != 32767 { + fmt.Printf("and_int16 32767%s-1 = %d, wanted 32767\n", `&`, got) + failed = true + } + + if got := and_int16_32767_ssa(-1); got != 32767 { + fmt.Printf("and_int16 -1%s32767 = %d, wanted 32767\n", `&`, got) + failed = true + } + + if got := and_32767_int16_ssa(0); got != 0 { + fmt.Printf("and_int16 32767%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int16_32767_ssa(0); got != 0 { + fmt.Printf("and_int16 0%s32767 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_32767_int16_ssa(1); got != 1 { + fmt.Printf("and_int16 32767%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int16_32767_ssa(1); got != 1 { + fmt.Printf("and_int16 1%s32767 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_32767_int16_ssa(32766); got != 32766 { + fmt.Printf("and_int16 32767%s32766 = %d, wanted 32766\n", `&`, got) + failed = true + } + + if got := and_int16_32767_ssa(32766); got != 32766 { + fmt.Printf("and_int16 32766%s32767 = %d, wanted 32766\n", `&`, got) + failed = true + } + + if got := and_32767_int16_ssa(32767); got != 32767 { + fmt.Printf("and_int16 32767%s32767 = %d, wanted 32767\n", `&`, got) + failed = true + } + + if got := and_int16_32767_ssa(32767); got != 32767 { + fmt.Printf("and_int16 32767%s32767 = %d, wanted 32767\n", `&`, got) + failed = true + } + + if got := or_Neg32768_int16_ssa(-32768); got != -32768 { + fmt.Printf("or_int16 -32768%s-32768 = %d, wanted -32768\n", `|`, got) + failed = true + } + + if got := or_int16_Neg32768_ssa(-32768); got != -32768 { + fmt.Printf("or_int16 -32768%s-32768 = %d, wanted -32768\n", `|`, got) + failed = true + } + + if got := 
or_Neg32768_int16_ssa(-32767); got != -32767 { + fmt.Printf("or_int16 -32768%s-32767 = %d, wanted -32767\n", `|`, got) + failed = true + } + + if got := or_int16_Neg32768_ssa(-32767); got != -32767 { + fmt.Printf("or_int16 -32767%s-32768 = %d, wanted -32767\n", `|`, got) + failed = true + } + + if got := or_Neg32768_int16_ssa(-1); got != -1 { + fmt.Printf("or_int16 -32768%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_Neg32768_ssa(-1); got != -1 { + fmt.Printf("or_int16 -1%s-32768 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg32768_int16_ssa(0); got != -32768 { + fmt.Printf("or_int16 -32768%s0 = %d, wanted -32768\n", `|`, got) + failed = true + } + + if got := or_int16_Neg32768_ssa(0); got != -32768 { + fmt.Printf("or_int16 0%s-32768 = %d, wanted -32768\n", `|`, got) + failed = true + } + + if got := or_Neg32768_int16_ssa(1); got != -32767 { + fmt.Printf("or_int16 -32768%s1 = %d, wanted -32767\n", `|`, got) + failed = true + } + + if got := or_int16_Neg32768_ssa(1); got != -32767 { + fmt.Printf("or_int16 1%s-32768 = %d, wanted -32767\n", `|`, got) + failed = true + } + + if got := or_Neg32768_int16_ssa(32766); got != -2 { + fmt.Printf("or_int16 -32768%s32766 = %d, wanted -2\n", `|`, got) + failed = true + } + + if got := or_int16_Neg32768_ssa(32766); got != -2 { + fmt.Printf("or_int16 32766%s-32768 = %d, wanted -2\n", `|`, got) + failed = true + } + + if got := or_Neg32768_int16_ssa(32767); got != -1 { + fmt.Printf("or_int16 -32768%s32767 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_Neg32768_ssa(32767); got != -1 { + fmt.Printf("or_int16 32767%s-32768 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg32767_int16_ssa(-32768); got != -32767 { + fmt.Printf("or_int16 -32767%s-32768 = %d, wanted -32767\n", `|`, got) + failed = true + } + + if got := or_int16_Neg32767_ssa(-32768); got != -32767 { + fmt.Printf("or_int16 -32768%s-32767 = %d, wanted -32767\n", `|`, got) + 
failed = true + } + + if got := or_Neg32767_int16_ssa(-32767); got != -32767 { + fmt.Printf("or_int16 -32767%s-32767 = %d, wanted -32767\n", `|`, got) + failed = true + } + + if got := or_int16_Neg32767_ssa(-32767); got != -32767 { + fmt.Printf("or_int16 -32767%s-32767 = %d, wanted -32767\n", `|`, got) + failed = true + } + + if got := or_Neg32767_int16_ssa(-1); got != -1 { + fmt.Printf("or_int16 -32767%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_Neg32767_ssa(-1); got != -1 { + fmt.Printf("or_int16 -1%s-32767 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg32767_int16_ssa(0); got != -32767 { + fmt.Printf("or_int16 -32767%s0 = %d, wanted -32767\n", `|`, got) + failed = true + } + + if got := or_int16_Neg32767_ssa(0); got != -32767 { + fmt.Printf("or_int16 0%s-32767 = %d, wanted -32767\n", `|`, got) + failed = true + } + + if got := or_Neg32767_int16_ssa(1); got != -32767 { + fmt.Printf("or_int16 -32767%s1 = %d, wanted -32767\n", `|`, got) + failed = true + } + + if got := or_int16_Neg32767_ssa(1); got != -32767 { + fmt.Printf("or_int16 1%s-32767 = %d, wanted -32767\n", `|`, got) + failed = true + } + + if got := or_Neg32767_int16_ssa(32766); got != -1 { + fmt.Printf("or_int16 -32767%s32766 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_Neg32767_ssa(32766); got != -1 { + fmt.Printf("or_int16 32766%s-32767 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg32767_int16_ssa(32767); got != -1 { + fmt.Printf("or_int16 -32767%s32767 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_Neg32767_ssa(32767); got != -1 { + fmt.Printf("or_int16 32767%s-32767 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int16_ssa(-32768); got != -1 { + fmt.Printf("or_int16 -1%s-32768 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_Neg1_ssa(-32768); got != -1 { + fmt.Printf("or_int16 -32768%s-1 = %d, wanted -1\n", `|`, got) + 
failed = true + } + + if got := or_Neg1_int16_ssa(-32767); got != -1 { + fmt.Printf("or_int16 -1%s-32767 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_Neg1_ssa(-32767); got != -1 { + fmt.Printf("or_int16 -32767%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int16_ssa(-1); got != -1 { + fmt.Printf("or_int16 -1%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_Neg1_ssa(-1); got != -1 { + fmt.Printf("or_int16 -1%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int16_ssa(0); got != -1 { + fmt.Printf("or_int16 -1%s0 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_Neg1_ssa(0); got != -1 { + fmt.Printf("or_int16 0%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int16_ssa(1); got != -1 { + fmt.Printf("or_int16 -1%s1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_Neg1_ssa(1); got != -1 { + fmt.Printf("or_int16 1%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int16_ssa(32766); got != -1 { + fmt.Printf("or_int16 -1%s32766 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_Neg1_ssa(32766); got != -1 { + fmt.Printf("or_int16 32766%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int16_ssa(32767); got != -1 { + fmt.Printf("or_int16 -1%s32767 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_Neg1_ssa(32767); got != -1 { + fmt.Printf("or_int16 32767%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_0_int16_ssa(-32768); got != -32768 { + fmt.Printf("or_int16 0%s-32768 = %d, wanted -32768\n", `|`, got) + failed = true + } + + if got := or_int16_0_ssa(-32768); got != -32768 { + fmt.Printf("or_int16 -32768%s0 = %d, wanted -32768\n", `|`, got) + failed = true + } + + if got := or_0_int16_ssa(-32767); got != -32767 { + fmt.Printf("or_int16 0%s-32767 = %d, wanted -32767\n", `|`, 
got) + failed = true + } + + if got := or_int16_0_ssa(-32767); got != -32767 { + fmt.Printf("or_int16 -32767%s0 = %d, wanted -32767\n", `|`, got) + failed = true + } + + if got := or_0_int16_ssa(-1); got != -1 { + fmt.Printf("or_int16 0%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_0_ssa(-1); got != -1 { + fmt.Printf("or_int16 -1%s0 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_0_int16_ssa(0); got != 0 { + fmt.Printf("or_int16 0%s0 = %d, wanted 0\n", `|`, got) + failed = true + } + + if got := or_int16_0_ssa(0); got != 0 { + fmt.Printf("or_int16 0%s0 = %d, wanted 0\n", `|`, got) + failed = true + } + + if got := or_0_int16_ssa(1); got != 1 { + fmt.Printf("or_int16 0%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_int16_0_ssa(1); got != 1 { + fmt.Printf("or_int16 1%s0 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_0_int16_ssa(32766); got != 32766 { + fmt.Printf("or_int16 0%s32766 = %d, wanted 32766\n", `|`, got) + failed = true + } + + if got := or_int16_0_ssa(32766); got != 32766 { + fmt.Printf("or_int16 32766%s0 = %d, wanted 32766\n", `|`, got) + failed = true + } + + if got := or_0_int16_ssa(32767); got != 32767 { + fmt.Printf("or_int16 0%s32767 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := or_int16_0_ssa(32767); got != 32767 { + fmt.Printf("or_int16 32767%s0 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := or_1_int16_ssa(-32768); got != -32767 { + fmt.Printf("or_int16 1%s-32768 = %d, wanted -32767\n", `|`, got) + failed = true + } + + if got := or_int16_1_ssa(-32768); got != -32767 { + fmt.Printf("or_int16 -32768%s1 = %d, wanted -32767\n", `|`, got) + failed = true + } + + if got := or_1_int16_ssa(-32767); got != -32767 { + fmt.Printf("or_int16 1%s-32767 = %d, wanted -32767\n", `|`, got) + failed = true + } + + if got := or_int16_1_ssa(-32767); got != -32767 { + fmt.Printf("or_int16 -32767%s1 = %d, wanted -32767\n", `|`, got) + 
failed = true + } + + if got := or_1_int16_ssa(-1); got != -1 { + fmt.Printf("or_int16 1%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_1_ssa(-1); got != -1 { + fmt.Printf("or_int16 -1%s1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_1_int16_ssa(0); got != 1 { + fmt.Printf("or_int16 1%s0 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_int16_1_ssa(0); got != 1 { + fmt.Printf("or_int16 0%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_1_int16_ssa(1); got != 1 { + fmt.Printf("or_int16 1%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_int16_1_ssa(1); got != 1 { + fmt.Printf("or_int16 1%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_1_int16_ssa(32766); got != 32767 { + fmt.Printf("or_int16 1%s32766 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := or_int16_1_ssa(32766); got != 32767 { + fmt.Printf("or_int16 32766%s1 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := or_1_int16_ssa(32767); got != 32767 { + fmt.Printf("or_int16 1%s32767 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := or_int16_1_ssa(32767); got != 32767 { + fmt.Printf("or_int16 32767%s1 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := or_32766_int16_ssa(-32768); got != -2 { + fmt.Printf("or_int16 32766%s-32768 = %d, wanted -2\n", `|`, got) + failed = true + } + + if got := or_int16_32766_ssa(-32768); got != -2 { + fmt.Printf("or_int16 -32768%s32766 = %d, wanted -2\n", `|`, got) + failed = true + } + + if got := or_32766_int16_ssa(-32767); got != -1 { + fmt.Printf("or_int16 32766%s-32767 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_32766_ssa(-32767); got != -1 { + fmt.Printf("or_int16 -32767%s32766 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_32766_int16_ssa(-1); got != -1 { + fmt.Printf("or_int16 32766%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + 
+ if got := or_int16_32766_ssa(-1); got != -1 { + fmt.Printf("or_int16 -1%s32766 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_32766_int16_ssa(0); got != 32766 { + fmt.Printf("or_int16 32766%s0 = %d, wanted 32766\n", `|`, got) + failed = true + } + + if got := or_int16_32766_ssa(0); got != 32766 { + fmt.Printf("or_int16 0%s32766 = %d, wanted 32766\n", `|`, got) + failed = true + } + + if got := or_32766_int16_ssa(1); got != 32767 { + fmt.Printf("or_int16 32766%s1 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := or_int16_32766_ssa(1); got != 32767 { + fmt.Printf("or_int16 1%s32766 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := or_32766_int16_ssa(32766); got != 32766 { + fmt.Printf("or_int16 32766%s32766 = %d, wanted 32766\n", `|`, got) + failed = true + } + + if got := or_int16_32766_ssa(32766); got != 32766 { + fmt.Printf("or_int16 32766%s32766 = %d, wanted 32766\n", `|`, got) + failed = true + } + + if got := or_32766_int16_ssa(32767); got != 32767 { + fmt.Printf("or_int16 32766%s32767 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := or_int16_32766_ssa(32767); got != 32767 { + fmt.Printf("or_int16 32767%s32766 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := or_32767_int16_ssa(-32768); got != -1 { + fmt.Printf("or_int16 32767%s-32768 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_32767_ssa(-32768); got != -1 { + fmt.Printf("or_int16 -32768%s32767 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_32767_int16_ssa(-32767); got != -1 { + fmt.Printf("or_int16 32767%s-32767 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_32767_ssa(-32767); got != -1 { + fmt.Printf("or_int16 -32767%s32767 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_32767_int16_ssa(-1); got != -1 { + fmt.Printf("or_int16 32767%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int16_32767_ssa(-1); got 
!= -1 { + fmt.Printf("or_int16 -1%s32767 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_32767_int16_ssa(0); got != 32767 { + fmt.Printf("or_int16 32767%s0 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := or_int16_32767_ssa(0); got != 32767 { + fmt.Printf("or_int16 0%s32767 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := or_32767_int16_ssa(1); got != 32767 { + fmt.Printf("or_int16 32767%s1 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := or_int16_32767_ssa(1); got != 32767 { + fmt.Printf("or_int16 1%s32767 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := or_32767_int16_ssa(32766); got != 32767 { + fmt.Printf("or_int16 32767%s32766 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := or_int16_32767_ssa(32766); got != 32767 { + fmt.Printf("or_int16 32766%s32767 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := or_32767_int16_ssa(32767); got != 32767 { + fmt.Printf("or_int16 32767%s32767 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := or_int16_32767_ssa(32767); got != 32767 { + fmt.Printf("or_int16 32767%s32767 = %d, wanted 32767\n", `|`, got) + failed = true + } + + if got := xor_Neg32768_int16_ssa(-32768); got != 0 { + fmt.Printf("xor_int16 -32768%s-32768 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg32768_ssa(-32768); got != 0 { + fmt.Printf("xor_int16 -32768%s-32768 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_Neg32768_int16_ssa(-32767); got != 1 { + fmt.Printf("xor_int16 -32768%s-32767 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg32768_ssa(-32767); got != 1 { + fmt.Printf("xor_int16 -32767%s-32768 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_Neg32768_int16_ssa(-1); got != 32767 { + fmt.Printf("xor_int16 -32768%s-1 = %d, wanted 32767\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg32768_ssa(-1); got != 32767 
{ + fmt.Printf("xor_int16 -1%s-32768 = %d, wanted 32767\n", `^`, got) + failed = true + } + + if got := xor_Neg32768_int16_ssa(0); got != -32768 { + fmt.Printf("xor_int16 -32768%s0 = %d, wanted -32768\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg32768_ssa(0); got != -32768 { + fmt.Printf("xor_int16 0%s-32768 = %d, wanted -32768\n", `^`, got) + failed = true + } + + if got := xor_Neg32768_int16_ssa(1); got != -32767 { + fmt.Printf("xor_int16 -32768%s1 = %d, wanted -32767\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg32768_ssa(1); got != -32767 { + fmt.Printf("xor_int16 1%s-32768 = %d, wanted -32767\n", `^`, got) + failed = true + } + + if got := xor_Neg32768_int16_ssa(32766); got != -2 { + fmt.Printf("xor_int16 -32768%s32766 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg32768_ssa(32766); got != -2 { + fmt.Printf("xor_int16 32766%s-32768 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_Neg32768_int16_ssa(32767); got != -1 { + fmt.Printf("xor_int16 -32768%s32767 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg32768_ssa(32767); got != -1 { + fmt.Printf("xor_int16 32767%s-32768 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_Neg32767_int16_ssa(-32768); got != 1 { + fmt.Printf("xor_int16 -32767%s-32768 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg32767_ssa(-32768); got != 1 { + fmt.Printf("xor_int16 -32768%s-32767 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_Neg32767_int16_ssa(-32767); got != 0 { + fmt.Printf("xor_int16 -32767%s-32767 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg32767_ssa(-32767); got != 0 { + fmt.Printf("xor_int16 -32767%s-32767 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_Neg32767_int16_ssa(-1); got != 32766 { + fmt.Printf("xor_int16 -32767%s-1 = %d, wanted 32766\n", `^`, got) + failed = true + } + + if got := 
xor_int16_Neg32767_ssa(-1); got != 32766 { + fmt.Printf("xor_int16 -1%s-32767 = %d, wanted 32766\n", `^`, got) + failed = true + } + + if got := xor_Neg32767_int16_ssa(0); got != -32767 { + fmt.Printf("xor_int16 -32767%s0 = %d, wanted -32767\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg32767_ssa(0); got != -32767 { + fmt.Printf("xor_int16 0%s-32767 = %d, wanted -32767\n", `^`, got) + failed = true + } + + if got := xor_Neg32767_int16_ssa(1); got != -32768 { + fmt.Printf("xor_int16 -32767%s1 = %d, wanted -32768\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg32767_ssa(1); got != -32768 { + fmt.Printf("xor_int16 1%s-32767 = %d, wanted -32768\n", `^`, got) + failed = true + } + + if got := xor_Neg32767_int16_ssa(32766); got != -1 { + fmt.Printf("xor_int16 -32767%s32766 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg32767_ssa(32766); got != -1 { + fmt.Printf("xor_int16 32766%s-32767 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_Neg32767_int16_ssa(32767); got != -2 { + fmt.Printf("xor_int16 -32767%s32767 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg32767_ssa(32767); got != -2 { + fmt.Printf("xor_int16 32767%s-32767 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int16_ssa(-32768); got != 32767 { + fmt.Printf("xor_int16 -1%s-32768 = %d, wanted 32767\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg1_ssa(-32768); got != 32767 { + fmt.Printf("xor_int16 -32768%s-1 = %d, wanted 32767\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int16_ssa(-32767); got != 32766 { + fmt.Printf("xor_int16 -1%s-32767 = %d, wanted 32766\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg1_ssa(-32767); got != 32766 { + fmt.Printf("xor_int16 -32767%s-1 = %d, wanted 32766\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int16_ssa(-1); got != 0 { + fmt.Printf("xor_int16 -1%s-1 = %d, wanted 0\n", `^`, got) + failed = true + } 
+ + if got := xor_int16_Neg1_ssa(-1); got != 0 { + fmt.Printf("xor_int16 -1%s-1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int16_ssa(0); got != -1 { + fmt.Printf("xor_int16 -1%s0 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg1_ssa(0); got != -1 { + fmt.Printf("xor_int16 0%s-1 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int16_ssa(1); got != -2 { + fmt.Printf("xor_int16 -1%s1 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg1_ssa(1); got != -2 { + fmt.Printf("xor_int16 1%s-1 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int16_ssa(32766); got != -32767 { + fmt.Printf("xor_int16 -1%s32766 = %d, wanted -32767\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg1_ssa(32766); got != -32767 { + fmt.Printf("xor_int16 32766%s-1 = %d, wanted -32767\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int16_ssa(32767); got != -32768 { + fmt.Printf("xor_int16 -1%s32767 = %d, wanted -32768\n", `^`, got) + failed = true + } + + if got := xor_int16_Neg1_ssa(32767); got != -32768 { + fmt.Printf("xor_int16 32767%s-1 = %d, wanted -32768\n", `^`, got) + failed = true + } + + if got := xor_0_int16_ssa(-32768); got != -32768 { + fmt.Printf("xor_int16 0%s-32768 = %d, wanted -32768\n", `^`, got) + failed = true + } + + if got := xor_int16_0_ssa(-32768); got != -32768 { + fmt.Printf("xor_int16 -32768%s0 = %d, wanted -32768\n", `^`, got) + failed = true + } + + if got := xor_0_int16_ssa(-32767); got != -32767 { + fmt.Printf("xor_int16 0%s-32767 = %d, wanted -32767\n", `^`, got) + failed = true + } + + if got := xor_int16_0_ssa(-32767); got != -32767 { + fmt.Printf("xor_int16 -32767%s0 = %d, wanted -32767\n", `^`, got) + failed = true + } + + if got := xor_0_int16_ssa(-1); got != -1 { + fmt.Printf("xor_int16 0%s-1 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int16_0_ssa(-1); got != -1 { + fmt.Printf("xor_int16 
-1%s0 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_0_int16_ssa(0); got != 0 { + fmt.Printf("xor_int16 0%s0 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int16_0_ssa(0); got != 0 { + fmt.Printf("xor_int16 0%s0 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_0_int16_ssa(1); got != 1 { + fmt.Printf("xor_int16 0%s1 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int16_0_ssa(1); got != 1 { + fmt.Printf("xor_int16 1%s0 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_0_int16_ssa(32766); got != 32766 { + fmt.Printf("xor_int16 0%s32766 = %d, wanted 32766\n", `^`, got) + failed = true + } + + if got := xor_int16_0_ssa(32766); got != 32766 { + fmt.Printf("xor_int16 32766%s0 = %d, wanted 32766\n", `^`, got) + failed = true + } + + if got := xor_0_int16_ssa(32767); got != 32767 { + fmt.Printf("xor_int16 0%s32767 = %d, wanted 32767\n", `^`, got) + failed = true + } + + if got := xor_int16_0_ssa(32767); got != 32767 { + fmt.Printf("xor_int16 32767%s0 = %d, wanted 32767\n", `^`, got) + failed = true + } + + if got := xor_1_int16_ssa(-32768); got != -32767 { + fmt.Printf("xor_int16 1%s-32768 = %d, wanted -32767\n", `^`, got) + failed = true + } + + if got := xor_int16_1_ssa(-32768); got != -32767 { + fmt.Printf("xor_int16 -32768%s1 = %d, wanted -32767\n", `^`, got) + failed = true + } + + if got := xor_1_int16_ssa(-32767); got != -32768 { + fmt.Printf("xor_int16 1%s-32767 = %d, wanted -32768\n", `^`, got) + failed = true + } + + if got := xor_int16_1_ssa(-32767); got != -32768 { + fmt.Printf("xor_int16 -32767%s1 = %d, wanted -32768\n", `^`, got) + failed = true + } + + if got := xor_1_int16_ssa(-1); got != -2 { + fmt.Printf("xor_int16 1%s-1 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int16_1_ssa(-1); got != -2 { + fmt.Printf("xor_int16 -1%s1 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_1_int16_ssa(0); got != 1 { + fmt.Printf("xor_int16 
1%s0 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int16_1_ssa(0); got != 1 { + fmt.Printf("xor_int16 0%s1 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_1_int16_ssa(1); got != 0 { + fmt.Printf("xor_int16 1%s1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int16_1_ssa(1); got != 0 { + fmt.Printf("xor_int16 1%s1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_1_int16_ssa(32766); got != 32767 { + fmt.Printf("xor_int16 1%s32766 = %d, wanted 32767\n", `^`, got) + failed = true + } + + if got := xor_int16_1_ssa(32766); got != 32767 { + fmt.Printf("xor_int16 32766%s1 = %d, wanted 32767\n", `^`, got) + failed = true + } + + if got := xor_1_int16_ssa(32767); got != 32766 { + fmt.Printf("xor_int16 1%s32767 = %d, wanted 32766\n", `^`, got) + failed = true + } + + if got := xor_int16_1_ssa(32767); got != 32766 { + fmt.Printf("xor_int16 32767%s1 = %d, wanted 32766\n", `^`, got) + failed = true + } + + if got := xor_32766_int16_ssa(-32768); got != -2 { + fmt.Printf("xor_int16 32766%s-32768 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int16_32766_ssa(-32768); got != -2 { + fmt.Printf("xor_int16 -32768%s32766 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_32766_int16_ssa(-32767); got != -1 { + fmt.Printf("xor_int16 32766%s-32767 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int16_32766_ssa(-32767); got != -1 { + fmt.Printf("xor_int16 -32767%s32766 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_32766_int16_ssa(-1); got != -32767 { + fmt.Printf("xor_int16 32766%s-1 = %d, wanted -32767\n", `^`, got) + failed = true + } + + if got := xor_int16_32766_ssa(-1); got != -32767 { + fmt.Printf("xor_int16 -1%s32766 = %d, wanted -32767\n", `^`, got) + failed = true + } + + if got := xor_32766_int16_ssa(0); got != 32766 { + fmt.Printf("xor_int16 32766%s0 = %d, wanted 32766\n", `^`, got) + failed = true + } + + if got := 
xor_int16_32766_ssa(0); got != 32766 { + fmt.Printf("xor_int16 0%s32766 = %d, wanted 32766\n", `^`, got) + failed = true + } + + if got := xor_32766_int16_ssa(1); got != 32767 { + fmt.Printf("xor_int16 32766%s1 = %d, wanted 32767\n", `^`, got) + failed = true + } + + if got := xor_int16_32766_ssa(1); got != 32767 { + fmt.Printf("xor_int16 1%s32766 = %d, wanted 32767\n", `^`, got) + failed = true + } + + if got := xor_32766_int16_ssa(32766); got != 0 { + fmt.Printf("xor_int16 32766%s32766 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int16_32766_ssa(32766); got != 0 { + fmt.Printf("xor_int16 32766%s32766 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_32766_int16_ssa(32767); got != 1 { + fmt.Printf("xor_int16 32766%s32767 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int16_32766_ssa(32767); got != 1 { + fmt.Printf("xor_int16 32767%s32766 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_32767_int16_ssa(-32768); got != -1 { + fmt.Printf("xor_int16 32767%s-32768 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int16_32767_ssa(-32768); got != -1 { + fmt.Printf("xor_int16 -32768%s32767 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_32767_int16_ssa(-32767); got != -2 { + fmt.Printf("xor_int16 32767%s-32767 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int16_32767_ssa(-32767); got != -2 { + fmt.Printf("xor_int16 -32767%s32767 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_32767_int16_ssa(-1); got != -32768 { + fmt.Printf("xor_int16 32767%s-1 = %d, wanted -32768\n", `^`, got) + failed = true + } + + if got := xor_int16_32767_ssa(-1); got != -32768 { + fmt.Printf("xor_int16 -1%s32767 = %d, wanted -32768\n", `^`, got) + failed = true + } + + if got := xor_32767_int16_ssa(0); got != 32767 { + fmt.Printf("xor_int16 32767%s0 = %d, wanted 32767\n", `^`, got) + failed = true + } + + if got := xor_int16_32767_ssa(0); got 
!= 32767 { + fmt.Printf("xor_int16 0%s32767 = %d, wanted 32767\n", `^`, got) + failed = true + } + + if got := xor_32767_int16_ssa(1); got != 32766 { + fmt.Printf("xor_int16 32767%s1 = %d, wanted 32766\n", `^`, got) + failed = true + } + + if got := xor_int16_32767_ssa(1); got != 32766 { + fmt.Printf("xor_int16 1%s32767 = %d, wanted 32766\n", `^`, got) + failed = true + } + + if got := xor_32767_int16_ssa(32766); got != 1 { + fmt.Printf("xor_int16 32767%s32766 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int16_32767_ssa(32766); got != 1 { + fmt.Printf("xor_int16 32766%s32767 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_32767_int16_ssa(32767); got != 0 { + fmt.Printf("xor_int16 32767%s32767 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int16_32767_ssa(32767); got != 0 { + fmt.Printf("xor_int16 32767%s32767 = %d, wanted 0\n", `^`, got) + failed = true + } + if got := add_0_uint8_ssa(0); got != 0 { fmt.Printf("add_uint8 0%s0 = %d, wanted 0\n", `+`, got) failed = true @@ -13579,6 +21139,276 @@ func main() { failed = true } + if got := and_0_uint8_ssa(0); got != 0 { + fmt.Printf("and_uint8 0%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint8_0_ssa(0); got != 0 { + fmt.Printf("and_uint8 0%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_uint8_ssa(1); got != 0 { + fmt.Printf("and_uint8 0%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint8_0_ssa(1); got != 0 { + fmt.Printf("and_uint8 1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_uint8_ssa(255); got != 0 { + fmt.Printf("and_uint8 0%s255 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint8_0_ssa(255); got != 0 { + fmt.Printf("and_uint8 255%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_uint8_ssa(0); got != 0 { + fmt.Printf("and_uint8 1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := 
and_uint8_1_ssa(0); got != 0 { + fmt.Printf("and_uint8 0%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_uint8_ssa(1); got != 1 { + fmt.Printf("and_uint8 1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_uint8_1_ssa(1); got != 1 { + fmt.Printf("and_uint8 1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_1_uint8_ssa(255); got != 1 { + fmt.Printf("and_uint8 1%s255 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_uint8_1_ssa(255); got != 1 { + fmt.Printf("and_uint8 255%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_255_uint8_ssa(0); got != 0 { + fmt.Printf("and_uint8 255%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_uint8_255_ssa(0); got != 0 { + fmt.Printf("and_uint8 0%s255 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_255_uint8_ssa(1); got != 1 { + fmt.Printf("and_uint8 255%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_uint8_255_ssa(1); got != 1 { + fmt.Printf("and_uint8 1%s255 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_255_uint8_ssa(255); got != 255 { + fmt.Printf("and_uint8 255%s255 = %d, wanted 255\n", `&`, got) + failed = true + } + + if got := and_uint8_255_ssa(255); got != 255 { + fmt.Printf("and_uint8 255%s255 = %d, wanted 255\n", `&`, got) + failed = true + } + + if got := or_0_uint8_ssa(0); got != 0 { + fmt.Printf("or_uint8 0%s0 = %d, wanted 0\n", `|`, got) + failed = true + } + + if got := or_uint8_0_ssa(0); got != 0 { + fmt.Printf("or_uint8 0%s0 = %d, wanted 0\n", `|`, got) + failed = true + } + + if got := or_0_uint8_ssa(1); got != 1 { + fmt.Printf("or_uint8 0%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_uint8_0_ssa(1); got != 1 { + fmt.Printf("or_uint8 1%s0 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_0_uint8_ssa(255); got != 255 { + fmt.Printf("or_uint8 0%s255 = %d, wanted 255\n", `|`, got) + failed = true + } 
+ + if got := or_uint8_0_ssa(255); got != 255 { + fmt.Printf("or_uint8 255%s0 = %d, wanted 255\n", `|`, got) + failed = true + } + + if got := or_1_uint8_ssa(0); got != 1 { + fmt.Printf("or_uint8 1%s0 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_uint8_1_ssa(0); got != 1 { + fmt.Printf("or_uint8 0%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_1_uint8_ssa(1); got != 1 { + fmt.Printf("or_uint8 1%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_uint8_1_ssa(1); got != 1 { + fmt.Printf("or_uint8 1%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_1_uint8_ssa(255); got != 255 { + fmt.Printf("or_uint8 1%s255 = %d, wanted 255\n", `|`, got) + failed = true + } + + if got := or_uint8_1_ssa(255); got != 255 { + fmt.Printf("or_uint8 255%s1 = %d, wanted 255\n", `|`, got) + failed = true + } + + if got := or_255_uint8_ssa(0); got != 255 { + fmt.Printf("or_uint8 255%s0 = %d, wanted 255\n", `|`, got) + failed = true + } + + if got := or_uint8_255_ssa(0); got != 255 { + fmt.Printf("or_uint8 0%s255 = %d, wanted 255\n", `|`, got) + failed = true + } + + if got := or_255_uint8_ssa(1); got != 255 { + fmt.Printf("or_uint8 255%s1 = %d, wanted 255\n", `|`, got) + failed = true + } + + if got := or_uint8_255_ssa(1); got != 255 { + fmt.Printf("or_uint8 1%s255 = %d, wanted 255\n", `|`, got) + failed = true + } + + if got := or_255_uint8_ssa(255); got != 255 { + fmt.Printf("or_uint8 255%s255 = %d, wanted 255\n", `|`, got) + failed = true + } + + if got := or_uint8_255_ssa(255); got != 255 { + fmt.Printf("or_uint8 255%s255 = %d, wanted 255\n", `|`, got) + failed = true + } + + if got := xor_0_uint8_ssa(0); got != 0 { + fmt.Printf("xor_uint8 0%s0 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_uint8_0_ssa(0); got != 0 { + fmt.Printf("xor_uint8 0%s0 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_0_uint8_ssa(1); got != 1 { + fmt.Printf("xor_uint8 0%s1 = %d, wanted 1\n", `^`, 
got) + failed = true + } + + if got := xor_uint8_0_ssa(1); got != 1 { + fmt.Printf("xor_uint8 1%s0 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_0_uint8_ssa(255); got != 255 { + fmt.Printf("xor_uint8 0%s255 = %d, wanted 255\n", `^`, got) + failed = true + } + + if got := xor_uint8_0_ssa(255); got != 255 { + fmt.Printf("xor_uint8 255%s0 = %d, wanted 255\n", `^`, got) + failed = true + } + + if got := xor_1_uint8_ssa(0); got != 1 { + fmt.Printf("xor_uint8 1%s0 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_uint8_1_ssa(0); got != 1 { + fmt.Printf("xor_uint8 0%s1 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_1_uint8_ssa(1); got != 0 { + fmt.Printf("xor_uint8 1%s1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_uint8_1_ssa(1); got != 0 { + fmt.Printf("xor_uint8 1%s1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_1_uint8_ssa(255); got != 254 { + fmt.Printf("xor_uint8 1%s255 = %d, wanted 254\n", `^`, got) + failed = true + } + + if got := xor_uint8_1_ssa(255); got != 254 { + fmt.Printf("xor_uint8 255%s1 = %d, wanted 254\n", `^`, got) + failed = true + } + + if got := xor_255_uint8_ssa(0); got != 255 { + fmt.Printf("xor_uint8 255%s0 = %d, wanted 255\n", `^`, got) + failed = true + } + + if got := xor_uint8_255_ssa(0); got != 255 { + fmt.Printf("xor_uint8 0%s255 = %d, wanted 255\n", `^`, got) + failed = true + } + + if got := xor_255_uint8_ssa(1); got != 254 { + fmt.Printf("xor_uint8 255%s1 = %d, wanted 254\n", `^`, got) + failed = true + } + + if got := xor_uint8_255_ssa(1); got != 254 { + fmt.Printf("xor_uint8 1%s255 = %d, wanted 254\n", `^`, got) + failed = true + } + + if got := xor_255_uint8_ssa(255); got != 0 { + fmt.Printf("xor_uint8 255%s255 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_uint8_255_ssa(255); got != 0 { + fmt.Printf("xor_uint8 255%s255 = %d, wanted 0\n", `^`, got) + failed = true + } + if got := add_Neg128_int8_ssa(-128); got != 0 { 
fmt.Printf("add_int8 -128%s-128 = %d, wanted 0\n", `+`, got) failed = true @@ -15888,6 +23718,1476 @@ func main() { fmt.Printf("mod_int8 127%s127 = %d, wanted 0\n", `%`, got) failed = true } + + if got := and_Neg128_int8_ssa(-128); got != -128 { + fmt.Printf("and_int8 -128%s-128 = %d, wanted -128\n", `&`, got) + failed = true + } + + if got := and_int8_Neg128_ssa(-128); got != -128 { + fmt.Printf("and_int8 -128%s-128 = %d, wanted -128\n", `&`, got) + failed = true + } + + if got := and_Neg128_int8_ssa(-127); got != -128 { + fmt.Printf("and_int8 -128%s-127 = %d, wanted -128\n", `&`, got) + failed = true + } + + if got := and_int8_Neg128_ssa(-127); got != -128 { + fmt.Printf("and_int8 -127%s-128 = %d, wanted -128\n", `&`, got) + failed = true + } + + if got := and_Neg128_int8_ssa(-1); got != -128 { + fmt.Printf("and_int8 -128%s-1 = %d, wanted -128\n", `&`, got) + failed = true + } + + if got := and_int8_Neg128_ssa(-1); got != -128 { + fmt.Printf("and_int8 -1%s-128 = %d, wanted -128\n", `&`, got) + failed = true + } + + if got := and_Neg128_int8_ssa(0); got != 0 { + fmt.Printf("and_int8 -128%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_Neg128_ssa(0); got != 0 { + fmt.Printf("and_int8 0%s-128 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg128_int8_ssa(1); got != 0 { + fmt.Printf("and_int8 -128%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_Neg128_ssa(1); got != 0 { + fmt.Printf("and_int8 1%s-128 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg128_int8_ssa(126); got != 0 { + fmt.Printf("and_int8 -128%s126 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_Neg128_ssa(126); got != 0 { + fmt.Printf("and_int8 126%s-128 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg128_int8_ssa(127); got != 0 { + fmt.Printf("and_int8 -128%s127 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_Neg128_ssa(127); got != 0 { + 
fmt.Printf("and_int8 127%s-128 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg127_int8_ssa(-128); got != -128 { + fmt.Printf("and_int8 -127%s-128 = %d, wanted -128\n", `&`, got) + failed = true + } + + if got := and_int8_Neg127_ssa(-128); got != -128 { + fmt.Printf("and_int8 -128%s-127 = %d, wanted -128\n", `&`, got) + failed = true + } + + if got := and_Neg127_int8_ssa(-127); got != -127 { + fmt.Printf("and_int8 -127%s-127 = %d, wanted -127\n", `&`, got) + failed = true + } + + if got := and_int8_Neg127_ssa(-127); got != -127 { + fmt.Printf("and_int8 -127%s-127 = %d, wanted -127\n", `&`, got) + failed = true + } + + if got := and_Neg127_int8_ssa(-1); got != -127 { + fmt.Printf("and_int8 -127%s-1 = %d, wanted -127\n", `&`, got) + failed = true + } + + if got := and_int8_Neg127_ssa(-1); got != -127 { + fmt.Printf("and_int8 -1%s-127 = %d, wanted -127\n", `&`, got) + failed = true + } + + if got := and_Neg127_int8_ssa(0); got != 0 { + fmt.Printf("and_int8 -127%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_Neg127_ssa(0); got != 0 { + fmt.Printf("and_int8 0%s-127 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg127_int8_ssa(1); got != 1 { + fmt.Printf("and_int8 -127%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int8_Neg127_ssa(1); got != 1 { + fmt.Printf("and_int8 1%s-127 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_Neg127_int8_ssa(126); got != 0 { + fmt.Printf("and_int8 -127%s126 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_Neg127_ssa(126); got != 0 { + fmt.Printf("and_int8 126%s-127 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg127_int8_ssa(127); got != 1 { + fmt.Printf("and_int8 -127%s127 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int8_Neg127_ssa(127); got != 1 { + fmt.Printf("and_int8 127%s-127 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := 
and_Neg1_int8_ssa(-128); got != -128 { + fmt.Printf("and_int8 -1%s-128 = %d, wanted -128\n", `&`, got) + failed = true + } + + if got := and_int8_Neg1_ssa(-128); got != -128 { + fmt.Printf("and_int8 -128%s-1 = %d, wanted -128\n", `&`, got) + failed = true + } + + if got := and_Neg1_int8_ssa(-127); got != -127 { + fmt.Printf("and_int8 -1%s-127 = %d, wanted -127\n", `&`, got) + failed = true + } + + if got := and_int8_Neg1_ssa(-127); got != -127 { + fmt.Printf("and_int8 -127%s-1 = %d, wanted -127\n", `&`, got) + failed = true + } + + if got := and_Neg1_int8_ssa(-1); got != -1 { + fmt.Printf("and_int8 -1%s-1 = %d, wanted -1\n", `&`, got) + failed = true + } + + if got := and_int8_Neg1_ssa(-1); got != -1 { + fmt.Printf("and_int8 -1%s-1 = %d, wanted -1\n", `&`, got) + failed = true + } + + if got := and_Neg1_int8_ssa(0); got != 0 { + fmt.Printf("and_int8 -1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_Neg1_ssa(0); got != 0 { + fmt.Printf("and_int8 0%s-1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_Neg1_int8_ssa(1); got != 1 { + fmt.Printf("and_int8 -1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int8_Neg1_ssa(1); got != 1 { + fmt.Printf("and_int8 1%s-1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_Neg1_int8_ssa(126); got != 126 { + fmt.Printf("and_int8 -1%s126 = %d, wanted 126\n", `&`, got) + failed = true + } + + if got := and_int8_Neg1_ssa(126); got != 126 { + fmt.Printf("and_int8 126%s-1 = %d, wanted 126\n", `&`, got) + failed = true + } + + if got := and_Neg1_int8_ssa(127); got != 127 { + fmt.Printf("and_int8 -1%s127 = %d, wanted 127\n", `&`, got) + failed = true + } + + if got := and_int8_Neg1_ssa(127); got != 127 { + fmt.Printf("and_int8 127%s-1 = %d, wanted 127\n", `&`, got) + failed = true + } + + if got := and_0_int8_ssa(-128); got != 0 { + fmt.Printf("and_int8 0%s-128 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_0_ssa(-128); got != 0 { + 
fmt.Printf("and_int8 -128%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int8_ssa(-127); got != 0 { + fmt.Printf("and_int8 0%s-127 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_0_ssa(-127); got != 0 { + fmt.Printf("and_int8 -127%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int8_ssa(-1); got != 0 { + fmt.Printf("and_int8 0%s-1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_0_ssa(-1); got != 0 { + fmt.Printf("and_int8 -1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int8_ssa(0); got != 0 { + fmt.Printf("and_int8 0%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_0_ssa(0); got != 0 { + fmt.Printf("and_int8 0%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int8_ssa(1); got != 0 { + fmt.Printf("and_int8 0%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_0_ssa(1); got != 0 { + fmt.Printf("and_int8 1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int8_ssa(126); got != 0 { + fmt.Printf("and_int8 0%s126 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_0_ssa(126); got != 0 { + fmt.Printf("and_int8 126%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_0_int8_ssa(127); got != 0 { + fmt.Printf("and_int8 0%s127 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_0_ssa(127); got != 0 { + fmt.Printf("and_int8 127%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_int8_ssa(-128); got != 0 { + fmt.Printf("and_int8 1%s-128 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_1_ssa(-128); got != 0 { + fmt.Printf("and_int8 -128%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_int8_ssa(-127); got != 1 { + fmt.Printf("and_int8 1%s-127 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int8_1_ssa(-127); got != 1 { + 
fmt.Printf("and_int8 -127%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_1_int8_ssa(-1); got != 1 { + fmt.Printf("and_int8 1%s-1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int8_1_ssa(-1); got != 1 { + fmt.Printf("and_int8 -1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_1_int8_ssa(0); got != 0 { + fmt.Printf("and_int8 1%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_1_ssa(0); got != 0 { + fmt.Printf("and_int8 0%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_int8_ssa(1); got != 1 { + fmt.Printf("and_int8 1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int8_1_ssa(1); got != 1 { + fmt.Printf("and_int8 1%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_1_int8_ssa(126); got != 0 { + fmt.Printf("and_int8 1%s126 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_1_ssa(126); got != 0 { + fmt.Printf("and_int8 126%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_1_int8_ssa(127); got != 1 { + fmt.Printf("and_int8 1%s127 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int8_1_ssa(127); got != 1 { + fmt.Printf("and_int8 127%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_126_int8_ssa(-128); got != 0 { + fmt.Printf("and_int8 126%s-128 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_126_ssa(-128); got != 0 { + fmt.Printf("and_int8 -128%s126 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_126_int8_ssa(-127); got != 0 { + fmt.Printf("and_int8 126%s-127 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_126_ssa(-127); got != 0 { + fmt.Printf("and_int8 -127%s126 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_126_int8_ssa(-1); got != 126 { + fmt.Printf("and_int8 126%s-1 = %d, wanted 126\n", `&`, got) + failed = true + } + + if got := and_int8_126_ssa(-1); 
got != 126 { + fmt.Printf("and_int8 -1%s126 = %d, wanted 126\n", `&`, got) + failed = true + } + + if got := and_126_int8_ssa(0); got != 0 { + fmt.Printf("and_int8 126%s0 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_126_ssa(0); got != 0 { + fmt.Printf("and_int8 0%s126 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_126_int8_ssa(1); got != 0 { + fmt.Printf("and_int8 126%s1 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_126_ssa(1); got != 0 { + fmt.Printf("and_int8 1%s126 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_126_int8_ssa(126); got != 126 { + fmt.Printf("and_int8 126%s126 = %d, wanted 126\n", `&`, got) + failed = true + } + + if got := and_int8_126_ssa(126); got != 126 { + fmt.Printf("and_int8 126%s126 = %d, wanted 126\n", `&`, got) + failed = true + } + + if got := and_126_int8_ssa(127); got != 126 { + fmt.Printf("and_int8 126%s127 = %d, wanted 126\n", `&`, got) + failed = true + } + + if got := and_int8_126_ssa(127); got != 126 { + fmt.Printf("and_int8 127%s126 = %d, wanted 126\n", `&`, got) + failed = true + } + + if got := and_127_int8_ssa(-128); got != 0 { + fmt.Printf("and_int8 127%s-128 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_127_ssa(-128); got != 0 { + fmt.Printf("and_int8 -128%s127 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_127_int8_ssa(-127); got != 1 { + fmt.Printf("and_int8 127%s-127 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int8_127_ssa(-127); got != 1 { + fmt.Printf("and_int8 -127%s127 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_127_int8_ssa(-1); got != 127 { + fmt.Printf("and_int8 127%s-1 = %d, wanted 127\n", `&`, got) + failed = true + } + + if got := and_int8_127_ssa(-1); got != 127 { + fmt.Printf("and_int8 -1%s127 = %d, wanted 127\n", `&`, got) + failed = true + } + + if got := and_127_int8_ssa(0); got != 0 { + fmt.Printf("and_int8 127%s0 = %d, 
wanted 0\n", `&`, got) + failed = true + } + + if got := and_int8_127_ssa(0); got != 0 { + fmt.Printf("and_int8 0%s127 = %d, wanted 0\n", `&`, got) + failed = true + } + + if got := and_127_int8_ssa(1); got != 1 { + fmt.Printf("and_int8 127%s1 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_int8_127_ssa(1); got != 1 { + fmt.Printf("and_int8 1%s127 = %d, wanted 1\n", `&`, got) + failed = true + } + + if got := and_127_int8_ssa(126); got != 126 { + fmt.Printf("and_int8 127%s126 = %d, wanted 126\n", `&`, got) + failed = true + } + + if got := and_int8_127_ssa(126); got != 126 { + fmt.Printf("and_int8 126%s127 = %d, wanted 126\n", `&`, got) + failed = true + } + + if got := and_127_int8_ssa(127); got != 127 { + fmt.Printf("and_int8 127%s127 = %d, wanted 127\n", `&`, got) + failed = true + } + + if got := and_int8_127_ssa(127); got != 127 { + fmt.Printf("and_int8 127%s127 = %d, wanted 127\n", `&`, got) + failed = true + } + + if got := or_Neg128_int8_ssa(-128); got != -128 { + fmt.Printf("or_int8 -128%s-128 = %d, wanted -128\n", `|`, got) + failed = true + } + + if got := or_int8_Neg128_ssa(-128); got != -128 { + fmt.Printf("or_int8 -128%s-128 = %d, wanted -128\n", `|`, got) + failed = true + } + + if got := or_Neg128_int8_ssa(-127); got != -127 { + fmt.Printf("or_int8 -128%s-127 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_int8_Neg128_ssa(-127); got != -127 { + fmt.Printf("or_int8 -127%s-128 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_Neg128_int8_ssa(-1); got != -1 { + fmt.Printf("or_int8 -128%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_Neg128_ssa(-1); got != -1 { + fmt.Printf("or_int8 -1%s-128 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg128_int8_ssa(0); got != -128 { + fmt.Printf("or_int8 -128%s0 = %d, wanted -128\n", `|`, got) + failed = true + } + + if got := or_int8_Neg128_ssa(0); got != -128 { + fmt.Printf("or_int8 0%s-128 = %d, wanted 
-128\n", `|`, got) + failed = true + } + + if got := or_Neg128_int8_ssa(1); got != -127 { + fmt.Printf("or_int8 -128%s1 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_int8_Neg128_ssa(1); got != -127 { + fmt.Printf("or_int8 1%s-128 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_Neg128_int8_ssa(126); got != -2 { + fmt.Printf("or_int8 -128%s126 = %d, wanted -2\n", `|`, got) + failed = true + } + + if got := or_int8_Neg128_ssa(126); got != -2 { + fmt.Printf("or_int8 126%s-128 = %d, wanted -2\n", `|`, got) + failed = true + } + + if got := or_Neg128_int8_ssa(127); got != -1 { + fmt.Printf("or_int8 -128%s127 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_Neg128_ssa(127); got != -1 { + fmt.Printf("or_int8 127%s-128 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg127_int8_ssa(-128); got != -127 { + fmt.Printf("or_int8 -127%s-128 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_int8_Neg127_ssa(-128); got != -127 { + fmt.Printf("or_int8 -128%s-127 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_Neg127_int8_ssa(-127); got != -127 { + fmt.Printf("or_int8 -127%s-127 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_int8_Neg127_ssa(-127); got != -127 { + fmt.Printf("or_int8 -127%s-127 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_Neg127_int8_ssa(-1); got != -1 { + fmt.Printf("or_int8 -127%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_Neg127_ssa(-1); got != -1 { + fmt.Printf("or_int8 -1%s-127 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg127_int8_ssa(0); got != -127 { + fmt.Printf("or_int8 -127%s0 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_int8_Neg127_ssa(0); got != -127 { + fmt.Printf("or_int8 0%s-127 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_Neg127_int8_ssa(1); got != -127 { + fmt.Printf("or_int8 
-127%s1 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_int8_Neg127_ssa(1); got != -127 { + fmt.Printf("or_int8 1%s-127 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_Neg127_int8_ssa(126); got != -1 { + fmt.Printf("or_int8 -127%s126 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_Neg127_ssa(126); got != -1 { + fmt.Printf("or_int8 126%s-127 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg127_int8_ssa(127); got != -1 { + fmt.Printf("or_int8 -127%s127 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_Neg127_ssa(127); got != -1 { + fmt.Printf("or_int8 127%s-127 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int8_ssa(-128); got != -1 { + fmt.Printf("or_int8 -1%s-128 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_Neg1_ssa(-128); got != -1 { + fmt.Printf("or_int8 -128%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int8_ssa(-127); got != -1 { + fmt.Printf("or_int8 -1%s-127 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_Neg1_ssa(-127); got != -1 { + fmt.Printf("or_int8 -127%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int8_ssa(-1); got != -1 { + fmt.Printf("or_int8 -1%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_Neg1_ssa(-1); got != -1 { + fmt.Printf("or_int8 -1%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int8_ssa(0); got != -1 { + fmt.Printf("or_int8 -1%s0 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_Neg1_ssa(0); got != -1 { + fmt.Printf("or_int8 0%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int8_ssa(1); got != -1 { + fmt.Printf("or_int8 -1%s1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_Neg1_ssa(1); got != -1 { + fmt.Printf("or_int8 1%s-1 = %d, wanted -1\n", `|`, got) + failed = true 
+ } + + if got := or_Neg1_int8_ssa(126); got != -1 { + fmt.Printf("or_int8 -1%s126 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_Neg1_ssa(126); got != -1 { + fmt.Printf("or_int8 126%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_Neg1_int8_ssa(127); got != -1 { + fmt.Printf("or_int8 -1%s127 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_Neg1_ssa(127); got != -1 { + fmt.Printf("or_int8 127%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_0_int8_ssa(-128); got != -128 { + fmt.Printf("or_int8 0%s-128 = %d, wanted -128\n", `|`, got) + failed = true + } + + if got := or_int8_0_ssa(-128); got != -128 { + fmt.Printf("or_int8 -128%s0 = %d, wanted -128\n", `|`, got) + failed = true + } + + if got := or_0_int8_ssa(-127); got != -127 { + fmt.Printf("or_int8 0%s-127 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_int8_0_ssa(-127); got != -127 { + fmt.Printf("or_int8 -127%s0 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_0_int8_ssa(-1); got != -1 { + fmt.Printf("or_int8 0%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_0_ssa(-1); got != -1 { + fmt.Printf("or_int8 -1%s0 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_0_int8_ssa(0); got != 0 { + fmt.Printf("or_int8 0%s0 = %d, wanted 0\n", `|`, got) + failed = true + } + + if got := or_int8_0_ssa(0); got != 0 { + fmt.Printf("or_int8 0%s0 = %d, wanted 0\n", `|`, got) + failed = true + } + + if got := or_0_int8_ssa(1); got != 1 { + fmt.Printf("or_int8 0%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_int8_0_ssa(1); got != 1 { + fmt.Printf("or_int8 1%s0 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_0_int8_ssa(126); got != 126 { + fmt.Printf("or_int8 0%s126 = %d, wanted 126\n", `|`, got) + failed = true + } + + if got := or_int8_0_ssa(126); got != 126 { + fmt.Printf("or_int8 126%s0 = %d, wanted 126\n", `|`, got) 
+ failed = true + } + + if got := or_0_int8_ssa(127); got != 127 { + fmt.Printf("or_int8 0%s127 = %d, wanted 127\n", `|`, got) + failed = true + } + + if got := or_int8_0_ssa(127); got != 127 { + fmt.Printf("or_int8 127%s0 = %d, wanted 127\n", `|`, got) + failed = true + } + + if got := or_1_int8_ssa(-128); got != -127 { + fmt.Printf("or_int8 1%s-128 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_int8_1_ssa(-128); got != -127 { + fmt.Printf("or_int8 -128%s1 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_1_int8_ssa(-127); got != -127 { + fmt.Printf("or_int8 1%s-127 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_int8_1_ssa(-127); got != -127 { + fmt.Printf("or_int8 -127%s1 = %d, wanted -127\n", `|`, got) + failed = true + } + + if got := or_1_int8_ssa(-1); got != -1 { + fmt.Printf("or_int8 1%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_1_ssa(-1); got != -1 { + fmt.Printf("or_int8 -1%s1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_1_int8_ssa(0); got != 1 { + fmt.Printf("or_int8 1%s0 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_int8_1_ssa(0); got != 1 { + fmt.Printf("or_int8 0%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_1_int8_ssa(1); got != 1 { + fmt.Printf("or_int8 1%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_int8_1_ssa(1); got != 1 { + fmt.Printf("or_int8 1%s1 = %d, wanted 1\n", `|`, got) + failed = true + } + + if got := or_1_int8_ssa(126); got != 127 { + fmt.Printf("or_int8 1%s126 = %d, wanted 127\n", `|`, got) + failed = true + } + + if got := or_int8_1_ssa(126); got != 127 { + fmt.Printf("or_int8 126%s1 = %d, wanted 127\n", `|`, got) + failed = true + } + + if got := or_1_int8_ssa(127); got != 127 { + fmt.Printf("or_int8 1%s127 = %d, wanted 127\n", `|`, got) + failed = true + } + + if got := or_int8_1_ssa(127); got != 127 { + fmt.Printf("or_int8 127%s1 = %d, wanted 127\n", 
`|`, got) + failed = true + } + + if got := or_126_int8_ssa(-128); got != -2 { + fmt.Printf("or_int8 126%s-128 = %d, wanted -2\n", `|`, got) + failed = true + } + + if got := or_int8_126_ssa(-128); got != -2 { + fmt.Printf("or_int8 -128%s126 = %d, wanted -2\n", `|`, got) + failed = true + } + + if got := or_126_int8_ssa(-127); got != -1 { + fmt.Printf("or_int8 126%s-127 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_126_ssa(-127); got != -1 { + fmt.Printf("or_int8 -127%s126 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_126_int8_ssa(-1); got != -1 { + fmt.Printf("or_int8 126%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_126_ssa(-1); got != -1 { + fmt.Printf("or_int8 -1%s126 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_126_int8_ssa(0); got != 126 { + fmt.Printf("or_int8 126%s0 = %d, wanted 126\n", `|`, got) + failed = true + } + + if got := or_int8_126_ssa(0); got != 126 { + fmt.Printf("or_int8 0%s126 = %d, wanted 126\n", `|`, got) + failed = true + } + + if got := or_126_int8_ssa(1); got != 127 { + fmt.Printf("or_int8 126%s1 = %d, wanted 127\n", `|`, got) + failed = true + } + + if got := or_int8_126_ssa(1); got != 127 { + fmt.Printf("or_int8 1%s126 = %d, wanted 127\n", `|`, got) + failed = true + } + + if got := or_126_int8_ssa(126); got != 126 { + fmt.Printf("or_int8 126%s126 = %d, wanted 126\n", `|`, got) + failed = true + } + + if got := or_int8_126_ssa(126); got != 126 { + fmt.Printf("or_int8 126%s126 = %d, wanted 126\n", `|`, got) + failed = true + } + + if got := or_126_int8_ssa(127); got != 127 { + fmt.Printf("or_int8 126%s127 = %d, wanted 127\n", `|`, got) + failed = true + } + + if got := or_int8_126_ssa(127); got != 127 { + fmt.Printf("or_int8 127%s126 = %d, wanted 127\n", `|`, got) + failed = true + } + + if got := or_127_int8_ssa(-128); got != -1 { + fmt.Printf("or_int8 127%s-128 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := 
or_int8_127_ssa(-128); got != -1 { + fmt.Printf("or_int8 -128%s127 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_127_int8_ssa(-127); got != -1 { + fmt.Printf("or_int8 127%s-127 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_127_ssa(-127); got != -1 { + fmt.Printf("or_int8 -127%s127 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_127_int8_ssa(-1); got != -1 { + fmt.Printf("or_int8 127%s-1 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_int8_127_ssa(-1); got != -1 { + fmt.Printf("or_int8 -1%s127 = %d, wanted -1\n", `|`, got) + failed = true + } + + if got := or_127_int8_ssa(0); got != 127 { + fmt.Printf("or_int8 127%s0 = %d, wanted 127\n", `|`, got) + failed = true + } + + if got := or_int8_127_ssa(0); got != 127 { + fmt.Printf("or_int8 0%s127 = %d, wanted 127\n", `|`, got) + failed = true + } + + if got := or_127_int8_ssa(1); got != 127 { + fmt.Printf("or_int8 127%s1 = %d, wanted 127\n", `|`, got) + failed = true + } + + if got := or_int8_127_ssa(1); got != 127 { + fmt.Printf("or_int8 1%s127 = %d, wanted 127\n", `|`, got) + failed = true + } + + if got := or_127_int8_ssa(126); got != 127 { + fmt.Printf("or_int8 127%s126 = %d, wanted 127\n", `|`, got) + failed = true + } + + if got := or_int8_127_ssa(126); got != 127 { + fmt.Printf("or_int8 126%s127 = %d, wanted 127\n", `|`, got) + failed = true + } + + if got := or_127_int8_ssa(127); got != 127 { + fmt.Printf("or_int8 127%s127 = %d, wanted 127\n", `|`, got) + failed = true + } + + if got := or_int8_127_ssa(127); got != 127 { + fmt.Printf("or_int8 127%s127 = %d, wanted 127\n", `|`, got) + failed = true + } + + if got := xor_Neg128_int8_ssa(-128); got != 0 { + fmt.Printf("xor_int8 -128%s-128 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg128_ssa(-128); got != 0 { + fmt.Printf("xor_int8 -128%s-128 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_Neg128_int8_ssa(-127); got != 1 { + 
fmt.Printf("xor_int8 -128%s-127 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg128_ssa(-127); got != 1 { + fmt.Printf("xor_int8 -127%s-128 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_Neg128_int8_ssa(-1); got != 127 { + fmt.Printf("xor_int8 -128%s-1 = %d, wanted 127\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg128_ssa(-1); got != 127 { + fmt.Printf("xor_int8 -1%s-128 = %d, wanted 127\n", `^`, got) + failed = true + } + + if got := xor_Neg128_int8_ssa(0); got != -128 { + fmt.Printf("xor_int8 -128%s0 = %d, wanted -128\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg128_ssa(0); got != -128 { + fmt.Printf("xor_int8 0%s-128 = %d, wanted -128\n", `^`, got) + failed = true + } + + if got := xor_Neg128_int8_ssa(1); got != -127 { + fmt.Printf("xor_int8 -128%s1 = %d, wanted -127\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg128_ssa(1); got != -127 { + fmt.Printf("xor_int8 1%s-128 = %d, wanted -127\n", `^`, got) + failed = true + } + + if got := xor_Neg128_int8_ssa(126); got != -2 { + fmt.Printf("xor_int8 -128%s126 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg128_ssa(126); got != -2 { + fmt.Printf("xor_int8 126%s-128 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_Neg128_int8_ssa(127); got != -1 { + fmt.Printf("xor_int8 -128%s127 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg128_ssa(127); got != -1 { + fmt.Printf("xor_int8 127%s-128 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_Neg127_int8_ssa(-128); got != 1 { + fmt.Printf("xor_int8 -127%s-128 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg127_ssa(-128); got != 1 { + fmt.Printf("xor_int8 -128%s-127 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_Neg127_int8_ssa(-127); got != 0 { + fmt.Printf("xor_int8 -127%s-127 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := 
xor_int8_Neg127_ssa(-127); got != 0 { + fmt.Printf("xor_int8 -127%s-127 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_Neg127_int8_ssa(-1); got != 126 { + fmt.Printf("xor_int8 -127%s-1 = %d, wanted 126\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg127_ssa(-1); got != 126 { + fmt.Printf("xor_int8 -1%s-127 = %d, wanted 126\n", `^`, got) + failed = true + } + + if got := xor_Neg127_int8_ssa(0); got != -127 { + fmt.Printf("xor_int8 -127%s0 = %d, wanted -127\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg127_ssa(0); got != -127 { + fmt.Printf("xor_int8 0%s-127 = %d, wanted -127\n", `^`, got) + failed = true + } + + if got := xor_Neg127_int8_ssa(1); got != -128 { + fmt.Printf("xor_int8 -127%s1 = %d, wanted -128\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg127_ssa(1); got != -128 { + fmt.Printf("xor_int8 1%s-127 = %d, wanted -128\n", `^`, got) + failed = true + } + + if got := xor_Neg127_int8_ssa(126); got != -1 { + fmt.Printf("xor_int8 -127%s126 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg127_ssa(126); got != -1 { + fmt.Printf("xor_int8 126%s-127 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_Neg127_int8_ssa(127); got != -2 { + fmt.Printf("xor_int8 -127%s127 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg127_ssa(127); got != -2 { + fmt.Printf("xor_int8 127%s-127 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int8_ssa(-128); got != 127 { + fmt.Printf("xor_int8 -1%s-128 = %d, wanted 127\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg1_ssa(-128); got != 127 { + fmt.Printf("xor_int8 -128%s-1 = %d, wanted 127\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int8_ssa(-127); got != 126 { + fmt.Printf("xor_int8 -1%s-127 = %d, wanted 126\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg1_ssa(-127); got != 126 { + fmt.Printf("xor_int8 -127%s-1 = %d, wanted 126\n", `^`, got) + failed 
= true + } + + if got := xor_Neg1_int8_ssa(-1); got != 0 { + fmt.Printf("xor_int8 -1%s-1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg1_ssa(-1); got != 0 { + fmt.Printf("xor_int8 -1%s-1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int8_ssa(0); got != -1 { + fmt.Printf("xor_int8 -1%s0 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg1_ssa(0); got != -1 { + fmt.Printf("xor_int8 0%s-1 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int8_ssa(1); got != -2 { + fmt.Printf("xor_int8 -1%s1 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg1_ssa(1); got != -2 { + fmt.Printf("xor_int8 1%s-1 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int8_ssa(126); got != -127 { + fmt.Printf("xor_int8 -1%s126 = %d, wanted -127\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg1_ssa(126); got != -127 { + fmt.Printf("xor_int8 126%s-1 = %d, wanted -127\n", `^`, got) + failed = true + } + + if got := xor_Neg1_int8_ssa(127); got != -128 { + fmt.Printf("xor_int8 -1%s127 = %d, wanted -128\n", `^`, got) + failed = true + } + + if got := xor_int8_Neg1_ssa(127); got != -128 { + fmt.Printf("xor_int8 127%s-1 = %d, wanted -128\n", `^`, got) + failed = true + } + + if got := xor_0_int8_ssa(-128); got != -128 { + fmt.Printf("xor_int8 0%s-128 = %d, wanted -128\n", `^`, got) + failed = true + } + + if got := xor_int8_0_ssa(-128); got != -128 { + fmt.Printf("xor_int8 -128%s0 = %d, wanted -128\n", `^`, got) + failed = true + } + + if got := xor_0_int8_ssa(-127); got != -127 { + fmt.Printf("xor_int8 0%s-127 = %d, wanted -127\n", `^`, got) + failed = true + } + + if got := xor_int8_0_ssa(-127); got != -127 { + fmt.Printf("xor_int8 -127%s0 = %d, wanted -127\n", `^`, got) + failed = true + } + + if got := xor_0_int8_ssa(-1); got != -1 { + fmt.Printf("xor_int8 0%s-1 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := 
xor_int8_0_ssa(-1); got != -1 { + fmt.Printf("xor_int8 -1%s0 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_0_int8_ssa(0); got != 0 { + fmt.Printf("xor_int8 0%s0 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int8_0_ssa(0); got != 0 { + fmt.Printf("xor_int8 0%s0 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_0_int8_ssa(1); got != 1 { + fmt.Printf("xor_int8 0%s1 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int8_0_ssa(1); got != 1 { + fmt.Printf("xor_int8 1%s0 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_0_int8_ssa(126); got != 126 { + fmt.Printf("xor_int8 0%s126 = %d, wanted 126\n", `^`, got) + failed = true + } + + if got := xor_int8_0_ssa(126); got != 126 { + fmt.Printf("xor_int8 126%s0 = %d, wanted 126\n", `^`, got) + failed = true + } + + if got := xor_0_int8_ssa(127); got != 127 { + fmt.Printf("xor_int8 0%s127 = %d, wanted 127\n", `^`, got) + failed = true + } + + if got := xor_int8_0_ssa(127); got != 127 { + fmt.Printf("xor_int8 127%s0 = %d, wanted 127\n", `^`, got) + failed = true + } + + if got := xor_1_int8_ssa(-128); got != -127 { + fmt.Printf("xor_int8 1%s-128 = %d, wanted -127\n", `^`, got) + failed = true + } + + if got := xor_int8_1_ssa(-128); got != -127 { + fmt.Printf("xor_int8 -128%s1 = %d, wanted -127\n", `^`, got) + failed = true + } + + if got := xor_1_int8_ssa(-127); got != -128 { + fmt.Printf("xor_int8 1%s-127 = %d, wanted -128\n", `^`, got) + failed = true + } + + if got := xor_int8_1_ssa(-127); got != -128 { + fmt.Printf("xor_int8 -127%s1 = %d, wanted -128\n", `^`, got) + failed = true + } + + if got := xor_1_int8_ssa(-1); got != -2 { + fmt.Printf("xor_int8 1%s-1 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int8_1_ssa(-1); got != -2 { + fmt.Printf("xor_int8 -1%s1 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_1_int8_ssa(0); got != 1 { + fmt.Printf("xor_int8 1%s0 = %d, wanted 1\n", `^`, got) + 
failed = true + } + + if got := xor_int8_1_ssa(0); got != 1 { + fmt.Printf("xor_int8 0%s1 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_1_int8_ssa(1); got != 0 { + fmt.Printf("xor_int8 1%s1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int8_1_ssa(1); got != 0 { + fmt.Printf("xor_int8 1%s1 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_1_int8_ssa(126); got != 127 { + fmt.Printf("xor_int8 1%s126 = %d, wanted 127\n", `^`, got) + failed = true + } + + if got := xor_int8_1_ssa(126); got != 127 { + fmt.Printf("xor_int8 126%s1 = %d, wanted 127\n", `^`, got) + failed = true + } + + if got := xor_1_int8_ssa(127); got != 126 { + fmt.Printf("xor_int8 1%s127 = %d, wanted 126\n", `^`, got) + failed = true + } + + if got := xor_int8_1_ssa(127); got != 126 { + fmt.Printf("xor_int8 127%s1 = %d, wanted 126\n", `^`, got) + failed = true + } + + if got := xor_126_int8_ssa(-128); got != -2 { + fmt.Printf("xor_int8 126%s-128 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int8_126_ssa(-128); got != -2 { + fmt.Printf("xor_int8 -128%s126 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_126_int8_ssa(-127); got != -1 { + fmt.Printf("xor_int8 126%s-127 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int8_126_ssa(-127); got != -1 { + fmt.Printf("xor_int8 -127%s126 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_126_int8_ssa(-1); got != -127 { + fmt.Printf("xor_int8 126%s-1 = %d, wanted -127\n", `^`, got) + failed = true + } + + if got := xor_int8_126_ssa(-1); got != -127 { + fmt.Printf("xor_int8 -1%s126 = %d, wanted -127\n", `^`, got) + failed = true + } + + if got := xor_126_int8_ssa(0); got != 126 { + fmt.Printf("xor_int8 126%s0 = %d, wanted 126\n", `^`, got) + failed = true + } + + if got := xor_int8_126_ssa(0); got != 126 { + fmt.Printf("xor_int8 0%s126 = %d, wanted 126\n", `^`, got) + failed = true + } + + if got := xor_126_int8_ssa(1); got != 127 
{ + fmt.Printf("xor_int8 126%s1 = %d, wanted 127\n", `^`, got) + failed = true + } + + if got := xor_int8_126_ssa(1); got != 127 { + fmt.Printf("xor_int8 1%s126 = %d, wanted 127\n", `^`, got) + failed = true + } + + if got := xor_126_int8_ssa(126); got != 0 { + fmt.Printf("xor_int8 126%s126 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int8_126_ssa(126); got != 0 { + fmt.Printf("xor_int8 126%s126 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_126_int8_ssa(127); got != 1 { + fmt.Printf("xor_int8 126%s127 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int8_126_ssa(127); got != 1 { + fmt.Printf("xor_int8 127%s126 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_127_int8_ssa(-128); got != -1 { + fmt.Printf("xor_int8 127%s-128 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_int8_127_ssa(-128); got != -1 { + fmt.Printf("xor_int8 -128%s127 = %d, wanted -1\n", `^`, got) + failed = true + } + + if got := xor_127_int8_ssa(-127); got != -2 { + fmt.Printf("xor_int8 127%s-127 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_int8_127_ssa(-127); got != -2 { + fmt.Printf("xor_int8 -127%s127 = %d, wanted -2\n", `^`, got) + failed = true + } + + if got := xor_127_int8_ssa(-1); got != -128 { + fmt.Printf("xor_int8 127%s-1 = %d, wanted -128\n", `^`, got) + failed = true + } + + if got := xor_int8_127_ssa(-1); got != -128 { + fmt.Printf("xor_int8 -1%s127 = %d, wanted -128\n", `^`, got) + failed = true + } + + if got := xor_127_int8_ssa(0); got != 127 { + fmt.Printf("xor_int8 127%s0 = %d, wanted 127\n", `^`, got) + failed = true + } + + if got := xor_int8_127_ssa(0); got != 127 { + fmt.Printf("xor_int8 0%s127 = %d, wanted 127\n", `^`, got) + failed = true + } + + if got := xor_127_int8_ssa(1); got != 126 { + fmt.Printf("xor_int8 127%s1 = %d, wanted 126\n", `^`, got) + failed = true + } + + if got := xor_int8_127_ssa(1); got != 126 { + fmt.Printf("xor_int8 1%s127 = %d, 
wanted 126\n", `^`, got) + failed = true + } + + if got := xor_127_int8_ssa(126); got != 1 { + fmt.Printf("xor_int8 127%s126 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_int8_127_ssa(126); got != 1 { + fmt.Printf("xor_int8 126%s127 = %d, wanted 1\n", `^`, got) + failed = true + } + + if got := xor_127_int8_ssa(127); got != 0 { + fmt.Printf("xor_int8 127%s127 = %d, wanted 0\n", `^`, got) + failed = true + } + + if got := xor_int8_127_ssa(127); got != 0 { + fmt.Printf("xor_int8 127%s127 = %d, wanted 0\n", `^`, got) + failed = true + } if failed { panic("tests failed") } diff --git a/src/cmd/compile/internal/gc/testdata/flowgraph_generator1.go b/src/cmd/compile/internal/gc/testdata/flowgraph_generator1.go new file mode 100644 index 00000000000..48b05f74918 --- /dev/null +++ b/src/cmd/compile/internal/gc/testdata/flowgraph_generator1.go @@ -0,0 +1,315 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "strings" +) + +// make fake flow graph. + +// The blocks of the flow graph are designated with letters A +// through Z, always including A (start block) and Z (exit +// block) The specification of a flow graph is a comma- +// separated list of block successor words, for blocks ordered +// A, B, C etc, where each block except Z has one or two +// successors, and any block except A can be a target. Within +// the generated code, each block with two successors includes +// a conditional testing x & 1 != 0 (x is the input parameter +// to the generated function) and also unconditionally shifts x +// right by one, so that different inputs generate different +// execution paths, including loops. Every block inverts a +// global binary to ensure it is not empty. 
For a flow graph +// with J words (J+1 blocks), a J-1 bit serial number specifies +// which blocks (not including A and Z) include an increment of +// the return variable y by increasing powers of 10, and a +// different version of the test function is created for each +// of the 2-to-the-(J-1) serial numbers. + +// For each generated function a compact summary is also +// created so that the generated funtion can be simulated +// with a simple interpreter to sanity check the behavior of +// the compiled code. + +// For example: + +// func BC_CD_BE_BZ_CZ101(x int64) int64 { +// y := int64(0) +// var b int64 +// _ = b +// b = x & 1 +// x = x >> 1 +// if b != 0 { +// goto C +// } +// goto B +// B: +// glob_ = !glob_ +// y += 1 +// b = x & 1 +// x = x >> 1 +// if b != 0 { +// goto D +// } +// goto C +// C: +// glob_ = !glob_ +// // no y increment +// b = x & 1 +// x = x >> 1 +// if b != 0 { +// goto E +// } +// goto B +// D: +// glob_ = !glob_ +// y += 10 +// b = x & 1 +// x = x >> 1 +// if b != 0 { +// goto Z +// } +// goto B +// E: +// glob_ = !glob_ +// // no y increment +// b = x & 1 +// x = x >> 1 +// if b != 0 { +// goto Z +// } +// goto C +// Z: +// return y +// } + +// {f:BC_CD_BE_BZ_CZ101, +// maxin:32, blocks:[]blo{ +// blo{inc:0, cond:true, succs:[2]int64{1, 2}}, +// blo{inc:1, cond:true, succs:[2]int64{2, 3}}, +// blo{inc:0, cond:true, succs:[2]int64{1, 4}}, +// blo{inc:10, cond:true, succs:[2]int64{1, 25}}, +// blo{inc:0, cond:true, succs:[2]int64{2, 25}},}}, + +var labels string = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + +func blocks(spec string) (blocks []string, fnameBase string) { + spec = strings.ToUpper(spec) + blocks = strings.Split(spec, ",") + fnameBase = strings.Replace(spec, ",", "_", -1) + return +} + +func makeFunctionFromFlowGraph(blocks []blo, fname string) string { + s := "" + + for j := range blocks { + // begin block + if j == 0 { + // block A, implicit label + s += ` +func ` + fname + `(x int64) int64 { + y := int64(0) + var b int64 + _ = b` + } 
else { + // block B,C, etc, explicit label w/ conditional increment + l := labels[j : j+1] + yeq := ` + // no y increment` + if blocks[j].inc != 0 { + yeq = ` + y += ` + fmt.Sprintf("%d", blocks[j].inc) + } + + s += ` +` + l + `: + glob = !glob` + yeq + } + + // edges to successors + if blocks[j].cond { // conditionally branch to second successor + s += ` + b = x & 1 + x = x >> 1 + if b != 0 {` + ` + goto ` + string(labels[blocks[j].succs[1]]) + ` + }` + + } + // branch to first successor + s += ` + goto ` + string(labels[blocks[j].succs[0]]) + } + + // end block (Z) + s += ` +Z: + return y +} +` + return s +} + +var graphs []string = []string{ + "Z", "BZ,Z", "B,BZ", "BZ,BZ", + "ZB,Z", "B,ZB", "ZB,BZ", "ZB,ZB", + + "BC,C,Z", "BC,BC,Z", "BC,BC,BZ", + "BC,Z,Z", "BC,ZC,Z", "BC,ZC,BZ", + "BZ,C,Z", "BZ,BC,Z", "BZ,CZ,Z", + "BZ,C,BZ", "BZ,BC,BZ", "BZ,CZ,BZ", + "BZ,C,CZ", "BZ,BC,CZ", "BZ,CZ,CZ", + + "BC,CD,BE,BZ,CZ", + "BC,BD,CE,CZ,BZ", + "BC,BD,CE,FZ,GZ,F,G", + "BC,BD,CE,FZ,GZ,G,F", + + "BC,DE,BE,FZ,FZ,Z", + "BC,DE,BE,FZ,ZF,Z", + "BC,DE,BE,ZF,FZ,Z", + "BC,DE,EB,FZ,FZ,Z", + "BC,ED,BE,FZ,FZ,Z", + "CB,DE,BE,FZ,FZ,Z", + + "CB,ED,BE,FZ,FZ,Z", + "BC,ED,EB,FZ,ZF,Z", + "CB,DE,EB,ZF,FZ,Z", + "CB,ED,EB,FZ,FZ,Z", + + "BZ,CD,CD,CE,BZ", + "EC,DF,FG,ZC,GB,BE,FD", + "BH,CF,DG,HE,BF,CG,DH,BZ", +} + +// blo describes a block in the generated/interpreted code +type blo struct { + inc int64 // increment amount + cond bool // block ends in conditional + succs [2]int64 +} + +// strings2blocks converts a slice of strings specifying +// successors into a slice of blo encoding the blocks in a +// common form easy to execute or interpret. 
+func strings2blocks(blocks []string, fname string, i int) (bs []blo, cond uint) { + bs = make([]blo, len(blocks)) + edge := int64(1) + cond = 0 + k := uint(0) + for j, s := range blocks { + if j == 0 { + } else { + if (i>>k)&1 != 0 { + bs[j].inc = edge + edge *= 10 + } + k++ + } + if len(s) > 1 { + bs[j].succs[1] = int64(blocks[j][1] - 'A') + bs[j].cond = true + cond++ + } + bs[j].succs[0] = int64(blocks[j][0] - 'A') + } + return bs, cond +} + +// fmtBlocks writes out the blocks for consumption in the generated test +func fmtBlocks(bs []blo) string { + s := "[]blo{" + for _, b := range bs { + s += fmt.Sprintf("blo{inc:%d, cond:%v, succs:[2]int64{%d, %d}},", b.inc, b.cond, b.succs[0], b.succs[1]) + } + s += "}" + return s +} + +func main() { + fmt.Printf(`// This is a machine-generated test file from flowgraph_generator1.go. +package main +import "fmt" +var glob bool +`) + s := "var funs []fun = []fun{" + for _, g := range graphs { + split, fnameBase := blocks(g) + nconfigs := 1 << uint(len(split)-1) + + for i := 0; i < nconfigs; i++ { + fname := fnameBase + fmt.Sprintf("%b", i) + bs, k := strings2blocks(split, fname, i) + fmt.Printf("%s", makeFunctionFromFlowGraph(bs, fname)) + s += ` + {f:` + fname + `, maxin:` + fmt.Sprintf("%d", 1<>1 + if c { + next = b.succs[1] + } + } + if next == last { + return y, true + } + j = next + } + return -1, false +} + +func main() { + sum := int64(0) + for i, f := range funs { + for x := int64(0); x < 16*f.maxin; x++ { + y, ok := interpret(f.blocks, x) + if ok { + yy := f.f(x) + if y != yy { + fmt.Printf("y(%d) != yy(%d), x=%b, i=%d, blocks=%v\n", y, yy, x, i, f.blocks) + return + } + sum += y + } + } + } +// fmt.Printf("Sum of all returns over all terminating inputs is %d\n", sum) +} +`) +} diff --git a/src/cmd/compile/internal/gc/testdata/gen/arithConstGen.go b/src/cmd/compile/internal/gc/testdata/gen/arithConstGen.go index ac1c8d93e8d..279c7bc7058 100644 --- a/src/cmd/compile/internal/gc/testdata/gen/arithConstGen.go +++ 
b/src/cmd/compile/internal/gc/testdata/gen/arithConstGen.go @@ -54,6 +54,9 @@ var ops = []op{ {"lsh", "<<"}, {"rsh", ">>"}, {"mod", "%"}, + {"and", "&"}, + {"or", "|"}, + {"xor", "^"}, } // compute the result of i op j, cast as type t. @@ -78,6 +81,12 @@ func ansU(i, j uint64, t, op string) string { ans = i << j case ">>": ans = i >> j + case "&": + ans = i & j + case "|": + ans = i | j + case "^": + ans = i ^ j } switch t { case "uint32": @@ -112,6 +121,12 @@ func ansS(i, j int64, t, op string) string { ans = i << uint64(j) case ">>": ans = i >> uint64(j) + case "&": + ans = i & j + case "|": + ans = i | j + case "^": + ans = i ^ j } switch t { case "int32": diff --git a/src/cmd/compile/internal/gc/typecheck.go b/src/cmd/compile/internal/gc/typecheck.go index 795bdcdd358..5285cb22d9f 100644 --- a/src/cmd/compile/internal/gc/typecheck.go +++ b/src/cmd/compile/internal/gc/typecheck.go @@ -7,7 +7,6 @@ package gc import ( "cmd/compile/internal/types" "cmd/internal/objabi" - "cmd/internal/src" "fmt" "math" "strings" @@ -244,21 +243,15 @@ func callrecvlist(l Nodes) bool { } // indexlit implements typechecking of untyped values as -// array/slice indexes. It is equivalent to defaultlit -// except for constants of numerical kind, which are acceptable -// whenever they can be represented by a value of type int. +// array/slice indexes. It is almost equivalent to defaultlit +// but also accepts untyped numeric values representable as +// value of type int (see also checkmake for comparison). // The result of indexlit MUST be assigned back to n, e.g. 
// n.Left = indexlit(n.Left) func indexlit(n *Node) *Node { - if n == nil || !n.Type.IsUntyped() { - return n + if n != nil && n.Type != nil && n.Type.Etype == TIDEAL { + return defaultlit(n, types.Types[TINT]) } - switch consttype(n) { - case CTINT, CTRUNE, CTFLT, CTCPLX: - n = defaultlit(n, types.Types[TINT]) - } - - n = defaultlit(n, nil) return n } @@ -266,7 +259,7 @@ func indexlit(n *Node) *Node { // n.Left = typecheck1(n.Left, top) func typecheck1(n *Node, top int) *Node { switch n.Op { - case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER: + case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER, ORETJMP: // n.Sym is a field/method name, not a variable. default: if n.Sym != nil { @@ -285,7 +278,6 @@ func typecheck1(n *Node, top int) *Node { } ok := 0 -OpSwitch: switch n.Op { // until typecheck is complete, do nothing. default: @@ -300,11 +292,9 @@ OpSwitch: if n.Type == nil && n.Val().Ctype() == CTSTR { n.Type = types.Idealstring } - break OpSwitch case ONONAME: ok |= Erv - break OpSwitch case ONAME: if n.Name.Decldepth == 0 { @@ -312,7 +302,7 @@ OpSwitch: } if n.Etype != 0 { ok |= Ecall - break OpSwitch + break } if top&Easgn == 0 { @@ -327,7 +317,6 @@ OpSwitch: } ok |= Erv - break OpSwitch case OPACK: yyerror("use of package %v without selector", n.Sym) @@ -424,18 +413,7 @@ OpSwitch: } n.Op = OTYPE n.Type = types.NewMap(l.Type, r.Type) - - // map key validation - alg, bad := algtype1(l.Type) - if alg == ANOEQ { - if bad.Etype == TFORW { - // queue check for map until all the types are done settling. 
- mapqueue = append(mapqueue, mapqueueval{l, n.Pos}) - } else if bad.Etype != TANY { - // no need to queue, key is already bad - yyerror("invalid map key type %v", l.Type) - } - } + mapqueue = append(mapqueue, n) // check map keys when all types are settled n.Left = nil n.Right = nil @@ -505,7 +483,7 @@ OpSwitch: checkwidth(l.Type) } n.Left = nil - break OpSwitch + break } if !t.IsPtr() { @@ -515,12 +493,11 @@ OpSwitch: return n } - break OpSwitch + break } ok |= Erv n.Type = t.Elem() - break OpSwitch // arithmetic exprs case OASOP, @@ -597,7 +574,7 @@ OpSwitch: // the outer context gives the type n.Type = l.Type - break OpSwitch + break } // ideal mixed with non-ideal @@ -617,7 +594,7 @@ OpSwitch: if et == TIDEAL { et = TINT } - var aop Op = OXXX + aop := OXXX if iscmp[n.Op] && t.Etype != TIDEAL && !eqtype(l.Type, r.Type) { // comparison is okay as long as one side is // assignable to the other. convert so they have @@ -626,6 +603,7 @@ OpSwitch: // the only conversion that isn't a no-op is concrete == interface. // in that case, check comparability of the concrete type. // The conversion allocates, so only do it if the concrete type is huge. 
+ converted := false if r.Type.Etype != TBLANK { aop = assignop(l.Type, r.Type, nil) if aop != 0 { @@ -644,11 +622,11 @@ OpSwitch: } t = r.Type - goto converted + converted = true } } - if l.Type.Etype != TBLANK { + if !converted && l.Type.Etype != TBLANK { aop = assignop(r.Type, l.Type, nil) if aop != 0 { if l.Type.IsInterface() && !r.Type.IsInterface() && !IsComparable(r.Type) { @@ -669,7 +647,6 @@ OpSwitch: } } - converted: et = t.Etype } @@ -781,7 +758,6 @@ OpSwitch: } n.Type = t - break OpSwitch case OCOM, OMINUS, ONOT, OPLUS: ok |= Erv @@ -799,7 +775,6 @@ OpSwitch: } n.Type = t - break OpSwitch // exprs case OADDR: @@ -840,7 +815,6 @@ OpSwitch: return n } n.Type = types.NewPtr(t) - break OpSwitch case OCOMPLIT: ok |= Erv @@ -848,7 +822,6 @@ OpSwitch: if n.Type == nil { return n } - break OpSwitch case OXDOT, ODOT: if n.Op == OXDOT { @@ -899,7 +872,7 @@ OpSwitch: n.Xoffset = 0 n.SetClass(PFUNC) ok = Erv - break OpSwitch + break } if t.IsPtr() && !t.Elem().IsInterface() { @@ -956,8 +929,6 @@ OpSwitch: ok |= Erv } - break OpSwitch - case ODOTTYPE: ok |= Erv n.Left = typecheck(n.Left, Erv) @@ -1003,8 +974,6 @@ OpSwitch: } } - break OpSwitch - case OINDEX: ok |= Erv n.Left = typecheck(n.Left, Erv) @@ -1066,8 +1035,6 @@ OpSwitch: n.Op = OINDEXMAP } - break OpSwitch - case ORECV: ok |= Etop | Erv n.Left = typecheck(n.Left, Erv) @@ -1091,16 +1058,13 @@ OpSwitch: } n.Type = t.Elem() - break OpSwitch case OSEND: ok |= Etop n.Left = typecheck(n.Left, Erv) - l := n.Left n.Right = typecheck(n.Right, Erv) n.Left = defaultlit(n.Left, nil) - l = n.Left - t := l.Type + t := n.Left.Type if t == nil { n.Type = nil return n @@ -1123,13 +1087,12 @@ OpSwitch: n.Type = nil return n } - n.Right = assignconv(r, l.Type.Elem(), "send") + n.Right = assignconv(r, t.Elem(), "send") // TODO: more aggressive n.Etype = 0 n.Type = nil - break OpSwitch case OSLICE, OSLICE3: ok |= Erv @@ -1204,7 +1167,6 @@ OpSwitch: n.Type = nil return n } - break OpSwitch // call and call like case OCALL: @@ 
-1296,11 +1258,11 @@ OpSwitch: typecheckaste(OCALL, n.Left, n.Isddd(), t.Params(), n.List, func() string { return fmt.Sprintf("argument to %v", n.Left) }) ok |= Etop - if t.Results().NumFields() == 0 { - break OpSwitch + if t.NumResults() == 0 { + break } ok |= Erv - if t.Results().NumFields() == 1 { + if t.NumResults() == 1 { n.Type = l.Type.Results().Field(0).Type if n.Op == OCALLFUNC && n.Left.Op == ONAME && isRuntimePkg(n.Left.Sym.Pkg) && n.Left.Sym.Name == "getg" { @@ -1313,19 +1275,17 @@ OpSwitch: n.Op = OGETG } - break OpSwitch + break } // multiple return if top&(Efnstruct|Etop) == 0 { yyerror("multiple-value %v() in single-value context", l) - break OpSwitch + break } n.Type = l.Type.Results() - break OpSwitch - case OALIGNOF, OOFFSETOF, OSIZEOF: ok |= Erv if !onearg(n, "%v", n.Op) { @@ -1339,8 +1299,6 @@ OpSwitch: r.Orig = n n = &r - break OpSwitch - case OCAP, OLEN: ok |= Erv if !onearg(n, "%v", n.Op) { @@ -1391,7 +1349,6 @@ OpSwitch: } n.Type = types.Types[TINT] - break OpSwitch case OREAL, OIMAG: ok |= Erv @@ -1459,7 +1416,6 @@ OpSwitch: Fatalf("unexpected Etype: %v\n", et) } n.Type = types.Types[et] - break OpSwitch case OCOMPLEX: ok |= Erv @@ -1478,8 +1434,8 @@ OpSwitch: // Bail. This error will be reported elsewhere. 
return n } - if t.Results().NumFields() != 2 { - yyerror("invalid operation: complex expects two arguments, %v returns %d results", n.List.First(), t.Results().NumFields()) + if t.NumResults() != 2 { + yyerror("invalid operation: complex expects two arguments, %v returns %d results", n.List.First(), t.NumResults()) n.Type = nil return n } @@ -1541,7 +1497,6 @@ OpSwitch: } n.Type = t - break OpSwitch case OCLOSE: if !onearg(n, "%v", n.Op) { @@ -1569,7 +1524,6 @@ OpSwitch: } ok |= Etop - break OpSwitch case ODELETE: args := n.List @@ -1602,7 +1556,6 @@ OpSwitch: } args.SetSecond(assignconv(r, l.Type.Key(), "delete")) - break OpSwitch case OAPPEND: ok |= Erv @@ -1660,11 +1613,11 @@ OpSwitch: if t.Elem().IsKind(TUINT8) && args.Second().Type.IsString() { args.SetSecond(defaultlit(args.Second(), types.Types[TSTRING])) - break OpSwitch + break } args.SetSecond(assignconv(args.Second(), t.Orig, "append")) - break OpSwitch + break } if funarg != nil { @@ -1684,8 +1637,6 @@ OpSwitch: } } - break OpSwitch - case OCOPY: ok |= Etop | Erv args := n.List @@ -1721,7 +1672,7 @@ OpSwitch: // copy([]byte, string) if n.Left.Type.IsSlice() && n.Right.Type.IsString() { if eqtype(n.Left.Type.Elem(), types.Bytetype) { - break OpSwitch + break } yyerror("arguments to copy have different element types: %L and string", n.Left.Type) n.Type = nil @@ -1746,8 +1697,6 @@ OpSwitch: return n } - break OpSwitch - case OCONV: ok |= Erv saveorignode(n) @@ -1762,7 +1711,7 @@ OpSwitch: var why string n.Op = convertop(t, n.Type, &why) if n.Op == 0 { - if !n.Diag() && !n.Type.Broke() { + if !n.Diag() && !n.Type.Broke() && !n.Left.Diag() { yyerror("cannot convert %L to type %v%s", n.Left, n.Type, why) n.SetDiag(true) } @@ -1799,8 +1748,6 @@ OpSwitch: } } - break OpSwitch - case OMAKE: ok |= Erv args := n.List.Slice() @@ -1911,7 +1858,6 @@ OpSwitch: } n.Type = t - break OpSwitch case ONEW: ok |= Erv @@ -1937,7 +1883,6 @@ OpSwitch: n.Left = l n.Type = types.NewPtr(t) - break OpSwitch case OPRINT, OPRINTN: ok 
|= Etop @@ -1952,8 +1897,6 @@ OpSwitch: } } - break OpSwitch - case OPANIC: ok |= Etop if !onearg(n, "panic") { @@ -1966,7 +1909,6 @@ OpSwitch: n.Type = nil return n } - break OpSwitch case ORECOVER: ok |= Erv | Etop @@ -1977,7 +1919,6 @@ OpSwitch: } n.Type = types.Types[TINTER] - break OpSwitch case OCLOSURE: ok |= Erv @@ -1985,7 +1926,6 @@ OpSwitch: if n.Type == nil { return n } - break OpSwitch case OITAB: ok |= Erv @@ -1999,13 +1939,11 @@ OpSwitch: Fatalf("OITAB of %v", t) } n.Type = types.NewPtr(types.Types[TUINTPTR]) - break OpSwitch case OIDATA: // Whoever creates the OIDATA node must know a priori the concrete type at that moment, // usually by just having checked the OITAB. Fatalf("cannot typecheck interface data %v", n) - break OpSwitch case OSPTR: ok |= Erv @@ -2023,22 +1961,18 @@ OpSwitch: } else { n.Type = types.NewPtr(t.Elem()) } - break OpSwitch case OCLOSUREVAR: ok |= Erv - break OpSwitch case OCFUNC: ok |= Erv n.Left = typecheck(n.Left, Erv) n.Type = types.Types[TUINTPTR] - break OpSwitch case OCONVNOP: ok |= Erv n.Left = typecheck(n.Left, Erv) - break OpSwitch // statements case OAS: @@ -2050,23 +1984,20 @@ OpSwitch: if n.Left.Op == ONAME && n.Left.IsAutoTmp() { n.Left.Name.Defn = n } - break OpSwitch case OAS2: ok |= Etop typecheckas2(n) - break OpSwitch case OBREAK, OCONTINUE, ODCL, OEMPTY, OGOTO, - OXFALL, + OFALL, OVARKILL, OVARLIVE: ok |= Etop - break OpSwitch case OLABEL: ok |= Etop @@ -2078,7 +2009,6 @@ OpSwitch: n.Op = OEMPTY n.Left = nil } - break OpSwitch case ODEFER: ok |= Etop @@ -2086,13 +2016,11 @@ OpSwitch: if !n.Left.Diag() { checkdefergo(n) } - break OpSwitch case OPROC: ok |= Etop n.Left = typecheck(n.Left, Etop|Erv) checkdefergo(n) - break OpSwitch case OFOR, OFORUNTIL: ok |= Etop @@ -2108,7 +2036,6 @@ OpSwitch: n.Right = typecheck(n.Right, Etop) typecheckslice(n.Nbody.Slice(), Etop) decldepth-- - break OpSwitch case OIF: ok |= Etop @@ -2122,7 +2049,6 @@ OpSwitch: } typecheckslice(n.Nbody.Slice(), Etop) 
typecheckslice(n.Rlist.Slice(), Etop) - break OpSwitch case ORETURN: ok |= Etop @@ -2138,29 +2064,24 @@ OpSwitch: } if Curfn.Type.FuncType().Outnamed && n.List.Len() == 0 { - break OpSwitch + break } typecheckaste(ORETURN, nil, false, Curfn.Type.Results(), n.List, func() string { return "return argument" }) - break OpSwitch case ORETJMP: ok |= Etop - break OpSwitch case OSELECT: ok |= Etop typecheckselect(n) - break OpSwitch case OSWITCH: ok |= Etop typecheckswitch(n) - break OpSwitch case ORANGE: ok |= Etop typecheckrange(n) - break OpSwitch case OTYPESW: yyerror("use of .(type) outside type switch") @@ -2171,17 +2092,14 @@ OpSwitch: ok |= Etop typecheckslice(n.List.Slice(), Erv) typecheckslice(n.Nbody.Slice(), Etop) - break OpSwitch case ODCLFUNC: ok |= Etop typecheckfunc(n) - break OpSwitch case ODCLCONST: ok |= Etop n.Left = typecheck(n.Left, Erv) - break OpSwitch case ODCLTYPE: ok |= Etop @@ -2193,7 +2111,6 @@ OpSwitch: // could silently propagate go:notinheap). yyerror("type %v must be go:notinheap", n.Left.Type) } - break OpSwitch } t := n.Type @@ -2208,7 +2125,7 @@ OpSwitch: } } - if safemode && !inimport && compiling_wrappers == 0 && t != nil && t.Etype == TUNSAFEPTR { + if safemode && !inimport && !compiling_wrappers && t != nil && t.Etype == TUNSAFEPTR { yyerror("cannot use unsafe.Pointer") } @@ -2624,68 +2541,66 @@ func hasddd(t *types.Type) bool { // typecheck assignment: type list = expression list func typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes, desc func() string) { var t *types.Type - var n *Node var n1 int var n2 int var i int lno := lineno + defer func() { lineno = lno }() if tstruct.Broke() { - goto out + return } - n = nil + var n *Node if nl.Len() == 1 { n = nl.First() - if n.Type != nil { - if n.Type.IsFuncArgStruct() { - if !hasddd(tstruct) { - n1 := tstruct.NumFields() - n2 := n.Type.NumFields() - if n2 > n1 { - goto toomany - } - if n2 < n1 { - goto notenough - } - } - - lfs := tstruct.FieldSlice() - rfs := 
n.Type.FieldSlice() - var why string - for i, tl := range lfs { - if tl.Isddd() { - for _, tn := range rfs[i:] { - if assignop(tn.Type, tl.Type.Elem(), &why) == 0 { - if call != nil { - yyerror("cannot use %v as type %v in argument to %v%s", tn.Type, tl.Type.Elem(), call, why) - } else { - yyerror("cannot use %v as type %v in %s%s", tn.Type, tl.Type.Elem(), desc(), why) - } - } - } - goto out - } - - if i >= len(rfs) { - goto notenough - } - tn := rfs[i] - if assignop(tn.Type, tl.Type, &why) == 0 { - if call != nil { - yyerror("cannot use %v as type %v in argument to %v%s", tn.Type, tl.Type, call, why) - } else { - yyerror("cannot use %v as type %v in %s%s", tn.Type, tl.Type, desc(), why) - } - } - } - - if len(rfs) > len(lfs) { + if n.Type != nil && n.Type.IsFuncArgStruct() { + if !hasddd(tstruct) { + n1 := tstruct.NumFields() + n2 := n.Type.NumFields() + if n2 > n1 { goto toomany } - goto out + if n2 < n1 { + goto notenough + } } + + lfs := tstruct.FieldSlice() + rfs := n.Type.FieldSlice() + var why string + for i, tl := range lfs { + if tl.Isddd() { + for _, tn := range rfs[i:] { + if assignop(tn.Type, tl.Type.Elem(), &why) == 0 { + if call != nil { + yyerror("cannot use %v as type %v in argument to %v%s", tn.Type, tl.Type.Elem(), call, why) + } else { + yyerror("cannot use %v as type %v in %s%s", tn.Type, tl.Type.Elem(), desc(), why) + } + } + } + return + } + + if i >= len(rfs) { + goto notenough + } + tn := rfs[i] + if assignop(tn.Type, tl.Type, &why) == 0 { + if call != nil { + yyerror("cannot use %v as type %v in argument to %v%s", tn.Type, tl.Type, call, why) + } else { + yyerror("cannot use %v as type %v in %s%s", tn.Type, tl.Type, desc(), why) + } + } + } + + if len(rfs) > len(lfs) { + goto toomany + } + return } } @@ -2729,7 +2644,7 @@ func typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes, if n.Type != nil { nl.SetIndex(i, assignconvfn(n, t, desc)) } - goto out + return } for ; i < nl.Len(); i++ { @@ -2739,8 +2654,7 @@ func 
typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes, nl.SetIndex(i, assignconvfn(n, t.Elem(), desc)) } } - - goto out + return } if i >= nl.Len() { @@ -2764,9 +2678,6 @@ func typecheckaste(op Op, call *Node, isddd bool, tstruct *types.Type, nl Nodes, yyerror("invalid use of ... in %v", op) } } - -out: - lineno = lno return notenough: @@ -2776,7 +2687,7 @@ notenough: // call is the expression being called, not the overall call. // Method expressions have the form T.M, and the compiler has // rewritten those to ONAME nodes but left T in Left. - if call.Op == ONAME && call.Left != nil && call.Left.Op == OTYPE { + if call.isMethodExpression() { yyerror("not enough arguments in call to method expression %v%s", call, details) } else { yyerror("not enough arguments in call to %v%s", call, details) @@ -2788,8 +2699,7 @@ notenough: n.SetDiag(true) } } - - goto out + return toomany: details := errorDetails(nl, tstruct, isddd) @@ -2798,7 +2708,6 @@ toomany: } else { yyerror("too many arguments to %v%s", op, details) } - goto out } func errorDetails(nl Nodes, tstruct *types.Type, isddd bool) string { @@ -3084,7 +2993,7 @@ func typecheckcomplit(n *Node) *Node { for i3, l := range n.List.Slice() { setlineno(l) if l.Op != OKEY { - n.List.SetIndex(i3, typecheck(n.List.Index(i3), Erv)) + n.List.SetIndex(i3, typecheck(l, Erv)) yyerror("missing key in map literal") continue } @@ -3111,7 +3020,7 @@ func typecheckcomplit(n *Node) *Node { // Need valid field offsets for Xoffset below. 
dowidth(t) - bad := 0 + errored := false if n.List.Len() != 0 && nokeys(n.List) { // simple list of variables ls := n.List.Slice() @@ -3120,10 +3029,10 @@ func typecheckcomplit(n *Node) *Node { n1 = typecheck(n1, Erv) ls[i] = n1 if i >= t.NumFields() { - if bad == 0 { + if !errored { yyerror("too many values in struct initializer") + errored = true } - bad++ continue } @@ -3180,17 +3089,21 @@ func typecheckcomplit(n *Node) *Node { } if l.Op != OSTRUCTKEY { - if bad == 0 { + if !errored { yyerror("mixture of field:value and value initializers") + errored = true } - bad++ ls[i] = typecheck(ls[i], Erv) continue } f := lookdot1(nil, l.Sym, t, t.Fields(), 0) if f == nil { - yyerror("unknown field '%v' in struct literal of type %v", l.Sym, t) + if ci := lookdot1(nil, l.Sym, t, t.Fields(), 2); ci != nil { // Case-insensitive lookup. + yyerror("unknown field '%v' in struct literal of type %v (but does have %v)", l.Sym, t, ci.Sym) + } else { + yyerror("unknown field '%v' in struct literal of type %v", l.Sym, t) + } continue } fielddup(f.Sym.Name, hash) @@ -3493,7 +3406,7 @@ func typecheckas2(n *Node) { } mismatch: - yyerror("cannot assign %d values to %d variables", cr, cl) + yyerror("assignment mismatch: %d variables but %d values", cl, cr) // second half of dance out: @@ -3563,16 +3476,18 @@ func stringtoarraylit(n *Node) *Node { return nn } -var ntypecheckdeftype int +var mapqueue []*Node -type mapqueueval struct { - n *Node - lno src.XPos +func checkMapKeys() { + for _, n := range mapqueue { + k := n.Type.MapType().Key + if !k.Broke() && !IsComparable(k) { + yyerrorl(n.Pos, "invalid map key type %v", k) + } + } + mapqueue = nil } -// tracks the line numbers at which forward types are first used as map keys -var mapqueue []mapqueueval - func copytype(n *Node, t *types.Type) { if t.Etype == TFORW { // This type isn't computed yet; when it is, update n. 
@@ -3591,7 +3506,6 @@ func copytype(n *Node, t *types.Type) { t = n.Type t.Sym = n.Sym - t.SetLocal(n.Local()) if n.Name != nil { t.Vargen = n.Name.Vargen } @@ -3633,7 +3547,6 @@ func copytype(n *Node, t *types.Type) { } func typecheckdeftype(n *Node) { - ntypecheckdeftype++ lno := lineno setlineno(n) n.Type.Sym = n.Sym @@ -3643,39 +3556,18 @@ func typecheckdeftype(n *Node) { if t == nil { n.SetDiag(true) n.Type = nil - goto ret - } - - if n.Type == nil { + } else if n.Type == nil { n.SetDiag(true) - goto ret + } else { + // copy new type and clear fields + // that don't come along. + copytype(n, t) } - // copy new type and clear fields - // that don't come along. - copytype(n, t) - -ret: lineno = lno - - // if there are no type definitions going on, it's safe to - // try to validate the map key types for the interfaces - // we just read. - if ntypecheckdeftype == 1 { - for _, e := range mapqueue { - lineno = e.lno - if !IsComparable(e.n.Type) { - yyerror("invalid map key type %v", e.n.Type) - } - } - mapqueue = nil - lineno = lno - } - - ntypecheckdeftype-- } -func typecheckdef(n *Node) *Node { +func typecheckdef(n *Node) { lno := lineno setlineno(n) @@ -3691,11 +3583,11 @@ func typecheckdef(n *Node) *Node { yyerror("undefined: %v", n.Sym) } - return n + return } if n.Walkdef() == 1 { - return n + return } typecheckdefstack = append(typecheckdefstack, n) @@ -3861,7 +3753,6 @@ ret: lineno = lno n.SetWalkdef(1) - return n } func checkmake(t *types.Type, arg string, n *Node) bool { @@ -3886,6 +3777,10 @@ func checkmake(t *types.Type, arg string, n *Node) bool { } // defaultlit is necessary for non-constants too: n might be 1.1< 0) { + // var bv bmap + bv := temp(bmap(t)) - r = nod(OAS, var_, nil) // zero temp - r = typecheck(r, Etop) - init.Append(r) - r = nod(OADDR, var_, nil) + zero = nod(OAS, bv, nil) + zero = typecheck(zero, Etop) + init.Append(zero) + + // b = &bv + b := nod(OADDR, bv, nil) + + // h.buckets = b + bsym := hmapType.Field(5).Sym // hmap.buckets see 
reflect.go:hmap + na := nod(OAS, nodSym(ODOT, h, bsym), b) + na = typecheck(na, Etop) + init.Append(na) + } } - fn := syslook("makemap") - fn = substArgTypes(fn, hmap(t), mapbucket(t), t.Key(), t.Val()) - n = mkcall1(fn, n.Type, init, typename(n.Type), conv(n.Left, types.Types[TINT64]), a, r) + if Isconst(hint, CTINT) && hint.Val().U.(*Mpint).CmpInt64(BUCKETSIZE) <= 0 { + // Handling make(map[any]any) and + // make(map[any]any, hint) where hint <= BUCKETSIZE + // special allows for faster map initialization and + // improves binary size by using calls with fewer arguments. + // For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false + // and no buckets will be allocated by makemap. Therefore, + // no buckets need to be allocated in this code path. + if n.Esc == EscNone { + // Only need to initialize h.hash0 since + // hmap h has been allocated on the stack already. + // h.hash0 = fastrand() + rand := mkcall("fastrand", types.Types[TUINT32], init) + hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap + a := nod(OAS, nodSym(ODOT, h, hashsym), rand) + a = typecheck(a, Etop) + a = walkexpr(a, init) + init.Append(a) + n = nod(OCONVNOP, h, nil) + n.Type = t + n = typecheck(n, Erv) + } else { + // Call runtime.makehmap to allocate an + // hmap on the heap and initialize hmap's hash0 field. + fn := syslook("makemap_small") + fn = substArgTypes(fn, t.Key(), t.Val()) + n = mkcall1(fn, n.Type, init) + } + } else { + if n.Esc != EscNone { + h = nodnil() + } + // Map initialization with a variable or large hint is + // more complicated. We therefore generate a call to + // runtime.makemap to intialize hmap and allocate the + // map buckets. + + // When hint fits into int, use makemap instead of + // makemap64, which is faster and shorter on 32 bit platforms. + fnname := "makemap64" + argtype := types.Types[TINT64] + + // Type checking guarantees that TIDEAL hint is positive and fits in an int. 
+ // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function. + // The case of hint overflow when converting TUINT or TUINTPTR to TINT + // will be handled by the negative range checks in makemap during runtime. + if hint.Type.IsKind(TIDEAL) || maxintval[hint.Type.Etype].Cmp(maxintval[TUINT]) <= 0 { + fnname = "makemap" + argtype = types.Types[TINT] + } + + fn := syslook(fnname) + fn = substArgTypes(fn, hmapType, t.Key(), t.Val()) + n = mkcall1(fn, n.Type, init, typename(n.Type), conv(hint, argtype), h) + } case OMAKESLICE: l := n.Left @@ -1487,7 +1569,7 @@ opswitch: fnname := "makeslice64" argtype := types.Types[TINT64] - // typechecking guarantees that TIDEAL len/cap are positive and fit in an int. + // Type checking guarantees that TIDEAL len/cap are positive and fit in an int. // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT // will be handled by the negative range checks in makeslice during runtime. if (len.Type.IsKind(TIDEAL) || maxintval[len.Type.Etype].Cmp(maxintval[TUINT]) <= 0) && @@ -1667,7 +1749,7 @@ opswitch: // Emit string symbol now to avoid emitting // any concurrently during the backend. if s, ok := n.Val().U.(string); ok { - _ = stringsym(s) + _ = stringsym(n.Pos, s) } } @@ -1699,7 +1781,7 @@ func reduceSlice(n *Node) *Node { return n } -func ascompatee1(op Op, l *Node, r *Node, init *Nodes) *Node { +func ascompatee1(l *Node, r *Node, init *Nodes) *Node { // convas will turn map assigns into function calls, // making it impossible for reorder3 to work. n := nod(OAS, l, r) @@ -1713,7 +1795,7 @@ func ascompatee1(op Op, l *Node, r *Node, init *Nodes) *Node { func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node { // check assign expression list to - // a expression list. called in + // an expression list. 
called in // expr-list = expr-list // ensure order of evaluation for function calls @@ -1734,7 +1816,7 @@ func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node { if op == ORETURN && samesafeexpr(nl[i], nr[i]) { continue } - nn = append(nn, ascompatee1(op, nl[i], nr[i], init)) + nn = append(nn, ascompatee1(nl[i], nr[i], init)) } // cannot happen: caller checked that lists had same length @@ -1755,9 +1837,6 @@ func fncall(l *Node, rt *types.Type) bool { if l.HasCall() || l.Op == OINDEXMAP { return true } - if needwritebarrier(l) { - return true - } if eqtype(l.Type, rt) { return false } @@ -1765,9 +1844,9 @@ func fncall(l *Node, rt *types.Type) bool { } // check assign type list to -// a expression list. called in +// an expression list. called in // expr-list = func() -func ascompatet(op Op, nl Nodes, nr *types.Type) []*Node { +func ascompatet(nl Nodes, nr *types.Type) []*Node { if nl.Len() != nr.NumFields() { Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields()) } @@ -1949,8 +2028,6 @@ func mkdotargslice(typ *types.Type, args []*Node, init *Nodes, ddd *Node) *Node // return expr-list // func(expr-list) func ascompatte(call *Node, isddd bool, lhs *types.Type, rhs []*Node, fp int, init *Nodes) []*Node { - var nn []*Node - // f(g()) where g has multiple return values if len(rhs) == 1 && rhs[0].Type.IsFuncArgStruct() { // optimization - can do block copy @@ -1958,8 +2035,9 @@ func ascompatte(call *Node, isddd bool, lhs *types.Type, rhs []*Node, fp int, in nl := nodarg(lhs, fp) nr := nod(OCONVNOP, rhs[0], nil) nr.Type = nl.Type - nn = []*Node{convas(nod(OAS, nl, nr), init)} - goto ret + n := convas(nod(OAS, nl, nr), init) + n.SetTypecheck(1) + return []*Node{n} } // conversions involved. @@ -1983,6 +2061,7 @@ func ascompatte(call *Node, isddd bool, lhs *types.Type, rhs []*Node, fp int, in // If there's a ... parameter (which is only valid as the final // parameter) and this is not a ... 
call expression, // then assign the remaining arguments as a slice. + var nn []*Node for i, nl := range lhs.FieldSlice() { var nr *Node if nl.Isddd() && !isddd { @@ -1993,41 +2072,53 @@ func ascompatte(call *Node, isddd bool, lhs *types.Type, rhs []*Node, fp int, in a := nod(OAS, nodarg(nl, fp), nr) a = convas(a, init) + a.SetTypecheck(1) nn = append(nn, a) } -ret: - for _, n := range nn { - n.SetTypecheck(1) - } return nn } // generate code for print func walkprint(nn *Node, init *Nodes) *Node { - var r *Node - var n *Node - var on *Node - var t *types.Type - var et types.EType - - op := nn.Op - all := nn.List - var calls []*Node - notfirst := false - // Hoist all the argument evaluation up before the lock. - walkexprlistcheap(all.Slice(), init) + walkexprlistcheap(nn.List.Slice(), init) - calls = append(calls, mkcall("printlock", nil, init)) - for i1, n1 := range all.Slice() { - if notfirst { - calls = append(calls, mkcall("printsp", nil, init)) + // For println, add " " between elements and "\n" at the end. + if nn.Op == OPRINTN { + s := nn.List.Slice() + t := make([]*Node, 0, len(s)*2) + for i, n := range s { + if i != 0 { + t = append(t, nodstr(" ")) + } + t = append(t, n) } + t = append(t, nodstr("\n")) + nn.List.Set(t) + } - notfirst = op == OPRINTN + // Collapse runs of constant strings. 
+ s := nn.List.Slice() + t := make([]*Node, 0, len(s)) + for i := 0; i < len(s); { + var strs []string + for i < len(s) && Isconst(s[i], CTSTR) { + strs = append(strs, s[i].Val().U.(string)) + i++ + } + if len(strs) > 0 { + t = append(t, nodstr(strings.Join(strs, ""))) + } + if i < len(s) { + t = append(t, s[i]) + i++ + } + } + nn.List.Set(t) - n = n1 + calls := []*Node{mkcall("printlock", nil, init)} + for i, n := range nn.List.Slice() { if n.Op == OLITERAL { switch n.Val().Ctype() { case CTRUNE: @@ -2045,71 +2136,76 @@ func walkprint(nn *Node, init *Nodes) *Node { n = defaultlit(n, types.Types[TINT64]) } n = defaultlit(n, nil) - all.SetIndex(i1, n) + nn.List.SetIndex(i, n) if n.Type == nil || n.Type.Etype == TFORW { continue } - t = n.Type - et = n.Type.Etype - if n.Type.IsInterface() { + var on *Node + switch n.Type.Etype { + case TINTER: if n.Type.IsEmptyInterface() { on = syslook("printeface") } else { on = syslook("printiface") } on = substArgTypes(on, n.Type) // any-1 - } else if n.Type.IsPtr() || et == TCHAN || et == TMAP || et == TFUNC || et == TUNSAFEPTR { + case TPTR32, TPTR64, TCHAN, TMAP, TFUNC, TUNSAFEPTR: on = syslook("printpointer") on = substArgTypes(on, n.Type) // any-1 - } else if n.Type.IsSlice() { + case TSLICE: on = syslook("printslice") on = substArgTypes(on, n.Type) // any-1 - } else if isInt[et] { - if et == TUINT64 { - if isRuntimePkg(t.Sym.Pkg) && t.Sym.Name == "hex" { - on = syslook("printhex") - } else { - on = syslook("printuint") - } + case TUINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINTPTR: + if isRuntimePkg(n.Type.Sym.Pkg) && n.Type.Sym.Name == "hex" { + on = syslook("printhex") } else { - on = syslook("printint") + on = syslook("printuint") } - } else if isFloat[et] { + case TINT, TINT8, TINT16, TINT32, TINT64: + on = syslook("printint") + case TFLOAT32, TFLOAT64: on = syslook("printfloat") - } else if isComplex[et] { + case TCOMPLEX64, TCOMPLEX128: on = syslook("printcomplex") - } else if et == TBOOL { + case TBOOL: on = 
syslook("printbool") - } else if et == TSTRING { - on = syslook("printstring") - } else { + case TSTRING: + cs := "" + if Isconst(n, CTSTR) { + cs = n.Val().U.(string) + } + switch cs { + case " ": + on = syslook("printsp") + case "\n": + on = syslook("printnl") + default: + on = syslook("printstring") + } + default: badtype(OPRINT, n.Type, nil) continue } - t = on.Type.Params().Field(0).Type - - if !eqtype(t, n.Type) { - n = nod(OCONV, n, nil) - n.Type = t + r := nod(OCALL, on, nil) + if params := on.Type.Params().FieldSlice(); len(params) > 0 { + t := params[0].Type + if !eqtype(t, n.Type) { + n = nod(OCONV, n, nil) + n.Type = t + } + r.List.Append(n) } - - r = nod(OCALL, on, nil) - r.List.Append(n) calls = append(calls, r) } - if op == OPRINTN { - calls = append(calls, mkcall("printnl", nil, nil)) - } - calls = append(calls, mkcall("printunlock", nil, init)) typecheckslice(calls, Etop) walkexprlist(calls, init) - r = nod(OEMPTY, nil, nil) + r := nod(OEMPTY, nil, nil) r = typecheck(r, Etop) r = walkexpr(r, init) r.Ninit.Set(calls) @@ -2129,36 +2225,13 @@ func callnew(t *types.Type) *Node { } func iscallret(n *Node) bool { + if n == nil { + return false + } n = outervalue(n) return n.Op == OINDREGSP } -func isstack(n *Node) bool { - n = outervalue(n) - - // If n is *autotmp and autotmp = &foo, replace n with foo. - // We introduce such temps when initializing struct literals. - if n.Op == OIND && n.Left.Op == ONAME && n.Left.IsAutoTmp() { - defn := n.Left.Name.Defn - if defn != nil && defn.Op == OAS && defn.Right.Op == OADDR { - n = defn.Right.Left - } - } - - switch n.Op { - case OINDREGSP: - return true - - case ONAME: - switch n.Class() { - case PAUTO, PPARAM, PPARAMOUT: - return true - } - } - - return false -} - // isReflectHeaderDataField reports whether l is an expression p.Data // where p has type reflect.SliceHeader or reflect.StringHeader. 
func isReflectHeaderDataField(l *Node) bool { @@ -2182,74 +2255,27 @@ func isReflectHeaderDataField(l *Node) bool { return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader" } -// Do we need a write barrier for assigning to l? -func needwritebarrier(l *Node) bool { - if !use_writebarrier { - return false - } - - if l == nil || isblank(l) { - return false - } - - // No write barrier for write to stack. - if isstack(l) { - return false - } - - // Package unsafe's documentation says storing pointers into - // reflect.SliceHeader and reflect.StringHeader's Data fields - // is valid, even though they have type uintptr (#19168). - if isReflectHeaderDataField(l) { - return true - } - - // No write barrier for write of non-pointers. - dowidth(l.Type) - if !types.Haspointers(l.Type) { - return false - } - - // No write barrier if this is a pointer to a go:notinheap - // type, since the write barrier's inheap(ptr) check will fail. - if l.Type.IsPtr() && l.Type.Elem().NotInHeap() { - return false - } - - // TODO: We can eliminate write barriers if we know *both* the - // current and new content of the slot must already be shaded. - // We know a pointer is shaded if it's nil, or points to - // static data, a global (variable or function), or the stack. - // The nil optimization could be particularly useful for - // writes to just-allocated objects. Unfortunately, knowing - // the "current" value of the slot requires flow analysis. - - // Otherwise, be conservative and use write barrier. 
- return true -} - func convas(n *Node, init *Nodes) *Node { if n.Op != OAS { Fatalf("convas: not OAS %v", n.Op) } + defer updateHasCall(n) n.SetTypecheck(1) - var lt *types.Type - var rt *types.Type if n.Left == nil || n.Right == nil { - goto out + return n } - lt = n.Left.Type - rt = n.Right.Type + lt := n.Left.Type + rt := n.Right.Type if lt == nil || rt == nil { - goto out + return n } if isblank(n.Left) { n.Right = defaultlit(n.Right, nil) - goto out + return n } if !eqtype(lt, rt) { @@ -2258,8 +2284,6 @@ func convas(n *Node, init *Nodes) *Node { } dowidth(n.Right.Type) -out: - updateHasCall(n) return n } @@ -2270,18 +2294,18 @@ out: // then it is done first. otherwise must // make temp variables func reorder1(all []*Node) []*Node { - c := 0 // function calls - t := 0 // total parameters - - for _, n := range all { - t++ - updateHasCall(n) - if n.HasCall() { - c++ - } + if len(all) == 1 { + return all } - if c == 0 || t == 1 { + funcCalls := 0 + for _, n := range all { + updateHasCall(n) + if n.HasCall() { + funcCalls++ + } + } + if funcCalls == 0 { return all } @@ -2289,7 +2313,6 @@ func reorder1(all []*Node) []*Node { var f *Node // last fncall assigned to stack var r []*Node // non fncalls and tempnames assigned to stack d := 0 - var a *Node for _, n := range all { if !n.HasCall() { r = append(r, n) @@ -2297,13 +2320,13 @@ func reorder1(all []*Node) []*Node { } d++ - if d == c { + if d == funcCalls { f = n continue } // make assignment of fncall to tempname - a = temp(n.Right.Type) + a := temp(n.Right.Type) a = nod(OAS, a, n.Right) g = append(g, a) @@ -2328,8 +2351,6 @@ func reorder1(all []*Node) []*Node { // // function calls have been removed. func reorder3(all []*Node) []*Node { - var l *Node - // If a needed expression may be affected by an // earlier assignment, make an early copy of that // expression and use the copy instead. 
@@ -2337,7 +2358,7 @@ func reorder3(all []*Node) []*Node { var mapinit Nodes for i, n := range all { - l = n.Left + l := n.Left // Save subexpressions needed on left side. // Drill through non-dereferences. @@ -2404,23 +2425,21 @@ func reorder3save(n *Node, all []*Node, i int, early *[]*Node) *Node { // outer value means containing struct or array. func outervalue(n *Node) *Node { for { - if n.Op == OXDOT { + switch n.Op { + case OXDOT: Fatalf("OXDOT in walk") - } - if n.Op == ODOT || n.Op == OPAREN || n.Op == OCONVNOP { + case ODOT, OPAREN, OCONVNOP: n = n.Left continue + case OINDEX: + if n.Left.Type != nil && n.Left.Type.IsArray() { + n = n.Left + continue + } } - if n.Op == OINDEX && n.Left.Type != nil && n.Left.Type.IsArray() { - n = n.Left - continue - } - - break + return n } - - return n } // Is it possible that the computation of n might be @@ -2441,30 +2460,28 @@ func aliased(n *Node, all []*Node, i int) bool { // Also record whether there are any writes to main memory. // Also record whether there are any writes to variables // whose addresses have been taken. - memwrite := 0 - - varwrite := 0 - var a *Node + memwrite := false + varwrite := false for _, an := range all[:i] { - a = outervalue(an.Left) + a := outervalue(an.Left) for a.Op == ODOT { a = a.Left } if a.Op != ONAME { - memwrite = 1 + memwrite = true continue } switch n.Class() { default: - varwrite = 1 + varwrite = true continue case PAUTO, PPARAM, PPARAMOUT: if n.Addrtaken() { - varwrite = 1 + varwrite = true continue } @@ -2480,7 +2497,7 @@ func aliased(n *Node, all []*Node, i int) bool { // that are being written. // If no computed addresses are affected by the writes, no aliasing. 
- if memwrite == 0 && varwrite == 0 { + if !memwrite && !varwrite { return false } @@ -2703,11 +2720,14 @@ func vmkcall(fn *Node, t *types.Type, init *Nodes, va []*Node) *Node { Fatalf("mkcall %v %v", fn, fn.Type) } - n := fn.Type.Params().NumFields() + n := fn.Type.NumParams() + if n != len(va) { + Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va)) + } r := nod(OCALL, fn, nil) - r.List.Set(va[:n]) - if fn.Type.Results().NumFields() > 0 { + r.List.Set(va) + if fn.Type.NumResults() > 0 { r = typecheck(r, Erv|Efnstruct) } else { r = typecheck(r, Etop) @@ -2785,21 +2805,23 @@ func mapfndel(name string, t *types.Type) *Node { const ( mapslow = iota mapfast32 + mapfast32ptr mapfast64 + mapfast64ptr mapfaststr nmapfast ) type mapnames [nmapfast]string -func mkmapnames(base string) mapnames { - return mapnames{base, base + "_fast32", base + "_fast64", base + "_faststr"} +func mkmapnames(base string, ptr string) mapnames { + return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"} } -var mapaccess1 mapnames = mkmapnames("mapaccess1") -var mapaccess2 mapnames = mkmapnames("mapaccess2") -var mapassign mapnames = mkmapnames("mapassign") -var mapdelete mapnames = mkmapnames("mapdelete") +var mapaccess1 = mkmapnames("mapaccess1", "") +var mapaccess2 = mkmapnames("mapaccess2", "") +var mapassign = mkmapnames("mapassign", "ptr") +var mapdelete = mkmapnames("mapdelete", "") func mapfast(t *types.Type) int { // Check ../../runtime/hashmap.go:maxValueSize before changing. 
@@ -2808,9 +2830,22 @@ func mapfast(t *types.Type) int { } switch algtype(t.Key()) { case AMEM32: - return mapfast32 + if !t.Key().HasHeapPointer() { + return mapfast32 + } + if Widthptr == 4 { + return mapfast32ptr + } + Fatalf("small pointer %v", t.Key()) case AMEM64: - return mapfast64 + if !t.Key().HasHeapPointer() { + return mapfast64 + } + if Widthptr == 8 { + return mapfast64ptr + } + // Two-word object, at least one of which is a pointer. + // Use the slow path. case ASTRING: return mapfaststr } @@ -2942,12 +2977,13 @@ func appendslice(n *Node, init *Nodes) *Node { nt.Etype = 1 l = append(l, nod(OAS, s, nt)) - if types.Haspointers(l1.Type.Elem()) { + if l1.Type.Elem().HasHeapPointer() { // copy(s[len(l1):], l2) nptr1 := nod(OSLICE, s, nil) nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil) nptr1.Etype = 1 nptr2 := l2 + Curfn.Func.setWBPos(n.Pos) fn := syslook("typedslicecopy") fn = substArgTypes(fn, l1.Type, l2.Type) var ln Nodes @@ -2961,16 +2997,20 @@ func appendslice(n *Node, init *Nodes) *Node { nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil) nptr1.Etype = 1 nptr2 := l2 - var fn *Node - if l2.Type.IsString() { - fn = syslook("slicestringcopy") - } else { - fn = syslook("slicecopy") - } - fn = substArgTypes(fn, l1.Type, l2.Type) + var ln Nodes ln.Set(l) - nt := mkcall1(fn, types.Types[TINT], &ln, nptr1, nptr2, nodintconst(s.Type.Elem().Width)) + var nt *Node + if l2.Type.IsString() { + fn := syslook("slicestringcopy") + fn = substArgTypes(fn, l1.Type, l2.Type) + nt = mkcall1(fn, types.Types[TINT], &ln, nptr1, nptr2) + } else { + fn := syslook("slicecopy") + fn = substArgTypes(fn, l1.Type, l2.Type) + nt = mkcall1(fn, types.Types[TINT], &ln, nptr1, nptr2, nodintconst(s.Type.Elem().Width)) + } + l = append(ln.Slice(), nt) } else { // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T)) @@ -3105,18 +3145,20 @@ func walkappend(n *Node, init *Nodes, dst *Node) *Node { // Also works if b is a string. 
// func copyany(n *Node, init *Nodes, runtimecall bool) *Node { - if types.Haspointers(n.Left.Type.Elem()) { + if n.Left.Type.Elem().HasHeapPointer() { + Curfn.Func.setWBPos(n.Pos) fn := writebarrierfn("typedslicecopy", n.Left.Type, n.Right.Type) return mkcall1(fn, n.Type, init, typename(n.Left.Type.Elem()), n.Left, n.Right) } if runtimecall { - var fn *Node if n.Right.Type.IsString() { - fn = syslook("slicestringcopy") - } else { - fn = syslook("slicecopy") + fn := syslook("slicestringcopy") + fn = substArgTypes(fn, n.Left.Type, n.Right.Type) + return mkcall1(fn, n.Type, init, n.Left, n.Right) } + + fn := syslook("slicecopy") fn = substArgTypes(fn, n.Left.Type, n.Right.Type) return mkcall1(fn, n.Type, init, n.Left, n.Right, nodintconst(n.Left.Type.Elem().Width)) } @@ -3159,7 +3201,7 @@ func copyany(n *Node, init *Nodes, runtimecall bool) *Node { return nlen } -func eqfor(t *types.Type, needsize *int) *Node { +func eqfor(t *types.Type) (n *Node, needsize bool) { // Should only arrive here with large memory or // a struct/array containing a non-memory field/element. // Small memory is handled inline, and single non-memory @@ -3168,8 +3210,7 @@ func eqfor(t *types.Type, needsize *int) *Node { case AMEM: n := syslook("memequal") n = substArgTypes(n, t, t) - *needsize = 1 - return n + return n, true case ASPECIAL: sym := typesymprefix(".eq", t) n := newname(sym) @@ -3180,11 +3221,10 @@ func eqfor(t *types.Type, needsize *int) *Node { ntype.Rlist.Append(anonfield(types.Types[TBOOL])) ntype = typecheck(ntype, Etype) n.Type = ntype.Type - *needsize = 0 - return n + return n, false } Fatalf("eqfor %v", t) - return nil + return nil, false } // The result of walkcompare MUST be assigned back to n, e.g. 
@@ -3304,11 +3344,11 @@ func walkcompare(n *Node, init *Nodes) *Node { ar = typecheck(ar, Etop) init.Append(ar) - var needsize int - call := nod(OCALL, eqfor(t, &needsize), nil) + fn, needsize := eqfor(t) + call := nod(OCALL, fn, nil) call.List.Append(pl) call.List.Append(pr) - if needsize != 0 { + if needsize { call.List.Append(nodintconst(t.Width)) } res := call @@ -3623,16 +3663,16 @@ func usemethod(n *Node) { // // TODO(crawshaw): improve precision of match by working out // how to check the method name. - if n := t.Params().NumFields(); n != 1 { + if n := t.NumParams(); n != 1 { return } - if n := t.Results().NumFields(); n != 1 && n != 2 { + if n := t.NumResults(); n != 1 && n != 2 { return } p0 := t.Params().Field(0) res0 := t.Results().Field(0) var res1 *types.Field - if t.Results().NumFields() == 2 { + if t.NumResults() == 2 { res1 = t.Results().Field(1) } @@ -3827,14 +3867,10 @@ func walkprintfunc(n *Node, init *Nodes) *Node { } t := nod(OTFUNC, nil, nil) - num := 0 var printargs []*Node - var a *Node - var buf string - for _, n1 := range n.List.Slice() { - buf = fmt.Sprintf("a%d", num) - num++ - a = namedfield(buf, n1.Type) + for i, n1 := range n.List.Slice() { + buf := fmt.Sprintf("a%d", i) + a := namedfield(buf, n1.Type) t.List.Append(a) printargs = append(printargs, a.Left) } @@ -3846,14 +3882,14 @@ func walkprintfunc(n *Node, init *Nodes) *Node { sym := lookupN("print·%d", walkprintfunc_prgen) fn := dclfunc(sym, t) - a = nod(n.Op, nil, nil) + a := nod(n.Op, nil, nil) a.List.Set(printargs) a = typecheck(a, Etop) a = walkstmt(a) fn.Nbody.Set1(a) - funcbody(fn) + funcbody() fn = typecheck(fn, Etop) typecheckslice(fn.Nbody.Slice(), Etop) diff --git a/src/cmd/compile/internal/mips/galign.go b/src/cmd/compile/internal/mips/galign.go index 77ec78aabfa..f207a17bbf0 100644 --- a/src/cmd/compile/internal/mips/galign.go +++ b/src/cmd/compile/internal/mips/galign.go @@ -18,6 +18,7 @@ func Init(arch *gc.Arch) { } arch.REGSP = mips.REGSP arch.MAXWIDTH = (1 << 31) 
- 1 + arch.SoftFloat = (objabi.GOMIPS == "softfloat") arch.ZeroRange = zerorange arch.ZeroAuto = zeroAuto arch.Ginsnop = ginsnop diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go index e65515a85b0..ee68afdfa3e 100644 --- a/src/cmd/compile/internal/mips/ssa.go +++ b/src/cmd/compile/internal/mips/ssa.go @@ -283,10 +283,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { switch v.Aux.(type) { default: v.Fatalf("aux is of unknown type %T", v.Aux) - case *ssa.ExternSymbol: + case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case *ssa.ArgSymbol, *ssa.AutoSymbol: + case *gc.Node: wantreg = "SP" gc.AddAux(&p.From, v) case nil: @@ -755,6 +755,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpMIPSLoweredGetClosurePtr: // Closure pointer is R22 (mips.REGCTXT). gc.CheckLoweredGetClosurePtr(v) + case ssa.OpMIPSLoweredGetCallerSP: + // caller's SP is FixedFrameSize below the address of the first arg + p := s.Prog(mips.AMOVW) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -gc.Ctxt.FixedFrameSize() + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() case ssa.OpClobber: // TODO: implement for clobberdead experiment. Nop is ok for now. 
default: diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go index db163f3e9d0..291a162d1fa 100644 --- a/src/cmd/compile/internal/mips64/ssa.go +++ b/src/cmd/compile/internal/mips64/ssa.go @@ -257,10 +257,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { switch v.Aux.(type) { default: v.Fatalf("aux is of unknown type %T", v.Aux) - case *ssa.ExternSymbol: + case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case *ssa.ArgSymbol, *ssa.AutoSymbol: + case *gc.Node: wantreg = "SP" gc.AddAux(&p.From, v) case nil: @@ -483,6 +483,211 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { gc.Patch(p6, p2) case ssa.OpMIPS64CALLstatic, ssa.OpMIPS64CALLclosure, ssa.OpMIPS64CALLinter: s.Call(v) + case ssa.OpMIPS64LoweredAtomicLoad32, ssa.OpMIPS64LoweredAtomicLoad64: + as := mips.AMOVV + if v.Op == ssa.OpMIPS64LoweredAtomicLoad32 { + as = mips.AMOVW + } + s.Prog(mips.ASYNC) + p := s.Prog(as) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + s.Prog(mips.ASYNC) + case ssa.OpMIPS64LoweredAtomicStore32, ssa.OpMIPS64LoweredAtomicStore64: + as := mips.AMOVV + if v.Op == ssa.OpMIPS64LoweredAtomicStore32 { + as = mips.AMOVW + } + s.Prog(mips.ASYNC) + p := s.Prog(as) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + s.Prog(mips.ASYNC) + case ssa.OpMIPS64LoweredAtomicStorezero32, ssa.OpMIPS64LoweredAtomicStorezero64: + as := mips.AMOVV + if v.Op == ssa.OpMIPS64LoweredAtomicStorezero32 { + as = mips.AMOVW + } + s.Prog(mips.ASYNC) + p := s.Prog(as) + p.From.Type = obj.TYPE_REG + p.From.Reg = mips.REGZERO + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + s.Prog(mips.ASYNC) + case ssa.OpMIPS64LoweredAtomicExchange32, ssa.OpMIPS64LoweredAtomicExchange64: + // SYNC + // MOVV Rarg1, Rtmp + // LL (Rarg0), Rout + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // SYNC + ll := mips.ALLV + sc := mips.ASCV + if v.Op == 
ssa.OpMIPS64LoweredAtomicExchange32 { + ll = mips.ALL + sc = mips.ASC + } + s.Prog(mips.ASYNC) + p := s.Prog(mips.AMOVV) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = mips.REGTMP + p1 := s.Prog(ll) + p1.From.Type = obj.TYPE_MEM + p1.From.Reg = v.Args[0].Reg() + p1.To.Type = obj.TYPE_REG + p1.To.Reg = v.Reg0() + p2 := s.Prog(sc) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = mips.REGTMP + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = v.Args[0].Reg() + p3 := s.Prog(mips.ABEQ) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = mips.REGTMP + p3.To.Type = obj.TYPE_BRANCH + gc.Patch(p3, p) + s.Prog(mips.ASYNC) + case ssa.OpMIPS64LoweredAtomicAdd32, ssa.OpMIPS64LoweredAtomicAdd64: + // SYNC + // LL (Rarg0), Rout + // ADDV Rarg1, Rout, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // SYNC + // ADDV Rarg1, Rout + ll := mips.ALLV + sc := mips.ASCV + if v.Op == ssa.OpMIPS64LoweredAtomicAdd32 { + ll = mips.ALL + sc = mips.ASC + } + s.Prog(mips.ASYNC) + p := s.Prog(ll) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + p1 := s.Prog(mips.AADDVU) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = v.Args[1].Reg() + p1.Reg = v.Reg0() + p1.To.Type = obj.TYPE_REG + p1.To.Reg = mips.REGTMP + p2 := s.Prog(sc) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = mips.REGTMP + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = v.Args[0].Reg() + p3 := s.Prog(mips.ABEQ) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = mips.REGTMP + p3.To.Type = obj.TYPE_BRANCH + gc.Patch(p3, p) + s.Prog(mips.ASYNC) + p4 := s.Prog(mips.AADDVU) + p4.From.Type = obj.TYPE_REG + p4.From.Reg = v.Args[1].Reg() + p4.Reg = v.Reg0() + p4.To.Type = obj.TYPE_REG + p4.To.Reg = v.Reg0() + case ssa.OpMIPS64LoweredAtomicAddconst32, ssa.OpMIPS64LoweredAtomicAddconst64: + // SYNC + // LL (Rarg0), Rout + // ADDV $auxint, Rout, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // SYNC + // ADDV $auxint, Rout + ll := mips.ALLV + sc := mips.ASCV + 
if v.Op == ssa.OpMIPS64LoweredAtomicAddconst32 { + ll = mips.ALL + sc = mips.ASC + } + s.Prog(mips.ASYNC) + p := s.Prog(ll) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + p1 := s.Prog(mips.AADDVU) + p1.From.Type = obj.TYPE_CONST + p1.From.Offset = v.AuxInt + p1.Reg = v.Reg0() + p1.To.Type = obj.TYPE_REG + p1.To.Reg = mips.REGTMP + p2 := s.Prog(sc) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = mips.REGTMP + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = v.Args[0].Reg() + p3 := s.Prog(mips.ABEQ) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = mips.REGTMP + p3.To.Type = obj.TYPE_BRANCH + gc.Patch(p3, p) + s.Prog(mips.ASYNC) + p4 := s.Prog(mips.AADDVU) + p4.From.Type = obj.TYPE_CONST + p4.From.Offset = v.AuxInt + p4.Reg = v.Reg0() + p4.To.Type = obj.TYPE_REG + p4.To.Reg = v.Reg0() + case ssa.OpMIPS64LoweredAtomicCas32, ssa.OpMIPS64LoweredAtomicCas64: + // MOVV $0, Rout + // SYNC + // LL (Rarg0), Rtmp + // BNE Rtmp, Rarg1, 4(PC) + // MOVV Rarg2, Rout + // SC Rout, (Rarg0) + // BEQ Rout, -4(PC) + // SYNC + ll := mips.ALLV + sc := mips.ASCV + if v.Op == ssa.OpMIPS64LoweredAtomicCas32 { + ll = mips.ALL + sc = mips.ASC + } + p := s.Prog(mips.AMOVV) + p.From.Type = obj.TYPE_REG + p.From.Reg = mips.REGZERO + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + s.Prog(mips.ASYNC) + p1 := s.Prog(ll) + p1.From.Type = obj.TYPE_MEM + p1.From.Reg = v.Args[0].Reg() + p1.To.Type = obj.TYPE_REG + p1.To.Reg = mips.REGTMP + p2 := s.Prog(mips.ABNE) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = v.Args[1].Reg() + p2.Reg = mips.REGTMP + p2.To.Type = obj.TYPE_BRANCH + p3 := s.Prog(mips.AMOVV) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = v.Args[2].Reg() + p3.To.Type = obj.TYPE_REG + p3.To.Reg = v.Reg0() + p4 := s.Prog(sc) + p4.From.Type = obj.TYPE_REG + p4.From.Reg = v.Reg0() + p4.To.Type = obj.TYPE_MEM + p4.To.Reg = v.Args[0].Reg() + p5 := s.Prog(mips.ABEQ) + p5.From.Type = obj.TYPE_REG + p5.From.Reg = v.Reg0() + p5.To.Type = 
obj.TYPE_BRANCH + gc.Patch(p5, p1) + p6 := s.Prog(mips.ASYNC) + gc.Patch(p2, p6) case ssa.OpMIPS64LoweredNilCheck: // Issue a load which will fault if arg is nil. p := s.Prog(mips.AMOVB) @@ -520,6 +725,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpMIPS64LoweredGetClosurePtr: // Closure pointer is R22 (mips.REGCTXT). gc.CheckLoweredGetClosurePtr(v) + case ssa.OpMIPS64LoweredGetCallerSP: + // caller's SP is FixedFrameSize below the address of the first arg + p := s.Prog(mips.AMOVV) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -gc.Ctxt.FixedFrameSize() + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() case ssa.OpClobber: // TODO: implement for clobberdead experiment. Nop is ok for now. default: diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go index 5fe140fdcf1..008d9658f4c 100644 --- a/src/cmd/compile/internal/ppc64/ssa.go +++ b/src/cmd/compile/internal/ppc64/ssa.go @@ -124,7 +124,7 @@ func ssaGenISEL(s *gc.SSAGenState, v *ssa.Value, cr int64, r1, r2 int16) { p.To.Type = obj.TYPE_REG p.To.Reg = r p.Reg = r1 - p.From3 = &obj.Addr{Type: obj.TYPE_REG, Reg: r2} + p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: r2}) p.From.Type = obj.TYPE_CONST p.From.Offset = cr } @@ -152,29 +152,6 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Reg = y } - case ssa.OpPPC64Xf2i64: - { - x := v.Args[0].Reg() - y := v.Reg() - - p := s.Prog(ppc64.AMFVSRD) - p.From.Type = obj.TYPE_REG - p.From.Reg = x - p.To.Type = obj.TYPE_REG - p.To.Reg = y - } - case ssa.OpPPC64Xi2f64: - { - x := v.Args[0].Reg() - y := v.Reg() - - p := s.Prog(ppc64.AMTVSRD) - p.From.Type = obj.TYPE_REG - p.From.Reg = x - p.To.Type = obj.TYPE_REG - p.To.Reg = y - } - case ssa.OpPPC64LoweredAtomicAnd8, ssa.OpPPC64LoweredAtomicOr8: // SYNC @@ -445,6 +422,15 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { // Closure pointer is R11 (already) gc.CheckLoweredGetClosurePtr(v) + case ssa.OpPPC64LoweredGetCallerSP: + // 
caller's SP is FixedFrameSize below the address of the first arg + p := s.Prog(ppc64.AMOVD) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -gc.Ctxt.FixedFrameSize() + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpPPC64LoweredRound32F, ssa.OpPPC64LoweredRound64F: // input is already rounded @@ -542,8 +528,9 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS, ssa.OpPPC64MULLD, ssa.OpPPC64MULLW, ssa.OpPPC64DIVDU, ssa.OpPPC64DIVWU, ssa.OpPPC64SRAD, ssa.OpPPC64SRAW, ssa.OpPPC64SRD, ssa.OpPPC64SRW, ssa.OpPPC64SLD, ssa.OpPPC64SLW, + ssa.OpPPC64ROTL, ssa.OpPPC64ROTLW, ssa.OpPPC64MULHD, ssa.OpPPC64MULHW, ssa.OpPPC64MULHDU, ssa.OpPPC64MULHWU, - ssa.OpPPC64FMUL, ssa.OpPPC64FMULS, ssa.OpPPC64FDIV, ssa.OpPPC64FDIVS, + ssa.OpPPC64FMUL, ssa.OpPPC64FMULS, ssa.OpPPC64FDIV, ssa.OpPPC64FDIVS, ssa.OpPPC64FCPSGN, ssa.OpPPC64AND, ssa.OpPPC64OR, ssa.OpPPC64ANDN, ssa.OpPPC64ORN, ssa.OpPPC64NOR, ssa.OpPPC64XOR, ssa.OpPPC64EQV: r := v.Reg() r1 := v.Args[0].Reg() @@ -573,9 +560,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Type = obj.TYPE_REG p.From.Reg = r1 p.Reg = r3 - p.From3 = new(obj.Addr) - p.From3.Type = obj.TYPE_REG - p.From3.Reg = r2 + p.SetFrom3(obj.Addr{Type: obj.TYPE_REG, Reg: r2}) p.To.Type = obj.TYPE_REG p.To.Reg = r @@ -596,7 +581,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = ppc64.REGTMP // Ignored; this is for the carry effect. 
- case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FRSP, ssa.OpPPC64CNTLZD, ssa.OpPPC64CNTLZW, ssa.OpPPC64POPCNTD, ssa.OpPPC64POPCNTW, ssa.OpPPC64POPCNTB: + case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FFLOOR, ssa.OpPPC64FTRUNC, ssa.OpPPC64FCEIL, ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FCFIDS, ssa.OpPPC64FRSP, ssa.OpPPC64CNTLZD, ssa.OpPPC64CNTLZW, ssa.OpPPC64POPCNTD, ssa.OpPPC64POPCNTW, ssa.OpPPC64POPCNTB, ssa.OpPPC64MFVSRD, ssa.OpPPC64MTVSRD, ssa.OpPPC64FABS, ssa.OpPPC64FNABS: r := v.Reg() p := s.Prog(v.Op.Asm()) p.To.Type = obj.TYPE_REG @@ -608,15 +593,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst, ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst: p := s.Prog(v.Op.Asm()) p.Reg = v.Args[0].Reg() - - if v.Aux != nil { - p.From.Type = obj.TYPE_CONST - p.From.Offset = gc.AuxOffset(v) - } else { - p.From.Type = obj.TYPE_CONST - p.From.Offset = v.AuxInt - } - + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() @@ -652,10 +630,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { switch v.Aux.(type) { default: v.Fatalf("aux is of unknown type %T", v.Aux) - case *ssa.ExternSymbol: + case *obj.LSym: wantreg = "SB" gc.AddAux(&p.From, v) - case *ssa.ArgSymbol, *ssa.AutoSymbol: + case *gc.Node: wantreg = "SP" gc.AddAux(&p.From, v) case nil: @@ -1088,17 +1066,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = ppc64.REG_CTR - if gc.Ctxt.Flag_shared && p.From.Reg != ppc64.REG_R12 { - // Make sure function pointer is in R12 as well when - // compiling Go into PIC. - // TODO(mwhudson): it would obviously be better to - // change the register allocation to put the value in - // R12 already, but I don't know how to do that. 
- // TODO: We have the technology now to implement TODO above. - q := s.Prog(ppc64.AMOVD) - q.From = p.From - q.To.Type = obj.TYPE_REG - q.To.Reg = ppc64.REG_R12 + if v.Args[0].Reg() != ppc64.REG_R12 { + v.Fatalf("Function address for %v should be in R12 %d but is in %d", v.LongString(), ppc64.REG_R12, p.From.Reg) } pp := s.Call(v) diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go index 8722345a097..c9d1f52c8f8 100644 --- a/src/cmd/compile/internal/s390x/ssa.go +++ b/src/cmd/compile/internal/s390x/ssa.go @@ -207,6 +207,16 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.Reg = r2 p.To.Type = obj.TYPE_REG p.To.Reg = r + case ssa.OpS390XFIDBR: + switch v.AuxInt { + case 0, 1, 3, 4, 5, 6, 7: + opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt) + default: + v.Fatalf("invalid FIDBR mask: %v", v.AuxInt) + } + case ssa.OpS390XCPSDR: + p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg()) + p.Reg = v.Args[0].Reg() case ssa.OpS390XDIVD, ssa.OpS390XDIVW, ssa.OpS390XDIVDU, ssa.OpS390XDIVWU, ssa.OpS390XMODD, ssa.OpS390XMODW, @@ -327,12 +337,18 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg()) case ssa.OpS390XFCMPS, ssa.OpS390XFCMP: opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg()) - case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst, ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst: + case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() p.To.Type = obj.TYPE_CONST p.To.Offset = v.AuxInt + case ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_CONST + p.To.Offset = int64(uint32(v.AuxInt)) case ssa.OpS390XMOVDconst: x := v.Reg() p := s.Prog(v.Op.Asm()) @@ -374,7 +390,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { gc.AddAux(&p.From, v) p.To.Type = obj.TYPE_REG p.To.Reg = 
v.Reg() - case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx, ssa.OpS390XMOVDloadidx, + case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx, + ssa.OpS390XMOVBloadidx, ssa.OpS390XMOVHloadidx, ssa.OpS390XMOVWloadidx, ssa.OpS390XMOVDloadidx, ssa.OpS390XMOVHBRloadidx, ssa.OpS390XMOVWBRloadidx, ssa.OpS390XMOVDBRloadidx, ssa.OpS390XFMOVSloadidx, ssa.OpS390XFMOVDloadidx: r := v.Args[0].Reg() @@ -425,10 +442,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { gc.AddAux2(&p.To, v, sc.Off()) case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg, ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg, + ssa.OpS390XLDGR, ssa.OpS390XLGDR, ssa.OpS390XCEFBRA, ssa.OpS390XCDFBRA, ssa.OpS390XCEGBRA, ssa.OpS390XCDGBRA, ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA, ssa.OpS390XLDEBR, ssa.OpS390XLEDBR, - ssa.OpS390XFNEG, ssa.OpS390XFNEGS: + ssa.OpS390XFNEG, ssa.OpS390XFNEGS, + ssa.OpS390XLPDFR, ssa.OpS390XLNDFR: opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg()) case ssa.OpS390XCLEAR: p := s.Prog(v.Op.Asm()) @@ -482,6 +501,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.From.Reg = s390x.REGG p.To.Type = obj.TYPE_REG p.To.Reg = r + case ssa.OpS390XLoweredGetCallerSP: + // caller's SP is FixedFrameSize below the address of the first arg + p := s.Prog(s390x.AMOVD) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -gc.Ctxt.FixedFrameSize() + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() case ssa.OpS390XCALLstatic, ssa.OpS390XCALLclosure, ssa.OpS390XCALLinter: s.Call(v) case ssa.OpS390XFLOGR, ssa.OpS390XNEG, ssa.OpS390XNEGW, @@ -534,10 +561,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p := s.Prog(s390x.AMVC) p.From.Type = obj.TYPE_CONST p.From.Offset = vo.Val() - p.From3 = new(obj.Addr) - p.From3.Type = obj.TYPE_MEM - p.From3.Reg = v.Args[1].Reg() - p.From3.Offset = vo.Off() + p.SetFrom3(obj.Addr{ + Type: obj.TYPE_MEM, + Reg: 
v.Args[1].Reg(), + Offset: vo.Off(), + }) p.To.Type = obj.TYPE_MEM p.To.Reg = v.Args[0].Reg() p.To.Offset = vo.Off() @@ -570,9 +598,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { mvc := s.Prog(s390x.AMVC) mvc.From.Type = obj.TYPE_CONST mvc.From.Offset = 256 - mvc.From3 = new(obj.Addr) - mvc.From3.Type = obj.TYPE_MEM - mvc.From3.Reg = v.Args[1].Reg() + mvc.SetFrom3(obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()}) mvc.To.Type = obj.TYPE_MEM mvc.To.Reg = v.Args[0].Reg() @@ -599,9 +625,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { mvc := s.Prog(s390x.AMVC) mvc.From.Type = obj.TYPE_CONST mvc.From.Offset = v.AuxInt - mvc.From3 = new(obj.Addr) - mvc.From3.Type = obj.TYPE_MEM - mvc.From3.Reg = v.Args[1].Reg() + mvc.SetFrom3(obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()}) mvc.To.Type = obj.TYPE_MEM mvc.To.Reg = v.Args[0].Reg() } diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go index 10f07cefba7..273e5f15d77 100644 --- a/src/cmd/compile/internal/ssa/block.go +++ b/src/cmd/compile/internal/ssa/block.go @@ -198,6 +198,29 @@ func (b *Block) swapSuccessors() { b.Likely *= -1 } +// LackingPos indicates whether b is a block whose position should be inherited +// from its successors. This is true if all the values within it have unreliable positions +// and if it is "plain", meaning that there is no control flow that is also very likely +// to correspond to a well-understood source position. +func (b *Block) LackingPos() bool { + // Non-plain predecessors are If or Defer, which both (1) have two successors, + // which might have different line numbers and (2) correspond to statements + // in the source code that have positions, so this case ought not occur anyway. 
+ if b.Kind != BlockPlain { + return false + } + if b.Pos != src.NoXPos { + return false + } + for _, v := range b.Values { + if v.LackingPos() { + continue + } + return false + } + return true +} + func (b *Block) Logf(msg string, args ...interface{}) { b.Func.Logf(msg, args...) } func (b *Block) Log() bool { return b.Func.Log() } func (b *Block) Fatalf(msg string, args ...interface{}) { b.Func.Fatalf(msg, args...) } diff --git a/src/cmd/compile/internal/ssa/cache.go b/src/cmd/compile/internal/ssa/cache.go index f1018da497f..8434084bde2 100644 --- a/src/cmd/compile/internal/ssa/cache.go +++ b/src/cmd/compile/internal/ssa/cache.go @@ -14,6 +14,11 @@ type Cache struct { blocks [200]Block locs [2000]Location + // Storage for DWARF variable locations. Lazily allocated + // since location lists are off by default. + varLocs []VarLoc + curVarLoc int + // Reusable stackAllocState. // See stackalloc.go's {new,put}StackAllocState. stackAllocState *stackAllocState @@ -38,4 +43,21 @@ func (c *Cache) Reset() { for i := range xl { xl[i] = nil } + xvl := c.varLocs[:c.curVarLoc] + for i := range xvl { + xvl[i] = VarLoc{} + } + c.curVarLoc = 0 +} + +func (c *Cache) NewVarLoc() *VarLoc { + if c.varLocs == nil { + c.varLocs = make([]VarLoc, 4000) + } + if c.curVarLoc == len(c.varLocs) { + return &VarLoc{} + } + vl := &c.varLocs[c.curVarLoc] + c.curVarLoc++ + return vl } diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go index 17f683fb10c..1c2fcd7948b 100644 --- a/src/cmd/compile/internal/ssa/check.go +++ b/src/cmd/compile/internal/ssa/check.go @@ -4,6 +4,10 @@ package ssa +import ( + "math" +) + // checkFunc checks invariants of f. 
func checkFunc(f *Func) { blockMark := make([]bool, f.NumBlocks()) @@ -199,6 +203,10 @@ func checkFunc(f *Func) { } } + if f.RegAlloc != nil && f.Config.SoftFloat && v.Type.IsFloat() { + f.Fatalf("unexpected floating-point type %v", v.LongString()) + } + // TODO: check for cycles in values // TODO: check type } @@ -276,15 +284,17 @@ func checkFunc(f *Func) { // Check loop construction if f.RegAlloc == nil && f.pass != nil { // non-nil pass allows better-targeted debug printing ln := f.loopnest() - po := f.postorder() // use po to avoid unreachable blocks. - for _, b := range po { - for _, s := range b.Succs { - bb := s.Block() - if ln.b2l[b.ID] == nil && ln.b2l[bb.ID] != nil && bb != ln.b2l[bb.ID].header { - f.Fatalf("block %s not in loop branches to non-header block %s in loop", b.String(), bb.String()) - } - if ln.b2l[b.ID] != nil && ln.b2l[bb.ID] != nil && bb != ln.b2l[bb.ID].header && !ln.b2l[b.ID].isWithinOrEq(ln.b2l[bb.ID]) { - f.Fatalf("block %s in loop branches to non-header block %s in non-containing loop", b.String(), bb.String()) + if !ln.hasIrreducible { + po := f.postorder() // use po to avoid unreachable blocks. 
+ for _, b := range po { + for _, s := range b.Succs { + bb := s.Block() + if ln.b2l[b.ID] == nil && ln.b2l[bb.ID] != nil && bb != ln.b2l[bb.ID].header { + f.Fatalf("block %s not in loop branches to non-header block %s in loop", b.String(), bb.String()) + } + if ln.b2l[b.ID] != nil && ln.b2l[bb.ID] != nil && bb != ln.b2l[bb.ID].header && !ln.b2l[b.ID].isWithinOrEq(ln.b2l[bb.ID]) { + f.Fatalf("block %s in loop branches to non-header block %s in non-containing loop", b.String(), bb.String()) + } } } } @@ -450,11 +460,16 @@ func memCheck(f *Func) { for _, b := range f.Blocks { seenNonPhi := false for _, v := range b.Values { - if v.Op == OpPhi { + switch v.Op { + case OpPhi: if seenNonPhi { f.Fatalf("phi after non-phi @ %s: %s", b, v) } - } else { + case OpRegKill: + if f.RegAlloc == nil { + f.Fatalf("RegKill seen before register allocation @ %s: %s", b, v) + } + default: seenNonPhi = true } } @@ -471,7 +486,8 @@ func domCheck(f *Func, sdom SparseTree, x, y *Block) bool { return sdom.isAncestorEq(x, y) } -// isExactFloat32 reoprts whether v has an AuxInt that can be exactly represented as a float32. +// isExactFloat32 reports whether v has an AuxInt that can be exactly represented as a float32. 
func isExactFloat32(v *Value) bool { - return v.AuxFloat() == float64(float32(v.AuxFloat())) + x := v.AuxFloat() + return math.Float64bits(x) == math.Float64bits(float64(float32(x))) } diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go index 315416babd0..8a2e358c118 100644 --- a/src/cmd/compile/internal/ssa/compile.go +++ b/src/cmd/compile/internal/ssa/compile.go @@ -344,7 +344,7 @@ var passes = [...]pass{ {name: "prove", fn: prove}, {name: "loopbce", fn: loopbce}, {name: "decompose builtin", fn: decomposeBuiltIn, required: true}, - {name: "dec", fn: dec, required: true}, + {name: "softfloat", fn: softfloat, required: true}, {name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules {name: "generic deadcode", fn: deadcode}, {name: "check bce", fn: checkbce}, @@ -356,6 +356,7 @@ var passes = [...]pass{ {name: "tighten", fn: tighten}, // move values closer to their uses {name: "lower", fn: lower, required: true}, {name: "lowered cse", fn: cse}, + {name: "elim unread autos", fn: elimUnreadAutos}, {name: "lowered deadcode", fn: deadcode, required: true}, {name: "checkLower", fn: checkLower, required: true}, {name: "late phielim", fn: phielim}, @@ -413,6 +414,8 @@ var passOrder = [...]constraint{ {"generic deadcode", "check bce"}, // don't run optimization pass until we've decomposed builtin objects {"decompose builtin", "late opt"}, + // decompose builtin is the last pass that may introduce new float ops, so run softfloat after it + {"decompose builtin", "softfloat"}, // don't layout blocks until critical edges have been removed {"critical", "layout"}, // regalloc requires the removal of all critical edges diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index 6587c40ebcb..ae6caeea9e2 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -33,8 +33,10 @@ type Config struct { ctxt *obj.Link // Generic 
arch information optimize bool // Do optimization noDuffDevice bool // Don't use Duff's device + useSSE bool // Use SSE for non-float operations nacl bool // GOOS=nacl use387 bool // GO386=387 + SoftFloat bool // NeedsFpScratch bool // No direct move between GP and FP register sets BigEndian bool // sparsePhiCutoff uint64 // Sparse phi location algorithm used above this #blocks*#variables score @@ -58,6 +60,7 @@ type Types struct { Int *types.Type Float32 *types.Type Float64 *types.Type + UInt *types.Type Uintptr *types.Type String *types.Type BytePtr *types.Type // TODO: use unsafe.Pointer instead? @@ -86,7 +89,7 @@ type Logger interface { // Forwards the Debug flags from gc Debug_checknil() bool - Debug_wb() bool + Debug_eagerwb() bool } type Frontend interface { @@ -129,15 +132,28 @@ type Frontend interface { // UseWriteBarrier returns whether write barrier is enabled UseWriteBarrier() bool + + // SetWBPos indicates that a write barrier has been inserted + // in this function at position pos. + SetWBPos(pos src.XPos) } -// interface used to hold *gc.Node. We'd use *gc.Node directly but -// that would lead to an import cycle. +// interface used to hold a *gc.Node (a stack variable). +// We'd use *gc.Node directly but that would lead to an import cycle. type GCNode interface { Typ() *types.Type String() string + StorageClass() StorageClass } +type StorageClass uint8 + +const ( + ClassAuto StorageClass = iota // local stack variable + ClassParam // argument + ClassParamOut // return value +) + // NewConfig returns a new configuration object for the given architecture. 
func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config { c := &Config{arch: arch, Types: types} @@ -264,11 +280,13 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config c.ctxt = ctxt c.optimize = optimize c.nacl = objabi.GOOS == "nacl" + c.useSSE = true - // Don't use Duff's device on Plan 9 AMD64, because floating - // point operations are not allowed in note handler. + // Don't use Duff's device nor SSE on Plan 9 AMD64, because + // floating point operations are not allowed in note handler. if objabi.GOOS == "plan9" && arch == "amd64" { c.noDuffDevice = true + c.useSSE = false } if c.nacl { diff --git a/src/cmd/compile/internal/ssa/copyelim.go b/src/cmd/compile/internal/ssa/copyelim.go index 5cbb4486b29..44ccfe1bfec 100644 --- a/src/cmd/compile/internal/ssa/copyelim.go +++ b/src/cmd/compile/internal/ssa/copyelim.go @@ -45,7 +45,7 @@ func copySource(v *Value) *Value { // but we take some extra care to make sure we // don't get stuck in an infinite loop. // Infinite copy loops may happen in unreachable code. - // (TODO: or can they? Needs a test.) + // (TODO: or can they? Needs a test.) slow := w var advance bool for w.Op == OpCopy { diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go index 08a2c6df14a..bbeb990f174 100644 --- a/src/cmd/compile/internal/ssa/deadstore.go +++ b/src/cmd/compile/internal/ssa/deadstore.go @@ -131,3 +131,58 @@ func dse(f *Func) { } } } + +// elimUnreadAutos deletes stores (and associated bookkeeping ops VarDef and VarKill) +// to autos that are never read from. +func elimUnreadAutos(f *Func) { + // Loop over all ops that affect autos taking note of which + // autos we need and also stores that we might be able to + // eliminate. 
+ seen := make(map[GCNode]bool) + var stores []*Value + for _, b := range f.Blocks { + for _, v := range b.Values { + n, ok := v.Aux.(GCNode) + if !ok { + continue + } + if n.StorageClass() != ClassAuto { + continue + } + + effect := v.Op.SymEffect() + switch effect { + case SymNone, SymWrite: + // If we haven't seen the auto yet + // then this might be a store we can + // eliminate. + if !seen[n] { + stores = append(stores, v) + } + default: + // Assume the auto is needed (loaded, + // has its address taken, etc.). + // Note we have to check the uses + // because dead loads haven't been + // eliminated yet. + if v.Uses > 0 { + seen[n] = true + } + } + } + } + + // Eliminate stores to unread autos. + for _, store := range stores { + n, _ := store.Aux.(GCNode) + if seen[n] { + continue + } + + // replace store with OpCopy + store.SetArgs1(store.MemoryArg()) + store.Aux = nil + store.AuxInt = 0 + store.Op = OpCopy + } +} diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go new file mode 100644 index 00000000000..dcef9f2447e --- /dev/null +++ b/src/cmd/compile/internal/ssa/debug.go @@ -0,0 +1,574 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package ssa + +import ( + "cmd/internal/obj" + "fmt" + "strings" +) + +type SlotID int32 + +// A FuncDebug contains all the debug information for the variables in a +// function. Variables are identified by their LocalSlot, which may be the +// result of decomposing a larger variable. +type FuncDebug struct { + // Slots is all the slots used in the debug info, indexed by their SlotID. + // Use this when getting a LocalSlot from a SlotID. + Slots []*LocalSlot + // VarSlots is the slots that represent part of user variables. + // Use this when iterating over all the slots to generate debug information. 
+ VarSlots []*LocalSlot + // The blocks in the function, in program text order. + Blocks []*BlockDebug + // The registers of the current architecture, indexed by Register.num. + Registers []Register +} + +func (f *FuncDebug) BlockString(b *BlockDebug) string { + var vars []string + + for slot := range f.VarSlots { + if len(b.Variables[slot].Locations) == 0 { + continue + } + vars = append(vars, fmt.Sprintf("%v = %v", f.Slots[slot], b.Variables[slot])) + } + return fmt.Sprintf("{%v}", strings.Join(vars, ", ")) +} + +func (f *FuncDebug) SlotLocsString(id SlotID) string { + var locs []string + for _, block := range f.Blocks { + for _, loc := range block.Variables[id].Locations { + locs = append(locs, block.LocString(loc)) + } + } + return strings.Join(locs, " ") +} + +type BlockDebug struct { + // The SSA block that this tracks. For debug logging only. + Block *Block + // The variables in this block, indexed by their SlotID. + Variables []VarLocList +} + +func (b *BlockDebug) LocString(loc *VarLoc) string { + registers := b.Block.Func.Config.registers + + var storage []string + if loc.OnStack { + storage = append(storage, "stack") + } + + for reg := 0; reg < 64; reg++ { + if loc.Registers&(1<= 0; i-- { + b := po[i] + + // Build the starting state for the block from the final + // state of its predecessors. + locs := state.mergePredecessors(b, blockLocs) + if state.loggingEnabled { + state.logf("Processing %v, initial locs %v, regs %v\n", b, state.BlockString(locs), state.registerContents) + } + // Update locs/registers with the effects of each Value. + // The location list generated here needs to be slightly adjusted for use by gdb. + // These adjustments are applied in genssa. + for _, v := range b.Values { + slots := valueNames[v.ID] + + // Loads and stores inherit the names of their sources. 
+ var source *Value + switch v.Op { + case OpStoreReg: + source = v.Args[0] + case OpLoadReg: + switch a := v.Args[0]; a.Op { + case OpArg: + source = a + case OpStoreReg: + source = a.Args[0] + default: + state.unexpected(v, "load with unexpected source op %v", a) + } + } + if source != nil { + slots = append(slots, valueNames[source.ID]...) + // As of writing, the compiler never uses a load/store as a + // source of another load/store, so there's no reason this should + // ever be consulted. Update just in case, and so that when + // valueNames is cached, we can reuse the memory. + valueNames[v.ID] = slots + } + + if len(slots) == 0 { + continue + } + + reg, _ := f.getHome(v.ID).(*Register) + state.processValue(locs, v, slots, reg) + } + + // The block is done; mark any live locations as ending with the block. + for _, locList := range locs.Variables { + last := locList.last() + if last == nil || last.End != nil { + continue + } + last.End = BlockEnd + } + if state.loggingEnabled { + f.Logf("Block done: locs %v, regs %v\n", state.BlockString(locs), state.registerContents) + } + blockLocs[b.ID] = locs + } + + info := &FuncDebug{ + Slots: state.slots, + VarSlots: state.varSlots, + Registers: f.Config.registers, + } + // Consumers want the information in textual order, not by block ID. + for _, b := range f.Blocks { + info.Blocks = append(info.Blocks, blockLocs[b.ID]) + } + + if state.loggingEnabled { + f.Logf("Final result:\n") + for slot := range info.VarSlots { + f.Logf("\t%v => %v\n", info.Slots[slot], info.SlotLocsString(SlotID(slot))) + } + } + return info +} + +// isSynthetic reports whether if slot represents a compiler-inserted variable, +// e.g. an autotmp or an anonymous return value that needed a stack slot. +func isSynthetic(slot *LocalSlot) bool { + c := slot.String()[0] + return c == '.' || c == '~' +} + +// mergePredecessors takes the end state of each of b's predecessors and +// intersects them to form the starting state for b. 
+// The registers slice (the second return value) will be reused for each call to mergePredecessors. +func (state *debugState) mergePredecessors(b *Block, blockLocs []*BlockDebug) *BlockDebug { + live := make([]VarLocList, len(state.slots)) + + // Filter out back branches. + var preds []*Block + for _, pred := range b.Preds { + if blockLocs[pred.b.ID] != nil { + preds = append(preds, pred.b) + } + } + + if len(preds) > 0 { + p := preds[0] + for slot, locList := range blockLocs[p.ID].Variables { + last := locList.last() + if last == nil || last.End != BlockEnd { + continue + } + loc := state.cache.NewVarLoc() + loc.Start = BlockStart + loc.OnStack = last.OnStack + loc.StackLocation = last.StackLocation + loc.Registers = last.Registers + live[slot].append(loc) + } + } + if state.loggingEnabled && len(b.Preds) > 1 { + state.logf("Starting merge with state from %v: %v\n", b.Preds[0].b, state.BlockString(blockLocs[b.Preds[0].b.ID])) + } + for i := 1; i < len(preds); i++ { + p := preds[i] + if state.loggingEnabled { + state.logf("Merging in state from %v: %v &= %v\n", p, live, state.BlockString(blockLocs[p.ID])) + } + + for slot, liveVar := range live { + liveLoc := liveVar.last() + if liveLoc == nil { + continue + } + + predLoc := blockLocs[p.ID].Variables[SlotID(slot)].last() + // Clear out slots missing/dead in p. + if predLoc == nil || predLoc.End != BlockEnd { + live[slot].Locations = nil + continue + } + + // Unify storage locations. + if !liveLoc.OnStack || !predLoc.OnStack || liveLoc.StackLocation != predLoc.StackLocation { + liveLoc.OnStack = false + liveLoc.StackLocation = 0 + } + liveLoc.Registers &= predLoc.Registers + } + } + + // Create final result. 
+ locs := &BlockDebug{Variables: live} + if state.loggingEnabled { + locs.Block = b + } + for reg := range state.registerContents { + state.registerContents[reg] = state.registerContents[reg][:0] + } + for slot, locList := range live { + loc := locList.last() + if loc == nil { + continue + } + for reg := 0; reg < state.numRegisters; reg++ { + if loc.Registers&(1<]") +var numberColonRe = regexp.MustCompile("^ *[0-9]+:") + +var gdb = "gdb" // Might be "ggdb" on Darwin, because gdb no longer part of XCode +var debugger = "gdb" // For naming files, etc. + +// TestNexting go-builds a file, then uses a debugger (default gdb, optionally delve) +// to next through the generated executable, recording each line landed at, and +// then compares those lines with reference file(s). +// Flag -u updates the reference file(s). +// Flag -d changes the debugger to delve (and uses delve-specific reference files) +// Flag -v is ever-so-slightly verbose. +// Flag -n is for dry-run, and prints the shell and first debug commands. +// +// Because this test (combined with existing compiler deficiencies) is flaky, +// for gdb-based testing by default inlining is disabled +// (otherwise output depends on library internals) +// and for both gdb and dlv by default repeated lines in the next stream are ignored +// (because this appears to be timing-dependent in gdb, and the cleanest fix is in code common to gdb and dlv). +// +// Also by default, any source code outside of .../testdata/ is not mentioned +// in the debugging histories. This deals both with inlined library code once +// the compiler is generating clean inline records, and also deals with +// runtime code between return from main and process exit. This is hidden +// so that those files (in the runtime/library) can change without affecting +// this test. +// +// These choices can be reversed with -i (inlining on) and -r (repeats detected) which +// will also cause their own failures against the expected outputs. 
Note that if the compiler +// and debugger were behaving properly, the inlined code and repeated lines would not appear, +// so the expected output is closer to what we hope to see, though it also encodes all our +// current bugs. +// +// The file being tested may contain comments of the form +// //DBG-TAG=(v1,v2,v3) +// where DBG = {gdb,dlv} and TAG={dbg,opt} +// each variable may optionally be followed by a / and one or more of S,A,N,O +// to indicate normalization of Strings, (hex) addresses, and numbers. +// "O" is an explicit indication that we expect it to be optimized out. +// For example: +/* + if len(os.Args) > 1 { //gdb-dbg=(hist/A,cannedInput/A) //dlv-dbg=(hist/A,cannedInput/A) +*/ +// TODO: not implemented for Delve yet, but this is the plan +// +// After a compiler change that causes a difference in the debug behavior, check +// to see if it is sensible or not, and if it is, update the reference files with +// go test debug_test.go -args -u +// (for Delve) +// go test debug_test.go -args -u -d + +func TestNexting(t *testing.T) { + skipReasons := "" // Many possible skip reasons, list all that apply + if testing.Short() { + skipReasons = "not run in short mode; " + } + testenv.MustHaveGoBuild(t) + + if !*useDelve && !*force && !(runtime.GOOS == "linux" && runtime.GOARCH == "amd64") { + // Running gdb on OSX/darwin is very flaky. + // Sometimes it is called ggdb, depending on how it is installed. + // It also probably requires an admin password typed into a dialog box. 
+ // Various architectures tend to differ slightly sometimes, and keeping them + // all in sync is a pain for people who don't have them all at hand, + // so limit testing to amd64 (for now) + skipReasons += "not run unless linux-amd64 or -d or -f; " + } + + if *useDelve { + debugger = "dlv" + _, err := exec.LookPath("dlv") + if err != nil { + skipReasons += "not run because dlv (requested by -d option) not on path; " + } + } else { + _, err := exec.LookPath(gdb) + if err != nil { + if runtime.GOOS != "darwin" { + skipReasons += "not run because gdb not on path; " + } else { + _, err = exec.LookPath("ggdb") + if err != nil { + skipReasons += "not run because gdb (and also ggdb) not on path; " + } else { + gdb = "ggdb" + } + } + } + } + + if skipReasons != "" { + t.Skip(skipReasons[:len(skipReasons)-2]) + } + + t.Run("dbg-"+debugger, func(t *testing.T) { + testNexting(t, "hist", "dbg", "-N -l") + }) + t.Run("dbg-race-"+debugger, func(t *testing.T) { + testNexting(t, "i22600", "dbg-race", "-N -l", "-race") + }) + t.Run("dbg-22558-"+debugger, func(t *testing.T) { + testNexting(t, "i22558", "dbg-22558", "-N -l") + }) + t.Run("opt-"+debugger, func(t *testing.T) { + // If this is test is run with a runtime compiled with -N -l, it is very likely to fail. + // This occurs in the noopt builders (for example). + if gogcflags := os.Getenv("GO_GCFLAGS"); *force || (!strings.Contains(gogcflags, "-N") && !strings.Contains(gogcflags, "-l")) { + if *useDelve || *inlines { + testNexting(t, "hist", "opt", "-dwarflocationlists") + } else { + // For gdb, disable inlining so that a compiler test does not depend on library code. 
+ testNexting(t, "hist", "opt", "-l -dwarflocationlists") + } + } else { + t.Skip("skipping for unoptimized runtime") + } + }) +} + +func testNexting(t *testing.T, base, tag, gcflags string, moreArgs ...string) { + // (1) In testdata, build sample.go into sample + // (2) Run debugger gathering a history + // (3) Read expected history from testdata/sample..nexts + // optionally, write out testdata/sample..nexts + + exe := filepath.Join("testdata", base) + logbase := exe + "." + tag + tmpbase := filepath.Join("testdata", "test-"+base+"."+tag) + + if !*force { + tmpdir, err := ioutil.TempDir("", "debug_test") + if err != nil { + panic(fmt.Sprintf("Problem creating TempDir, error %v\n", err)) + } + exe = filepath.Join(tmpdir, base) + tmpbase = exe + "-" + tag + "-test" + if *verbose { + fmt.Printf("Tempdir is %s\n", tmpdir) + } + defer os.RemoveAll(tmpdir) + } + + runGoArgs := []string{"build", "-o", exe, "-gcflags=all=" + gcflags} + runGoArgs = append(runGoArgs, moreArgs...) + runGoArgs = append(runGoArgs, filepath.Join("testdata", base+".go")) + + runGo(t, "", runGoArgs...) + + var h1 *nextHist + nextlog := logbase + "-" + debugger + ".nexts" + tmplog := tmpbase + "-" + debugger + ".nexts" + if *useDelve { + h1 = dlvTest(tag, exe, 1000) + } else { + h1 = gdbTest(tag, exe, 1000) + } + if *dryrun { + fmt.Printf("# Tag for above is %s\n", tag) + return + } + if *update { + h1.write(nextlog) + } else { + h0 := &nextHist{} + h0.read(nextlog) + if !h0.equals(h1) { + // Be very noisy about exactly what's wrong to simplify debugging. 
+ h1.write(tmplog) + cmd := exec.Command("diff", "-u", nextlog, tmplog) + line := asCommandLine("", cmd) + bytes, err := cmd.CombinedOutput() + if err != nil && len(bytes) == 0 { + t.Fatalf("step/next histories differ, diff command %s failed with error=%v", line, err) + } + t.Fatalf("step/next histories differ, diff=\n%s", string(bytes)) + } + } +} + +type dbgr interface { + start() + stepnext(s string) bool // step or next, possible with parameter, gets line etc. returns true for success, false for unsure response + quit() + hist() *nextHist +} + +// gdbTest runs the debugger test with gdb and returns the history +func gdbTest(tag, executable string, maxNext int, args ...string) *nextHist { + dbg := newGdb(tag, executable, args...) + dbg.start() + if *dryrun { + return nil + } + for i := 0; i < maxNext; i++ { + if !dbg.stepnext("n") { + break + } + } + h := dbg.hist() + return h +} + +// dlvTest runs the debugger test with dlv and returns the history +func dlvTest(tag, executable string, maxNext int, args ...string) *nextHist { + dbg := newDelve(tag, executable, args...) + dbg.start() + if *dryrun { + return nil + } + for i := 0; i < maxNext; i++ { + if !dbg.stepnext("n") { + break + } + } + h := dbg.hist() + return h +} + +func runGo(t *testing.T, dir string, args ...string) string { + var stdout, stderr bytes.Buffer + cmd := exec.Command(testenv.GoToolPath(t), args...) 
+ cmd.Dir = dir + if *dryrun { + fmt.Printf("%s\n", asCommandLine("", cmd)) + return "" + } + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + t.Fatalf("error running cmd (%s): %v\nstdout:\n%sstderr:\n%s\n", asCommandLine("", cmd), err, stdout.String(), stderr.String()) + } + + if s := stderr.String(); s != "" { + t.Fatalf("Stderr = %s\nWant empty", s) + } + + return stdout.String() +} + +// tstring provides two strings, o (stdout) and e (stderr) +type tstring struct { + o string + e string +} + +func (t tstring) String() string { + return t.o + t.e +} + +type pos struct { + line uint16 + file uint8 // Artifact of plans to implement differencing instead of calling out to diff. +} + +type nextHist struct { + f2i map[string]uint8 + fs []string + ps []pos + texts []string + vars [][]string +} + +func (h *nextHist) write(filename string) { + file, err := os.Create(filename) + if err != nil { + panic(fmt.Sprintf("Problem opening %s, error %v\n", filename, err)) + } + defer file.Close() + var lastfile uint8 + for i, x := range h.texts { + p := h.ps[i] + if lastfile != p.file { + fmt.Fprintf(file, " %s\n", h.fs[p.file-1]) + lastfile = p.file + } + fmt.Fprintf(file, "%d:%s\n", p.line, x) + // TODO, normalize between gdb and dlv into a common, comparable format. 
+ for _, y := range h.vars[i] { + y = strings.TrimSpace(y) + fmt.Fprintf(file, "%s\n", y) + } + } + file.Close() +} + +func (h *nextHist) read(filename string) { + h.f2i = make(map[string]uint8) + bytes, err := ioutil.ReadFile(filename) + if err != nil { + panic(fmt.Sprintf("Problem reading %s, error %v\n", filename, err)) + } + var lastfile string + lines := strings.Split(string(bytes), "\n") + for i, l := range lines { + if len(l) > 0 && l[0] != '#' { + if l[0] == ' ' { + // file -- first two characters expected to be " " + lastfile = strings.TrimSpace(l) + } else if numberColonRe.MatchString(l) { + // line number -- : + colonPos := strings.Index(l, ":") + if colonPos == -1 { + panic(fmt.Sprintf("Line %d (%s) in file %s expected to contain ':' but does not.\n", i+1, l, filename)) + } + h.add(lastfile, l[0:colonPos], l[colonPos+1:]) + } else { + h.addVar(l) + } + } + } +} + +func (h *nextHist) add(file, line, text string) bool { + // Only record source code in testdata unless the inlines flag is set + if !*inlines && !strings.Contains(file, "/testdata/") { + return false + } + fi := h.f2i[file] + if fi == 0 { + h.fs = append(h.fs, file) + fi = uint8(len(h.fs)) + h.f2i[file] = fi + } + + line = strings.TrimSpace(line) + var li int + var err error + if line != "" { + li, err = strconv.Atoi(line) + if err != nil { + panic(fmt.Sprintf("Non-numeric line: %s, error %v\n", line, err)) + } + } + l := len(h.ps) + p := pos{line: uint16(li), file: fi} + + if l == 0 || *repeats || h.ps[l-1] != p { + h.ps = append(h.ps, p) + h.texts = append(h.texts, text) + h.vars = append(h.vars, []string{}) + return true + } + return false +} + +func (h *nextHist) addVar(text string) { + l := len(h.texts) + h.vars[l-1] = append(h.vars[l-1], text) +} + +func invertMapSU8(hf2i map[string]uint8) map[uint8]string { + hi2f := make(map[uint8]string) + for hs, i := range hf2i { + hi2f[i] = hs + } + return hi2f +} + +func (h *nextHist) equals(k *nextHist) bool { + if len(h.f2i) != len(k.f2i) { + 
return false + } + if len(h.ps) != len(k.ps) { + return false + } + hi2f := invertMapSU8(h.f2i) + ki2f := invertMapSU8(k.f2i) + + for i, hs := range hi2f { + if hs != ki2f[i] { + return false + } + } + + for i, x := range h.ps { + if k.ps[i] != x { + return false + } + } + + for i, hv := range h.vars { + kv := k.vars[i] + if len(hv) != len(kv) { + return false + } + for j, hvt := range hv { + if hvt != kv[j] { + return false + } + } + } + + return true +} + +// canonFileName strips everything before "src/" from a filename. +// This makes file names portable across different machines, +// home directories, and temporary directories. +func canonFileName(f string) string { + i := strings.Index(f, "/src/") + if i != -1 { + f = f[i+1:] + } + return f +} + +/* Delve */ + +type delveState struct { + cmd *exec.Cmd + tag string + *ioState + atLineRe *regexp.Regexp // "\n =>" + funcFileLinePCre *regexp.Regexp // "^> ([^ ]+) ([^:]+):([0-9]+) .*[(]PC: (0x[a-z0-9]+)" + line string + file string + function string +} + +func newDelve(tag, executable string, args ...string) dbgr { + cmd := exec.Command("dlv", "exec", executable) + cmd.Env = replaceEnv(cmd.Env, "TERM", "dumb") + if len(args) > 0 { + cmd.Args = append(cmd.Args, "--") + cmd.Args = append(cmd.Args, args...) + } + s := &delveState{tag: tag, cmd: cmd} + // HAHA Delve has control characters embedded to change the color of the => and the line number + // that would be '(\\x1b\\[[0-9;]+m)?' 
OR TERM=dumb + s.atLineRe = regexp.MustCompile("\n=>[[:space:]]+[0-9]+:(.*)") + s.funcFileLinePCre = regexp.MustCompile("> ([^ ]+) ([^:]+):([0-9]+) .*[(]PC: (0x[a-z0-9]+)[)]\n") + s.ioState = newIoState(s.cmd) + return s +} + +func (s *delveState) stepnext(ss string) bool { + x := s.ioState.writeReadExpect(ss+"\n", "[(]dlv[)] ") + excerpts := s.atLineRe.FindStringSubmatch(x.o) + locations := s.funcFileLinePCre.FindStringSubmatch(x.o) + excerpt := "" + if len(excerpts) > 1 { + excerpt = excerpts[1] + } + if len(locations) > 0 { + fn := canonFileName(locations[2]) + if *verbose { + if s.file != fn { + fmt.Printf("%s\n", locations[2]) // don't canonocalize verbose logging + } + fmt.Printf(" %s\n", locations[3]) + } + s.line = locations[3] + s.file = fn + s.function = locations[1] + s.ioState.history.add(s.file, s.line, excerpt) + // TODO: here is where variable processing will be added. See gdbState.stepnext as a guide. + // Adding this may require some amount of normalization so that logs are comparable. 
+ return true + } + if *verbose { + fmt.Printf("DID NOT MATCH EXPECTED NEXT OUTPUT\nO='%s'\nE='%s'\n", x.o, x.e) + } + return false +} + +func (s *delveState) start() { + if *dryrun { + fmt.Printf("%s\n", asCommandLine("", s.cmd)) + fmt.Printf("b main.test\n") + fmt.Printf("c\n") + return + } + err := s.cmd.Start() + if err != nil { + line := asCommandLine("", s.cmd) + panic(fmt.Sprintf("There was an error [start] running '%s', %v\n", line, err)) + } + s.ioState.readExpecting(-1, 5000, "Type 'help' for list of commands.") + expect("Breakpoint [0-9]+ set at ", s.ioState.writeReadExpect("b main.test\n", "[(]dlv[)] ")) + s.stepnext("c") +} + +func (s *delveState) quit() { + expect("", s.ioState.writeRead("q\n")) +} + +/* Gdb */ + +type gdbState struct { + cmd *exec.Cmd + tag string + args []string + *ioState + atLineRe *regexp.Regexp + funcFileLinePCre *regexp.Regexp + line string + file string + function string +} + +func newGdb(tag, executable string, args ...string) dbgr { + // Turn off shell, necessary for Darwin apparently + cmd := exec.Command(gdb, "-ex", "set startup-with-shell off", executable) + cmd.Env = replaceEnv(cmd.Env, "TERM", "dumb") + s := &gdbState{tag: tag, cmd: cmd, args: args} + s.atLineRe = regexp.MustCompile("(^|\n)([0-9]+)(.*)") + s.funcFileLinePCre = regexp.MustCompile( + "([^ ]+) [(][^)]*[)][ \\t\\n]+at ([^:]+):([0-9]+)") + // runtime.main () at /Users/drchase/GoogleDrive/work/go/src/runtime/proc.go:201 + // function file line + // Thread 2 hit Breakpoint 1, main.main () at /Users/drchase/GoogleDrive/work/debug/hist.go:18 + s.ioState = newIoState(s.cmd) + return s +} + +func (s *gdbState) start() { + run := "run" + for _, a := range s.args { + run += " " + a // Can't quote args for gdb, it will pass them through including the quotes + } + if *dryrun { + fmt.Printf("%s\n", asCommandLine("", s.cmd)) + fmt.Printf("tbreak main.test\n") + fmt.Printf("%s\n", run) + return + } + err := s.cmd.Start() + if err != nil { + line := asCommandLine("", 
s.cmd) + panic(fmt.Sprintf("There was an error [start] running '%s', %v\n", line, err)) + } + s.ioState.readExpecting(-1, -1, "[(]gdb[)] ") + x := s.ioState.writeReadExpect("b main.test\n", "[(]gdb[)] ") + expect("Breakpoint [0-9]+ at", x) + s.stepnext(run) +} + +func (s *gdbState) stepnext(ss string) bool { + x := s.ioState.writeReadExpect(ss+"\n", "[(]gdb[)] ") + excerpts := s.atLineRe.FindStringSubmatch(x.o) + locations := s.funcFileLinePCre.FindStringSubmatch(x.o) + excerpt := "" + addedLine := false + if len(excerpts) == 0 && len(locations) == 0 { + if *verbose { + fmt.Printf("DID NOT MATCH %s", x.o) + } + return false + } + if len(excerpts) > 0 { + excerpt = excerpts[3] + } + if len(locations) > 0 { + fn := canonFileName(locations[2]) + if *verbose { + if s.file != fn { + fmt.Printf("%s\n", locations[2]) + } + fmt.Printf(" %s\n", locations[3]) + } + s.line = locations[3] + s.file = fn + s.function = locations[1] + addedLine = s.ioState.history.add(s.file, s.line, excerpt) + } + if len(excerpts) > 0 { + if *verbose { + fmt.Printf(" %s\n", excerpts[2]) + } + s.line = excerpts[2] + addedLine = s.ioState.history.add(s.file, s.line, excerpt) + } + + if !addedLine { + // True if this was a repeat line + return true + } + // Look for //gdb-=(v1,v2,v3) and print v1, v2, v3 + vars := varsToPrint(excerpt, "//gdb-"+s.tag+"=(") + for _, v := range vars { + slashIndex := strings.Index(v, "/") + substitutions := "" + if slashIndex != -1 { + substitutions = v[slashIndex:] + v = v[:slashIndex] + } + response := s.ioState.writeReadExpect("p "+v+"\n", "[(]gdb[)] ").String() + // expect something like "$1 = ..." + dollar := strings.Index(response, "$") + cr := strings.Index(response, "\n") + if dollar == -1 { + if cr == -1 { + response = strings.TrimSpace(response) // discards trailing newline + response = strings.Replace(response, "\n", "
    ", -1) + s.ioState.history.addVar("$ Malformed response " + response) + continue + } + response = strings.TrimSpace(response[:cr]) + s.ioState.history.addVar("$ " + response) + continue + } + if cr == -1 { + cr = len(response) + } + // Convert the leading $ into $ to limit scope of diffs + // when a new print-this-variable comment is added. + response = strings.TrimSpace(response[dollar:cr]) + response = leadingDollarNumberRe.ReplaceAllString(response, v) + + if strings.Contains(substitutions, "A") { + response = hexRe.ReplaceAllString(response, "") + } + if strings.Contains(substitutions, "N") { + response = numRe.ReplaceAllString(response, "") + } + if strings.Contains(substitutions, "S") { + response = stringRe.ReplaceAllString(response, "") + } + if strings.Contains(substitutions, "O") { + response = optOutGdbRe.ReplaceAllString(response, "") + } + s.ioState.history.addVar(response) + } + return true +} + +// varsToPrint takes a source code line, and extracts the comma-separated variable names +// found between lookfor and the next ")". +// For example, if line includes "... //gdb-foo=(v1,v2,v3)" and +// lookfor="//gdb-foo=(", then varsToPrint returns ["v1", "v2", "v3"] +func varsToPrint(line, lookfor string) []string { + var vars []string + if strings.Contains(line, lookfor) { + x := line[strings.Index(line, lookfor)+len(lookfor):] + end := strings.Index(x, ")") + if end == -1 { + panic(fmt.Sprintf("Saw variable list begin %s in %s but no closing ')'", lookfor, line)) + } + vars = strings.Split(x[:end], ",") + for i, y := range vars { + vars[i] = strings.TrimSpace(y) + } + } + return vars +} + +func (s *gdbState) quit() { + response := s.ioState.writeRead("q\n") + if strings.Contains(response.o, "Quit anyway? 
(y or n)") { + s.ioState.writeRead("Y\n") + } +} + +type ioState struct { + stdout io.ReadCloser + stderr io.ReadCloser + stdin io.WriteCloser + outChan chan string + errChan chan string + last tstring // Output of previous step + history *nextHist +} + +func newIoState(cmd *exec.Cmd) *ioState { + var err error + s := &ioState{} + s.history = &nextHist{} + s.history.f2i = make(map[string]uint8) + s.stdout, err = cmd.StdoutPipe() + line := asCommandLine("", cmd) + if err != nil { + panic(fmt.Sprintf("There was an error [stdoutpipe] running '%s', %v\n", line, err)) + } + s.stderr, err = cmd.StderrPipe() + if err != nil { + panic(fmt.Sprintf("There was an error [stdouterr] running '%s', %v\n", line, err)) + } + s.stdin, err = cmd.StdinPipe() + if err != nil { + panic(fmt.Sprintf("There was an error [stdinpipe] running '%s', %v\n", line, err)) + } + + s.outChan = make(chan string, 1) + s.errChan = make(chan string, 1) + go func() { + buffer := make([]byte, 4096) + for { + n, err := s.stdout.Read(buffer) + if n > 0 { + s.outChan <- string(buffer[0:n]) + } + if err == io.EOF || n == 0 { + break + } + if err != nil { + fmt.Printf("Saw an error forwarding stdout") + break + } + } + close(s.outChan) + s.stdout.Close() + }() + + go func() { + buffer := make([]byte, 4096) + for { + n, err := s.stderr.Read(buffer) + if n > 0 { + s.errChan <- string(buffer[0:n]) + } + if err == io.EOF || n == 0 { + break + } + if err != nil { + fmt.Printf("Saw an error forwarding stderr") + break + } + } + close(s.errChan) + s.stderr.Close() + }() + return s +} + +func (s *ioState) hist() *nextHist { + return s.history +} + +// writeRead writes ss, then reads stdout and stderr, waiting 500ms to +// be sure all the output has appeared. 
+func (s *ioState) writeRead(ss string) tstring { + if *verbose { + fmt.Printf("=> %s", ss) + } + _, err := io.WriteString(s.stdin, ss) + if err != nil { + panic(fmt.Sprintf("There was an error writing '%s', %v\n", ss, err)) + } + return s.readExpecting(-1, 500, "") +} + +// writeReadExpect writes ss, then reads stdout and stderr until something +// that matches expectRE appears. expectRE should not be "" +func (s *ioState) writeReadExpect(ss, expectRE string) tstring { + if *verbose { + fmt.Printf("=> %s", ss) + } + if expectRE == "" { + panic("expectRE should not be empty; use .* instead") + } + _, err := io.WriteString(s.stdin, ss) + if err != nil { + panic(fmt.Sprintf("There was an error writing '%s', %v\n", ss, err)) + } + return s.readExpecting(-1, -1, expectRE) +} + +func (s *ioState) readExpecting(millis, interlineTimeout int, expectedRE string) tstring { + timeout := time.Millisecond * time.Duration(millis) + interline := time.Millisecond * time.Duration(interlineTimeout) + s.last = tstring{} + var re *regexp.Regexp + if expectedRE != "" { + re = regexp.MustCompile(expectedRE) + } +loop: + for { + var timer <-chan time.Time + if timeout > 0 { + timer = time.After(timeout) + } + select { + case x, ok := <-s.outChan: + if !ok { + s.outChan = nil + } + s.last.o += x + case x, ok := <-s.errChan: + if !ok { + s.errChan = nil + } + s.last.e += x + case <-timer: + break loop + } + if re != nil { + if re.MatchString(s.last.o) { + break + } + if re.MatchString(s.last.e) { + break + } + } + timeout = interline + } + if *verbose { + fmt.Printf("<= %s%s", s.last.o, s.last.e) + } + return s.last +} + +// replaceEnv returns a new environment derived from env +// by removing any existing definition of ev and adding ev=evv. 
+func replaceEnv(env []string, ev string, evv string) []string { + evplus := ev + "=" + var found bool + for i, v := range env { + if strings.HasPrefix(v, evplus) { + found = true + env[i] = evplus + evv + } + } + if !found { + env = append(env, evplus+evv) + } + return env +} + +// asCommandLine renders cmd as something that could be copy-and-pasted into a command line +// If cwd is not empty and different from the command's directory, prepend an approprirate "cd" +func asCommandLine(cwd string, cmd *exec.Cmd) string { + s := "(" + if cmd.Dir != "" && cmd.Dir != cwd { + s += "cd" + escape(cmd.Dir) + ";" + } + for _, e := range cmd.Env { + if !strings.HasPrefix(e, "PATH=") && + !strings.HasPrefix(e, "HOME=") && + !strings.HasPrefix(e, "USER=") && + !strings.HasPrefix(e, "SHELL=") { + s += escape(e) + } + } + for _, a := range cmd.Args { + s += escape(a) + } + s += " )" + return s +} + +// escape inserts escapes appropriate for use in a shell command line +func escape(s string) string { + s = strings.Replace(s, "\\", "\\\\", -1) + s = strings.Replace(s, "'", "\\'", -1) + // Conservative guess at characters that will force quoting + if strings.ContainsAny(s, "\\ ;#*&$~?!|[]()<>{}`") { + s = " '" + s + "'" + } else { + s = " " + s + } + return s +} + +func expect(want string, got tstring) { + if want != "" { + match, err := regexp.MatchString(want, got.o) + if err != nil { + panic(fmt.Sprintf("Error for regexp %s, %v\n", want, err)) + } + if match { + return + } + match, err = regexp.MatchString(want, got.e) + if match { + return + } + fmt.Printf("EXPECTED '%s'\n GOT O='%s'\nAND E='%s'\n", want, got.o, got.e) + } +} diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go index 2b3f16c30c2..0cabfb61e72 100644 --- a/src/cmd/compile/internal/ssa/decompose.go +++ b/src/cmd/compile/internal/ssa/decompose.go @@ -4,12 +4,15 @@ package ssa -import "cmd/compile/internal/types" +import ( + "cmd/compile/internal/types" +) // decompose 
converts phi ops on compound builtin types into phi -// ops on simple types. -// (The remaining compound ops are decomposed with rewrite rules.) +// ops on simple types, then invokes rewrite rules to decompose +// other ops on those types. func decomposeBuiltIn(f *Func) { + // Decompose phis for _, b := range f.Blocks { for _, v := range b.Values { if v.Op != OpPhi { @@ -19,87 +22,79 @@ func decomposeBuiltIn(f *Func) { } } + // Decompose other values + applyRewrite(f, rewriteBlockdec, rewriteValuedec) + if f.Config.RegSize == 4 { + applyRewrite(f, rewriteBlockdec64, rewriteValuedec64) + } + // Split up named values into their components. - // NOTE: the component values we are making are dead at this point. - // We must do the opt pass before any deadcode elimination or we will - // lose the name->value correspondence. var newNames []LocalSlot for _, name := range f.Names { t := name.Type switch { case t.IsInteger() && t.Size() > f.Config.RegSize: - var elemType *types.Type - if t.IsSigned() { - elemType = f.Config.Types.Int32 - } else { - elemType = f.Config.Types.UInt32 - } hiName, loName := f.fe.SplitInt64(name) newNames = append(newNames, hiName, loName) for _, v := range f.NamedValues[name] { - hi := v.Block.NewValue1(v.Pos, OpInt64Hi, elemType, v) - lo := v.Block.NewValue1(v.Pos, OpInt64Lo, f.Config.Types.UInt32, v) - f.NamedValues[hiName] = append(f.NamedValues[hiName], hi) - f.NamedValues[loName] = append(f.NamedValues[loName], lo) + if v.Op != OpInt64Make { + continue + } + f.NamedValues[hiName] = append(f.NamedValues[hiName], v.Args[0]) + f.NamedValues[loName] = append(f.NamedValues[loName], v.Args[1]) } delete(f.NamedValues, name) case t.IsComplex(): - var elemType *types.Type - if t.Size() == 16 { - elemType = f.Config.Types.Float64 - } else { - elemType = f.Config.Types.Float32 - } rName, iName := f.fe.SplitComplex(name) newNames = append(newNames, rName, iName) for _, v := range f.NamedValues[name] { - r := v.Block.NewValue1(v.Pos, OpComplexReal, 
elemType, v) - i := v.Block.NewValue1(v.Pos, OpComplexImag, elemType, v) - f.NamedValues[rName] = append(f.NamedValues[rName], r) - f.NamedValues[iName] = append(f.NamedValues[iName], i) + if v.Op != OpComplexMake { + continue + } + f.NamedValues[rName] = append(f.NamedValues[rName], v.Args[0]) + f.NamedValues[iName] = append(f.NamedValues[iName], v.Args[1]) + } delete(f.NamedValues, name) case t.IsString(): - ptrType := f.Config.Types.BytePtr - lenType := f.Config.Types.Int ptrName, lenName := f.fe.SplitString(name) newNames = append(newNames, ptrName, lenName) for _, v := range f.NamedValues[name] { - ptr := v.Block.NewValue1(v.Pos, OpStringPtr, ptrType, v) - len := v.Block.NewValue1(v.Pos, OpStringLen, lenType, v) - f.NamedValues[ptrName] = append(f.NamedValues[ptrName], ptr) - f.NamedValues[lenName] = append(f.NamedValues[lenName], len) + if v.Op != OpStringMake { + continue + } + f.NamedValues[ptrName] = append(f.NamedValues[ptrName], v.Args[0]) + f.NamedValues[lenName] = append(f.NamedValues[lenName], v.Args[1]) } delete(f.NamedValues, name) case t.IsSlice(): - ptrType := f.Config.Types.BytePtr - lenType := f.Config.Types.Int ptrName, lenName, capName := f.fe.SplitSlice(name) newNames = append(newNames, ptrName, lenName, capName) for _, v := range f.NamedValues[name] { - ptr := v.Block.NewValue1(v.Pos, OpSlicePtr, ptrType, v) - len := v.Block.NewValue1(v.Pos, OpSliceLen, lenType, v) - cap := v.Block.NewValue1(v.Pos, OpSliceCap, lenType, v) - f.NamedValues[ptrName] = append(f.NamedValues[ptrName], ptr) - f.NamedValues[lenName] = append(f.NamedValues[lenName], len) - f.NamedValues[capName] = append(f.NamedValues[capName], cap) + if v.Op != OpSliceMake { + continue + } + f.NamedValues[ptrName] = append(f.NamedValues[ptrName], v.Args[0]) + f.NamedValues[lenName] = append(f.NamedValues[lenName], v.Args[1]) + f.NamedValues[capName] = append(f.NamedValues[capName], v.Args[2]) } delete(f.NamedValues, name) case t.IsInterface(): - ptrType := f.Config.Types.BytePtr 
typeName, dataName := f.fe.SplitInterface(name) newNames = append(newNames, typeName, dataName) for _, v := range f.NamedValues[name] { - typ := v.Block.NewValue1(v.Pos, OpITab, ptrType, v) - data := v.Block.NewValue1(v.Pos, OpIData, ptrType, v) - f.NamedValues[typeName] = append(f.NamedValues[typeName], typ) - f.NamedValues[dataName] = append(f.NamedValues[dataName], data) + if v.Op != OpIMake { + continue + } + f.NamedValues[typeName] = append(f.NamedValues[typeName], v.Args[0]) + f.NamedValues[dataName] = append(f.NamedValues[dataName], v.Args[1]) } delete(f.NamedValues, name) case t.IsFloat(): // floats are never decomposed, even ones bigger than RegSize + newNames = append(newNames, name) case t.Size() > f.Config.RegSize: - f.Fatalf("undecomposed named type %v %v", name, t) + f.Fatalf("undecomposed named type %s %v", name, t) default: newNames = append(newNames, name) } @@ -228,44 +223,15 @@ func decomposeUser(f *Func) { } } // Split up named values into their components. - // NOTE: the component values we are making are dead at this point. - // We must do the opt pass before any deadcode elimination or we will - // lose the name->value correspondence. i := 0 - var fnames []LocalSlot var newNames []LocalSlot for _, name := range f.Names { t := name.Type switch { case t.IsStruct(): - n := t.NumFields() - fnames = fnames[:0] - for i := 0; i < n; i++ { - fnames = append(fnames, f.fe.SplitStruct(name, i)) - } - for _, v := range f.NamedValues[name] { - for i := 0; i < n; i++ { - x := v.Block.NewValue1I(v.Pos, OpStructSelect, t.FieldType(i), int64(i), v) - f.NamedValues[fnames[i]] = append(f.NamedValues[fnames[i]], x) - } - } - delete(f.NamedValues, name) - newNames = append(newNames, fnames...) + newNames = decomposeUserStructInto(f, name, newNames) case t.IsArray(): - if t.NumElem() == 0 { - // TODO(khr): Not sure what to do here. Probably nothing. - // Names for empty arrays aren't important. 
- break - } - if t.NumElem() != 1 { - f.Fatalf("array not of size 1") - } - elemName := f.fe.SplitArray(name) - for _, v := range f.NamedValues[name] { - e := v.Block.NewValue1I(v.Pos, OpArraySelect, t.ElemType(), 0, v) - f.NamedValues[elemName] = append(f.NamedValues[elemName], e) - } - + newNames = decomposeUserArrayInto(f, name, newNames) default: f.Names[i] = name i++ @@ -275,6 +241,83 @@ func decomposeUser(f *Func) { f.Names = append(f.Names, newNames...) } +// decomposeUserArrayInto creates names for the element(s) of arrays referenced +// by name where possible, and appends those new names to slots, which is then +// returned. +func decomposeUserArrayInto(f *Func, name LocalSlot, slots []LocalSlot) []LocalSlot { + t := name.Type + if t.NumElem() == 0 { + // TODO(khr): Not sure what to do here. Probably nothing. + // Names for empty arrays aren't important. + return slots + } + if t.NumElem() != 1 { + // shouldn't get here due to CanSSA + f.Fatalf("array not of size 1") + } + elemName := f.fe.SplitArray(name) + for _, v := range f.NamedValues[name] { + if v.Op != OpArrayMake1 { + continue + } + f.NamedValues[elemName] = append(f.NamedValues[elemName], v.Args[0]) + } + // delete the name for the array as a whole + delete(f.NamedValues, name) + + if t.ElemType().IsArray() { + return decomposeUserArrayInto(f, elemName, slots) + } else if t.ElemType().IsStruct() { + return decomposeUserStructInto(f, elemName, slots) + } + + return append(slots, elemName) +} + +// decomposeUserStructInto creates names for the fields(s) of structs referenced +// by name where possible, and appends those new names to slots, which is then +// returned. 
+func decomposeUserStructInto(f *Func, name LocalSlot, slots []LocalSlot) []LocalSlot { + fnames := []LocalSlot{} // slots for struct in name + t := name.Type + n := t.NumFields() + + for i := 0; i < n; i++ { + fs := f.fe.SplitStruct(name, i) + fnames = append(fnames, fs) + // arrays and structs will be decomposed further, so + // there's no need to record a name + if !fs.Type.IsArray() && !fs.Type.IsStruct() { + slots = append(slots, fs) + } + } + + makeOp := StructMakeOp(n) + // create named values for each struct field + for _, v := range f.NamedValues[name] { + if v.Op != makeOp { + continue + } + for i := 0; i < len(fnames); i++ { + f.NamedValues[fnames[i]] = append(f.NamedValues[fnames[i]], v.Args[i]) + } + } + // remove the name of the struct as a whole + delete(f.NamedValues, name) + + // now that this f.NamedValues contains values for the struct + // fields, recurse into nested structs + for i := 0; i < n; i++ { + if name.Type.FieldType(i).IsStruct() { + slots = decomposeUserStructInto(f, fnames[i], slots) + delete(f.NamedValues, fnames[i]) + } else if name.Type.FieldType(i).IsArray() { + slots = decomposeUserArrayInto(f, fnames[i], slots) + delete(f.NamedValues, fnames[i]) + } + } + return slots +} func decomposeUserPhi(v *Value) { switch { case v.Type.IsStruct(): diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go index 3bb67a951bf..28ae494505b 100644 --- a/src/cmd/compile/internal/ssa/export_test.go +++ b/src/cmd/compile/internal/ssa/export_test.go @@ -75,6 +75,10 @@ func (d *DummyAuto) String() string { return d.s } +func (d *DummyAuto) StorageClass() StorageClass { + return ClassAuto +} + func (DummyFrontend) StringData(s string) interface{} { return nil } @@ -82,33 +86,33 @@ func (DummyFrontend) Auto(pos src.XPos, t *types.Type) GCNode { return &DummyAuto{t: t, s: "aDummyAuto"} } func (d DummyFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) { - return LocalSlot{s.N, dummyTypes.BytePtr, 
s.Off}, LocalSlot{s.N, dummyTypes.Int, s.Off + 8} + return LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 8} } func (d DummyFrontend) SplitInterface(s LocalSlot) (LocalSlot, LocalSlot) { - return LocalSlot{s.N, dummyTypes.BytePtr, s.Off}, LocalSlot{s.N, dummyTypes.BytePtr, s.Off + 8} + return LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.BytePtr, Off: s.Off + 8} } func (d DummyFrontend) SplitSlice(s LocalSlot) (LocalSlot, LocalSlot, LocalSlot) { - return LocalSlot{s.N, s.Type.ElemType().PtrTo(), s.Off}, - LocalSlot{s.N, dummyTypes.Int, s.Off + 8}, - LocalSlot{s.N, dummyTypes.Int, s.Off + 16} + return LocalSlot{N: s.N, Type: s.Type.ElemType().PtrTo(), Off: s.Off}, + LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 8}, + LocalSlot{N: s.N, Type: dummyTypes.Int, Off: s.Off + 16} } func (d DummyFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) { if s.Type.Size() == 16 { - return LocalSlot{s.N, dummyTypes.Float64, s.Off}, LocalSlot{s.N, dummyTypes.Float64, s.Off + 8} + return LocalSlot{N: s.N, Type: dummyTypes.Float64, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Float64, Off: s.Off + 8} } - return LocalSlot{s.N, dummyTypes.Float32, s.Off}, LocalSlot{s.N, dummyTypes.Float32, s.Off + 4} + return LocalSlot{N: s.N, Type: dummyTypes.Float32, Off: s.Off}, LocalSlot{N: s.N, Type: dummyTypes.Float32, Off: s.Off + 4} } func (d DummyFrontend) SplitInt64(s LocalSlot) (LocalSlot, LocalSlot) { if s.Type.IsSigned() { - return LocalSlot{s.N, dummyTypes.Int32, s.Off + 4}, LocalSlot{s.N, dummyTypes.UInt32, s.Off} + return LocalSlot{N: s.N, Type: dummyTypes.Int32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off} } - return LocalSlot{s.N, dummyTypes.UInt32, s.Off + 4}, LocalSlot{s.N, dummyTypes.UInt32, s.Off} + return LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off + 4}, LocalSlot{N: s.N, Type: dummyTypes.UInt32, Off: s.Off} } func (d 
DummyFrontend) SplitStruct(s LocalSlot, i int) LocalSlot { - return LocalSlot{s.N, s.Type.FieldType(i), s.Off + s.Type.FieldOff(i)} + return LocalSlot{N: s.N, Type: s.Type.FieldType(i), Off: s.Off + s.Type.FieldOff(i)} } func (d DummyFrontend) SplitArray(s LocalSlot) LocalSlot { - return LocalSlot{s.N, s.Type.ElemType(), s.Off} + return LocalSlot{N: s.N, Type: s.Type.ElemType(), Off: s.Off} } func (DummyFrontend) Line(_ src.XPos) string { return "unknown.go:0" @@ -121,6 +125,8 @@ func (d DummyFrontend) Syslook(s string) *obj.LSym { func (DummyFrontend) UseWriteBarrier() bool { return true // only writebarrier_test cares } +func (DummyFrontend) SetWBPos(pos src.XPos) { +} func (d DummyFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) } func (d DummyFrontend) Log() bool { return true } @@ -128,7 +134,7 @@ func (d DummyFrontend) Log() bool { return true } func (d DummyFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) } func (d DummyFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) } func (d DummyFrontend) Debug_checknil() bool { return false } -func (d DummyFrontend) Debug_wb() bool { return false } +func (d DummyFrontend) Debug_eagerwb() bool { return false } var dummyTypes Types diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index 7ec596372ad..62550df0ccf 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -44,8 +44,6 @@ type Func struct { scheduled bool // Values in Blocks are in final order NoSplit bool // true if function is marked as nosplit. Used by schedule check pass. - WBPos src.XPos // line number of first write barrier - // when register allocation is done, maps value ids to locations RegAlloc []Location @@ -175,7 +173,7 @@ func (f *Func) LogStat(key string, args ...interface{}) { f.Warnl(f.Entry.Pos, "\t%s\t%s%s\t%s", n, key, value, f.Name) } -// freeValue frees a value. 
It must no longer be referenced. +// freeValue frees a value. It must no longer be referenced or have any args. func (f *Func) freeValue(v *Value) { if v.Block == nil { f.Fatalf("trying to free an already freed value") @@ -183,6 +181,9 @@ func (f *Func) freeValue(v *Value) { if v.Uses != 0 { f.Fatalf("value %s still has %d uses", v, v.Uses) } + if len(v.Args) != 0 { + f.Fatalf("value %s still has %d args", v, len(v.Args)) + } // Clear everything but ID (which we reuse). id := v.ID @@ -406,6 +407,7 @@ func (b *Block) NewValue4(pos src.XPos, op Op, t *types.Type, arg0, arg1, arg2, // constVal returns a constant value for c. func (f *Func) constVal(pos src.XPos, op Op, t *types.Type, c int64, setAuxInt bool) *Value { + // TODO remove unused pos parameter, both here and in *func.ConstXXX callers. if f.constants == nil { f.constants = make(map[int64][]*Value) } @@ -420,9 +422,9 @@ func (f *Func) constVal(pos src.XPos, op Op, t *types.Type, c int64, setAuxInt b } var v *Value if setAuxInt { - v = f.Entry.NewValue0I(pos, op, t, c) + v = f.Entry.NewValue0I(src.NoXPos, op, t, c) } else { - v = f.Entry.NewValue0(pos, op, t) + v = f.Entry.NewValue0(src.NoXPos, op, t) } f.constants[c] = append(vv, v) return v diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules index 49fcd365302..226bea6b74f 100644 --- a/src/cmd/compile/internal/ssa/gen/386.rules +++ b/src/cmd/compile/internal/ssa/gen/386.rules @@ -383,6 +383,8 @@ (NilCheck ptr mem) -> (LoweredNilCheck ptr mem) (GetG mem) -> (LoweredGetG mem) (GetClosurePtr) -> (LoweredGetClosurePtr) +(GetCallerPC) -> (LoweredGetCallerPC) +(GetCallerSP) -> (LoweredGetCallerSP) (Addr {sym} base) -> (LEAL {sym} base) // block rewrites @@ -538,7 +540,6 @@ (MULLconst [41] x) -> (LEAL8 x (LEAL4 x x)) (MULLconst [73] x) -> (LEAL8 x (LEAL8 x x)) -(MULLconst [c] x) && isPowerOfTwo(c) -> (SHLLconst [log2(c)] x) (MULLconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUBL (SHLLconst [log2(c+1)] x) x) 
(MULLconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (LEAL1 (SHLLconst [log2(c-1)] x) x) (MULLconst [c] x) && isPowerOfTwo(c-2) && c >= 34 -> (LEAL2 (SHLLconst [log2(c-2)] x) x) diff --git a/src/cmd/compile/internal/ssa/gen/386Ops.go b/src/cmd/compile/internal/ssa/gen/386Ops.go index 8965d62353d..f5f46fad2cf 100644 --- a/src/cmd/compile/internal/ssa/gen/386Ops.go +++ b/src/cmd/compile/internal/ssa/gen/386Ops.go @@ -440,6 +440,13 @@ func init() { // and sorts it to the very beginning of the block to prevent other // use of DX (the closure pointer) {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("DX")}}}, + // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. + // I.e., if f calls g "calls" getcallerpc, + // the result should be the PC within f that g will return to. + // See runtime/stubs.go for a more detailed discussion. + {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, + // LoweredGetCallerSP returns the SP of the caller of the current function. + {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true}, //arg0=ptr,arg1=mem, returns void. Faults if ptr is nil. {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true}, @@ -448,7 +455,7 @@ func init() { // (particularly stack maps). It takes a memory arg so it // gets correctly ordered with respect to GC safepoints. // arg0=ptr/int arg1=mem, output=int/ptr - {name: "MOVLconvert", argLength: 2, reg: gp11, asm: "MOVL"}, + {name: "MOVLconvert", argLength: 2, reg: gp11, asm: "MOVL", resultInArg0: true}, // Constant flag values. 
For any comparison, there are 5 possible // outcomes: the three from the signed total order (<,==,>) and the diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index ff38be550e6..238515dfcb1 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -113,6 +113,11 @@ (Sqrt x) -> (SQRTSD x) +(RoundToEven x) -> (ROUNDSD [0] x) +(Floor x) -> (ROUNDSD [1] x) +(Ceil x) -> (ROUNDSD [2] x) +(Trunc x) -> (ROUNDSD [3] x) + // Lowering extension // Note: we always extend to 64 bits even though some ops don't need that many result bits. (SignExt8to16 x) -> (MOVBQSX x) @@ -319,7 +324,10 @@ (Move [2] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem) (Move [4] dst src mem) -> (MOVLstore dst (MOVLload src mem) mem) (Move [8] dst src mem) -> (MOVQstore dst (MOVQload src mem) mem) -(Move [16] dst src mem) -> (MOVOstore dst (MOVOload src mem) mem) +(Move [16] dst src mem) && config.useSSE -> (MOVOstore dst (MOVOload src mem) mem) +(Move [16] dst src mem) && !config.useSSE -> + (MOVQstore [8] dst (MOVQload [8] src mem) + (MOVQstore dst (MOVQload src mem) mem)) (Move [3] dst src mem) -> (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) @@ -344,11 +352,18 @@ (OffPtr src [s%16]) (MOVQstore dst (MOVQload src mem) mem)) (Move [s] dst src mem) - && s > 16 && s%16 != 0 && s%16 > 8 -> + && s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE -> (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) +(Move [s] dst src mem) + && s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE -> + (Move [s-s%16] + (OffPtr dst [s%16]) + (OffPtr src [s%16]) + (MOVQstore [8] dst (MOVQload [8] src mem) + (MOVQstore dst (MOVQload src mem) mem))) // Medium copying uses a duff device. (Move [s] dst src mem) @@ -387,36 +402,60 @@ (MOVLstoreconst [0] destptr mem)) // Strip off any fractional word zeroing. 
-(Zero [s] destptr mem) && s%8 != 0 && s > 8 -> +(Zero [s] destptr mem) && s%8 != 0 && s > 8 && !config.useSSE -> (Zero [s-s%8] (OffPtr destptr [s%8]) (MOVQstoreconst [0] destptr mem)) // Zero small numbers of words directly. -(Zero [16] destptr mem) -> +(Zero [16] destptr mem) && !config.useSSE -> (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)) -(Zero [24] destptr mem) -> +(Zero [24] destptr mem) && !config.useSSE -> (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))) -(Zero [32] destptr mem) -> +(Zero [32] destptr mem) && !config.useSSE -> (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))) +(Zero [s] destptr mem) && s > 8 && s < 16 && config.useSSE -> + (MOVQstoreconst [makeValAndOff(0,s-8)] destptr + (MOVQstoreconst [0] destptr mem)) + +// Adjust zeros to be a multiple of 16 bytes. 
+(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE -> + (Zero [s-s%16] (OffPtr destptr [s%16]) + (MOVOstore destptr (MOVOconst [0]) mem)) + +(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE -> + (Zero [s-s%16] (OffPtr destptr [s%16]) + (MOVQstoreconst [0] destptr mem)) + +(Zero [16] destptr mem) && config.useSSE -> + (MOVOstore destptr (MOVOconst [0]) mem) +(Zero [32] destptr mem) && config.useSSE -> + (MOVOstore (OffPtr destptr [16]) (MOVOconst [0]) + (MOVOstore destptr (MOVOconst [0]) mem)) +(Zero [48] destptr mem) && config.useSSE -> + (MOVOstore (OffPtr destptr [32]) (MOVOconst [0]) + (MOVOstore (OffPtr destptr [16]) (MOVOconst [0]) + (MOVOstore destptr (MOVOconst [0]) mem))) +(Zero [64] destptr mem) && config.useSSE -> + (MOVOstore (OffPtr destptr [48]) (MOVOconst [0]) + (MOVOstore (OffPtr destptr [32]) (MOVOconst [0]) + (MOVOstore (OffPtr destptr [16]) (MOVOconst [0]) + (MOVOstore destptr (MOVOconst [0]) mem)))) + // Medium zeroing uses a duff device. (Zero [s] destptr mem) - && s <= 1024 && s%8 == 0 && s%16 != 0 - && !config.noDuffDevice -> - (Zero [s-8] (OffPtr [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem)) -(Zero [s] destptr mem) - && s <= 1024 && s%16 == 0 && !config.noDuffDevice -> + && s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice -> (DUFFZERO [s] destptr (MOVOconst [0]) mem) // Large zeroing uses REP STOSQ. 
(Zero [s] destptr mem) - && (s > 1024 || (config.noDuffDevice && s > 32)) + && (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0 -> (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem) @@ -448,9 +487,22 @@ (NilCheck ptr mem) -> (LoweredNilCheck ptr mem) (GetG mem) -> (LoweredGetG mem) (GetClosurePtr) -> (LoweredGetClosurePtr) +(GetCallerPC) -> (LoweredGetCallerPC) +(GetCallerSP) -> (LoweredGetCallerSP) (Addr {sym} base) && config.PtrSize == 8 -> (LEAQ {sym} base) (Addr {sym} base) && config.PtrSize == 4 -> (LEAL {sym} base) +(MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 -> (SETLmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 -> (SETLEmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 -> (SETGmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 -> (SETGEmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 -> (SETEQmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 -> (SETNEmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 -> (SETBmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 -> (SETBEmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 -> (SETAmem [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses == 1 -> (SETAEmem [off] {sym} ptr x mem) + // block rewrites (If (SETL cmp) yes no) -> (LT cmp yes no) (If (SETLE cmp) yes no) -> (LE cmp yes no) @@ -504,6 +556,9 @@ (AtomicAnd8 ptr val mem) -> (ANDBlock ptr val mem) (AtomicOr8 ptr val mem) -> (ORBlock ptr val mem) +// Write barrier. 
+(WB {fn} destptr srcptr mem) -> (LoweredWB {fn} destptr srcptr mem) + // *************************** // Above: lowering rules // Below: optimizations @@ -545,6 +600,17 @@ (SETEQ (TESTQconst [c] x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAE (BTQconst [log2(c)] x)) (SETNE (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETB (BTQconst [log2(c)] x)) (SETEQ (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAE (BTQconst [log2(c)] x)) +// SET..mem variant +(SETNEmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) && !config.nacl -> (SETBmem [off] {sym} ptr (BTL x y) mem) +(SETEQmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) && !config.nacl -> (SETAEmem [off] {sym} ptr (BTL x y) mem) +(SETNEmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) && !config.nacl -> (SETBmem [off] {sym} ptr (BTQ x y) mem) +(SETEQmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) && !config.nacl -> (SETAEmem [off] {sym} ptr (BTQ x y) mem) +(SETNEmem [off] {sym} ptr (TESTLconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (SETBmem [off] {sym} ptr (BTLconst [log2(c)] x) mem) +(SETEQmem [off] {sym} ptr (TESTLconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 32 && !config.nacl -> (SETAEmem [off] {sym} ptr (BTLconst [log2(c)] x) mem) +(SETNEmem [off] {sym} ptr (TESTQconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) +(SETEQmem [off] {sym} ptr (TESTQconst [c] x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) +(SETNEmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) +(SETEQmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isPowerOfTwo(c) && log2(c) < 64 && !config.nacl -> (SETAEmem [off] {sym} ptr (BTQconst 
[log2(c)] x) mem) // Fold boolean negation into SETcc. (XORLconst [1] (SETNE x)) -> (SETEQ x) @@ -874,7 +940,6 @@ (MULQconst [41] x) -> (LEAQ8 x (LEAQ4 x x)) (MULQconst [73] x) -> (LEAQ8 x (LEAQ8 x x)) -(MULQconst [c] x) && isPowerOfTwo(c) -> (SHLQconst [log2(c)] x) (MULQconst [c] x) && isPowerOfTwo(c+1) && c >= 15 -> (SUBQ (SHLQconst [log2(c+1)] x) x) (MULQconst [c] x) && isPowerOfTwo(c-1) && c >= 17 -> (LEAQ1 (SHLQconst [log2(c-1)] x) x) (MULQconst [c] x) && isPowerOfTwo(c-2) && c >= 34 -> (LEAQ2 (SHLQconst [log2(c-2)] x) x) @@ -934,6 +999,17 @@ (SETEQ (InvertFlags x)) -> (SETEQ x) (SETNE (InvertFlags x)) -> (SETNE x) +(SETLmem [off] {sym} ptr (InvertFlags x) mem) -> (SETGmem [off] {sym} ptr x mem) +(SETGmem [off] {sym} ptr (InvertFlags x) mem) -> (SETLmem [off] {sym} ptr x mem) +(SETBmem [off] {sym} ptr (InvertFlags x) mem) -> (SETAmem [off] {sym} ptr x mem) +(SETAmem [off] {sym} ptr (InvertFlags x) mem) -> (SETBmem [off] {sym} ptr x mem) +(SETLEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETGEmem [off] {sym} ptr x mem) +(SETGEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETLEmem [off] {sym} ptr x mem) +(SETBEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETAEmem [off] {sym} ptr x mem) +(SETAEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETBEmem [off] {sym} ptr x mem) +(SETEQmem [off] {sym} ptr (InvertFlags x) mem) -> (SETEQmem [off] {sym} ptr x mem) +(SETNEmem [off] {sym} ptr (InvertFlags x) mem) -> (SETNEmem [off] {sym} ptr x mem) + // sign extended loads // Note: The combined instruction must end up in the same block // as the original load. 
If not, we end up making a value with @@ -961,6 +1037,8 @@ (MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLload [off] {sym} ptr mem) (MOVLQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVLload [off] {sym} ptr mem) +(MOVLQZX x) && zeroUpper32Bits(x,3) -> x + (MOVBQZX x:(MOVBloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBloadidx1 [off] {sym} ptr idx mem) (MOVWQZX x:(MOVWloadidx1 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx1 [off] {sym} ptr idx mem) (MOVWQZX x:(MOVWloadidx2 [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx2 [off] {sym} ptr idx mem) @@ -1092,6 +1170,8 @@ (MOVLloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) (MOVLload [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> (MOVLloadidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) +(MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVLloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) (MOVQload [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> (MOVQloadidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) (MOVQload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> @@ -1115,6 +1195,8 @@ (MOVLstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) (MOVLstore [off1] {sym1} (LEAQ4 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> (MOVLstoreidx4 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) +(MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVLstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) (MOVQstore [off1] {sym1} (LEAQ1 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && 
canMergeSym(sym1, sym2) -> (MOVQstoreidx1 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) (MOVQstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> @@ -1164,11 +1246,13 @@ // combine SHLQ into indexed loads and stores (MOVWloadidx1 [c] {sym} ptr (SHLQconst [1] idx) mem) -> (MOVWloadidx2 [c] {sym} ptr idx mem) (MOVLloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVLloadidx4 [c] {sym} ptr idx mem) +(MOVLloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVLloadidx8 [c] {sym} ptr idx mem) (MOVQloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQloadidx8 [c] {sym} ptr idx mem) (MOVSSloadidx1 [c] {sym} ptr (SHLQconst [2] idx) mem) -> (MOVSSloadidx4 [c] {sym} ptr idx mem) (MOVSDloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVSDloadidx8 [c] {sym} ptr idx mem) (MOVWstoreidx1 [c] {sym} ptr (SHLQconst [1] idx) val mem) -> (MOVWstoreidx2 [c] {sym} ptr idx val mem) (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) -> (MOVLstoreidx4 [c] {sym} ptr idx val mem) +(MOVLstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) -> (MOVLstoreidx8 [c] {sym} ptr idx val mem) (MOVQstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) -> (MOVQstoreidx8 [c] {sym} ptr idx val mem) (MOVSSstoreidx1 [c] {sym} ptr (SHLQconst [2] idx) val mem) -> (MOVSSstoreidx4 [c] {sym} ptr idx val mem) (MOVSDstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) -> (MOVSDstoreidx8 [c] {sym} ptr idx val mem) @@ -1182,6 +1266,7 @@ (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem) (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem) (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem) +(MOVLloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx8 [c+d] {sym} ptr idx mem) (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && 
is32Bit(c+d) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem) (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVQloadidx8 [c+d] {sym} ptr idx mem) (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem) @@ -1194,6 +1279,7 @@ (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) +(MOVLstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx8 [c+d] {sym} ptr idx val mem) (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) @@ -1206,6 +1292,7 @@ (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+2*d) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem) (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+4*d) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) +(MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+8*d) -> (MOVLloadidx8 [c+8*d] {sym} ptr idx mem) (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem) (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+8*d) -> (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem) @@ -1218,6 +1305,7 @@ 
(MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+2*d) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+4*d) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) +(MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+8*d) -> (MOVLstoreidx8 [c+8*d] {sym} ptr idx val mem) (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+8*d) -> (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) @@ -1465,6 +1553,66 @@ (SETAE (FlagGT_ULT)) -> (MOVLconst [0]) (SETAE (FlagGT_UGT)) -> (MOVLconst [1]) +(SETEQmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETEQmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETEQmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETEQmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETEQmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + +(SETNEmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETNEmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETNEmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETNEmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETNEmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + +(SETLmem [off] {sym} ptr x:(FlagEQ) mem) -> 
(MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETLmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETLmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETLmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETLmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + +(SETLEmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETLEmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETLEmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETLEmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETLEmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + +(SETGmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETGmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETGmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETGmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETGmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + +(SETGEmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETGEmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETGEmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETGEmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETGEmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + +(SETBmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETBmem 
[off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETBmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETBmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETBmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + +(SETBEmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETBEmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETBEmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETBEmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETBEmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + +(SETAmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETAmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETAmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETAmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETAmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + +(SETAEmem [off] {sym} ptr x:(FlagEQ) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETAEmem [off] {sym} ptr x:(FlagLT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETAEmem [off] {sym} ptr x:(FlagLT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETAEmem [off] {sym} ptr x:(FlagGT_ULT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETAEmem [off] {sym} ptr x:(FlagGT_UGT) mem) -> (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + // Remove redundant *const ops (ADDQconst [0] x) -> x (ADDLconst [c] x) && int32(c)==0 -> x @@ -2199,6 +2347,14 @@ && ValAndOff(a).Off() + 4 == 
ValAndOff(c).Off() && clobber(x) -> (MOVQstore [ValAndOff(a).Off()] {s} p (MOVQconst [ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32]) mem) +(MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem)) + && config.useSSE + && x.Uses == 1 + && ValAndOff(c2).Off() + 8 == ValAndOff(c).Off() + && ValAndOff(c).Val() == 0 + && ValAndOff(c2).Val() == 0 + && clobber(x) + -> (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem) (MOVBstoreconstidx1 [c] {s} p i x:(MOVBstoreconstidx1 [a] {s} p i mem)) && x.Uses == 1 @@ -2295,6 +2451,58 @@ && clobber(x) -> (MOVQstoreidx1 [i-4] {s} p (SHLQconst [2] idx) w0 mem) +(MOVBstore [i] {s} p + x1:(MOVBload [j] {s2} p2 mem) + mem2:(MOVBstore [i-1] {s} p + x2:(MOVBload [j-1] {s2} p2 mem) mem)) + && x1.Uses == 1 + && x2.Uses == 1 + && mem2.Uses == 1 + && clobber(x1) + && clobber(x2) + && clobber(mem2) + -> (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem) + +(MOVWstore [i] {s} p + x1:(MOVWload [j] {s2} p2 mem) + mem2:(MOVWstore [i-2] {s} p + x2:(MOVWload [j-2] {s2} p2 mem) mem)) + && x1.Uses == 1 + && x2.Uses == 1 + && mem2.Uses == 1 + && clobber(x1) + && clobber(x2) + && clobber(mem2) + -> (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem) + +(MOVLstore [i] {s} p + x1:(MOVLload [j] {s2} p2 mem) + mem2:(MOVLstore [i-4] {s} p + x2:(MOVLload [j-4] {s2} p2 mem) mem)) + && x1.Uses == 1 + && x2.Uses == 1 + && mem2.Uses == 1 + && clobber(x1) + && clobber(x2) + && clobber(mem2) + -> (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem) + +// This is somewhat tricky. There may be pointers in SSE registers due to rule below. +// However those register shouldn't live across GC safepoint. 
+(MOVQstore [i] {s} p + x1:(MOVQload [j] {s2} p2 mem) + mem2:(MOVQstore [i-8] {s} p + x2:(MOVQload [j-8] {s2} p2 mem) mem)) + && x1.Uses == 1 + && x2.Uses == 1 + && mem2.Uses == 1 + && config.useSSE + && clobber(x1) + && clobber(x2) + && clobber(mem2) + -> (MOVOstore [i-8] {s} p (MOVOload [j-8] {s2} p2 mem) mem) + + // amd64p32 rules // same as the rules above, but with 32 instead of 64 bit pointer arithmetic. // LEAQ,ADDQ -> LEAL,ADDL @@ -2419,3 +2627,54 @@ (MOVWQZX (MOVWQZX x)) -> (MOVWQZX x) (MOVWQZX (MOVBQZX x)) -> (MOVBQZX x) (MOVBQZX (MOVBQZX x)) -> (MOVBQZX x) + +(MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) + && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) -> + (ADDQconstmem {sym} [makeValAndOff(c,off)] ptr mem) +(MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) + && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) -> + (ADDLconstmem {sym} [makeValAndOff(c,off)] ptr mem) + +// float <-> int register moves, with no conversion. +// These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}. +(MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) -> (MOVQf2i val) +(MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) -> (MOVLf2i val) +(MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) -> (MOVQi2f val) +(MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) -> (MOVLi2f val) + +// Other load-like ops. 
+(ADDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (ADDQ x (MOVQf2i y)) +(ADDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (ADDL x (MOVLf2i y)) +(SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (SUBQ x (MOVQf2i y)) +(SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (SUBL x (MOVLf2i y)) +(ANDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (ANDQ x (MOVQf2i y)) +(ANDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (ANDL x (MOVLf2i y)) +( ORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> ( ORQ x (MOVQf2i y)) +( ORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> ( ORL x (MOVLf2i y)) +(XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) -> (XORQ x (MOVQf2i y)) +(XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) -> (XORL x (MOVLf2i y)) + +(ADDQconstmem [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _)) -> + (ADDQconst [ValAndOff(valOff).Val()] (MOVQf2i x)) +(ADDLconstmem [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _)) -> + (ADDLconst [ValAndOff(valOff).Val()] (MOVLf2i x)) + +(ADDSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (ADDSD x (MOVQi2f y)) +(ADDSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (ADDSS x (MOVLi2f y)) +(SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (SUBSD x (MOVQi2f y)) +(SUBSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (SUBSS x (MOVLi2f y)) +(MULSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) -> (MULSD x (MOVQi2f y)) +(MULSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) -> (MULSS x (MOVLi2f y)) + +// Redirect stores to use the other register set. 
+(MOVQstore [off] {sym} ptr (MOVQf2i val) mem) -> (MOVSDstore [off] {sym} ptr val mem) +(MOVLstore [off] {sym} ptr (MOVLf2i val) mem) -> (MOVSSstore [off] {sym} ptr val mem) +(MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) -> (MOVQstore [off] {sym} ptr val mem) +(MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) -> (MOVLstore [off] {sym} ptr val mem) + +// Load args directly into the register class where it will be used. +// We do this by just modifying the type of the Arg. +(MOVQf2i (Arg [off] {sym})) -> @b.Func.Entry (Arg [off] {sym}) +(MOVLf2i (Arg [off] {sym})) -> @b.Func.Entry (Arg [off] {sym}) +(MOVQi2f (Arg [off] {sym})) -> @b.Func.Entry (Arg [off] {sym}) +(MOVLi2f (Arg [off] {sym})) -> @b.Func.Entry (Arg [off] {sym}) diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go index c984cbfb127..0c3b2efa305 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go @@ -180,18 +180,20 @@ func init() { {name: "MOVSDstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff", symEffect: "Write"}, // fp64 indexed by i store {name: "MOVSDstoreidx8", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff", symEffect: "Write"}, // fp64 indexed by 8i store - {name: "ADDSDmem", argLength: 3, reg: fp21load, asm: "ADDSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem {name: "ADDSSmem", argLength: 3, reg: fp21load, asm: "ADDSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem + {name: "ADDSDmem", argLength: 3, reg: fp21load, asm: "ADDSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem {name: "SUBSSmem", argLength: 3, reg: fp21load, asm: "SUBSS", aux: "SymOff", resultInArg0: true, 
faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem {name: "SUBSDmem", argLength: 3, reg: fp21load, asm: "SUBSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem {name: "MULSSmem", argLength: 3, reg: fp21load, asm: "MULSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem {name: "MULSDmem", argLength: 3, reg: fp21load, asm: "MULSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem // binary ops - {name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true, clobberFlags: true}, // arg0 + arg1 - {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1 - {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int32", typ: "UInt64", clobberFlags: true}, // arg0 + auxint - {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", clobberFlags: true}, // arg0 + auxint + {name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true, clobberFlags: true}, // arg0 + arg1 + {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1 + {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int32", typ: "UInt64", clobberFlags: true}, // arg0 + auxint + {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", clobberFlags: true}, // arg0 + auxint + {name: "ADDQconstmem", argLength: 2, reg: gpstoreconst, asm: "ADDQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Write"}, // add ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem + {name: "ADDLconstmem", argLength: 2, reg: gpstoreconst, asm: "ADDL", aux: "SymValAndOff", 
clobberFlags: true, faultOnNilArg0: true, symEffect: "Write"}, // add ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem {name: "SUBQ", argLength: 2, reg: gp21, asm: "SUBQ", resultInArg0: true, clobberFlags: true}, // arg0 - arg1 {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, // arg0 - arg1 @@ -339,6 +341,10 @@ func init() { {name: "SQRTSD", argLength: 1, reg: fp11, asm: "SQRTSD"}, // sqrt(arg0) + // ROUNDSD instruction isn't guaranteed to be on the target platform (it is SSE4.1) + // Any use must be preceded by a successful check of runtime.support_sse41. + {name: "ROUNDSD", argLength: 1, reg: fp11, aux: "Int8", asm: "ROUNDSD"}, // rounds arg0 depending on auxint, 1 means math.Floor, 2 Ceil, 3 Trunc + {name: "SBBQcarrymask", argLength: 1, reg: flagsgp, asm: "SBBQ"}, // (int64)(-1) if carry is set, 0 if carry is clear. {name: "SBBLcarrymask", argLength: 1, reg: flagsgp, asm: "SBBL"}, // (int32)(-1) if carry is set, 0 if carry is clear. 
// Note: SBBW and SBBB are subsumed by SBBL @@ -353,6 +359,17 @@ func init() { {name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0 {name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0 {name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0 + // Variants that store result to memory + {name: "SETEQmem", argLength: 3, reg: gpstoreconst, asm: "SETEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract == condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETNEmem", argLength: 3, reg: gpstoreconst, asm: "SETNE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract != condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETLmem", argLength: 3, reg: gpstoreconst, asm: "SETLT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed < condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETLEmem", argLength: 3, reg: gpstoreconst, asm: "SETLE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed <= condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETGmem", argLength: 3, reg: gpstoreconst, asm: "SETGT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed > condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETGEmem", argLength: 3, reg: gpstoreconst, asm: "SETGE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed >= condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETBmem", argLength: 3, reg: gpstoreconst, asm: "SETCS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned < condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETBEmem", argLength: 3, reg: gpstoreconst, asm: "SETLS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: 
"Write"}, // extract unsigned <= condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETAmem", argLength: 3, reg: gpstoreconst, asm: "SETHI", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned > condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETAEmem", argLength: 3, reg: gpstoreconst, asm: "SETCC", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned >= condition from arg1 to arg0+auxint+aux, arg2=mem // Need different opcodes for floating point conditions because // any comparison involving a NaN is always FALSE and thus // the patterns for inverting conditions cannot be used. @@ -385,6 +402,13 @@ func init() { {name: "CVTSD2SS", argLength: 1, reg: fp11, asm: "CVTSD2SS"}, // convert float64 to float32 {name: "CVTSS2SD", argLength: 1, reg: fp11, asm: "CVTSS2SD"}, // convert float32 to float64 + // Move values between int and float registers, with no conversion. + // TODO: should we have generic versions of these? + {name: "MOVQi2f", argLength: 1, reg: gpfp, typ: "Float64"}, // move 64 bits from int to float reg + {name: "MOVQf2i", argLength: 1, reg: fpgp, typ: "UInt64"}, // move 64 bits from float to int reg + {name: "MOVLi2f", argLength: 1, reg: gpfp, typ: "Float32"}, // move 32 bits from int to float reg + {name: "MOVLf2i", argLength: 1, reg: fpgp, typ: "UInt32"}, // move 32 bits from float to int reg + {name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation. {name: "LEAQ", argLength: 1, reg: gp11sb, asm: "LEAQ", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux @@ -417,6 +441,7 @@ func init() { {name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", symEffect: "Read"}, // load 2 bytes from arg0+2*arg1+auxint+aux. 
arg2=mem {name: "MOVLloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVL", aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem {name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem + {name: "MOVLloadidx8", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load 4 bytes from arg0+8*arg1+auxint+aux. arg2=mem {name: "MOVQloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVQ", aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem {name: "MOVQloadidx8", argLength: 3, reg: gploadidx, asm: "MOVQ", aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load 8 bytes from arg0+8*arg1+auxint+aux. arg2=mem // TODO: sign-extending indexed loads @@ -426,6 +451,7 @@ func init() { {name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem {name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem {name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem + {name: "MOVLstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem {name: "MOVQstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVQ", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem {name: "MOVQstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVQ", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+8*arg1+auxint+aux. 
arg3=mem // TODO: add size-mismatched indexed loads, like MOVBstoreidx4. @@ -459,7 +485,6 @@ func init() { inputs: []regMask{buildReg("DI"), buildReg("X0")}, clobbers: buildReg("DI"), }, - clobberFlags: true, faultOnNilArg0: true, }, {name: "MOVOconst", reg: regInfo{nil, 0, []regMask{fp}}, typ: "Int128", aux: "Int128", rematerializeable: true}, @@ -530,16 +555,27 @@ func init() { // and sorts it to the very beginning of the block to prevent other // use of DX (the closure pointer) {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("DX")}}}, + // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. + // I.e., if f calls g "calls" getcallerpc, + // the result should be the PC within f that g will return to. + // See runtime/stubs.go for a more detailed discussion. + {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, + // LoweredGetCallerSP returns the SP of the caller of the current function. + {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true}, //arg0=ptr,arg1=mem, returns void. Faults if ptr is nil. {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true}, + // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier + // It saves all GP registers if necessary, but may clobber others. + {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("DI"), ax}, clobbers: callerSave ^ gp}, clobberFlags: true, aux: "Sym", symEffect: "None"}, + // MOVQconvert converts between pointers and integers. // We have a special op for this so as to not confuse GC // (particularly stack maps). It takes a memory arg so it // gets correctly ordered with respect to GC safepoints. 
// arg0=ptr/int arg1=mem, output=int/ptr - {name: "MOVQconvert", argLength: 2, reg: gp11, asm: "MOVQ"}, - {name: "MOVLconvert", argLength: 2, reg: gp11, asm: "MOVL"}, // amd64p32 equivalent + {name: "MOVQconvert", argLength: 2, reg: gp11, asm: "MOVQ", resultInArg0: true}, + {name: "MOVLconvert", argLength: 2, reg: gp11, asm: "MOVL", resultInArg0: true}, // amd64p32 equivalent // Constant flag values. For any comparison, there are 5 possible // outcomes: the three from the signed total order (<,==,>) and the diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules index d66e50f17a4..c3baa5133fd 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM.rules @@ -398,6 +398,7 @@ // pseudo-ops (GetClosurePtr) -> (LoweredGetClosurePtr) +(GetCallerSP) -> (LoweredGetCallerSP) (Convert x mem) -> (MOVWconvert x mem) // Absorb pseudo-ops into blocks. @@ -499,6 +500,10 @@ (MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) -> x (MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) -> x (MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) -> x +(MOVBUloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) -> (MOVBUreg x) +(MOVBloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) -> (MOVBreg x) +(MOVHUloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) -> (MOVHUreg x) +(MOVHloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) -> (MOVHreg x) // fold constant into arithmatic ops (ADD x (MOVWconst [c])) -> (ADDconst [c] x) @@ -526,6 +531,9 @@ (CMP x (MOVWconst [c])) -> (CMPconst [c] x) (CMP (MOVWconst [c]) x) -> (InvertFlags (CMPconst [c] x)) +(CMN x (MOVWconst [c])) -> (CMNconst [c] x) +(TST x (MOVWconst [c])) -> (TSTconst [c] x) +(TEQ x (MOVWconst [c])) -> (TEQconst [c] x) // don't extend after proper 
load // MOVWreg instruction is not emitted if src and dst registers are same, but it ensures the type. @@ -598,6 +606,28 @@ (MULA (MOVWconst [c]) x a) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (ADD (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) a) (MULA (MOVWconst [c]) x a) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (ADD (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) a) +(MULS x (MOVWconst [c]) a) && int32(c) == -1 -> (ADD a x) +(MULS _ (MOVWconst [0]) a) -> a +(MULS x (MOVWconst [1]) a) -> (RSB x a) +(MULS x (MOVWconst [c]) a) && isPowerOfTwo(c) -> (RSB (SLLconst [log2(c)] x) a) +(MULS x (MOVWconst [c]) a) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (RSB (ADDshiftLL x x [log2(c-1)]) a) +(MULS x (MOVWconst [c]) a) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (RSB (RSBshiftLL x x [log2(c+1)]) a) +(MULS x (MOVWconst [c]) a) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (RSB (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) a) +(MULS x (MOVWconst [c]) a) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (RSB (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) a) +(MULS x (MOVWconst [c]) a) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (RSB (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) a) +(MULS x (MOVWconst [c]) a) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (RSB (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) a) + +(MULS (MOVWconst [c]) x a) && int32(c) == -1 -> (ADD a x) +(MULS (MOVWconst [0]) _ a) -> a +(MULS (MOVWconst [1]) x a) -> (RSB x a) +(MULS (MOVWconst [c]) x a) && isPowerOfTwo(c) -> (RSB (SLLconst [log2(c)] x) a) +(MULS (MOVWconst [c]) x a) && isPowerOfTwo(c-1) && int32(c) >= 3 -> (RSB (ADDshiftLL x x [log2(c-1)]) a) +(MULS (MOVWconst [c]) x a) && isPowerOfTwo(c+1) && int32(c) >= 7 -> (RSB (RSBshiftLL x x [log2(c+1)]) a) +(MULS (MOVWconst [c]) x a) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) -> (RSB (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) a) +(MULS (MOVWconst [c]) x a) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) -> (RSB (SLLconst [log2(c/5)] 
(ADDshiftLL x x [2])) a) +(MULS (MOVWconst [c]) x a) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) -> (RSB (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) a) +(MULS (MOVWconst [c]) x a) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) -> (RSB (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) a) + // div by constant (Select0 (CALLudiv x (MOVWconst [1]))) -> x (Select1 (CALLudiv _ (MOVWconst [1]))) -> (MOVWconst [0]) @@ -610,6 +640,17 @@ (CMPconst (MOVWconst [x]) [y]) && int32(x)uint32(y) -> (FlagLT_UGT) (CMPconst (MOVWconst [x]) [y]) && int32(x)>int32(y) && uint32(x) (FlagGT_ULT) (CMPconst (MOVWconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT) +(CMNconst (MOVWconst [x]) [y]) && int32(x)==int32(-y) -> (FlagEQ) +(CMNconst (MOVWconst [x]) [y]) && int32(x) (FlagLT_ULT) +(CMNconst (MOVWconst [x]) [y]) && int32(x)uint32(-y) -> (FlagLT_UGT) +(CMNconst (MOVWconst [x]) [y]) && int32(x)>int32(-y) && uint32(x) (FlagGT_ULT) +(CMNconst (MOVWconst [x]) [y]) && int32(x)>int32(-y) && uint32(x)>uint32(-y) -> (FlagGT_UGT) +(TSTconst (MOVWconst [x]) [y]) && int32(x&y)==0 -> (FlagEQ) +(TSTconst (MOVWconst [x]) [y]) && int32(x&y)<0 -> (FlagLT_UGT) +(TSTconst (MOVWconst [x]) [y]) && int32(x&y)>0 -> (FlagGT_UGT) +(TEQconst (MOVWconst [x]) [y]) && int32(x^y)==0 -> (FlagEQ) +(TEQconst (MOVWconst [x]) [y]) && int32(x^y)<0 -> (FlagLT_UGT) +(TEQconst (MOVWconst [x]) [y]) && int32(x^y)>0 -> (FlagGT_UGT) // other known comparisons (CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT_ULT) @@ -799,8 +840,8 @@ // generic constant folding (ADDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) -> (SUBconst [int64(int32(-c))] x) (SUBconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) -> (ADDconst [int64(int32(-c))] x) -(ANDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) -> (BICconst [int64(^uint32(c))] x) -(BICconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) -> (ANDconst [int64(^uint32(c))] x) +(ANDconst [c] x) && 
!isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) -> (BICconst [int64(int32(^uint32(c)))] x) +(BICconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) -> (ANDconst [int64(int32(^uint32(c)))] x) (ADDconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(c+d))]) (ADDconst [c] (ADDconst [d] x)) -> (ADDconst [int64(int32(c+d))] x) (ADDconst [c] (SUBconst [d] x)) -> (ADDconst [int64(int32(c-d))] x) @@ -824,6 +865,7 @@ (SRAconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(d)>>uint64(c))]) (MUL (MOVWconst [c]) (MOVWconst [d])) -> (MOVWconst [int64(int32(c*d))]) (MULA (MOVWconst [c]) (MOVWconst [d]) a) -> (ADDconst [int64(int32(c*d))] a) +(MULS (MOVWconst [c]) (MOVWconst [d]) a) -> (SUBconst [int64(int32(c*d))] a) (Select0 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(uint32(c)/uint32(d))]) (Select1 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) -> (MOVWconst [int64(uint32(c)%uint32(d))]) (ANDconst [c] (MOVWconst [d])) -> (MOVWconst [c&d]) @@ -840,6 +882,9 @@ (MOVHreg (MOVWconst [c])) -> (MOVWconst [int64(int16(c))]) (MOVHUreg (MOVWconst [c])) -> (MOVWconst [int64(uint16(c))]) (MOVWreg (MOVWconst [c])) -> (MOVWconst [c]) +// BFX: Width = c >> 8, LSB = c & 0xff, result = d << (32 - Width - LSB) >> (32 - Width) +(BFX [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8)))]) +(BFXU [c] (MOVWconst [d])) -> (MOVWconst [int64(uint32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8)))]) // absorb shifts into ops (ADD x (SLLconst [c] y)) -> (ADDshiftLL x y [c]) @@ -958,6 +1003,24 @@ (CMP (SRL y z) x) -> (InvertFlags (CMPshiftRLreg x y z)) (CMP x (SRA y z)) -> (CMPshiftRAreg x y z) (CMP (SRA y z) x) -> (InvertFlags (CMPshiftRAreg x y z)) +(TST x (SLLconst [c] y)) -> (TSTshiftLL x y [c]) +(TST x (SRLconst [c] y)) -> (TSTshiftRL x y [c]) +(TST x (SRAconst [c] y)) -> (TSTshiftRA x y [c]) +(TST x (SLL y z)) -> (TSTshiftLLreg x y z) +(TST x (SRL y z)) -> (TSTshiftRLreg x y z) +(TST x (SRA y z)) -> 
(TSTshiftRAreg x y z) +(TEQ x (SLLconst [c] y)) -> (TEQshiftLL x y [c]) +(TEQ x (SRLconst [c] y)) -> (TEQshiftRL x y [c]) +(TEQ x (SRAconst [c] y)) -> (TEQshiftRA x y [c]) +(TEQ x (SLL y z)) -> (TEQshiftLLreg x y z) +(TEQ x (SRL y z)) -> (TEQshiftRLreg x y z) +(TEQ x (SRA y z)) -> (TEQshiftRAreg x y z) +(CMN x (SLLconst [c] y)) -> (CMNshiftLL x y [c]) +(CMN x (SRLconst [c] y)) -> (CMNshiftRL x y [c]) +(CMN x (SRAconst [c] y)) -> (CMNshiftRA x y [c]) +(CMN x (SLL y z)) -> (CMNshiftLLreg x y z) +(CMN x (SRL y z)) -> (CMNshiftRLreg x y z) +(CMN x (SRA y z)) -> (CMNshiftRAreg x y z) // prefer *const ops to *shift ops (ADDshiftLL (MOVWconst [c]) x [d]) -> (ADDconst [c] (SLLconst x [d])) @@ -1000,6 +1063,15 @@ (CMPshiftLL (MOVWconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SLLconst x [d]))) (CMPshiftRL (MOVWconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRLconst x [d]))) (CMPshiftRA (MOVWconst [c]) x [d]) -> (InvertFlags (CMPconst [c] (SRAconst x [d]))) +(TSTshiftLL (MOVWconst [c]) x [d]) -> (TSTconst [c] (SLLconst x [d])) +(TSTshiftRL (MOVWconst [c]) x [d]) -> (TSTconst [c] (SRLconst x [d])) +(TSTshiftRA (MOVWconst [c]) x [d]) -> (TSTconst [c] (SRAconst x [d])) +(TEQshiftLL (MOVWconst [c]) x [d]) -> (TEQconst [c] (SLLconst x [d])) +(TEQshiftRL (MOVWconst [c]) x [d]) -> (TEQconst [c] (SRLconst x [d])) +(TEQshiftRA (MOVWconst [c]) x [d]) -> (TEQconst [c] (SRAconst x [d])) +(CMNshiftLL (MOVWconst [c]) x [d]) -> (CMNconst [c] (SLLconst x [d])) +(CMNshiftRL (MOVWconst [c]) x [d]) -> (CMNconst [c] (SRLconst x [d])) +(CMNshiftRA (MOVWconst [c]) x [d]) -> (CMNconst [c] (SRAconst x [d])) (ADDshiftLLreg (MOVWconst [c]) x y) -> (ADDconst [c] (SLL x y)) (ADDshiftRLreg (MOVWconst [c]) x y) -> (ADDconst [c] (SRL x y)) @@ -1040,54 +1112,72 @@ (CMPshiftLLreg (MOVWconst [c]) x y) -> (InvertFlags (CMPconst [c] (SLL x y))) (CMPshiftRLreg (MOVWconst [c]) x y) -> (InvertFlags (CMPconst [c] (SRL x y))) (CMPshiftRAreg (MOVWconst [c]) x y) -> (InvertFlags (CMPconst [c] (SRA x y))) 
+(TSTshiftLLreg (MOVWconst [c]) x y) -> (TSTconst [c] (SLL x y)) +(TSTshiftRLreg (MOVWconst [c]) x y) -> (TSTconst [c] (SRL x y)) +(TSTshiftRAreg (MOVWconst [c]) x y) -> (TSTconst [c] (SRA x y)) +(TEQshiftLLreg (MOVWconst [c]) x y) -> (TEQconst [c] (SLL x y)) +(TEQshiftRLreg (MOVWconst [c]) x y) -> (TEQconst [c] (SRL x y)) +(TEQshiftRAreg (MOVWconst [c]) x y) -> (TEQconst [c] (SRA x y)) +(CMNshiftLLreg (MOVWconst [c]) x y) -> (CMNconst [c] (SLL x y)) +(CMNshiftRLreg (MOVWconst [c]) x y) -> (CMNconst [c] (SRL x y)) +(CMNshiftRAreg (MOVWconst [c]) x y) -> (CMNconst [c] (SRA x y)) // constant folding in *shift ops -(ADDshiftLL x (MOVWconst [c]) [d]) -> (ADDconst x [int64(uint32(c)< (ADDconst x [int64(uint32(c)>>uint64(d))]) +(ADDshiftLL x (MOVWconst [c]) [d]) -> (ADDconst x [int64(int32(uint32(c)< (ADDconst x [int64(int32(uint32(c)>>uint64(d)))]) (ADDshiftRA x (MOVWconst [c]) [d]) -> (ADDconst x [int64(int32(c)>>uint64(d))]) -(ADCshiftLL x (MOVWconst [c]) [d] flags) -> (ADCconst x [int64(uint32(c)< (ADCconst x [int64(uint32(c)>>uint64(d))] flags) +(ADCshiftLL x (MOVWconst [c]) [d] flags) -> (ADCconst x [int64(int32(uint32(c)< (ADCconst x [int64(int32(uint32(c)>>uint64(d)))] flags) (ADCshiftRA x (MOVWconst [c]) [d] flags) -> (ADCconst x [int64(int32(c)>>uint64(d))] flags) -(ADDSshiftLL x (MOVWconst [c]) [d]) -> (ADDSconst x [int64(uint32(c)< (ADDSconst x [int64(uint32(c)>>uint64(d))]) +(ADDSshiftLL x (MOVWconst [c]) [d]) -> (ADDSconst x [int64(int32(uint32(c)< (ADDSconst x [int64(int32(uint32(c)>>uint64(d)))]) (ADDSshiftRA x (MOVWconst [c]) [d]) -> (ADDSconst x [int64(int32(c)>>uint64(d))]) -(SUBshiftLL x (MOVWconst [c]) [d]) -> (SUBconst x [int64(uint32(c)< (SUBconst x [int64(uint32(c)>>uint64(d))]) +(SUBshiftLL x (MOVWconst [c]) [d]) -> (SUBconst x [int64(int32(uint32(c)< (SUBconst x [int64(int32(uint32(c)>>uint64(d)))]) (SUBshiftRA x (MOVWconst [c]) [d]) -> (SUBconst x [int64(int32(c)>>uint64(d))]) -(SBCshiftLL x (MOVWconst [c]) [d] flags) -> (SBCconst x 
[int64(uint32(c)< (SBCconst x [int64(uint32(c)>>uint64(d))] flags) +(SBCshiftLL x (MOVWconst [c]) [d] flags) -> (SBCconst x [int64(int32(uint32(c)< (SBCconst x [int64(int32(uint32(c)>>uint64(d)))] flags) (SBCshiftRA x (MOVWconst [c]) [d] flags) -> (SBCconst x [int64(int32(c)>>uint64(d))] flags) -(SUBSshiftLL x (MOVWconst [c]) [d]) -> (SUBSconst x [int64(uint32(c)< (SUBSconst x [int64(uint32(c)>>uint64(d))]) +(SUBSshiftLL x (MOVWconst [c]) [d]) -> (SUBSconst x [int64(int32(uint32(c)< (SUBSconst x [int64(int32(uint32(c)>>uint64(d)))]) (SUBSshiftRA x (MOVWconst [c]) [d]) -> (SUBSconst x [int64(int32(c)>>uint64(d))]) -(RSBshiftLL x (MOVWconst [c]) [d]) -> (RSBconst x [int64(uint32(c)< (RSBconst x [int64(uint32(c)>>uint64(d))]) +(RSBshiftLL x (MOVWconst [c]) [d]) -> (RSBconst x [int64(int32(uint32(c)< (RSBconst x [int64(int32(uint32(c)>>uint64(d)))]) (RSBshiftRA x (MOVWconst [c]) [d]) -> (RSBconst x [int64(int32(c)>>uint64(d))]) -(RSCshiftLL x (MOVWconst [c]) [d] flags) -> (RSCconst x [int64(uint32(c)< (RSCconst x [int64(uint32(c)>>uint64(d))] flags) +(RSCshiftLL x (MOVWconst [c]) [d] flags) -> (RSCconst x [int64(int32(uint32(c)< (RSCconst x [int64(int32(uint32(c)>>uint64(d)))] flags) (RSCshiftRA x (MOVWconst [c]) [d] flags) -> (RSCconst x [int64(int32(c)>>uint64(d))] flags) -(RSBSshiftLL x (MOVWconst [c]) [d]) -> (RSBSconst x [int64(uint32(c)< (RSBSconst x [int64(uint32(c)>>uint64(d))]) +(RSBSshiftLL x (MOVWconst [c]) [d]) -> (RSBSconst x [int64(int32(uint32(c)< (RSBSconst x [int64(int32(uint32(c)>>uint64(d)))]) (RSBSshiftRA x (MOVWconst [c]) [d]) -> (RSBSconst x [int64(int32(c)>>uint64(d))]) -(ANDshiftLL x (MOVWconst [c]) [d]) -> (ANDconst x [int64(uint32(c)< (ANDconst x [int64(uint32(c)>>uint64(d))]) +(ANDshiftLL x (MOVWconst [c]) [d]) -> (ANDconst x [int64(int32(uint32(c)< (ANDconst x [int64(int32(uint32(c)>>uint64(d)))]) (ANDshiftRA x (MOVWconst [c]) [d]) -> (ANDconst x [int64(int32(c)>>uint64(d))]) -(ORshiftLL x (MOVWconst [c]) [d]) -> (ORconst x [int64(uint32(c)< 
(ORconst x [int64(uint32(c)>>uint64(d))]) +(ORshiftLL x (MOVWconst [c]) [d]) -> (ORconst x [int64(int32(uint32(c)< (ORconst x [int64(int32(uint32(c)>>uint64(d)))]) (ORshiftRA x (MOVWconst [c]) [d]) -> (ORconst x [int64(int32(c)>>uint64(d))]) -(XORshiftLL x (MOVWconst [c]) [d]) -> (XORconst x [int64(uint32(c)< (XORconst x [int64(uint32(c)>>uint64(d))]) +(XORshiftLL x (MOVWconst [c]) [d]) -> (XORconst x [int64(int32(uint32(c)< (XORconst x [int64(int32(uint32(c)>>uint64(d)))]) (XORshiftRA x (MOVWconst [c]) [d]) -> (XORconst x [int64(int32(c)>>uint64(d))]) -(XORshiftRR x (MOVWconst [c]) [d]) -> (XORconst x [int64(uint32(c)>>uint64(d)|uint32(c)< (BICconst x [int64(uint32(c)< (BICconst x [int64(uint32(c)>>uint64(d))]) +(XORshiftRR x (MOVWconst [c]) [d]) -> (XORconst x [int64(int32(uint32(c)>>uint64(d)|uint32(c)< (BICconst x [int64(int32(uint32(c)< (BICconst x [int64(int32(uint32(c)>>uint64(d)))]) (BICshiftRA x (MOVWconst [c]) [d]) -> (BICconst x [int64(int32(c)>>uint64(d))]) (MVNshiftLL (MOVWconst [c]) [d]) -> (MOVWconst [^int64(uint32(c)< (MOVWconst [^int64(uint32(c)>>uint64(d))]) (MVNshiftRA (MOVWconst [c]) [d]) -> (MOVWconst [^int64(int32(c)>>uint64(d))]) -(CMPshiftLL x (MOVWconst [c]) [d]) -> (CMPconst x [int64(uint32(c)< (CMPconst x [int64(uint32(c)>>uint64(d))]) +(CMPshiftLL x (MOVWconst [c]) [d]) -> (CMPconst x [int64(int32(uint32(c)< (CMPconst x [int64(int32(uint32(c)>>uint64(d)))]) (CMPshiftRA x (MOVWconst [c]) [d]) -> (CMPconst x [int64(int32(c)>>uint64(d))]) +(TSTshiftLL x (MOVWconst [c]) [d]) -> (TSTconst x [int64(int32(uint32(c)< (TSTconst x [int64(int32(uint32(c)>>uint64(d)))]) +(TSTshiftRA x (MOVWconst [c]) [d]) -> (TSTconst x [int64(int32(c)>>uint64(d))]) +(TEQshiftLL x (MOVWconst [c]) [d]) -> (TEQconst x [int64(int32(uint32(c)< (TEQconst x [int64(int32(uint32(c)>>uint64(d)))]) +(TEQshiftRA x (MOVWconst [c]) [d]) -> (TEQconst x [int64(int32(c)>>uint64(d))]) +(CMNshiftLL x (MOVWconst [c]) [d]) -> (CMNconst x [int64(int32(uint32(c)< (CMNconst x 
[int64(int32(uint32(c)>>uint64(d)))]) +(CMNshiftRA x (MOVWconst [c]) [d]) -> (CMNconst x [int64(int32(c)>>uint64(d))]) (ADDshiftLLreg x y (MOVWconst [c])) -> (ADDshiftLL x y [c]) (ADDshiftRLreg x y (MOVWconst [c])) -> (ADDshiftRL x y [c]) @@ -1134,6 +1224,15 @@ (CMPshiftLLreg x y (MOVWconst [c])) -> (CMPshiftLL x y [c]) (CMPshiftRLreg x y (MOVWconst [c])) -> (CMPshiftRL x y [c]) (CMPshiftRAreg x y (MOVWconst [c])) -> (CMPshiftRA x y [c]) +(TSTshiftLLreg x y (MOVWconst [c])) -> (TSTshiftLL x y [c]) +(TSTshiftRLreg x y (MOVWconst [c])) -> (TSTshiftRL x y [c]) +(TSTshiftRAreg x y (MOVWconst [c])) -> (TSTshiftRA x y [c]) +(TEQshiftLLreg x y (MOVWconst [c])) -> (TEQshiftLL x y [c]) +(TEQshiftRLreg x y (MOVWconst [c])) -> (TEQshiftRL x y [c]) +(TEQshiftRAreg x y (MOVWconst [c])) -> (TEQshiftRA x y [c]) +(CMNshiftLLreg x y (MOVWconst [c])) -> (CMNshiftLL x y [c]) +(CMNshiftRLreg x y (MOVWconst [c])) -> (CMNshiftRL x y [c]) +(CMNshiftRAreg x y (MOVWconst [c])) -> (CMNshiftRA x y [c]) // Generate rotates (ADDshiftLL [c] (SRLconst x [32-c]) x) -> (SRRconst [32-c] x) @@ -1152,13 +1251,31 @@ (MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftLL ptr idx [c] val mem) (MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftRL ptr idx [c] val mem) (MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftRA ptr idx [c] val mem) +(MOVBUload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVBUloadidx ptr idx mem) +(MOVBload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVBloadidx ptr idx mem) +(MOVBstore [0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVBstoreidx ptr idx val mem) +(MOVHUload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVHUloadidx ptr idx mem) +(MOVHload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVHloadidx ptr idx mem) +(MOVHstore 
[0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVHstoreidx ptr idx val mem) // constant folding in indexed loads and stores (MOVWloadidx ptr (MOVWconst [c]) mem) -> (MOVWload [c] ptr mem) (MOVWloadidx (MOVWconst [c]) ptr mem) -> (MOVWload [c] ptr mem) +(MOVBloadidx ptr (MOVWconst [c]) mem) -> (MOVBload [c] ptr mem) +(MOVBloadidx (MOVWconst [c]) ptr mem) -> (MOVBload [c] ptr mem) +(MOVBUloadidx ptr (MOVWconst [c]) mem) -> (MOVBUload [c] ptr mem) +(MOVBUloadidx (MOVWconst [c]) ptr mem) -> (MOVBUload [c] ptr mem) +(MOVHUloadidx ptr (MOVWconst [c]) mem) -> (MOVHUload [c] ptr mem) +(MOVHUloadidx (MOVWconst [c]) ptr mem) -> (MOVHUload [c] ptr mem) +(MOVHloadidx ptr (MOVWconst [c]) mem) -> (MOVHload [c] ptr mem) +(MOVHloadidx (MOVWconst [c]) ptr mem) -> (MOVHload [c] ptr mem) (MOVWstoreidx ptr (MOVWconst [c]) val mem) -> (MOVWstore [c] ptr val mem) (MOVWstoreidx (MOVWconst [c]) ptr val mem) -> (MOVWstore [c] ptr val mem) +(MOVBstoreidx ptr (MOVWconst [c]) val mem) -> (MOVBstore [c] ptr val mem) +(MOVBstoreidx (MOVWconst [c]) ptr val mem) -> (MOVBstore [c] ptr val mem) +(MOVHstoreidx ptr (MOVWconst [c]) val mem) -> (MOVHstore [c] ptr val mem) +(MOVHstoreidx (MOVWconst [c]) ptr val mem) -> (MOVHstore [c] ptr val mem) (MOVWloadidx ptr (SLLconst idx [c]) mem) -> (MOVWloadshiftLL ptr idx [c] mem) (MOVWloadidx (SLLconst idx [c]) ptr mem) -> (MOVWloadshiftLL ptr idx [c] mem) @@ -1193,6 +1310,25 @@ (BIC x x) -> (MOVWconst [0]) (ADD (MUL x y) a) -> (MULA x y a) +(SUB a (MUL x y)) && objabi.GOARM == 7 -> (MULS x y a) +(RSB (MUL x y) a) && objabi.GOARM == 7 -> (MULS x y a) + +(NEGF (MULF x y)) && objabi.GOARM >= 6 -> (NMULF x y) +(NEGD (MULD x y)) && objabi.GOARM >= 6 -> (NMULD x y) +(MULF (NEGF x) y) && objabi.GOARM >= 6 -> (NMULF x y) +(MULD (NEGD x) y) && objabi.GOARM >= 6 -> (NMULD x y) +(NMULF (NEGF x) y) -> (MULF x y) +(NMULD (NEGD x) y) -> (MULD x y) + +// the result will overwrite the addend, since they are in the same register +(ADDF a (MULF x y)) && 
a.Uses == 1 && objabi.GOARM >= 6 -> (MULAF a x y) +(ADDF a (NMULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 -> (MULSF a x y) +(ADDD a (MULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 -> (MULAD a x y) +(ADDD a (NMULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 -> (MULSD a x y) +(SUBF a (MULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 -> (MULSF a x y) +(SUBF a (NMULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 -> (MULAF a x y) +(SUBD a (MULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 -> (MULSD a x y) +(SUBD a (NMULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 -> (MULAD a x y) (AND x (MVN y)) -> (BIC x y) @@ -1222,3 +1358,75 @@ // floating point optimizations (CMPF x (MOVFconst [0])) -> (CMPF0 x) (CMPD x (MOVDconst [0])) -> (CMPD0 x) + +// bit extraction +(SRAconst (SLLconst x [c]) [d]) && objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 -> (BFX [(d-c)|(32-d)<<8] x) +(SRLconst (SLLconst x [c]) [d]) && objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 -> (BFXU [(d-c)|(32-d)<<8] x) + +// comparison simplification +(CMP x (RSBconst [0] y)) -> (CMN x y) +(CMN x (RSBconst [0] y)) -> (CMP x y) +(EQ (CMPconst [0] (SUB x y)) yes no) -> (EQ (CMP x y) yes no) +(EQ (CMPconst [0] (SUBconst [c] x)) yes no) -> (EQ (CMPconst [c] x) yes no) +(EQ (CMPconst [0] (SUBshiftLL x y [c])) yes no) -> (EQ (CMPshiftLL x y [c]) yes no) +(EQ (CMPconst [0] (SUBshiftRL x y [c])) yes no) -> (EQ (CMPshiftRL x y [c]) yes no) +(EQ (CMPconst [0] (SUBshiftRA x y [c])) yes no) -> (EQ (CMPshiftRA x y [c]) yes no) +(EQ (CMPconst [0] (SUBshiftLLreg x y z)) yes no) -> (EQ (CMPshiftLLreg x y z) yes no) +(EQ (CMPconst [0] (SUBshiftRLreg x y z)) yes no) -> (EQ (CMPshiftRLreg x y z) yes no) +(EQ (CMPconst [0] (SUBshiftRAreg x y z)) yes no) -> (EQ (CMPshiftRAreg x y z) yes no) +(NE (CMPconst [0] (SUB x y)) yes no) -> (NE (CMP x y) yes no) +(NE (CMPconst [0] (SUBconst [c] x)) yes no) -> (NE (CMPconst [c] x) yes no) +(NE (CMPconst [0] (SUBshiftLL x y [c])) yes no) -> (NE (CMPshiftLL x y [c]) yes no) +(NE 
(CMPconst [0] (SUBshiftRL x y [c])) yes no) -> (NE (CMPshiftRL x y [c]) yes no) +(NE (CMPconst [0] (SUBshiftRA x y [c])) yes no) -> (NE (CMPshiftRA x y [c]) yes no) +(NE (CMPconst [0] (SUBshiftLLreg x y z)) yes no) -> (NE (CMPshiftLLreg x y z) yes no) +(NE (CMPconst [0] (SUBshiftRLreg x y z)) yes no) -> (NE (CMPshiftRLreg x y z) yes no) +(NE (CMPconst [0] (SUBshiftRAreg x y z)) yes no) -> (NE (CMPshiftRAreg x y z) yes no) +(EQ (CMPconst [0] (ADD x y)) yes no) -> (EQ (CMN x y) yes no) +(EQ (CMPconst [0] (ADDconst [c] x)) yes no) -> (EQ (CMNconst [c] x) yes no) +(EQ (CMPconst [0] (ADDshiftLL x y [c])) yes no) -> (EQ (CMNshiftLL x y [c]) yes no) +(EQ (CMPconst [0] (ADDshiftRL x y [c])) yes no) -> (EQ (CMNshiftRL x y [c]) yes no) +(EQ (CMPconst [0] (ADDshiftRA x y [c])) yes no) -> (EQ (CMNshiftRA x y [c]) yes no) +(EQ (CMPconst [0] (ADDshiftLLreg x y z)) yes no) -> (EQ (CMNshiftLLreg x y z) yes no) +(EQ (CMPconst [0] (ADDshiftRLreg x y z)) yes no) -> (EQ (CMNshiftRLreg x y z) yes no) +(EQ (CMPconst [0] (ADDshiftRAreg x y z)) yes no) -> (EQ (CMNshiftRAreg x y z) yes no) +(NE (CMPconst [0] (ADD x y)) yes no) -> (NE (CMN x y) yes no) +(NE (CMPconst [0] (ADDconst [c] x)) yes no) -> (NE (CMNconst [c] x) yes no) +(NE (CMPconst [0] (ADDshiftLL x y [c])) yes no) -> (NE (CMNshiftLL x y [c]) yes no) +(NE (CMPconst [0] (ADDshiftRL x y [c])) yes no) -> (NE (CMNshiftRL x y [c]) yes no) +(NE (CMPconst [0] (ADDshiftRA x y [c])) yes no) -> (NE (CMNshiftRA x y [c]) yes no) +(NE (CMPconst [0] (ADDshiftLLreg x y z)) yes no) -> (NE (CMNshiftLLreg x y z) yes no) +(NE (CMPconst [0] (ADDshiftRLreg x y z)) yes no) -> (NE (CMNshiftRLreg x y z) yes no) +(NE (CMPconst [0] (ADDshiftRAreg x y z)) yes no) -> (NE (CMNshiftRAreg x y z) yes no) +(EQ (CMPconst [0] (AND x y)) yes no) -> (EQ (TST x y) yes no) +(EQ (CMPconst [0] (ANDconst [c] x)) yes no) -> (EQ (TSTconst [c] x) yes no) +(EQ (CMPconst [0] (ANDshiftLL x y [c])) yes no) -> (EQ (TSTshiftLL x y [c]) yes no) +(EQ (CMPconst [0] (ANDshiftRL x y 
[c])) yes no) -> (EQ (TSTshiftRL x y [c]) yes no) +(EQ (CMPconst [0] (ANDshiftRA x y [c])) yes no) -> (EQ (TSTshiftRA x y [c]) yes no) +(EQ (CMPconst [0] (ANDshiftLLreg x y z)) yes no) -> (EQ (TSTshiftLLreg x y z) yes no) +(EQ (CMPconst [0] (ANDshiftRLreg x y z)) yes no) -> (EQ (TSTshiftRLreg x y z) yes no) +(EQ (CMPconst [0] (ANDshiftRAreg x y z)) yes no) -> (EQ (TSTshiftRAreg x y z) yes no) +(NE (CMPconst [0] (AND x y)) yes no) -> (NE (TST x y) yes no) +(NE (CMPconst [0] (ANDconst [c] x)) yes no) -> (NE (TSTconst [c] x) yes no) +(NE (CMPconst [0] (ANDshiftLL x y [c])) yes no) -> (NE (TSTshiftLL x y [c]) yes no) +(NE (CMPconst [0] (ANDshiftRL x y [c])) yes no) -> (NE (TSTshiftRL x y [c]) yes no) +(NE (CMPconst [0] (ANDshiftRA x y [c])) yes no) -> (NE (TSTshiftRA x y [c]) yes no) +(NE (CMPconst [0] (ANDshiftLLreg x y z)) yes no) -> (NE (TSTshiftLLreg x y z) yes no) +(NE (CMPconst [0] (ANDshiftRLreg x y z)) yes no) -> (NE (TSTshiftRLreg x y z) yes no) +(NE (CMPconst [0] (ANDshiftRAreg x y z)) yes no) -> (NE (TSTshiftRAreg x y z) yes no) +(EQ (CMPconst [0] (XOR x y)) yes no) -> (EQ (TEQ x y) yes no) +(EQ (CMPconst [0] (XORconst [c] x)) yes no) -> (EQ (TEQconst [c] x) yes no) +(EQ (CMPconst [0] (XORshiftLL x y [c])) yes no) -> (EQ (TEQshiftLL x y [c]) yes no) +(EQ (CMPconst [0] (XORshiftRL x y [c])) yes no) -> (EQ (TEQshiftRL x y [c]) yes no) +(EQ (CMPconst [0] (XORshiftRA x y [c])) yes no) -> (EQ (TEQshiftRA x y [c]) yes no) +(EQ (CMPconst [0] (XORshiftLLreg x y z)) yes no) -> (EQ (TEQshiftLLreg x y z) yes no) +(EQ (CMPconst [0] (XORshiftRLreg x y z)) yes no) -> (EQ (TEQshiftRLreg x y z) yes no) +(EQ (CMPconst [0] (XORshiftRAreg x y z)) yes no) -> (EQ (TEQshiftRAreg x y z) yes no) +(NE (CMPconst [0] (XOR x y)) yes no) -> (NE (TEQ x y) yes no) +(NE (CMPconst [0] (XORconst [c] x)) yes no) -> (NE (TEQconst [c] x) yes no) +(NE (CMPconst [0] (XORshiftLL x y [c])) yes no) -> (NE (TEQshiftLL x y [c]) yes no) +(NE (CMPconst [0] (XORshiftRL x y [c])) yes no) -> (NE 
(TEQshiftRL x y [c]) yes no) +(NE (CMPconst [0] (XORshiftRA x y [c])) yes no) -> (NE (TEQshiftRA x y [c]) yes no) +(NE (CMPconst [0] (XORshiftLLreg x y z)) yes no) -> (NE (TEQshiftLLreg x y z) yes no) +(NE (CMPconst [0] (XORshiftRLreg x y z)) yes no) -> (NE (TEQshiftRLreg x y z) yes no) +(NE (CMPconst [0] (XORshiftRAreg x y z)) yes no) -> (NE (TEQshiftRAreg x y z) yes no) diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules index 4831ff2f3f7..558e60f6e22 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM64.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules @@ -103,98 +103,68 @@ (NeqB x y) -> (XOR x y) (Not x) -> (XOR (MOVDconst [1]) x) -// constant shifts -(Lsh64x64 x (MOVDconst [c])) && uint64(c) < 64 -> (SLLconst x [c]) -(Rsh64x64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRAconst x [c]) -(Rsh64Ux64 x (MOVDconst [c])) && uint64(c) < 64 -> (SRLconst x [c]) -(Lsh32x64 x (MOVDconst [c])) && uint64(c) < 32 -> (SLLconst x [c]) -(Rsh32x64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRAconst (SignExt32to64 x) [c]) -(Rsh32Ux64 x (MOVDconst [c])) && uint64(c) < 32 -> (SRLconst (ZeroExt32to64 x) [c]) -(Lsh16x64 x (MOVDconst [c])) && uint64(c) < 16 -> (SLLconst x [c]) -(Rsh16x64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRAconst (SignExt16to64 x) [c]) -(Rsh16Ux64 x (MOVDconst [c])) && uint64(c) < 16 -> (SRLconst (ZeroExt16to64 x) [c]) -(Lsh8x64 x (MOVDconst [c])) && uint64(c) < 8 -> (SLLconst x [c]) -(Rsh8x64 x (MOVDconst [c])) && uint64(c) < 8 -> (SRAconst (SignExt8to64 x) [c]) -(Rsh8Ux64 x (MOVDconst [c])) && uint64(c) < 8 -> (SRLconst (ZeroExt8to64 x) [c]) - -// large constant shifts -(Lsh64x64 _ (MOVDconst [c])) && uint64(c) >= 64 -> (MOVDconst [0]) -(Rsh64Ux64 _ (MOVDconst [c])) && uint64(c) >= 64 -> (MOVDconst [0]) -(Lsh32x64 _ (MOVDconst [c])) && uint64(c) >= 32 -> (MOVDconst [0]) -(Rsh32Ux64 _ (MOVDconst [c])) && uint64(c) >= 32 -> (MOVDconst [0]) -(Lsh16x64 _ (MOVDconst [c])) && uint64(c) >= 16 -> (MOVDconst [0]) 
-(Rsh16Ux64 _ (MOVDconst [c])) && uint64(c) >= 16 -> (MOVDconst [0]) -(Lsh8x64 _ (MOVDconst [c])) && uint64(c) >= 8 -> (MOVDconst [0]) -(Rsh8Ux64 _ (MOVDconst [c])) && uint64(c) >= 8 -> (MOVDconst [0]) - -// large constant signed right shift, we leave the sign bit -(Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 -> (SRAconst x [63]) -(Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 -> (SRAconst (SignExt32to64 x) [63]) -(Rsh16x64 x (MOVDconst [c])) && uint64(c) >= 16 -> (SRAconst (SignExt16to64 x) [63]) -(Rsh8x64 x (MOVDconst [c])) && uint64(c) >= 8 -> (SRAconst (SignExt8to64 x) [63]) - // shifts // hardware instruction uses only the low 6 bits of the shift // we compare to 64 to ensure Go semantics for large shifts -(Lsh64x64 x y) -> (CSELULT (SLL x y) (Const64 [0]) (CMPconst [64] y)) -(Lsh64x32 x y) -> (CSELULT (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) -(Lsh64x16 x y) -> (CSELULT (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) -(Lsh64x8 x y) -> (CSELULT (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) +(Lsh64x64 x y) -> (CSELULT (SLL x y) (MOVDconst [0]) (CMPconst [64] y)) +(Lsh64x32 x y) -> (CSELULT (SLL x (ZeroExt32to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt32to64 y))) +(Lsh64x16 x y) -> (CSELULT (SLL x (ZeroExt16to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt16to64 y))) +(Lsh64x8 x y) -> (CSELULT (SLL x (ZeroExt8to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt8to64 y))) -(Lsh32x64 x y) -> (CSELULT (SLL x y) (Const64 [0]) (CMPconst [64] y)) -(Lsh32x32 x y) -> (CSELULT (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) -(Lsh32x16 x y) -> (CSELULT (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) -(Lsh32x8 x y) -> (CSELULT (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) +(Lsh32x64 x y) -> (CSELULT (SLL x y) (MOVDconst [0]) (CMPconst [64] y)) +(Lsh32x32 x y) -> (CSELULT (SLL x (ZeroExt32to64 y)) 
(MOVDconst [0]) (CMPconst [64] (ZeroExt32to64 y))) +(Lsh32x16 x y) -> (CSELULT (SLL x (ZeroExt16to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt16to64 y))) +(Lsh32x8 x y) -> (CSELULT (SLL x (ZeroExt8to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt8to64 y))) -(Lsh16x64 x y) -> (CSELULT (SLL x y) (Const64 [0]) (CMPconst [64] y)) -(Lsh16x32 x y) -> (CSELULT (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) -(Lsh16x16 x y) -> (CSELULT (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) -(Lsh16x8 x y) -> (CSELULT (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) +(Lsh16x64 x y) -> (CSELULT (SLL x y) (MOVDconst [0]) (CMPconst [64] y)) +(Lsh16x32 x y) -> (CSELULT (SLL x (ZeroExt32to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt32to64 y))) +(Lsh16x16 x y) -> (CSELULT (SLL x (ZeroExt16to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt16to64 y))) +(Lsh16x8 x y) -> (CSELULT (SLL x (ZeroExt8to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt8to64 y))) -(Lsh8x64 x y) -> (CSELULT (SLL x y) (Const64 [0]) (CMPconst [64] y)) -(Lsh8x32 x y) -> (CSELULT (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) -(Lsh8x16 x y) -> (CSELULT (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) -(Lsh8x8 x y) -> (CSELULT (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) +(Lsh8x64 x y) -> (CSELULT (SLL x y) (MOVDconst [0]) (CMPconst [64] y)) +(Lsh8x32 x y) -> (CSELULT (SLL x (ZeroExt32to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt32to64 y))) +(Lsh8x16 x y) -> (CSELULT (SLL x (ZeroExt16to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt16to64 y))) +(Lsh8x8 x y) -> (CSELULT (SLL x (ZeroExt8to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt8to64 y))) -(Rsh64Ux64 x y) -> (CSELULT (SRL x y) (Const64 [0]) (CMPconst [64] y)) -(Rsh64Ux32 x y) -> (CSELULT (SRL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) -(Rsh64Ux16 x y) -> (CSELULT (SRL x 
(ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) -(Rsh64Ux8 x y) -> (CSELULT (SRL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) +(Rsh64Ux64 x y) -> (CSELULT (SRL x y) (MOVDconst [0]) (CMPconst [64] y)) +(Rsh64Ux32 x y) -> (CSELULT (SRL x (ZeroExt32to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt32to64 y))) +(Rsh64Ux16 x y) -> (CSELULT (SRL x (ZeroExt16to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt16to64 y))) +(Rsh64Ux8 x y) -> (CSELULT (SRL x (ZeroExt8to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt8to64 y))) -(Rsh32Ux64 x y) -> (CSELULT (SRL (ZeroExt32to64 x) y) (Const64 [0]) (CMPconst [64] y)) -(Rsh32Ux32 x y) -> (CSELULT (SRL (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) -(Rsh32Ux16 x y) -> (CSELULT (SRL (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) -(Rsh32Ux8 x y) -> (CSELULT (SRL (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) +(Rsh32Ux64 x y) -> (CSELULT (SRL (ZeroExt32to64 x) y) (MOVDconst [0]) (CMPconst [64] y)) +(Rsh32Ux32 x y) -> (CSELULT (SRL (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt32to64 y))) +(Rsh32Ux16 x y) -> (CSELULT (SRL (ZeroExt32to64 x) (ZeroExt16to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt16to64 y))) +(Rsh32Ux8 x y) -> (CSELULT (SRL (ZeroExt32to64 x) (ZeroExt8to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt8to64 y))) -(Rsh16Ux64 x y) -> (CSELULT (SRL (ZeroExt16to64 x) y) (Const64 [0]) (CMPconst [64] y)) -(Rsh16Ux32 x y) -> (CSELULT (SRL (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) -(Rsh16Ux16 x y) -> (CSELULT (SRL (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) -(Rsh16Ux8 x y) -> (CSELULT (SRL (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) +(Rsh16Ux64 x y) -> (CSELULT (SRL (ZeroExt16to64 x) y) (MOVDconst [0]) (CMPconst [64] y)) 
+(Rsh16Ux32 x y) -> (CSELULT (SRL (ZeroExt16to64 x) (ZeroExt32to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt32to64 y))) +(Rsh16Ux16 x y) -> (CSELULT (SRL (ZeroExt16to64 x) (ZeroExt16to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt16to64 y))) +(Rsh16Ux8 x y) -> (CSELULT (SRL (ZeroExt16to64 x) (ZeroExt8to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt8to64 y))) -(Rsh8Ux64 x y) -> (CSELULT (SRL (ZeroExt8to64 x) y) (Const64 [0]) (CMPconst [64] y)) -(Rsh8Ux32 x y) -> (CSELULT (SRL (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) -(Rsh8Ux16 x y) -> (CSELULT (SRL (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) -(Rsh8Ux8 x y) -> (CSELULT (SRL (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) +(Rsh8Ux64 x y) -> (CSELULT (SRL (ZeroExt8to64 x) y) (MOVDconst [0]) (CMPconst [64] y)) +(Rsh8Ux32 x y) -> (CSELULT (SRL (ZeroExt8to64 x) (ZeroExt32to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt32to64 y))) +(Rsh8Ux16 x y) -> (CSELULT (SRL (ZeroExt8to64 x) (ZeroExt16to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt16to64 y))) +(Rsh8Ux8 x y) -> (CSELULT (SRL (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt8to64 y))) -(Rsh64x64 x y) -> (SRA x (CSELULT y (Const64 [63]) (CMPconst [64] y))) -(Rsh64x32 x y) -> (SRA x (CSELULT (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) -(Rsh64x16 x y) -> (SRA x (CSELULT (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) -(Rsh64x8 x y) -> (SRA x (CSELULT (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) +(Rsh64x64 x y) -> (SRA x (CSELULT y (MOVDconst [63]) (CMPconst [64] y))) +(Rsh64x32 x y) -> (SRA x (CSELULT (ZeroExt32to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt32to64 y)))) +(Rsh64x16 x y) -> (SRA x (CSELULT (ZeroExt16to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt16to64 y)))) +(Rsh64x8 x y) -> (SRA x (CSELULT (ZeroExt8to64 y) (MOVDconst [63]) (CMPconst [64] 
(ZeroExt8to64 y)))) -(Rsh32x64 x y) -> (SRA (SignExt32to64 x) (CSELULT y (Const64 [63]) (CMPconst [64] y))) -(Rsh32x32 x y) -> (SRA (SignExt32to64 x) (CSELULT (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) -(Rsh32x16 x y) -> (SRA (SignExt32to64 x) (CSELULT (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) -(Rsh32x8 x y) -> (SRA (SignExt32to64 x) (CSELULT (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) +(Rsh32x64 x y) -> (SRA (SignExt32to64 x) (CSELULT y (MOVDconst [63]) (CMPconst [64] y))) +(Rsh32x32 x y) -> (SRA (SignExt32to64 x) (CSELULT (ZeroExt32to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt32to64 y)))) +(Rsh32x16 x y) -> (SRA (SignExt32to64 x) (CSELULT (ZeroExt16to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt16to64 y)))) +(Rsh32x8 x y) -> (SRA (SignExt32to64 x) (CSELULT (ZeroExt8to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt8to64 y)))) -(Rsh16x64 x y) -> (SRA (SignExt16to64 x) (CSELULT y (Const64 [63]) (CMPconst [64] y))) -(Rsh16x32 x y) -> (SRA (SignExt16to64 x) (CSELULT (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) -(Rsh16x16 x y) -> (SRA (SignExt16to64 x) (CSELULT (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) -(Rsh16x8 x y) -> (SRA (SignExt16to64 x) (CSELULT (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) +(Rsh16x64 x y) -> (SRA (SignExt16to64 x) (CSELULT y (MOVDconst [63]) (CMPconst [64] y))) +(Rsh16x32 x y) -> (SRA (SignExt16to64 x) (CSELULT (ZeroExt32to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt32to64 y)))) +(Rsh16x16 x y) -> (SRA (SignExt16to64 x) (CSELULT (ZeroExt16to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt16to64 y)))) +(Rsh16x8 x y) -> (SRA (SignExt16to64 x) (CSELULT (ZeroExt8to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt8to64 y)))) -(Rsh8x64 x y) -> (SRA (SignExt8to64 x) (CSELULT y (Const64 [63]) (CMPconst [64] y))) -(Rsh8x32 x y) -> (SRA (SignExt8to64 x) (CSELULT (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] 
(ZeroExt32to64 y)))) -(Rsh8x16 x y) -> (SRA (SignExt8to64 x) (CSELULT (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) -(Rsh8x8 x y) -> (SRA (SignExt8to64 x) (CSELULT (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) +(Rsh8x64 x y) -> (SRA (SignExt8to64 x) (CSELULT y (MOVDconst [63]) (CMPconst [64] y))) +(Rsh8x32 x y) -> (SRA (SignExt8to64 x) (CSELULT (ZeroExt32to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt32to64 y)))) +(Rsh8x16 x y) -> (SRA (SignExt8to64 x) (CSELULT (ZeroExt16to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt16to64 y)))) +(Rsh8x8 x y) -> (SRA (SignExt8to64 x) (CSELULT (ZeroExt8to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt8to64 y)))) // constants (Const64 [val]) -> (MOVDconst [val]) @@ -365,36 +335,69 @@ (MOVBstore [6] ptr (MOVDconst [0]) (MOVHstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))) +(Zero [9] ptr mem) -> + (MOVBstore [8] ptr (MOVDconst [0]) + (MOVDstore ptr (MOVDconst [0]) mem)) +(Zero [10] ptr mem) -> + (MOVHstore [8] ptr (MOVDconst [0]) + (MOVDstore ptr (MOVDconst [0]) mem)) +(Zero [11] ptr mem) -> + (MOVBstore [10] ptr (MOVDconst [0]) + (MOVHstore [8] ptr (MOVDconst [0]) + (MOVDstore ptr (MOVDconst [0]) mem))) (Zero [12] ptr mem) -> (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) -(Zero [16] ptr mem) -> - (MOVDstore [8] ptr (MOVDconst [0]) - (MOVDstore ptr (MOVDconst [0]) mem)) -(Zero [24] ptr mem) -> - (MOVDstore [16] ptr (MOVDconst [0]) - (MOVDstore [8] ptr (MOVDconst [0]) +(Zero [13] ptr mem) -> + (MOVBstore [12] ptr (MOVDconst [0]) + (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))) +(Zero [14] ptr mem) -> + (MOVHstore [12] ptr (MOVDconst [0]) + (MOVWstore [8] ptr (MOVDconst [0]) + (MOVDstore ptr (MOVDconst [0]) mem))) +(Zero [15] ptr mem) -> + (MOVBstore [14] ptr (MOVDconst [0]) + (MOVHstore [12] ptr (MOVDconst [0]) + (MOVWstore [8] ptr (MOVDconst [0]) + (MOVDstore ptr (MOVDconst [0]) mem)))) +(Zero [16] ptr mem) -> + (STP [0] 
ptr (MOVDconst [0]) (MOVDconst [0]) mem) + +(Zero [32] ptr mem) -> + (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) + (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)) + +(Zero [48] ptr mem) -> + (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) + (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) + (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))) + +(Zero [64] ptr mem) -> + (STP [48] ptr (MOVDconst [0]) (MOVDconst [0]) + (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) + (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) + (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))) // strip off fractional word zeroing -(Zero [s] ptr mem) && s%8 != 0 && s > 8 -> - (Zero [s%8] - (OffPtr ptr [s-s%8]) - (Zero [s-s%8] ptr mem)) +(Zero [s] ptr mem) && s%16 != 0 && s > 16 -> + (Zero [s-s%16] + (OffPtr ptr [s%16]) + (Zero [s%16] ptr mem)) // medium zeroing uses a duff device -// 4, 8, and 128 are magic constants, see runtime/mkduff.go +// 4, 16, and 64 are magic constants, see runtime/mkduff.go (Zero [s] ptr mem) - && s%8 == 0 && s > 24 && s <= 8*128 + && s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice -> - (DUFFZERO [4 * (128 - int64(s/8))] ptr mem) + (DUFFZERO [4 * (64 - int64(s/16))] ptr mem) // large zeroing uses a loop (Zero [s] ptr mem) - && s%8 == 0 && (s > 8*128 || config.noDuffDevice) -> + && s%16 == 0 && (s > 16*64 || config.noDuffDevice) -> (LoweredZero ptr - (ADDconst [s-8] ptr) + (ADDconst [s-16] ptr) mem) // moves @@ -464,6 +467,7 @@ // pseudo-ops (GetClosurePtr) -> (LoweredGetClosurePtr) +(GetCallerSP) -> (LoweredGetCallerSP) (Convert x mem) -> (MOVDconvert x mem) // Absorb pseudo-ops into blocks. 
@@ -521,6 +525,12 @@ (EQ (CMPWconst [0] x) yes no) -> (ZW x yes no) (NE (CMPWconst [0] x) yes no) -> (NZW x yes no) +// Absorb bit-tests into block +(Z (ANDconst [c] x) yes no) && oneBit(c) -> (TBZ {ntz(c)} x yes no) +(NZ (ANDconst [c] x) yes no) && oneBit(c) -> (TBNZ {ntz(c)} x yes no) +(ZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) -> (TBZ {ntz(int64(uint32(c)))} x yes no) +(NZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) -> (TBNZ {ntz(int64(uint32(c)))} x yes no) + // fold offset into address (ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) -> (MOVDaddr [off1+off2] {sym} ptr) @@ -565,6 +575,9 @@ (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) -> (MOVDstore [off1+off2] {sym} ptr val mem) +(STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem) && is32Bit(off1+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) -> + (STP [off1+off2] {sym} ptr val1 val2 mem) (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) -> (FMOVSstore [off1+off2] {sym} ptr val mem) @@ -583,6 +596,9 @@ (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) -> (MOVDstorezero [off1+off2] {sym} ptr mem) +(MOVQstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(off1+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) -> + (MOVQstorezero [off1+off2] {sym} ptr mem) (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) @@ -637,6 +653,10 @@ && canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) -> (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) +(STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem) + && canMergeSym(sym1,sym2) && is32Bit(off1+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) -> + (STP [off1+off2] {mergeSym(sym1,sym2)} ptr 
val1 val2 mem) (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) -> @@ -661,12 +681,17 @@ && canMergeSym(sym1,sym2) && is32Bit(off1+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) -> (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(off1+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) -> + (MOVQstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // store zero (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem) (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem) (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem) (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem) +(STP [off] {sym} ptr (MOVDconst [0]) (MOVDconst [0]) mem) -> (MOVQstorezero [off] {sym} ptr mem) // replace load from same location as preceding store with zero/sign extension (or copy in case of full width) // these seem to have bad interaction with other rules, resulting in slower code @@ -1027,6 +1052,10 @@ (GreaterEqual (InvertFlags x)) -> (LessEqual x) (GreaterEqualU (InvertFlags x)) -> (LessEqualU x) +// Boolean-generating instructions always +// zero upper bit of the register; no need to zero-extend +(MOVBUreg x) && x.Type.IsBoolean() -> (MOVDreg x) + // absorb flag constants into conditional instructions (CSELULT _ y (FlagEQ)) -> y (CSELULT x _ (FlagLT_ULT)) -> x diff --git a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go index 832bea227c2..10a19cbd41d 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go @@ -144,6 +144,7 @@ func init() { gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}} gpstore = 
regInfo{inputs: []regMask{gpspsbg, gpg}} gpstore0 = regInfo{inputs: []regMask{gpspsbg}} + gpstore2 = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}} gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}} gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}} fp01 = regInfo{inputs: nil, outputs: []regMask{fp}} @@ -275,13 +276,15 @@ func init() { {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. {name: "MOVDstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "STP", argLength: 4, reg: gpstore2, aux: "SymOff", asm: "STP", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes of arg1 and arg2 to arg0 + auxInt + aux. arg3=mem. {name: "FMOVSstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "FMOVS", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. {name: "FMOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "FMOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem. {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem. 
{name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem. - {name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of zero to arg0 + auxInt + aux. ar12=mem. + {name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of zero to arg0 + auxInt + aux. arg1=mem. + {name: "MOVQstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "STP", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes of zero to arg0 + auxInt + aux. arg1=mem. // conversions {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte @@ -347,7 +350,7 @@ func init() { aux: "Int64", argLength: 2, reg: regInfo{ - inputs: []regMask{gp}, + inputs: []regMask{buildReg("R16")}, clobbers: buildReg("R16 R30"), }, faultOnNilArg0: true, @@ -355,14 +358,14 @@ func init() { // large zeroing // arg0 = address of memory to zero (in R16 aka arm64.REGRT1, changed as side effect) - // arg1 = address of the last element to zero + // arg1 = address of the last 16-byte unit to zero // arg2 = mem // returns mem - // MOVD.P ZR, 8(R16) + // STP.P (ZR,ZR), 16(R16) // CMP Rarg1, R16 // BLE -2(PC) // Note: the-end-of-the-memory may be not a valid pointer. it's a problem if it is spilled. - // the-end-of-the-memory - 8 is with the area to zero, ok to spill. + // the-end-of-the-memory - 16 is with the area to zero, ok to spill. { name: "LoweredZero", argLength: 3, @@ -422,6 +425,9 @@ func init() { // use of R26 (arm64.REGCTXT, the closure pointer) {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R26")}}}, + // LoweredGetCallerSP returns the SP of the caller of the current function. 
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true}, + // MOVDconvert converts between pointers and integers. // We have a special op for this so as to not confuse GC // (particularly stack maps). It takes a memory arg so it @@ -512,10 +518,12 @@ func init() { {name: "ULE"}, {name: "UGT"}, {name: "UGE"}, - {name: "Z"}, // Control == 0 (take a register instead of flags) - {name: "NZ"}, // Control != 0 - {name: "ZW"}, // Control == 0, 32-bit - {name: "NZW"}, // Control != 0, 32-bit + {name: "Z"}, // Control == 0 (take a register instead of flags) + {name: "NZ"}, // Control != 0 + {name: "ZW"}, // Control == 0, 32-bit + {name: "NZW"}, // Control != 0, 32-bit + {name: "TBZ"}, // Control & (1 << Aux.(int64)) == 0 + {name: "TBNZ"}, // Control & (1 << Aux.(int64)) != 0 } archs = append(archs, arch{ diff --git a/src/cmd/compile/internal/ssa/gen/ARMOps.go b/src/cmd/compile/internal/ssa/gen/ARMOps.go index 40be5d66477..d16675fddb2 100644 --- a/src/cmd/compile/internal/ssa/gen/ARMOps.go +++ b/src/cmd/compile/internal/ssa/gen/ARMOps.go @@ -122,6 +122,7 @@ func init() { fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}, clobbers: buildReg("F15")} // int-float conversion uses F15 as tmp gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}, clobbers: buildReg("F15")} fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}} + fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}} fp2flags = regInfo{inputs: []regMask{fp, fp}} fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}} fpstore = regInfo{inputs: []regMask{gpspsbg, fp}} @@ -168,15 +169,23 @@ func init() { {name: "MULLU", argLength: 2, reg: gp22, asm: "MULLU", commutative: true}, // arg0 * arg1, high 32 bits in out0, low 32 bits in out1 {name: "MULA", argLength: 3, reg: gp31, asm: "MULA"}, // arg0 * arg1 + arg2 + {name: "MULS", argLength: 3, reg: gp31, asm: "MULS"}, // arg2 - arg0 * arg1 - {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", 
commutative: true}, // arg0 + arg1 - {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1 - {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1 - {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1 - {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1 - {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1 - {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1 - {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1 + {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1 + {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1 + {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1 + {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1 + {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1 + {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1 + {name: "NMULF", argLength: 2, reg: fp21, asm: "NMULF", commutative: true}, // -(arg0 * arg1) + {name: "NMULD", argLength: 2, reg: fp21, asm: "NMULD", commutative: true}, // -(arg0 * arg1) + {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1 + {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1 + + {name: "MULAF", argLength: 3, reg: fp31, asm: "MULAF", resultInArg0: true}, // arg0 + (arg1 * arg2) + {name: "MULAD", argLength: 3, reg: fp31, asm: "MULAD", resultInArg0: true}, // arg0 + (arg1 * arg2) + {name: "MULSF", argLength: 3, reg: fp31, asm: "MULSF", resultInArg0: true}, // arg0 - (arg1 * arg2) + {name: "MULSD", argLength: 3, reg: fp31, asm: "MULSD", resultInArg0: true}, // arg0 - (arg1 * arg2) {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1 {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int32"}, // arg0 & 
auxInt @@ -187,6 +196,10 @@ func init() { {name: "BIC", argLength: 2, reg: gp21, asm: "BIC"}, // arg0 &^ arg1 {name: "BICconst", argLength: 1, reg: gp11, asm: "BIC", aux: "Int32"}, // arg0 &^ auxInt + // bit extraction, AuxInt = Width<<8 | LSB + {name: "BFX", argLength: 1, reg: gp11, asm: "BFX", aux: "Int32"}, // extract W bits from bit L in arg0, then signed extend + {name: "BFXU", argLength: 1, reg: gp11, asm: "BFXU", aux: "Int32"}, // extract W bits from bit L in arg0, then unsigned extend + // unary ops {name: "MVN", argLength: 1, reg: gp11, asm: "MVN"}, // ^arg0 @@ -301,7 +314,7 @@ func init() { // comparisons {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt - {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags"}, // arg0 compare to -arg1 + {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags", commutative: true}, // arg0 compare to -arg1 {name: "CMNconst", argLength: 1, reg: gp1flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -auxInt {name: "TST", argLength: 2, reg: gp2flags, asm: "TST", typ: "Flags", commutative: true}, // arg0 & arg1 compare to 0 {name: "TSTconst", argLength: 1, reg: gp1flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & auxInt compare to 0 @@ -313,10 +326,28 @@ func init() { {name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1<>auxInt, unsigned shift {name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift + {name: "CMNshiftLL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -(arg1<>auxInt), unsigned shift + {name: "CMNshiftRA", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -(arg1>>auxInt), signed shift + 
{name: "TSTshiftLL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & (arg1<>auxInt) compare to 0, unsigned shift + {name: "TSTshiftRA", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & (arg1>>auxInt) compare to 0, signed shift + {name: "TEQshiftLL", argLength: 2, reg: gp2flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ (arg1<>auxInt) compare to 0, unsigned shift + {name: "TEQshiftRA", argLength: 2, reg: gp2flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ (arg1>>auxInt) compare to 0, signed shift {name: "CMPshiftLLreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1<>arg2, unsigned shift {name: "CMPshiftRAreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1>>arg2, signed shift + {name: "CMNshiftLLreg", argLength: 3, reg: gp3flags, asm: "CMN", typ: "Flags"}, // arg0 + (arg1<>arg2) compare to 0, unsigned shift + {name: "CMNshiftRAreg", argLength: 3, reg: gp3flags, asm: "CMN", typ: "Flags"}, // arg0 + (arg1>>arg2) compare to 0, signed shift + {name: "TSTshiftLLreg", argLength: 3, reg: gp3flags, asm: "TST", typ: "Flags"}, // arg0 & (arg1<>arg2) compare to 0, unsigned shift + {name: "TSTshiftRAreg", argLength: 3, reg: gp3flags, asm: "TST", typ: "Flags"}, // arg0 & (arg1>>arg2) compare to 0, signed shift + {name: "TEQshiftLLreg", argLength: 3, reg: gp3flags, asm: "TEQ", typ: "Flags"}, // arg0 ^ (arg1<>arg2) compare to 0, unsigned shift + {name: "TEQshiftRAreg", argLength: 3, reg: gp3flags, asm: "TEQ", typ: "Flags"}, // arg0 ^ (arg1>>arg2) compare to 0, signed shift {name: "CMPF0", argLength: 1, reg: fp1flags, asm: "CMPF", typ: "Flags"}, // arg0 compare to 0, float32 {name: "CMPD0", argLength: 1, reg: fp1flags, asm: "CMPD", typ: "Flags"}, // arg0 compare to 0, float64 @@ -346,11 +377,17 @@ func init() { {name: "MOVWloadshiftLL", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32"}, // load from arg0 + arg1<>auxInt, unsigned 
shift. arg2=mem {name: "MOVWloadshiftRA", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32"}, // load from arg0 + arg1>>auxInt, signed shift. arg2=mem + {name: "MOVBUloadidx", argLength: 3, reg: gp2load, asm: "MOVBU"}, // load from arg0 + arg1. arg2=mem + {name: "MOVBloadidx", argLength: 3, reg: gp2load, asm: "MOVB"}, // load from arg0 + arg1. arg2=mem + {name: "MOVHUloadidx", argLength: 3, reg: gp2load, asm: "MOVHU"}, // load from arg0 + arg1. arg2=mem + {name: "MOVHloadidx", argLength: 3, reg: gp2load, asm: "MOVH"}, // load from arg0 + arg1. arg2=mem {name: "MOVWstoreidx", argLength: 4, reg: gp2store, asm: "MOVW"}, // store arg2 to arg0 + arg1. arg3=mem {name: "MOVWstoreshiftLL", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32"}, // store arg2 to arg0 + arg1<>auxInt, unsigned shift. arg3=mem {name: "MOVWstoreshiftRA", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32"}, // store arg2 to arg0 + arg1>>auxInt, signed shift. arg3=mem + {name: "MOVBstoreidx", argLength: 4, reg: gp2store, asm: "MOVB"}, // store arg2 to arg0 + arg1. arg3=mem + {name: "MOVHstoreidx", argLength: 4, reg: gp2store, asm: "MOVH"}, // store arg2 to arg0 + arg1. arg3=mem {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVBS"}, // move from arg0, sign-extended from byte {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte @@ -479,6 +516,9 @@ func init() { // use of R7 (arm.REGCTXT, the closure pointer) {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R7")}}}, + // LoweredGetCallerSP returns the SP of the caller of the current function. + {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true}, + // MOVWconvert converts between pointers and integers. // We have a special op for this so as to not confuse GC // (particularly stack maps). 
It takes a memory arg so it diff --git a/src/cmd/compile/internal/ssa/gen/MIPS.rules b/src/cmd/compile/internal/ssa/gen/MIPS.rules index 60a37224087..2e7a0230b8b 100644 --- a/src/cmd/compile/internal/ssa/gen/MIPS.rules +++ b/src/cmd/compile/internal/ssa/gen/MIPS.rules @@ -437,6 +437,7 @@ // pseudo-ops (GetClosurePtr) -> (LoweredGetClosurePtr) +(GetCallerSP) -> (LoweredGetCallerSP) (Convert x mem) -> (MOVWconvert x mem) (If cond yes no) -> (NE cond yes no) diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64.rules b/src/cmd/compile/internal/ssa/gen/MIPS64.rules index 2a3a9c20181..a95b1fc0f97 100644 --- a/src/cmd/compile/internal/ssa/gen/MIPS64.rules +++ b/src/cmd/compile/internal/ssa/gen/MIPS64.rules @@ -71,65 +71,65 @@ // shifts // hardware instruction uses only the low 6 bits of the shift // we compare to 64 to ensure Go semantics for large shifts -(Lsh64x64 x y) -> (AND (NEGV (SGTU (Const64 [64]) y)) (SLLV x y)) -(Lsh64x32 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) -(Lsh64x16 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) -(Lsh64x8 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) +(Lsh64x64 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) +(Lsh64x32 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) +(Lsh64x16 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) +(Lsh64x8 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) -(Lsh32x64 x y) -> (AND (NEGV (SGTU (Const64 [64]) y)) (SLLV x y)) -(Lsh32x32 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) -(Lsh32x16 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) -(Lsh32x8 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) +(Lsh32x64 x y) -> (AND (NEGV (SGTU (MOVVconst 
[64]) y)) (SLLV x y)) +(Lsh32x32 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) +(Lsh32x16 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) +(Lsh32x8 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) -(Lsh16x64 x y) -> (AND (NEGV (SGTU (Const64 [64]) y)) (SLLV x y)) -(Lsh16x32 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) -(Lsh16x16 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) -(Lsh16x8 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) +(Lsh16x64 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) +(Lsh16x32 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) +(Lsh16x16 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) +(Lsh16x8 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) -(Lsh8x64 x y) -> (AND (NEGV (SGTU (Const64 [64]) y)) (SLLV x y)) -(Lsh8x32 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) -(Lsh8x16 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) -(Lsh8x8 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) +(Lsh8x64 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) +(Lsh8x32 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) +(Lsh8x16 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) +(Lsh8x8 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) -(Rsh64Ux64 x y) -> (AND (NEGV (SGTU (Const64 [64]) y)) (SRLV x y)) -(Rsh64Ux32 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt32to64 y))) (SRLV x (ZeroExt32to64 y))) -(Rsh64Ux16 x y) -> (AND (NEGV (SGTU (Const64 [64]) 
(ZeroExt16to64 y))) (SRLV x (ZeroExt16to64 y))) -(Rsh64Ux8 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SRLV x (ZeroExt8to64 y))) +(Rsh64Ux64 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV x y)) +(Rsh64Ux32 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV x (ZeroExt32to64 y))) +(Rsh64Ux16 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV x (ZeroExt16to64 y))) +(Rsh64Ux8 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV x (ZeroExt8to64 y))) -(Rsh32Ux64 x y) -> (AND (NEGV (SGTU (Const64 [64]) y)) (SRLV (ZeroExt32to64 x) y)) -(Rsh32Ux32 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y))) -(Rsh32Ux16 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y))) -(Rsh32Ux8 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y))) +(Rsh32Ux64 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt32to64 x) y)) +(Rsh32Ux32 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Rsh32Ux16 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y))) +(Rsh32Ux8 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y))) -(Rsh16Ux64 x y) -> (AND (NEGV (SGTU (Const64 [64]) y)) (SRLV (ZeroExt16to64 x) y)) -(Rsh16Ux32 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt32to64 y))) -(Rsh16Ux16 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y))) -(Rsh16Ux8 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y))) +(Rsh16Ux64 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt16to64 x) y)) +(Rsh16Ux32 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt16to64 x) 
(ZeroExt32to64 y))) +(Rsh16Ux16 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Rsh16Ux8 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y))) -(Rsh8Ux64 x y) -> (AND (NEGV (SGTU (Const64 [64]) y)) (SRLV (ZeroExt8to64 x) y)) -(Rsh8Ux32 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt32to64 y))) -(Rsh8Ux16 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y))) -(Rsh8Ux8 x y) -> (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y))) +(Rsh8Ux64 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt8to64 x) y)) +(Rsh8Ux32 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt32to64 y))) +(Rsh8Ux16 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y))) +(Rsh8Ux8 x y) -> (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y))) -(Rsh64x64 x y) -> (SRAV x (OR (NEGV (SGTU y (Const64 [63]))) y)) -(Rsh64x32 x y) -> (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (Const64 [63]))) (ZeroExt32to64 y))) -(Rsh64x16 x y) -> (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (Const64 [63]))) (ZeroExt16to64 y))) -(Rsh64x8 x y) -> (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (Const64 [63]))) (ZeroExt8to64 y))) +(Rsh64x64 x y) -> (SRAV x (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh64x32 x y) -> (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh64x16 x y) -> (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh64x8 x y) -> (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) -(Rsh32x64 x y) -> (SRAV (SignExt32to64 x) (OR (NEGV (SGTU y (Const64 [63]))) y)) -(Rsh32x32 x y) -> (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (Const64 
[63]))) (ZeroExt32to64 y))) -(Rsh32x16 x y) -> (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (Const64 [63]))) (ZeroExt16to64 y))) -(Rsh32x8 x y) -> (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (Const64 [63]))) (ZeroExt8to64 y))) +(Rsh32x64 x y) -> (SRAV (SignExt32to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh32x32 x y) -> (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh32x16 x y) -> (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh32x8 x y) -> (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) -(Rsh16x64 x y) -> (SRAV (SignExt16to64 x) (OR (NEGV (SGTU y (Const64 [63]))) y)) -(Rsh16x32 x y) -> (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (Const64 [63]))) (ZeroExt32to64 y))) -(Rsh16x16 x y) -> (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (Const64 [63]))) (ZeroExt16to64 y))) -(Rsh16x8 x y) -> (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (Const64 [63]))) (ZeroExt8to64 y))) +(Rsh16x64 x y) -> (SRAV (SignExt16to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh16x32 x y) -> (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh16x16 x y) -> (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh16x8 x y) -> (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) -(Rsh8x64 x y) -> (SRAV (SignExt8to64 x) (OR (NEGV (SGTU y (Const64 [63]))) y)) -(Rsh8x32 x y) -> (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (Const64 [63]))) (ZeroExt32to64 y))) -(Rsh8x16 x y) -> (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (Const64 [63]))) (ZeroExt16to64 y))) -(Rsh8x8 x y) -> (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (Const64 [63]))) (ZeroExt8to64 y))) +(Rsh8x64 x y) -> (SRAV (SignExt8to64 x) (OR (NEGV 
(SGTU y (MOVVconst [63]))) y)) +(Rsh8x32 x y) -> (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh8x16 x y) -> (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh8x8 x y) -> (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) // unary ops (Neg64 x) -> (NEGV x) @@ -419,6 +419,24 @@ (ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem) (InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem) +// atomic intrinsics +(AtomicLoad32 ptr mem) -> (LoweredAtomicLoad32 ptr mem) +(AtomicLoad64 ptr mem) -> (LoweredAtomicLoad64 ptr mem) +(AtomicLoadPtr ptr mem) -> (LoweredAtomicLoad64 ptr mem) + +(AtomicStore32 ptr val mem) -> (LoweredAtomicStore32 ptr val mem) +(AtomicStore64 ptr val mem) -> (LoweredAtomicStore64 ptr val mem) +(AtomicStorePtrNoWB ptr val mem) -> (LoweredAtomicStore64 ptr val mem) + +(AtomicExchange32 ptr val mem) -> (LoweredAtomicExchange32 ptr val mem) +(AtomicExchange64 ptr val mem) -> (LoweredAtomicExchange64 ptr val mem) + +(AtomicAdd32 ptr val mem) -> (LoweredAtomicAdd32 ptr val mem) +(AtomicAdd64 ptr val mem) -> (LoweredAtomicAdd64 ptr val mem) + +(AtomicCompareAndSwap32 ptr old new_ mem) -> (LoweredAtomicCas32 ptr old new_ mem) +(AtomicCompareAndSwap64 ptr old new_ mem) -> (LoweredAtomicCas64 ptr old new_ mem) + // checks (NilCheck ptr mem) -> (LoweredNilCheck ptr mem) (IsNonNil ptr) -> (SGTU ptr (MOVVconst [0])) @@ -427,6 +445,7 @@ // pseudo-ops (GetClosurePtr) -> (LoweredGetClosurePtr) +(GetCallerSP) -> (LoweredGetCallerSP) (Convert x mem) -> (MOVVconvert x mem) (If cond yes no) -> (NE cond yes no) @@ -661,6 +680,10 @@ (MOVWreg (MOVVconst [c])) -> (MOVVconst [int64(int32(c))]) (MOVWUreg (MOVVconst [c])) -> (MOVVconst [int64(uint32(c))]) (MOVVreg (MOVVconst [c])) -> (MOVVconst [c]) +(LoweredAtomicStore32 ptr (MOVVconst [0]) mem) -> (LoweredAtomicStorezero32 ptr mem) 
+(LoweredAtomicStore64 ptr (MOVVconst [0]) mem) -> (LoweredAtomicStorezero64 ptr mem) +(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) -> (LoweredAtomicAddconst32 [c] ptr mem) +(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) -> (LoweredAtomicAddconst64 [c] ptr mem) // constant comparisons (SGTconst [c] (MOVVconst [d])) && int64(c)>int64(d) -> (MOVVconst [1]) diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go b/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go index b0e6564d521..592a85eafb7 100644 --- a/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go @@ -147,6 +147,8 @@ func init() { gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}} gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}} gpstore0 = regInfo{inputs: []regMask{gpspsbg}} + gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}} + gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}} fp01 = regInfo{inputs: nil, outputs: []regMask{fp}} fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}} //fp1flags = regInfo{inputs: []regMask{fp}} @@ -161,7 +163,7 @@ func init() { ops := []opData{ // binary ops {name: "ADDV", argLength: 2, reg: gp21, asm: "ADDVU", commutative: true}, // arg0 + arg1 - {name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"}, // arg0 + auxInt + {name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"}, // arg0 + auxInt. auxInt is 32-bit, also in other *const ops. {name: "SUBV", argLength: 2, reg: gp21, asm: "SUBVU"}, // arg0 - arg1 {name: "SUBVconst", argLength: 1, reg: gp11, asm: "SUBVU", aux: "Int64"}, // arg0 - auxInt {name: "MULV", argLength: 2, reg: gp2hilo, asm: "MULV", commutative: true, typ: "(Int64,Int64)"}, // arg0 * arg1, signed, results hi,lo @@ -333,6 +335,65 @@ func init() { faultOnNilArg1: true, }, + // atomic loads. + // load from arg0. arg1=mem. 
+ // returns so they can be properly ordered with other loads. + {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true}, + {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true}, + + // atomic stores. + // store arg1 to arg0. arg2=mem. returns memory. + {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, + // store zero to arg0. arg1=mem. returns memory. + {name: "LoweredAtomicStorezero32", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicStorezero64", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true}, + + // atomic exchange. + // store arg1 to arg0. arg2=mem. returns . + // SYNC + // LL (Rarg0), Rout + // MOVV Rarg1, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // SYNC + {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, + + // atomic add. + // *arg0 += arg1. arg2=mem. returns . + // SYNC + // LL (Rarg0), Rout + // ADDV Rarg1, Rout, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // SYNC + // ADDV Rarg1, Rout + {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, + // *arg0 += auxint. arg1=mem. returns . auxint is 32-bit. 
+ {name: "LoweredAtomicAddconst32", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int32", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicAddconst64", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int64", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, + + // atomic compare and swap. + // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. + // if *arg0 == arg1 { + // *arg0 = arg2 + // return (true, memory) + // } else { + // return (false, memory) + // } + // SYNC + // MOVV $0, Rout + // LL (Rarg0), Rtmp + // BNE Rtmp, Rarg1, 4(PC) + // MOVV Rarg2, Rout + // SC Rout, (Rarg0) + // BEQ Rout, -4(PC) + // SYNC + {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, + // pseudo-ops {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem. @@ -344,6 +405,9 @@ func init() { // use of R22 (mips.REGCTXT, the closure pointer) {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R22")}}}, + // LoweredGetCallerSP returns the SP of the caller of the current function. + {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true}, + // MOVDconvert converts between pointers and integers. // We have a special op for this so as to not confuse GC // (particularly stack maps). 
It takes a memory arg so it diff --git a/src/cmd/compile/internal/ssa/gen/MIPSOps.go b/src/cmd/compile/internal/ssa/gen/MIPSOps.go index b632c6bfdc1..155a20bbad2 100644 --- a/src/cmd/compile/internal/ssa/gen/MIPSOps.go +++ b/src/cmd/compile/internal/ssa/gen/MIPSOps.go @@ -376,6 +376,9 @@ func init() { // use of R22 (mips.REGCTXT, the closure pointer) {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R22")}}}, + // LoweredGetCallerSP returns the SP of the caller of the current function. + {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true}, + // MOVWconvert converts between pointers and integers. // We have a special op for this so as to not confuse GC // (particularly stack maps). It takes a memory arg so it diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules index 45853b4b481..4fd6a5a1022 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules @@ -57,15 +57,15 @@ (Div64F x y) -> (FDIV x y) // Lowering float <-> int -(Cvt32to32F x) -> (FRSP (FCFID (Xi2f64 (SignExt32to64 x)))) -(Cvt32to64F x) -> (FCFID (Xi2f64 (SignExt32to64 x))) -(Cvt64to32F x) -> (FRSP (FCFID (Xi2f64 x))) -(Cvt64to64F x) -> (FCFID (Xi2f64 x)) +(Cvt32to32F x) -> (FCFIDS (MTVSRD (SignExt32to64 x))) +(Cvt32to64F x) -> (FCFID (MTVSRD (SignExt32to64 x))) +(Cvt64to32F x) -> (FCFIDS (MTVSRD x)) +(Cvt64to64F x) -> (FCFID (MTVSRD x)) -(Cvt32Fto32 x) -> (Xf2i64 (FCTIWZ x)) -(Cvt32Fto64 x) -> (Xf2i64 (FCTIDZ x)) -(Cvt64Fto32 x) -> (Xf2i64 (FCTIWZ x)) -(Cvt64Fto64 x) -> (Xf2i64 (FCTIDZ x)) +(Cvt32Fto32 x) -> (MFVSRD (FCTIWZ x)) +(Cvt32Fto64 x) -> (MFVSRD (FCTIDZ x)) +(Cvt64Fto32 x) -> (MFVSRD (FCTIWZ x)) +(Cvt64Fto64 x) -> (MFVSRD (FCTIDZ x)) (Cvt32Fto64F x) -> x // Note x will have the wrong type for patterns dependent on Float32/Float64 (Cvt64Fto32F x) -> (FRSP x) @@ -74,6 +74,11 @@ (Round64F x) -> (LoweredRound64F x) (Sqrt x) -> (FSQRT x) +(Floor x) -> (FFLOOR x) 
+(Ceil x) -> (FCEIL x) +(Trunc x) -> (FTRUNC x) +(Copysign x y) -> (FCPSGN y x) +(Abs x) -> (FABS x) // Lowering constants (Const8 [val]) -> (MOVDconst [val]) @@ -85,7 +90,14 @@ (ConstNil) -> (MOVDconst [0]) (ConstBool [b]) -> (MOVDconst [b]) -// Rotate generation +// Constant folding +(FABS (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Abs(i2f(x)))]) +(FSQRT (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Sqrt(i2f(x)))]) +(FFLOOR (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Floor(i2f(x)))]) +(FCEIL (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Ceil(i2f(x)))]) +(FTRUNC (FMOVDconst [x])) -> (FMOVDconst [f2i(math.Trunc(i2f(x)))]) + +// Rotate generation with const shift (ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x) ( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x) (XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c -> (ROTLconst [c] x) @@ -94,6 +106,16 @@ ( OR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x) (XOR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c -> (ROTLWconst [c] x) +// Rotate generation with non-const shift +// these match patterns from math/bits/RotateLeft[32|64], but there could be others +(ADD (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) -> (ROTL x y) +( OR (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) -> (ROTL x y) +(XOR (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) -> (ROTL x y) + +(ADD (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) -> (ROTLW x y) +( OR (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) -> (ROTLW x y) +(XOR (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) -> (ROTLW x y) + (Lsh64x64 x (Const64 [c])) && uint64(c) < 64 -> (SLDconst x [c]) (Rsh64x64 x (Const64 [c])) && uint64(c) < 64 -> (SRADconst x [c]) (Rsh64Ux64 x (Const64 [c])) && uint64(c) < 64 -> (SRDconst x [c]) @@ -163,10 +185,38 @@ (Rsh8x32 x 
(MOVDconst [c])) && uint32(c) < 8 -> (SRAWconst (SignExt8to32 x) [c]) (Rsh8Ux32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c]) +// non-constant rotates +// These are subexpressions found in statements that can become rotates +// In these cases the shift count is known to be < 64 so the more complicated expressions +// with Mask & Carry is not needed +(Lsh64x64 x (AND y (MOVDconst [63]))) -> (SLD x (ANDconst [63] y)) +(Lsh64x64 x (ANDconst [63] y)) -> (SLD x (ANDconst [63] y)) +(Rsh64Ux64 x (AND y (MOVDconst [63]))) -> (SRD x (ANDconst [63] y)) +(Rsh64Ux64 x (ANDconst [63] y)) -> (SRD x (ANDconst [63] y)) +(Rsh64Ux64 x (SUB (MOVDconst [64]) (ANDconst [63] y))) -> (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) +(Rsh64Ux64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) -> (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) +(Rsh64x64 x (AND y (MOVDconst [63]))) -> (SRAD x (ANDconst [63] y)) +(Rsh64x64 x (ANDconst [63] y)) -> (SRAD x (ANDconst [63] y)) +(Rsh64x64 x (SUB (MOVDconst [64]) (ANDconst [63] y))) -> (SRAD x (SUB (MOVDconst [64]) (ANDconst [63] y))) +(Rsh64x64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) -> (SRAD x (SUB (MOVDconst [64]) (ANDconst [63] y))) + (Rsh64x64 x y) -> (SRAD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] y)))) (Rsh64Ux64 x y) -> (SRD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] y)))) (Lsh64x64 x y) -> (SLD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] y)))) +(Lsh32x64 x (AND y (MOVDconst [31]))) -> (SLW x (ANDconst [31] y)) +(Lsh32x64 x (ANDconst [31] y)) -> (SLW x (ANDconst [31] y)) + +(Rsh32Ux64 x (AND y (MOVDconst [31]))) -> (SRW x (ANDconst [31] y)) +(Rsh32Ux64 x (ANDconst [31] y)) -> (SRW x (ANDconst [31] y)) +(Rsh32Ux64 x (SUB (MOVDconst [32]) (ANDconst [31] y))) -> (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) +(Rsh32Ux64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) -> (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) + +(Rsh32x64 x (AND y (MOVDconst [31]))) -> (SRAW x (ANDconst 
[31] y)) +(Rsh32x64 x (ANDconst [31] y)) -> (SRAW x (ANDconst [31] y)) +(Rsh32x64 x (SUB (MOVDconst [32]) (ANDconst [31] y))) -> (SRAW x (SUB (MOVDconst [32]) (ANDconst [31] y))) +(Rsh32x64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) -> (SRAW x (SUB (MOVDconst [32]) (ANDconst [31] y))) + (Rsh32x64 x y) -> (SRAW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] y)))) (Rsh32Ux64 x y) -> (SRW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] y)))) (Lsh32x64 x y) -> (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] y)))) @@ -179,7 +229,6 @@ (Rsh8Ux64 x y) -> (SRW (ZeroExt8to32 x) (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] y)))) (Lsh8x64 x y) -> (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-8] y)))) - (Rsh64x32 x y) -> (SRAD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y))))) (Rsh64Ux32 x y) -> (SRD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y))))) (Lsh64x32 x y) -> (SLD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y))))) @@ -241,7 +290,6 @@ // (MaskIfNotCarry CarrySet) -> -1 (Addr {sym} base) -> (MOVDaddr {sym} base) -// (Addr {sym} base) -> (ADDconst {sym} base) (OffPtr [off] ptr) -> (ADD (MOVDconst [off]) ptr) (Ctz64 x) -> (POPCNTD (ANDN (ADDconst [-1] x) x)) @@ -597,6 +645,7 @@ // Miscellaneous (Convert x mem) -> (MOVDconvert x mem) (GetClosurePtr) -> (LoweredGetClosurePtr) +(GetCallerSP) -> (LoweredGetCallerSP) (IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr)) (IsInBounds idx len) -> (LessThan (CMPU idx len)) (IsSliceInBounds idx len) -> (LessEqual (CMPU idx len)) @@ -688,6 +737,20 @@ (ADDconst [c] (MOVDaddr [d] {sym} x)) -> (MOVDaddr [c+d] {sym} x) +// Use register moves instead of stores and loads to move int<->float values +// Common with math Float64bits, Float64frombits +(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) -> (MFVSRD x) +(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) -> (MTVSRD x) + +(FMOVDstore [off] {sym} ptr (MTVSRD x) mem) -> (MOVDstore [off] {sym} 
ptr x mem) +(MOVDstore [off] {sym} ptr (MFVSRD x) mem) -> (FMOVDstore [off] {sym} ptr x mem) + +(MTVSRD (MOVDconst [c])) -> (FMOVDconst [c]) +(MFVSRD (FMOVDconst [c])) -> (MOVDconst [c]) + +(MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (FMOVDload [off] {sym} ptr mem) +(MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVDload [off] {sym} ptr mem) + // Fold offsets for stores. (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVDstore [off1+off2] {sym} x val mem) (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(off1+off2) -> (MOVWstore [off1+off2] {sym} x val mem) @@ -697,35 +760,54 @@ (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVSstore [off1+off2] {sym} ptr val mem) (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(off1+off2) -> (FMOVDstore [off1+off2] {sym} ptr val mem) -(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) -> +// Fold address into load/store. +// The assembler needs to generate several instructions and use +// temp register for accessing global, and each time it will reload +// the temp register. So don't fold address of global, unless there +// is only one use. 
+(MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) + && (ptr.Op != OpSB || p.Uses == 1) -> (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) -(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) -> +(MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) + && (ptr.Op != OpSB || p.Uses == 1) -> (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) -(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) -> +(MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) + && (ptr.Op != OpSB || p.Uses == 1) -> (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) -(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) -> +(MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) + && (ptr.Op != OpSB || p.Uses == 1) -> (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) -(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) -> +(FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) + && (ptr.Op != OpSB || p.Uses == 1) -> (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) -(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) -> +(FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) + && (ptr.Op != OpSB || p.Uses == 1) -> (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) -(MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) -> +(MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) + && (ptr.Op != OpSB || p.Uses == 1) -> (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) -> +(MOVHload [off1] {sym1} 
p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) + && (ptr.Op != OpSB || p.Uses == 1) -> (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) -> +(MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) + && (ptr.Op != OpSB || p.Uses == 1) -> (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) -> +(MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) + && (ptr.Op != OpSB || p.Uses == 1) -> (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) -> +(MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) + && (ptr.Op != OpSB || p.Uses == 1) -> (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) -> +(MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) + && (ptr.Op != OpSB || p.Uses == 1) -> (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) -> +(FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) + && (ptr.Op != OpSB || p.Uses == 1) -> (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) -(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) -> +(FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) + && (ptr.Op != OpSB || p.Uses == 1) -> (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) // Fold offsets for loads. 
@@ -756,13 +838,17 @@ (MOVBstorezero [off1+off2] {sym} x mem) // Fold symbols into storezero -(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) -> +(MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) + && (x.Op != OpSB || p.Uses == 1) -> (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) -(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) -> +(MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) + && (x.Op != OpSB || p.Uses == 1) -> (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) -(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) -> +(MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) + && (x.Op != OpSB || p.Uses == 1) -> (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) -(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) -> +(MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) + && (x.Op != OpSB || p.Uses == 1) -> (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) // atomic intrinsics @@ -850,8 +936,126 @@ (AND (MOVDconst [c]) x:(MOVBZload _ _)) -> (ANDconst [c&0xFF] x) (AND x:(MOVBZload _ _) (MOVDconst [c])) -> (ANDconst [c&0xFF] x) +// floating point negative abs +(FNEG (FABS x)) -> (FNABS x) +(FNEG (FNABS x)) -> (FABS x) + // floating-point fused multiply-add/sub (FADD (FMUL x y) z) -> (FMADD x y z) (FSUB (FMUL x y) z) -> (FMSUB x y z) (FADDS (FMULS x y) z) -> (FMADDS x y z) (FSUBS (FMULS x y) z) -> (FMSUBS x y z) + + +// The following statements are found in encoding/binary functions UintXX (load) and PutUintXX (store) +// and convert the statements in these functions from multiple single byte loads or stores to +// the single largest possible load or store. For now only little endian loads and stores on +// little endian machines are implemented. 
Longer rules make use of the match with shorter rules +// where possible. +// TODO implement big endian loads and stores for little endian machines (using byte reverse +// loads and stores). +// b[0] | b[1]<<8 -> load 16-bit Little endian +(OR x0:(MOVBZload [i0] {s} p mem) + o1:(SLWconst x1:(MOVBZload [i1] {s} p mem) [8])) + && !config.BigEndian + && i1 == i0+1 + && x0.Uses ==1 && x1.Uses == 1 + && o1.Uses == 1 + && mergePoint(b, x0, x1) != nil + && clobber(x0) && clobber(x1) && clobber(o1) + -> @mergePoint(b,x0,x1) (MOVHZload {s} [i0] p mem) + +// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 -> load 32-bit Little endian +(OR s1:(SLWconst x2:(MOVBZload [i3] {s} p mem) [24]) + o0:(OR s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [16]) x0:(MOVHZload [i0] {s} p mem))) + && !config.BigEndian + && i2 == i0+2 + && i3 == i0+3 + && x0.Uses ==1 && x1.Uses == 1 && x2.Uses == 1 + && o0.Uses == 1 + && s0.Uses == 1 && s1.Uses == 1 + && mergePoint(b, x0, x1, x2) != nil + && clobber(x0) && clobber(x1) && clobber(x2) + && clobber(s0) && clobber(s1) + && clobber(o0) + -> @mergePoint(b,x0,x1,x2) (MOVWZload {s} [i0] p mem) + +// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4] <<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 -> load 64-bit Little endian +// Can't build on shorter rules because they use SLW instead of SLD +// Offset must be multiple of 4 for MOVD +(OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) + o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) + o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) + o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) + o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) + o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) + o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)))))))) + && !config.BigEndian + && i0%4 == 0 + && i1 == i0+1 + && i2 == i0+2 + && i3 == i0+3 + && i4 == i0+4 + && i5 == i0+5 + && i6 == i0+6 + && i7 == i0+7 + && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && 
x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 + && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 + && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 + && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil + && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) + && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) + && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + -> @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + +// 2 byte store Little endian as in: +// b[0] = byte(v) +// b[1] = byte(v >> 8) +(MOVBstore [i1] {s} p (SRWconst (MOVHZreg w) [8]) + x0:(MOVBstore [i0] {s} p w mem)) + && !config.BigEndian + && x0.Uses == 1 + && i1 == i0+1 + && clobber(x0) + -> (MOVHstore [i0] {s} p w mem) + +// 4 byte store Little endian as in: +// b[0] = byte(v) +// b[1] = byte(v >> 8) +// b[2] = byte(v >> 16) +// b[3] = byte(v >> 24) +(MOVBstore [i3] {s} p (SRWconst w [24]) + x0:(MOVBstore [i2] {s} p (SRWconst w [16]) + x1:(MOVBstore [i1] {s} p (SRWconst w [8]) + x2:(MOVBstore [i0] {s} p w mem)))) + && !config.BigEndian + && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 + && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 + && clobber(x0) && clobber(x1) && clobber(x2) + -> (MOVWstore [i0] {s} p w mem) + +// 8 byte store Little endian as in: +// b[0] = byte(v) +// b[1] = byte(v >> 8) +// b[2] = byte(v >> 16) +// b[3] = byte(v >> 24) +// b[4] = byte(v >> 32) +// b[5] = byte(v >> 40) +// b[6] = byte(v >> 48) +// b[7] = byte(v >> 56) +// Offset must be multiple of 4 for MOVDstore +// Can't build on previous rules for 2 or 4 bytes because they use SRW not SRD +(MOVBstore [i7] {s} p (SRDconst w [56]) + x0:(MOVBstore [i6] {s} p (SRDconst w [48]) + x1:(MOVBstore [i5] {s} p (SRDconst w [40]) + 
x2:(MOVBstore [i4] {s} p (SRDconst w [32]) + x3:(MOVBstore [i3] {s} p (SRDconst w [24]) + x4:(MOVBstore [i2] {s} p (SRDconst w [16]) + x5:(MOVBstore [i1] {s} p (SRDconst w [8]) + x6:(MOVBstore [i0] {s} p w mem)))))))) + && !config.BigEndian + && i0%4 == 0 + && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 + && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 + && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) + -> (MOVDstore [i0] {s} p w mem) diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go index 2e8e239f15f..c6269e0f48c 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go @@ -128,8 +128,9 @@ func init() { // cr = buildReg("CR") // ctr = buildReg("CTR") // lr = buildReg("LR") - tmp = buildReg("R31") - ctxt = buildReg("R11") + tmp = buildReg("R31") + ctxt = buildReg("R11") + callptr = buildReg("R12") // tls = buildReg("R13") gp01 = regInfo{inputs: nil, outputs: []regMask{gp}} gp11 = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}} @@ -154,13 +155,13 @@ func init() { callerSave = regMask(gp | fp | gr) ) ops := []opData{ - {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1 - {name: "ADDconst", argLength: 1, reg: gp11, asm: "ADD", aux: "SymOff", symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym) - {name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true}, // arg0+arg1 - {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0+arg1 - {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0-arg1 - {name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"}, // arg0-arg1 - {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0-arg1 + {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, 
// arg0 + arg1 + {name: "ADDconst", argLength: 1, reg: gp11, asm: "ADD", aux: "Int64"}, // arg0 + auxInt + {name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true}, // arg0+arg1 + {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0+arg1 + {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0-arg1 + {name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"}, // arg0-arg1 + {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0-arg1 {name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true}, // arg0*arg1 (signed 64-bit) {name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true}, // arg0*arg1 (signed 32-bit) @@ -185,6 +186,9 @@ func init() { {name: "SLD", argLength: 2, reg: gp21, asm: "SLD"}, // arg0 << arg1, 64 bits (0 if arg1 & 64 != 0) {name: "SLW", argLength: 2, reg: gp21, asm: "SLW"}, // arg0 << arg1, 32 bits (0 if arg1 & 32 != 0) + {name: "ROTL", argLength: 2, reg: gp21, asm: "ROTL"}, // arg0 rotate left by arg1 mod 64 + {name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32 + {name: "ADDconstForCarry", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, aux: "Int16", asm: "ADDC", typ: "Flags"}, // _, carry := arg0 + aux {name: "MaskIfNotCarry", argLength: 1, reg: crgp, asm: "ADDME", typ: "Int64"}, // carry - 1 (if carry then 0 else -1) @@ -219,6 +223,7 @@ func init() { {name: "FCTIDZ", argLength: 1, reg: fp11, asm: "FCTIDZ", typ: "Float64"}, // convert float to 64-bit int round towards zero {name: "FCTIWZ", argLength: 1, reg: fp11, asm: "FCTIWZ", typ: "Float64"}, // convert float to 32-bit int round towards zero {name: "FCFID", argLength: 1, reg: fp11, asm: "FCFID", typ: "Float64"}, // convert 64-bit integer to float + {name: "FCFIDS", argLength: 1, reg: fp11, asm: "FCFIDS", typ: "Float32"}, // convert 32-bit integer to float {name: "FRSP", argLength: 1, reg: fp11, asm: "FRSP", typ: 
"Float64"}, // round float to 32-bit value // Movement between float and integer registers with no change in bits; accomplished with stores+loads on PPC. @@ -227,8 +232,8 @@ func init() { // There are optimizations that should apply -- (Xi2f64 (MOVWload (not-ADD-ptr+offset) ) ) could use // the word-load instructions. (Xi2f64 (MOVDload ptr )) can be (FMOVDload ptr) - {name: "Xf2i64", argLength: 1, reg: fpgp, typ: "Int64"}, // move 64 bits of F register into G register - {name: "Xi2f64", argLength: 1, reg: gpfp, typ: "Float64"}, // move 64 bits of G register into F register + {name: "MFVSRD", argLength: 1, reg: fpgp, asm: "MFVSRD", typ: "Int64"}, // move 64 bits of F register into G register + {name: "MTVSRD", argLength: 1, reg: gpfp, asm: "MTVSRD", typ: "Float64"}, // move 64 bits of G register into F register {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0&arg1 {name: "ANDN", argLength: 2, reg: gp21, asm: "ANDN"}, // arg0&^arg1 @@ -241,6 +246,12 @@ func init() { {name: "FNEG", argLength: 1, reg: fp11, asm: "FNEG"}, // -arg0 (floating point) {name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"}, // sqrt(arg0) (floating point) {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0) (floating point, single precision) + {name: "FFLOOR", argLength: 1, reg: fp11, asm: "FRIM"}, // floor(arg0), float64 + {name: "FCEIL", argLength: 1, reg: fp11, asm: "FRIP"}, // ceil(arg0), float64 + {name: "FTRUNC", argLength: 1, reg: fp11, asm: "FRIZ"}, // trunc(arg0), float64 + {name: "FABS", argLength: 1, reg: fp11, asm: "FABS"}, // abs(arg0), float64 + {name: "FNABS", argLength: 1, reg: fp11, asm: "FNABS"}, // -abs(arg0), float64 + {name: "FCPSGN", argLength: 2, reg: fp21, asm: "FCPSGN"}, // copysign arg0 -> arg1, float64 {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0|aux {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux @@ -307,6 +318,9 @@ func init() { // use of the closure 
pointer. {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{ctxt}}}, + // LoweredGetCallerSP returns the SP of the caller of the current function. + {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true}, + //arg0=ptr,arg1=mem, returns void. Faults if ptr is nil. {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true}, // Round ops to block fused-multiply-add extraction. @@ -317,8 +331,8 @@ func init() { {name: "MOVDconvert", argLength: 2, reg: gp11, asm: "MOVD"}, {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem - {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gp | sp, ctxt, 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem - {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{callptr, ctxt, 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{callptr}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. 
arg0=codeptr, arg1=mem, auxint=argsize, returns mem // large or unaligned zeroing // arg0 = address of memory to zero (in R3, changed as side effect) diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules index 8a627e75f51..6b997bd46d8 100644 --- a/src/cmd/compile/internal/ssa/gen/S390X.rules +++ b/src/cmd/compile/internal/ssa/gen/S390X.rules @@ -107,7 +107,13 @@ (Bswap64 x) -> (MOVDBR x) (Bswap32 x) -> (MOVWBR x) -(Sqrt x) -> (FSQRT x) +// math package intrinsics +(Sqrt x) -> (FSQRT x) +(Floor x) -> (FIDBR [7] x) +(Ceil x) -> (FIDBR [6] x) +(Trunc x) -> (FIDBR [5] x) +(RoundToEven x) -> (FIDBR [4] x) +(Round x) -> (FIDBR [1] x) // Atomic loads. (AtomicLoad32 ptr mem) -> (MOVWZatomicload ptr mem) @@ -422,6 +428,7 @@ (NilCheck ptr mem) -> (LoweredNilCheck ptr mem) (GetG mem) -> (LoweredGetG mem) (GetClosurePtr) -> (LoweredGetClosurePtr) +(GetCallerSP) -> (LoweredGetCallerSP) (Addr {sym} base) -> (MOVDaddr {sym} base) (ITab (Load ptr mem)) -> (MOVDload ptr mem) @@ -474,21 +481,20 @@ (MOVDnop x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload [off] {sym} ptr mem) (MOVDnop x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVDload [off] {sym} ptr mem) -// TODO(mundaym): uncomment rules once signed indexed loads are added. 
(MOVDreg x:(MOVBZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBZloadidx [off] {sym} ptr idx mem) -//(MOVDreg x:(MOVBloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBloadidx [off] {sym} ptr idx mem) +(MOVDreg x:(MOVBloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBloadidx [off] {sym} ptr idx mem) (MOVDreg x:(MOVHZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHZloadidx [off] {sym} ptr idx mem) -//(MOVDreg x:(MOVHloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHloadidx [off] {sym} ptr idx mem) +(MOVDreg x:(MOVHloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHloadidx [off] {sym} ptr idx mem) (MOVDreg x:(MOVWZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWZloadidx [off] {sym} ptr idx mem) -//(MOVDreg x:(MOVWloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx [off] {sym} ptr idx mem) +(MOVDreg x:(MOVWloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx [off] {sym} ptr idx mem) (MOVDreg x:(MOVDloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVDloadidx [off] {sym} ptr idx mem) (MOVDnop x:(MOVBZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBZloadidx [off] {sym} ptr idx mem) -//(MOVDnop x:(MOVBloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBloadidx [off] {sym} ptr idx mem) +(MOVDnop x:(MOVBloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBloadidx [off] {sym} ptr idx mem) (MOVDnop x:(MOVHZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHZloadidx [off] {sym} ptr idx mem) -//(MOVDnop x:(MOVHloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHloadidx [off] {sym} ptr idx mem) +(MOVDnop x:(MOVHloadidx [off] 
{sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHloadidx [off] {sym} ptr idx mem) (MOVDnop x:(MOVWZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWZloadidx [off] {sym} ptr idx mem) -//(MOVDnop x:(MOVWloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx [off] {sym} ptr idx mem) +(MOVDnop x:(MOVWloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx [off] {sym} ptr idx mem) (MOVDnop x:(MOVDloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVDloadidx [off] {sym} ptr idx mem) // Fold sign extensions into conditional moves of constants. @@ -514,31 +520,31 @@ // Fold constants into instructions. (ADD x (MOVDconst [c])) && is32Bit(c) -> (ADDconst [c] x) -(ADDW x (MOVDconst [c])) -> (ADDWconst [c] x) +(ADDW x (MOVDconst [c])) -> (ADDWconst [int64(int32(c))] x) (SUB x (MOVDconst [c])) && is32Bit(c) -> (SUBconst x [c]) (SUB (MOVDconst [c]) x) && is32Bit(c) -> (NEG (SUBconst x [c])) -(SUBW x (MOVDconst [c])) -> (SUBWconst x [c]) -(SUBW (MOVDconst [c]) x) -> (NEGW (SUBWconst x [c])) +(SUBW x (MOVDconst [c])) -> (SUBWconst x [int64(int32(c))]) +(SUBW (MOVDconst [c]) x) -> (NEGW (SUBWconst x [int64(int32(c))])) (MULLD x (MOVDconst [c])) && is32Bit(c) -> (MULLDconst [c] x) -(MULLW x (MOVDconst [c])) -> (MULLWconst [c] x) +(MULLW x (MOVDconst [c])) -> (MULLWconst [int64(int32(c))] x) // NILF instructions leave the high 32 bits unchanged which is // equivalent to the leftmost 32 bits being set. // TODO(mundaym): modify the assembler to accept 64-bit values // and use isU32Bit(^c). 
(AND x (MOVDconst [c])) && is32Bit(c) && c < 0 -> (ANDconst [c] x) -(ANDW x (MOVDconst [c])) -> (ANDWconst [c] x) +(ANDW x (MOVDconst [c])) -> (ANDWconst [int64(int32(c))] x) (ANDWconst [c] (ANDWconst [d] x)) -> (ANDWconst [c & d] x) (ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c & d] x) (OR x (MOVDconst [c])) && isU32Bit(c) -> (ORconst [c] x) -(ORW x (MOVDconst [c])) -> (ORWconst [c] x) +(ORW x (MOVDconst [c])) -> (ORWconst [int64(int32(c))] x) (XOR x (MOVDconst [c])) && isU32Bit(c) -> (XORconst [c] x) -(XORW x (MOVDconst [c])) -> (XORWconst [c] x) +(XORW x (MOVDconst [c])) -> (XORWconst [int64(int32(c))] x) (SLD x (MOVDconst [c])) -> (SLDconst [c&63] x) (SLW x (MOVDconst [c])) -> (SLWconst [c&63] x) @@ -565,12 +571,12 @@ (CMP x (MOVDconst [c])) && is32Bit(c) -> (CMPconst x [c]) (CMP (MOVDconst [c]) x) && is32Bit(c) -> (InvertFlags (CMPconst x [c])) -(CMPW x (MOVDconst [c])) -> (CMPWconst x [c]) -(CMPW (MOVDconst [c]) x) -> (InvertFlags (CMPWconst x [c])) -(CMPU x (MOVDconst [c])) && isU32Bit(c) -> (CMPUconst x [int64(uint32(c))]) -(CMPU (MOVDconst [c]) x) && isU32Bit(c) -> (InvertFlags (CMPUconst x [int64(uint32(c))])) -(CMPWU x (MOVDconst [c])) -> (CMPWUconst x [int64(uint32(c))]) -(CMPWU (MOVDconst [c]) x) -> (InvertFlags (CMPWUconst x [int64(uint32(c))])) +(CMPW x (MOVDconst [c])) -> (CMPWconst x [int64(int32(c))]) +(CMPW (MOVDconst [c]) x) -> (InvertFlags (CMPWconst x [int64(int32(c))])) +(CMPU x (MOVDconst [c])) && isU32Bit(c) -> (CMPUconst x [int64(int32(c))]) +(CMPU (MOVDconst [c]) x) && isU32Bit(c) -> (InvertFlags (CMPUconst x [int64(int32(c))])) +(CMPWU x (MOVDconst [c])) -> (CMPWUconst x [int64(int32(c))]) +(CMPWU (MOVDconst [c]) x) -> (InvertFlags (CMPWUconst x [int64(int32(c))])) // Using MOV{W,H,B}Zreg instead of AND is cheaper. (AND x (MOVDconst [0xFF])) -> (MOVBZreg x) @@ -670,22 +676,82 @@ // Make sure we don't combine these ops if the load has another use. 
// This prevents a single load from being split into multiple loads // which then might return different values. See test/atomicload.go. -(MOVBreg x:(MOVBZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload [off] {sym} ptr mem) +(MOVBreg x:(MOVBZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload [off] {sym} ptr mem) +(MOVBreg x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBload [off] {sym} ptr mem) (MOVBZreg x:(MOVBZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBZload [off] {sym} ptr mem) -(MOVHreg x:(MOVHZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHload [off] {sym} ptr mem) +(MOVBZreg x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBZload [off] {sym} ptr mem) +(MOVHreg x:(MOVHZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHload [off] {sym} ptr mem) +(MOVHreg x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHload [off] {sym} ptr mem) (MOVHZreg x:(MOVHZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHZload [off] {sym} ptr mem) -(MOVWreg x:(MOVWZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload [off] {sym} ptr mem) +(MOVHZreg x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHZload [off] {sym} ptr mem) +(MOVWreg x:(MOVWZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload [off] {sym} ptr mem) +(MOVWreg x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWload [off] {sym} ptr mem) (MOVWZreg x:(MOVWZload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWZload [off] {sym} ptr mem) +(MOVWZreg x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWZload [off] {sym} ptr mem) +(MOVBreg x:(MOVBZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block 
(MOVBloadidx [off] {sym} ptr idx mem) +(MOVBreg x:(MOVBloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBloadidx [off] {sym} ptr idx mem) (MOVBZreg x:(MOVBZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBZloadidx [off] {sym} ptr idx mem) +(MOVBZreg x:(MOVBloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVBZloadidx [off] {sym} ptr idx mem) +(MOVHreg x:(MOVHZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHloadidx [off] {sym} ptr idx mem) +(MOVHreg x:(MOVHloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHloadidx [off] {sym} ptr idx mem) (MOVHZreg x:(MOVHZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHZloadidx [off] {sym} ptr idx mem) +(MOVHZreg x:(MOVHloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVHZloadidx [off] {sym} ptr idx mem) +(MOVWreg x:(MOVWZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx [off] {sym} ptr idx mem) +(MOVWreg x:(MOVWloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWloadidx [off] {sym} ptr idx mem) (MOVWZreg x:(MOVWZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWZloadidx [off] {sym} ptr idx mem) +(MOVWZreg x:(MOVWloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 && clobber(x) -> @x.Block (MOVWZloadidx [off] {sym} ptr idx mem) // replace load from same location as preceding store with copy -(MOVBZload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBZreg x) -(MOVHZload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHZreg x) -(MOVWZload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWZreg x) -(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} 
ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVDreg x) +(MOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) -> (MOVDreg x) +(MOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) -> (MOVWreg x) +(MOVHload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) -> (MOVHreg x) +(MOVBload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) -> (MOVBreg x) +(MOVWZload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) -> (MOVWZreg x) +(MOVHZload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) -> (MOVHZreg x) +(MOVBZload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) -> (MOVBZreg x) +(MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) -> (LGDR x) +(FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) -> (LDGR x) +(FMOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) -> x +(FMOVSload [off] {sym} ptr1 (FMOVSstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) -> x + +// prefer FPR <-> GPR moves over combined load ops +(MULLDload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) -> (MULLD x (LGDR y)) +(ADDload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) -> (ADD x (LGDR y)) +(SUBload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) -> (SUB x (LGDR y)) +(ORload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) -> (OR x (LGDR y)) +(ANDload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) -> (AND x (LGDR y)) +(XORload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) -> (XOR x (LGDR y)) + +// detect attempts to set/clear the sign bit +// may need to be reworked when NIHH/OIHH are added 
+(SRDconst [1] (SLDconst [1] (LGDR x))) -> (LGDR (LPDFR x)) +(LDGR (SRDconst [1] (SLDconst [1] x))) -> (LPDFR (LDGR x)) +(OR (MOVDconst [-1<<63]) (LGDR x)) -> (LGDR (LNDFR x)) +(LDGR (OR (MOVDconst [-1<<63]) x)) -> (LNDFR (LDGR x)) + +// detect attempts to set the sign bit with load +(LDGR x:(ORload [off] {sym} (MOVDconst [-1<<63]) ptr mem)) && x.Uses == 1 && clobber(x) -> @x.Block (LNDFR (LDGR (MOVDload [off] {sym} ptr mem))) + +// detect copysign +(OR (SLDconst [63] (SRDconst [63] (LGDR x))) (LGDR (LPDFR y))) -> (LGDR (CPSDR y x)) +(OR (SLDconst [63] (SRDconst [63] (LGDR x))) (MOVDconst [c])) && c & -1<<63 == 0 -> (LGDR (CPSDR (FMOVDconst [c]) x)) +(CPSDR y (FMOVDconst [c])) && c & -1<<63 == 0 -> (LPDFR y) +(CPSDR y (FMOVDconst [c])) && c & -1<<63 != 0 -> (LNDFR y) + +// absorb negations into set/clear sign bit +(FNEG (LPDFR x)) -> (LNDFR x) +(FNEG (LNDFR x)) -> (LPDFR x) +(FNEGS (LPDFR x)) -> (LNDFR x) +(FNEGS (LNDFR x)) -> (LPDFR x) + +// no need to convert float32 to float64 to set/clear sign bit +(LEDBR (LPDFR (LDEBR x))) -> (LPDFR x) +(LEDBR (LNDFR (LDEBR x))) -> (LNDFR x) + +// remove unnecessary FPR <-> GPR moves +(LDGR (LGDR x)) -> x +(LGDR (LDGR x)) -> (MOVDreg x) // Don't extend before storing (MOVWstore [off] {sym} ptr (MOVWreg x) mem) -> (MOVWstore [off] {sym} ptr x mem) @@ -717,6 +783,20 @@ (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(off1+off2) -> (FMOVSstore [off1+off2] {sym} ptr val mem) (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(off1+off2) -> (FMOVDstore [off1+off2] {sym} ptr val mem) +(ADDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (ADDload [off1+off2] {sym} x ptr mem) +(ADDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (ADDWload [off1+off2] {sym} x ptr mem) +(MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (MULLDload [off1+off2] {sym} x ptr mem) +(MULLWload [off1] 
{sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (MULLWload [off1+off2] {sym} x ptr mem) +(SUBload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (SUBload [off1+off2] {sym} x ptr mem) +(SUBWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (SUBWload [off1+off2] {sym} x ptr mem) + +(ANDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (ANDload [off1+off2] {sym} x ptr mem) +(ANDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (ANDWload [off1+off2] {sym} x ptr mem) +(ORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (ORload [off1+off2] {sym} x ptr mem) +(ORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (ORWload [off1+off2] {sym} x ptr mem) +(XORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (XORload [off1+off2] {sym} x ptr mem) +(XORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(off1+off2) -> (XORWload [off1+off2] {sym} x ptr mem) + // Fold constants into stores. 
(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(off) && ptr.Op != OpSB -> (MOVDstoreconst [makeValAndOff(c,off)] {sym} ptr mem) @@ -774,6 +854,20 @@ (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (ADDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (ADDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (MULLDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (MULLWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (SUBload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (SUBWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + +(ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (ANDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (ANDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (ORload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (ORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(XORload [o1] {s1} x 
(MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (XORload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) -> (XORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + // Cannot store constant to SB directly (no 'move relative long immediate' instructions). (MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> (MOVDstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) @@ -787,10 +881,16 @@ // generating indexed loads and stores (MOVBZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> (MOVBZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) +(MOVBload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVBloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) (MOVHZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> (MOVHZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) +(MOVHload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVHloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) (MOVWZload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> (MOVWZloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) +(MOVWload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVWloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) (MOVDload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> (MOVDloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) (FMOVSload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) && 
is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> @@ -812,46 +912,56 @@ (FMOVDstoreidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) (MOVBZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVBZloadidx [off] {sym} ptr idx mem) +(MOVBload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVBloadidx [off] {sym} ptr idx mem) (MOVHZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVHZloadidx [off] {sym} ptr idx mem) +(MOVHload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVHloadidx [off] {sym} ptr idx mem) (MOVWZload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVWZloadidx [off] {sym} ptr idx mem) -(MOVDload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVDloadidx [off] {sym} ptr idx mem) +(MOVWload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVWloadidx [off] {sym} ptr idx mem) +(MOVDload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (MOVDloadidx [off] {sym} ptr idx mem) (FMOVSload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (FMOVSloadidx [off] {sym} ptr idx mem) (FMOVDload [off] {sym} (ADD ptr idx) mem) && ptr.Op != OpSB -> (FMOVDloadidx [off] {sym} ptr idx mem) -(MOVBstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx [off] {sym} ptr idx val mem) -(MOVHstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVHstoreidx [off] {sym} ptr idx val mem) -(MOVWstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVWstoreidx [off] {sym} ptr idx val mem) -(MOVDstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVDstoreidx [off] {sym} ptr idx val mem) + +(MOVBstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVBstoreidx [off] {sym} ptr idx val mem) +(MOVHstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVHstoreidx [off] {sym} ptr idx val mem) +(MOVWstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (MOVWstoreidx [off] {sym} ptr idx val mem) +(MOVDstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> 
(MOVDstoreidx [off] {sym} ptr idx val mem) (FMOVSstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (FMOVSstoreidx [off] {sym} ptr idx val mem) (FMOVDstore [off] {sym} (ADD ptr idx) val mem) && ptr.Op != OpSB -> (FMOVDstoreidx [off] {sym} ptr idx val mem) // combine ADD into indexed loads and stores -(MOVBZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVBZloadidx [c+d] {sym} ptr idx mem) -(MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVHZloadidx [c+d] {sym} ptr idx mem) -(MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVWZloadidx [c+d] {sym} ptr idx mem) -(MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (MOVDloadidx [c+d] {sym} ptr idx mem) -(FMOVSloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (FMOVSloadidx [c+d] {sym} ptr idx mem) -(FMOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) -> (FMOVDloadidx [c+d] {sym} ptr idx mem) +(MOVBZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (MOVBZloadidx [c+d] {sym} ptr idx mem) +(MOVBloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (MOVBloadidx [c+d] {sym} ptr idx mem) +(MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (MOVHZloadidx [c+d] {sym} ptr idx mem) +(MOVHloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (MOVHloadidx [c+d] {sym} ptr idx mem) +(MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (MOVWZloadidx [c+d] {sym} ptr idx mem) +(MOVWloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (MOVWloadidx [c+d] {sym} ptr idx mem) +(MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (MOVDloadidx [c+d] {sym} ptr idx mem) +(FMOVSloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (FMOVSloadidx [c+d] {sym} ptr idx mem) +(FMOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) && is20Bit(c+d) -> (FMOVDloadidx [c+d] {sym} ptr idx mem) -(MOVBstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVBstoreidx [c+d] {sym} ptr idx val mem) -(MOVHstoreidx [c] {sym} 
(ADDconst [d] ptr) idx val mem) -> (MOVHstoreidx [c+d] {sym} ptr idx val mem) -(MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVWstoreidx [c+d] {sym} ptr idx val mem) -(MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (MOVDstoreidx [c+d] {sym} ptr idx val mem) -(FMOVSstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (FMOVSstoreidx [c+d] {sym} ptr idx val mem) -(FMOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) -> (FMOVDstoreidx [c+d] {sym} ptr idx val mem) +(MOVBstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(c+d) -> (MOVBstoreidx [c+d] {sym} ptr idx val mem) +(MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(c+d) -> (MOVHstoreidx [c+d] {sym} ptr idx val mem) +(MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(c+d) -> (MOVWstoreidx [c+d] {sym} ptr idx val mem) +(MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(c+d) -> (MOVDstoreidx [c+d] {sym} ptr idx val mem) +(FMOVSstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(c+d) -> (FMOVSstoreidx [c+d] {sym} ptr idx val mem) +(FMOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) && is20Bit(c+d) -> (FMOVDstoreidx [c+d] {sym} ptr idx val mem) -(MOVBZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVBZloadidx [c+d] {sym} ptr idx mem) -(MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVHZloadidx [c+d] {sym} ptr idx mem) -(MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVWZloadidx [c+d] {sym} ptr idx mem) -(MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (MOVDloadidx [c+d] {sym} ptr idx mem) -(FMOVSloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (FMOVSloadidx [c+d] {sym} ptr idx mem) -(FMOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) -> (FMOVDloadidx [c+d] {sym} ptr idx mem) +(MOVBZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (MOVBZloadidx [c+d] {sym} ptr idx mem) +(MOVBloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (MOVBloadidx [c+d] {sym} ptr idx mem) 
+(MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (MOVHZloadidx [c+d] {sym} ptr idx mem) +(MOVHloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (MOVHloadidx [c+d] {sym} ptr idx mem) +(MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (MOVWZloadidx [c+d] {sym} ptr idx mem) +(MOVWloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (MOVWloadidx [c+d] {sym} ptr idx mem) +(MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (MOVDloadidx [c+d] {sym} ptr idx mem) +(FMOVSloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (FMOVSloadidx [c+d] {sym} ptr idx mem) +(FMOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) && is20Bit(c+d) -> (FMOVDloadidx [c+d] {sym} ptr idx mem) -(MOVBstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVBstoreidx [c+d] {sym} ptr idx val mem) -(MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVHstoreidx [c+d] {sym} ptr idx val mem) -(MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVWstoreidx [c+d] {sym} ptr idx val mem) -(MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (MOVDstoreidx [c+d] {sym} ptr idx val mem) -(FMOVSstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (FMOVSstoreidx [c+d] {sym} ptr idx val mem) -(FMOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) -> (FMOVDstoreidx [c+d] {sym} ptr idx val mem) +(MOVBstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(c+d) -> (MOVBstoreidx [c+d] {sym} ptr idx val mem) +(MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(c+d) -> (MOVHstoreidx [c+d] {sym} ptr idx val mem) +(MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(c+d) -> (MOVWstoreidx [c+d] {sym} ptr idx val mem) +(MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(c+d) -> (MOVDstoreidx [c+d] {sym} ptr idx val mem) +(FMOVSstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(c+d) -> (FMOVSstoreidx [c+d] {sym} ptr idx val mem) 
+(FMOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) && is20Bit(c+d) -> (FMOVDstoreidx [c+d] {sym} ptr idx val mem) // MOVDaddr into MOVDaddridx (MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) && x.Op != OpSB -> diff --git a/src/cmd/compile/internal/ssa/gen/S390XOps.go b/src/cmd/compile/internal/ssa/gen/S390XOps.go index 2a08a276d9f..ab781cee90c 100644 --- a/src/cmd/compile/internal/ssa/gen/S390XOps.go +++ b/src/cmd/compile/internal/ssa/gen/S390XOps.go @@ -205,6 +205,20 @@ func init() { {name: "FMADD", argLength: 3, reg: fp31, asm: "FMADD", resultInArg0: true}, // fp64 arg1 * arg2 + arg0 {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS", resultInArg0: true}, // fp32 arg1 * arg2 - arg0 {name: "FMSUB", argLength: 3, reg: fp31, asm: "FMSUB", resultInArg0: true}, // fp64 arg1 * arg2 - arg0 + {name: "LPDFR", argLength: 1, reg: fp11, asm: "LPDFR"}, // fp64/fp32 set sign bit + {name: "LNDFR", argLength: 1, reg: fp11, asm: "LNDFR"}, // fp64/fp32 clear sign bit + {name: "CPSDR", argLength: 2, reg: fp21, asm: "CPSDR"}, // fp64/fp32 copy arg1 sign bit to arg0 + + // Round to integer, float64 only. 
+ // + // aux | rounding mode + // ----+----------------------------------- + // 1 | round to nearest, ties away from 0 + // 4 | round to nearest, ties to even + // 5 | round toward 0 + // 6 | round toward +∞ + // 7 | round toward -∞ + {name: "FIDBR", argLength: 1, reg: fp11, asm: "FIDBR", aux: "Int8"}, {name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp32 load {name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp64 load @@ -221,21 +235,21 @@ func init() { // binary ops {name: "ADD", argLength: 2, reg: gp21sp, asm: "ADD", commutative: true, clobberFlags: true}, // arg0 + arg1 {name: "ADDW", argLength: 2, reg: gp21sp, asm: "ADDW", commutative: true, clobberFlags: true}, // arg0 + arg1 - {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int64", typ: "UInt64", clobberFlags: true}, // arg0 + auxint + {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int32", typ: "UInt64", clobberFlags: true}, // arg0 + auxint {name: "ADDWconst", argLength: 1, reg: gp11sp, asm: "ADDW", aux: "Int32", clobberFlags: true}, // arg0 + auxint {name: "ADDload", argLength: 3, reg: gpopload, asm: "ADD", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + *arg1. arg2=mem {name: "ADDWload", argLength: 3, reg: gpopload, asm: "ADDW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + *arg1. 
arg2=mem {name: "SUB", argLength: 2, reg: gp21, asm: "SUB", clobberFlags: true}, // arg0 - arg1 {name: "SUBW", argLength: 2, reg: gp21, asm: "SUBW", clobberFlags: true}, // arg0 - arg1 - {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 - auxint + {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint {name: "SUBWconst", argLength: 1, reg: gp11, asm: "SUBW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint {name: "SUBload", argLength: 3, reg: gpopload, asm: "SUB", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - *arg1. arg2=mem {name: "SUBWload", argLength: 3, reg: gpopload, asm: "SUBW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - *arg1. arg2=mem {name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1 {name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1 - {name: "MULLDconst", argLength: 1, reg: gp11, asm: "MULLD", aux: "Int64", typ: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 * auxint + {name: "MULLDconst", argLength: 1, reg: gp11, asm: "MULLD", aux: "Int32", typ: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 * auxint {name: "MULLWconst", argLength: 1, reg: gp11, asm: "MULLW", aux: "Int32", typ: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint {name: "MULLDload", argLength: 3, reg: gpopload, asm: "MULLD", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * *arg1. 
arg2=mem {name: "MULLWload", argLength: 3, reg: gpopload, asm: "MULLW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * *arg1. arg2=mem @@ -281,9 +295,9 @@ func init() { {name: "CMPU", argLength: 2, reg: gp2flags, asm: "CMPU", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPWU", argLength: 2, reg: gp2flags, asm: "CMPWU", typ: "Flags"}, // arg0 compare to arg1 - {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", typ: "Flags", aux: "Int64"}, // arg0 compare to auxint + {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint - {name: "CMPUconst", argLength: 1, reg: gp1flags, asm: "CMPU", typ: "Flags", aux: "Int64"}, // arg0 compare to auxint + {name: "CMPUconst", argLength: 1, reg: gp1flags, asm: "CMPU", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint {name: "CMPWUconst", argLength: 1, reg: gp1flags, asm: "CMPWU", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint {name: "FCMPS", argLength: 2, reg: fp2flags, asm: "CEBR", typ: "Flags"}, // arg0 compare to arg1, f32 @@ -346,6 +360,8 @@ func init() { {name: "MOVDconst", reg: gp01, asm: "MOVD", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint + {name: "LDGR", argLength: 1, reg: gpfp, asm: "LDGR"}, // move int64 to float64 (no conversion) + {name: "LGDR", argLength: 1, reg: fpgp, asm: "LGDR"}, // move float64 to int64 (no conversion) {name: "CFDBRA", argLength: 1, reg: fpgp, asm: "CFDBRA"}, // convert float64 to int32 {name: "CGDBRA", argLength: 1, reg: fpgp, asm: "CGDBRA"}, // convert float64 to int64 {name: "CFEBRA", argLength: 1, reg: fpgp, asm: "CFEBRA"}, // convert float32 to int32 @@ -357,8 +373,8 @@ func init() { {name: "LEDBR", argLength: 1, reg: fp11, asm: "LEDBR"}, // convert float64 to float32 {name: "LDEBR", argLength: 1, reg: fp11, asm: 
"LDEBR"}, // convert float32 to float64 - {name: "MOVDaddr", argLength: 1, reg: addr, aux: "SymOff", rematerializeable: true, clobberFlags: true, symEffect: "Read"}, // arg0 + auxint + offset encoded in aux - {name: "MOVDaddridx", argLength: 2, reg: addridx, aux: "SymOff", clobberFlags: true, symEffect: "Read"}, // arg0 + arg1 + auxint + aux + {name: "MOVDaddr", argLength: 1, reg: addr, aux: "SymOff", rematerializeable: true, symEffect: "Read"}, // arg0 + auxint + offset encoded in aux + {name: "MOVDaddridx", argLength: 2, reg: addridx, aux: "SymOff", symEffect: "Read"}, // arg0 + arg1 + auxint + aux // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address {name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read"}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend. @@ -387,10 +403,12 @@ func init() { {name: "MVC", argLength: 3, reg: gpmvc, asm: "MVC", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, faultOnNilArg1: true, symEffect: "None"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size,off // indexed loads/stores - // TODO(mundaym): add sign-extended indexed loads - {name: "MOVBZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", clobberFlags: true, symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem - {name: "MOVHZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", clobberFlags: true, symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem - {name: "MOVWZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", clobberFlags: true, symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. 
arg2=mem + {name: "MOVBZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", clobberFlags: true, symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem. Zero extend. + {name: "MOVBloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVB", aux: "SymOff", typ: "Int8", clobberFlags: true, symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem. Sign extend. + {name: "MOVHZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", clobberFlags: true, symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Zero extend. + {name: "MOVHloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVH", aux: "SymOff", typ: "Int16", clobberFlags: true, symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Sign extend. + {name: "MOVWZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", clobberFlags: true, symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Zero extend. + {name: "MOVWloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVW", aux: "SymOff", typ: "Int32", clobberFlags: true, symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Sign extend. {name: "MOVDloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVD", aux: "SymOff", typ: "UInt64", clobberFlags: true, symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem {name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVHBR", aux: "SymOff", typ: "Int16", clobberFlags: true, symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes. {name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWBR", aux: "SymOff", typ: "Int32", clobberFlags: true, symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes. 
@@ -428,6 +446,8 @@ func init() { // use of R12 (the closure pointer) {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R12")}}}, // arg0=ptr,arg1=mem, returns void. Faults if ptr is nil. + // LoweredGetCallerSP returns the SP of the caller of the current function. + {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true}, {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{ptrsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true}, // Round ops to block fused-multiply-add extraction. {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true}, diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules index 944a84df852..9b004be9f12 100644 --- a/src/cmd/compile/internal/ssa/gen/generic.rules +++ b/src/cmd/compile/internal/ssa/gen/generic.rules @@ -38,14 +38,22 @@ // For now, the generated successors must be a permutation of the matched successors. // constant folding -(Trunc16to8 (Const16 [c])) -> (Const8 [int64(int8(c))]) -(Trunc32to8 (Const32 [c])) -> (Const8 [int64(int8(c))]) -(Trunc32to16 (Const32 [c])) -> (Const16 [int64(int16(c))]) -(Trunc64to8 (Const64 [c])) -> (Const8 [int64(int8(c))]) -(Trunc64to16 (Const64 [c])) -> (Const16 [int64(int16(c))]) -(Trunc64to32 (Const64 [c])) -> (Const32 [int64(int32(c))]) +(Trunc16to8 (Const16 [c])) -> (Const8 [int64(int8(c))]) +(Trunc32to8 (Const32 [c])) -> (Const8 [int64(int8(c))]) +(Trunc32to16 (Const32 [c])) -> (Const16 [int64(int16(c))]) +(Trunc64to8 (Const64 [c])) -> (Const8 [int64(int8(c))]) +(Trunc64to16 (Const64 [c])) -> (Const16 [int64(int16(c))]) +(Trunc64to32 (Const64 [c])) -> (Const32 [int64(int32(c))]) (Cvt64Fto32F (Const64F [c])) -> (Const32F [f2i(float64(i2f32(c)))]) (Cvt32Fto64F (Const32F [c])) -> (Const64F [c]) // c is already a 64 bit float +(Cvt32to32F (Const32 [c])) -> (Const32F [f2i(float64(float32(int32(c))))]) +(Cvt32to64F (Const32 [c])) -> (Const64F [f2i(float64(int32(c)))]) 
+(Cvt64to32F (Const64 [c])) -> (Const32F [f2i(float64(float32(c)))]) +(Cvt64to64F (Const64 [c])) -> (Const64F [f2i(float64(c))]) +(Cvt32Fto32 (Const32F [c])) -> (Const32 [int64(int32(i2f(c)))]) +(Cvt32Fto64 (Const32F [c])) -> (Const64 [int64(i2f(c))]) +(Cvt64Fto32 (Const64F [c])) -> (Const32 [int64(int32(i2f(c)))]) +(Cvt64Fto64 (Const64F [c])) -> (Const64 [int64(i2f(c))]) (Round32F x:(Const32F)) -> x (Round64F x:(Const64F)) -> x @@ -208,6 +216,17 @@ // (Mod64u x y) is always between 0 (inclusive) and y (exclusive). (IsInBounds (Mod32u _ y) y) -> (ConstBool [1]) (IsInBounds (Mod64u _ y) y) -> (ConstBool [1]) +// Right shifting a unsigned number limits its value. +(IsInBounds (ZeroExt8to64 (Rsh8Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 8 && 1< (ConstBool [1]) +(IsInBounds (ZeroExt8to32 (Rsh8Ux64 _ (Const64 [c]))) (Const32 [d])) && 0 < c && c < 8 && 1< (ConstBool [1]) +(IsInBounds (ZeroExt8to16 (Rsh8Ux64 _ (Const64 [c]))) (Const16 [d])) && 0 < c && c < 8 && 1< (ConstBool [1]) +(IsInBounds (Rsh8Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 8 && 1< (ConstBool [1]) +(IsInBounds (ZeroExt16to64 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 16 && 1< (ConstBool [1]) +(IsInBounds (ZeroExt16to32 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 16 && 1< (ConstBool [1]) +(IsInBounds (Rsh16Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 16 && 1< (ConstBool [1]) +(IsInBounds (ZeroExt32to64 (Rsh32Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 32 && 1< (ConstBool [1]) +(IsInBounds (Rsh32Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 32 && 1< (ConstBool [1]) +(IsInBounds (Rsh64Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 64 && 1< (ConstBool [1]) (IsSliceInBounds x x) -> (ConstBool [1]) (IsSliceInBounds (And32 (Const32 [c]) _) (Const32 [d])) && 0 <= c && c <= d -> (ConstBool [1]) @@ -311,6 +330,18 @@ (Mul32 (Const32 [c]) (Add32 (Const32 [d]) x)) -> (Add32 (Const32 [int64(int32(c*d))]) (Mul32 (Const32 [c]) x)) +// 
Rewrite x*y + x*z to x*(y+z) +(Add64 (Mul64 x y) (Mul64 x z)) -> (Mul64 x (Add64 y z)) +(Add32 (Mul32 x y) (Mul32 x z)) -> (Mul32 x (Add32 y z)) +(Add16 (Mul16 x y) (Mul16 x z)) -> (Mul16 x (Add16 y z)) +(Add8 (Mul8 x y) (Mul8 x z)) -> (Mul8 x (Add8 y z)) + +// Rewrite x*y - x*z to x*(y-z) +(Sub64 (Mul64 x y) (Mul64 x z)) -> (Mul64 x (Sub64 y z)) +(Sub32 (Mul32 x y) (Mul32 x z)) -> (Mul32 x (Sub32 y z)) +(Sub16 (Mul16 x y) (Mul16 x z)) -> (Mul16 x (Sub16 y z)) +(Sub8 (Mul8 x y) (Mul8 x z)) -> (Mul8 x (Sub8 y z)) + // rewrite shifts of 8/16/32 bit consts into 64 bit consts to reduce // the number of the other rewrite rules for const shifts (Lsh64x32 x (Const32 [c])) -> (Lsh64x64 x (Const64 [int64(uint32(c))])) @@ -538,6 +569,25 @@ (Leq16U (Const16 [c]) (Const16 [d])) -> (ConstBool [b2i(uint16(c) <= uint16(d))]) (Leq8U (Const8 [c]) (Const8 [d])) -> (ConstBool [b2i(uint8(c) <= uint8(d))]) +// constant floating point comparisons +(Eq64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(i2f(c) == i2f(d))]) +(Eq32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(i2f(c) == i2f(d))]) + +(Neq64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(i2f(c) != i2f(d))]) +(Neq32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(i2f(c) != i2f(d))]) + +(Greater64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(i2f(c) > i2f(d))]) +(Greater32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(i2f(c) > i2f(d))]) + +(Geq64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(i2f(c) >= i2f(d))]) +(Geq32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(i2f(c) >= i2f(d))]) + +(Less64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(i2f(c) < i2f(d))]) +(Less32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(i2f(c) < i2f(d))]) + +(Leq64F (Const64F [c]) (Const64F [d])) -> (ConstBool [b2i(i2f(c) <= i2f(d))]) +(Leq32F (Const32F [c]) (Const32F [d])) -> (ConstBool [b2i(i2f(c) <= i2f(d))]) + // simplifications (Or64 x x) -> x (Or32 x x) -> x @@ -691,6 +741,12 @@ // Load of store of same 
address, with compatibly typed value and same size (Load p1 (Store {t2} p2 x _)) && isSamePtr(p1,p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.(*types.Type).Size() -> x +// Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits +(Load p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && t2.(*types.Type).Size() == 8 && is64BitFloat(t1) -> (Const64F [x]) +(Load p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && t2.(*types.Type).Size() == 4 && is32BitFloat(t1) -> (Const32F [f2i(float64(math.Float32frombits(uint32(x))))]) +(Load p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && t2.(*types.Type).Size() == 8 && is64BitInt(t1) -> (Const64 [x]) +(Load p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && t2.(*types.Type).Size() == 4 && is32BitInt(t1) -> (Const32 [int64(int32(math.Float32bits(float32(i2f(x)))))]) + // Eliminate stores of values that have just been loaded from the same location. // We also handle the common case where there are some intermediate stores to non-overlapping struct fields. (Store {t1} p1 (Load p2 mem) mem) && @@ -819,8 +875,6 @@ (Store _ (ArrayMake0) mem) -> mem (Store dst (ArrayMake1 e) mem) -> (Store {e.Type} dst e mem) -(ArraySelect [0] (Load ptr mem)) -> (Load ptr mem) - // Putting [1]{*byte} and similar into direct interfaces. (IMake typ (ArrayMake1 val)) -> (IMake typ val) (ArraySelect [0] x:(IData _)) -> x @@ -947,6 +1001,13 @@ (Div64u n (Const64 [c])) && isPowerOfTwo(c) -> (Rsh64Ux64 n (Const64 [log2(c)])) (Div64u n (Const64 [-1<<63])) -> (Rsh64Ux64 n (Const64 [63])) +// Signed non-negative divide by power of 2. 
+(Div8 n (Const8 [c])) && isNonNegative(n) && isPowerOfTwo(c&0xff) -> (Rsh8Ux64 n (Const64 [log2(c&0xff)])) +(Div16 n (Const16 [c])) && isNonNegative(n) && isPowerOfTwo(c&0xffff) -> (Rsh16Ux64 n (Const64 [log2(c&0xffff)])) +(Div32 n (Const32 [c])) && isNonNegative(n) && isPowerOfTwo(c&0xffffffff) -> (Rsh32Ux64 n (Const64 [log2(c&0xffffffff)])) +(Div64 n (Const64 [c])) && isNonNegative(n) && isPowerOfTwo(c) -> (Rsh64Ux64 n (Const64 [log2(c)])) +(Div64 n (Const64 [-1<<63])) && isNonNegative(n) -> (Const64 [0]) + // Unsigned divide, not a power of 2. Strength reduce to a multiply. // For 8-bit divides, we just do a direct 9-bit by 8-bit multiply. (Div8u x (Const8 [c])) && umagicOK(8, c) -> @@ -1180,6 +1241,13 @@ (Mod64u n (Const64 [c])) && isPowerOfTwo(c) -> (And64 n (Const64 [c-1])) (Mod64u n (Const64 [-1<<63])) -> (And64 n (Const64 [1<<63-1])) +// Signed non-negative mod by power of 2 constant. +(Mod8 n (Const8 [c])) && isNonNegative(n) && isPowerOfTwo(c&0xff) -> (And8 n (Const8 [(c&0xff)-1])) +(Mod16 n (Const16 [c])) && isNonNegative(n) && isPowerOfTwo(c&0xffff) -> (And16 n (Const16 [(c&0xffff)-1])) +(Mod32 n (Const32 [c])) && isNonNegative(n) && isPowerOfTwo(c&0xffffffff) -> (And32 n (Const32 [(c&0xffffffff)-1])) +(Mod64 n (Const64 [c])) && isNonNegative(n) && isPowerOfTwo(c) -> (And64 n (Const64 [c-1])) +(Mod64 n (Const64 [-1<<63])) && isNonNegative(n) -> n + // Signed mod by negative constant. (Mod8 n (Const8 [c])) && c < 0 && c != -1<<7 -> (Mod8 n (Const8 [-c])) (Mod16 n (Const16 [c])) && c < 0 && c != -1<<15 -> (Mod16 n (Const16 [-c])) @@ -1360,15 +1428,13 @@ -> mem // nil checks just need to rewrite to something useless. // they will be deadcode eliminated soon afterwards. 
-(NilCheck (Load (OffPtr [c] (SP)) mem) mem) - && mem.Op == OpStaticCall - && isSameSym(mem.Aux, "runtime.newobject") +(NilCheck (Load (OffPtr [c] (SP)) (StaticCall {sym} _)) _) + && isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check") -> (Invalid) -(NilCheck (OffPtr (Load (OffPtr [c] (SP)) mem)) mem) - && mem.Op == OpStaticCall - && isSameSym(mem.Aux, "runtime.newobject") +(NilCheck (OffPtr (Load (OffPtr [c] (SP)) (StaticCall {sym} _))) _) + && isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check") -> (Invalid) @@ -1377,6 +1443,14 @@ (EqPtr x x) -> (ConstBool [1]) (EqPtr (Addr {a} x) (Addr {b} x)) -> (ConstBool [b2i(a == b)]) +// Inline small runtime.memmove calls with constant length. +(StaticCall {sym} s1:(Store _ (Const64 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))) + && isSameSym(sym,"runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmoveSize(sz,config) + -> (Move {t.(*types.Type).Elem()} [sz] dst src mem) +(StaticCall {sym} s1:(Store _ (Const32 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))) + && isSameSym(sym,"runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmoveSize(sz,config) + -> (Move {t.(*types.Type).Elem()} [sz] dst src mem) + // De-virtualize interface calls into static calls. 
// Note that (ITab (IMake)) doesn't get // rewritten until after the first opt pass, diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go index d962e4a193b..5ce11c7e879 100644 --- a/src/cmd/compile/internal/ssa/gen/genericOps.go +++ b/src/cmd/compile/internal/ssa/gen/genericOps.go @@ -255,7 +255,28 @@ var genericOps = []opData{ {name: "PopCount32", argLength: 1}, // Count bits in arg[0] {name: "PopCount64", argLength: 1}, // Count bits in arg[0] - {name: "Sqrt", argLength: 1}, // sqrt(arg0), float64 only + // Square root, float64 only. + // Special cases: + // +∞ → +∞ + // ±0 → ±0 (sign preserved) + // x<0 → NaN + // NaN → NaN + {name: "Sqrt", argLength: 1}, // √arg0 + + // Round to integer, float64 only. + // Special cases: + // ±∞ → ±∞ (sign preserved) + // ±0 → ±0 (sign preserved) + // NaN → NaN + {name: "Floor", argLength: 1}, // round arg0 toward -∞ + {name: "Ceil", argLength: 1}, // round arg0 toward +∞ + {name: "Trunc", argLength: 1}, // round arg0 toward 0 + {name: "Round", argLength: 1}, // round arg0 to nearest, ties away from 0 + {name: "RoundToEven", argLength: 1}, // round arg0 to nearest, ties to even + + // Modify the sign bit + {name: "Abs", argLength: 1}, // absolute value arg0 + {name: "Copysign", argLength: 2}, // copy sign from arg0 to arg1 // Data movement, max argument length for Phi is indefinite so just pick // a really large number @@ -286,12 +307,13 @@ var genericOps = []opData{ // Constant-like things {name: "InitMem"}, // memory input to the function. - {name: "Arg", aux: "SymOff", symEffect: "None"}, // argument to the function. aux=GCNode of arg, off = offset in that arg. + {name: "Arg", aux: "SymOff", symEffect: "Read"}, // argument to the function. aux=GCNode of arg, off = offset in that arg. - // The address of a variable. arg0 is the base pointer (SB or SP, depending - // on whether it is a global or stack variable). The Aux field identifies the - // variable. 
It will be either an *ExternSymbol (with arg0=SB), *ArgSymbol (arg0=SP), - // or *AutoSymbol (arg0=SP). + // The address of a variable. arg0 is the base pointer. + // If the variable is a global, the base pointer will be SB and + // the Aux field will be a *obj.LSym. + // If the variable is a local, the base pointer will be SP and + // the Aux field will be a *gc.Node. {name: "Addr", argLength: 1, aux: "Sym", symEffect: "Addr"}, // Address of a variable. Arg0=SP or SB. Aux identifies the variable. {name: "SP"}, // stack pointer @@ -299,8 +321,11 @@ var genericOps = []opData{ {name: "Invalid"}, // unused value // Memory operations - {name: "Load", argLength: 2}, // Load from arg0. arg1=memory - {name: "Store", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "Load", argLength: 2}, // Load from arg0. arg1=memory + {name: "Store", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + // The source and destination of Move may overlap in some cases. See e.g. + // memmove inlining in generic.rules. When inlineablememmovesize (in ../rewrite.go) + // returns true, we must do all loads before all stores, when lowering Move. {name: "Move", argLength: 3, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size, aux=type. Returns memory. {name: "Zero", argLength: 2, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=mem, auxint=size, aux=type. Returns memory. @@ -310,6 +335,12 @@ var genericOps = []opData{ {name: "MoveWB", argLength: 3, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size, aux=type. Returns memory. {name: "ZeroWB", argLength: 2, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=mem, auxint=size, aux=type. Returns memory. + // WB invokes runtime.gcWriteBarrier. 
This is not a normal + // call: it takes arguments in registers, doesn't clobber + // general-purpose registers (the exact clobber set is + // arch-dependent), and is not a safe-point. + {name: "WB", argLength: 3, typ: "Mem", aux: "Sym", symEffect: "None"}, // arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier + // Function calls. Arguments to the call have already been written to the stack. // Return values appear on the stack. The method receiver, if any, is treated // as a phantom first argument. @@ -361,6 +392,8 @@ var genericOps = []opData{ // Pseudo-ops {name: "GetG", argLength: 1}, // runtime.getg() (read g pointer). arg0=mem {name: "GetClosurePtr"}, // get closure pointer from dedicated register + {name: "GetCallerPC"}, // for getcallerpc intrinsic + {name: "GetCallerSP"}, // for getcallersp intrinsic // Indexing operations {name: "PtrIndex", argLength: 2}, // arg0=ptr, arg1=index. Computes ptr+sizeof(*v.type)*index, where index is extended to ptrwidth type @@ -415,8 +448,9 @@ var genericOps = []opData{ {name: "VarDef", argLength: 1, aux: "Sym", typ: "Mem", symEffect: "None"}, // aux is a *gc.Node of a variable that is about to be initialized. arg0=mem, returns mem {name: "VarKill", argLength: 1, aux: "Sym", symEffect: "None"}, // aux is a *gc.Node of a variable that is known to be dead. arg0=mem, returns mem - {name: "VarLive", argLength: 1, aux: "Sym", symEffect: "None"}, // aux is a *gc.Node of a variable that must be kept live. arg0=mem, returns mem + {name: "VarLive", argLength: 1, aux: "Sym", symEffect: "Read"}, // aux is a *gc.Node of a variable that must be kept live. arg0=mem, returns mem {name: "KeepAlive", argLength: 2, typ: "Mem"}, // arg[0] is a value that must be kept alive until this mark. 
arg[1]=mem, returns mem + {name: "RegKill"}, // regalloc has determined that the value in this register is dead // Ops for breaking 64-bit operations on 32-bit architectures {name: "Int64Make", argLength: 2, typ: "UInt64"}, // arg0=hi, arg1=lo diff --git a/src/cmd/compile/internal/ssa/gen/main.go b/src/cmd/compile/internal/ssa/gen/main.go index ea6fa878136..0966d4c39db 100644 --- a/src/cmd/compile/internal/ssa/gen/main.go +++ b/src/cmd/compile/internal/ssa/gen/main.go @@ -169,6 +169,9 @@ func genOp() { if v.reg.clobbers != 0 { log.Fatalf("%s is rematerializeable and clobbers registers", v.name) } + if v.clobberFlags { + log.Fatalf("%s is rematerializeable and clobbers flags", v.name) + } fmt.Fprintln(w, "rematerializeable: true,") } if v.commutative { @@ -177,10 +180,10 @@ func genOp() { if v.resultInArg0 { fmt.Fprintln(w, "resultInArg0: true,") if v.reg.inputs[0] != v.reg.outputs[0] { - log.Fatalf("input[0] and output[0] must use the same registers for %s", v.name) + log.Fatalf("%s: input[0] and output[0] must use the same registers for %s", a.name, v.name) } if v.commutative && v.reg.inputs[1] != v.reg.outputs[0] { - log.Fatalf("input[1] and output[0] must use the same registers for %s", v.name) + log.Fatalf("%s: input[1] and output[0] must use the same registers for %s", a.name, v.name) } } if v.resultNotInArgs { diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go index 4306f7e7c0e..c23a54d9b5d 100644 --- a/src/cmd/compile/internal/ssa/gen/rulegen.go +++ b/src/cmd/compile/internal/ssa/gen/rulegen.go @@ -280,29 +280,32 @@ func genRules(arch arch) { fmt.Fprintf(w, "for {\n") - s := split(match[1 : len(match)-1]) // remove parens, then split + _, _, _, aux, s := extract(match) // remove parens, then split // check match of control value - if s[1] != "nil" { + if s[0] != "nil" { fmt.Fprintf(w, "v := b.Control\n") - if strings.Contains(s[1], "(") { - genMatch0(w, arch, s[1], "v", map[string]struct{}{}, false, 
rule.loc) + if strings.Contains(s[0], "(") { + genMatch0(w, arch, s[0], "v", map[string]struct{}{}, false, rule.loc) } else { fmt.Fprintf(w, "_ = v\n") // in case we don't use v - fmt.Fprintf(w, "%s := b.Control\n", s[1]) + fmt.Fprintf(w, "%s := b.Control\n", s[0]) } } + if aux != "" { + fmt.Fprintf(w, "%s := b.Aux\n", aux) + } if cond != "" { fmt.Fprintf(w, "if !(%s) {\nbreak\n}\n", cond) } // Rule matches. Generate result. - t := split(result[1 : len(result)-1]) // remove parens, then split - newsuccs := t[2:] + outop, _, _, aux, t := extract(result) // remove parens, then split + newsuccs := t[1:] // Check if newsuccs is the same set as succs. - succs := s[2:] + succs := s[1:] m := map[string]bool{} for _, succ := range succs { if m[succ] { @@ -320,11 +323,16 @@ func genRules(arch arch) { log.Fatalf("unmatched successors %v in %s", m, rule) } - fmt.Fprintf(w, "b.Kind = %s\n", blockName(t[0], arch)) - if t[1] == "nil" { + fmt.Fprintf(w, "b.Kind = %s\n", blockName(outop, arch)) + if t[0] == "nil" { fmt.Fprintf(w, "b.SetControl(nil)\n") } else { - fmt.Fprintf(w, "b.SetControl(%s)\n", genResult0(w, arch, t[1], new(int), false, false, rule.loc)) + fmt.Fprintf(w, "b.SetControl(%s)\n", genResult0(w, arch, t[0], new(int), false, false, rule.loc)) + } + if aux != "" { + fmt.Fprintf(w, "b.Aux = %s\n", aux) + } else { + fmt.Fprintln(w, "b.Aux = nil") } succChanged := false @@ -622,11 +630,7 @@ func isBlock(name string, arch arch) bool { return false } -// parseValue parses a parenthesized value from a rule. -// The value can be from the match or the result side. -// It returns the op and unparsed strings for typ, auxint, and aux restrictions and for all args. -// oparch is the architecture that op is located in, or "" for generic. 
-func parseValue(val string, arch arch, loc string) (op opData, oparch string, typ string, auxint string, aux string, args []string) { +func extract(val string) (op string, typ string, auxint string, aux string, args []string) { val = val[1 : len(val)-1] // remove () // Split val up into regions. @@ -634,6 +638,7 @@ func parseValue(val string, arch arch, loc string) (op opData, oparch string, ty s := split(val) // Extract restrictions and args. + op = s[0] for _, a := range s[1:] { switch a[0] { case '<': @@ -646,8 +651,18 @@ func parseValue(val string, arch arch, loc string) (op opData, oparch string, ty args = append(args, a) } } + return +} + +// parseValue parses a parenthesized value from a rule. +// The value can be from the match or the result side. +// It returns the op and unparsed strings for typ, auxint, and aux restrictions and for all args. +// oparch is the architecture that op is located in, or "" for generic. +func parseValue(val string, arch arch, loc string) (op opData, oparch string, typ string, auxint string, aux string, args []string) { // Resolve the op. + var s string + s, typ, auxint, aux, args = extract(val) // match reports whether x is a good op to select. // If strict is true, rule generation might succeed. @@ -656,14 +671,14 @@ func parseValue(val string, arch arch, loc string) (op opData, oparch string, ty // Doing strict=true then strict=false allows // precise op matching while retaining good error messages. 
match := func(x opData, strict bool, archname string) bool { - if x.name != s[0] { + if x.name != s { return false } if x.argLength != -1 && int(x.argLength) != len(args) { if strict { return false } else { - log.Printf("%s: op %s (%s) should have %d args, has %d", loc, s[0], archname, x.argLength, len(args)) + log.Printf("%s: op %s (%s) should have %d args, has %d", loc, s, archname, x.argLength, len(args)) } } return true diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go index d554907bebe..47f37f23375 100644 --- a/src/cmd/compile/internal/ssa/html.go +++ b/src/cmd/compile/internal/ssa/html.go @@ -11,6 +11,7 @@ import ( "html" "io" "os" + "strings" ) type HTMLWriter struct { @@ -65,6 +66,11 @@ th, td { padding: 5px; } +td.ssa-prog { + width: 600px; + word-wrap: break-word; +} + li { list-style-type: none; } @@ -120,6 +126,11 @@ dd.ssa-prog { font-style: italic; } +.line-number { + font-style: italic; + font-size: 11px; +} + .highlight-yellow { background-color: yellow; } .highlight-aquamarine { background-color: aquamarine; } .highlight-coral { background-color: coral; } @@ -309,17 +320,21 @@ func (w *HTMLWriter) WriteFunc(title string, f *Func) { if w == nil { return // avoid generating HTML just to discard it } - w.WriteColumn(title, f.HTML()) + w.WriteColumn(title, "", f.HTML()) // TODO: Add visual representation of f's CFG. } // WriteColumn writes raw HTML in a column headed by title. // It is intended for pre- and post-compilation log output. -func (w *HTMLWriter) WriteColumn(title string, html string) { +func (w *HTMLWriter) WriteColumn(title, class, html string) { if w == nil { return } - w.WriteString("
    ") @@ -352,7 +367,14 @@ func (v *Value) LongHTML() string { // We already have visual noise in the form of punctuation // maybe we could replace some of that with formatting. s := fmt.Sprintf("", v.String()) - s += fmt.Sprintf("%s = %s", v.HTML(), v.Op.String()) + + linenumber := "(?)" + if v.Pos.IsKnown() { + linenumber = fmt.Sprintf("(%d)", v.Pos.Line()) + } + + s += fmt.Sprintf("%s %s = %s", v.HTML(), linenumber, v.Op.String()) + s += " <" + html.EscapeString(v.Type.String()) + ">" s += html.EscapeString(v.auxString()) for _, a := range v.Args { @@ -360,8 +382,21 @@ func (v *Value) LongHTML() string { } r := v.Block.Func.RegAlloc if int(v.ID) < len(r) && r[v.ID] != nil { - s += " : " + html.EscapeString(r[v.ID].Name()) + s += " : " + html.EscapeString(r[v.ID].String()) } + var names []string + for name, values := range v.Block.Func.NamedValues { + for _, value := range values { + if value == v { + names = append(names, name.String()) + break // drop duplicates. + } + } + } + if len(names) != 0 { + s += " (" + strings.Join(names, ", ") + ")" + } + s += "" return s } @@ -396,6 +431,11 @@ func (b *Block) LongHTML() string { case BranchLikely: s += " (likely)" } + if b.Pos.IsKnown() { + // TODO does not begin to deal with the full complexity of line numbers. + // Maybe we want a string/slice instead, of outer-inner when inlining. + s += fmt.Sprintf(" (line %d)", b.Pos.Line()) + } return s } @@ -469,7 +509,7 @@ func (p htmlFuncPrinter) endDepCycle() { } func (p htmlFuncPrinter) named(n LocalSlot, vals []*Value) { - fmt.Fprintf(p.w, "
  • name %s: ", n.Name()) + fmt.Fprintf(p.w, "
  • name %s: ", n) for _, val := range vals { fmt.Fprintf(p.w, "%s ", val.HTML()) } diff --git a/src/cmd/compile/internal/ssa/likelyadjust.go b/src/cmd/compile/internal/ssa/likelyadjust.go index 323de3d76bc..5f4c5d1ccd4 100644 --- a/src/cmd/compile/internal/ssa/likelyadjust.go +++ b/src/cmd/compile/internal/ssa/likelyadjust.go @@ -12,7 +12,7 @@ type loop struct { header *Block // The header node of this (reducible) loop outer *loop // loop containing this loop - // By default, children exits, and depth are not initialized. + // By default, children, exits, and depth are not initialized. children []*loop // loops nested directly within this loop. Initialized by assembleChildren(). exits []*Block // exits records blocks reached by exits from this loop. Initialized by findExits(). @@ -23,7 +23,7 @@ type loop struct { isInner bool // True if never discovered to contain a loop // register allocation uses this. - containsCall bool // if any block in this loop or any loop it contains has a call + containsCall bool // if any block in this loop or any loop within it contains has a call } // outerinner records that outer contains inner @@ -72,11 +72,12 @@ func (l *loop) checkContainsCall(bb *Block) { } type loopnest struct { - f *Func - b2l []*loop - po []*Block - sdom SparseTree - loops []*loop + f *Func + b2l []*loop + po []*Block + sdom SparseTree + loops []*loop + hasIrreducible bool // TODO current treatment of irreducible loops is very flaky, if accurate loops are needed, must punt at function level. // Record which of the lazily initialized fields have actually been initialized. 
initializedChildren, initializedDepth, initializedExits bool @@ -104,7 +105,7 @@ const ( blEXIT = 3 ) -var bllikelies [4]string = [4]string{"default", "call", "ret", "exit"} +var bllikelies = [4]string{"default", "call", "ret", "exit"} func describePredictionAgrees(b *Block, prediction BranchPrediction) string { s := "" @@ -285,6 +286,12 @@ func loopnestfor(f *Func) *loopnest { sdom := f.sdom() b2l := make([]*loop, f.NumBlocks()) loops := make([]*loop, 0) + visited := make([]bool, f.NumBlocks()) + sawIrred := false + + if f.pass.debug > 2 { + fmt.Printf("loop finding in %s\n", f.Name) + } // Reducible-loop-nest-finding. for _, b := range po { @@ -318,10 +325,17 @@ func loopnestfor(f *Func) *loopnest { b2l[bb.ID] = l l.checkContainsCall(bb) } - } else { // Perhaps a loop header is inherited. + } else if !visited[bb.ID] { // Found an irreducible loop + sawIrred = true + if f.pass != nil && f.pass.debug > 4 { + fmt.Printf("loop finding succ %s of %s is IRRED, in %s\n", bb.String(), b.String(), f.Name) + } + } else if l != nil { + // TODO handle case where l is irreducible. + // Perhaps a loop header is inherited. // is there any loop containing our successor whose // header dominates b? 
- if l != nil && !sdom.isAncestorEq(l.header, b) { + if !sdom.isAncestorEq(l.header, b) { l = l.nearestOuterLoop(sdom, b) } if f.pass != nil && f.pass.debug > 4 { @@ -331,6 +345,11 @@ func loopnestfor(f *Func) *loopnest { fmt.Printf("loop finding succ %s of %s provides loop with header %s\n", bb.String(), b.String(), l.header.String()) } } + } else { // No loop + if f.pass != nil && f.pass.debug > 4 { + fmt.Printf("loop finding succ %s of %s has no loop\n", bb.String(), b.String()) + } + } if l == nil || innermost == l { @@ -355,9 +374,10 @@ func loopnestfor(f *Func) *loopnest { innermost.checkContainsCall(b) innermost.nBlocks++ } + visited[b.ID] = true } - ln := &loopnest{f: f, b2l: b2l, po: po, sdom: sdom, loops: loops} + ln := &loopnest{f: f, b2l: b2l, po: po, sdom: sdom, loops: loops, hasIrreducible: sawIrred} // Curious about the loopiness? "-d=ssa/likelyadjust/stats" if f.pass != nil && f.pass.stats > 0 && len(loops) > 0 { diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go index 41b48947aad..a482a608a16 100644 --- a/src/cmd/compile/internal/ssa/location.go +++ b/src/cmd/compile/internal/ssa/location.go @@ -11,30 +11,56 @@ import ( // A place that an ssa variable can reside. type Location interface { - Name() string // name to use in assembly templates: %rax, 16(%rsp), ... + String() string // name to use in assembly templates: AX, 16(SP), ... } -// A Register is a machine register, like %rax. +// A Register is a machine register, like AX. // They are numbered densely from 0 (for each architecture). type Register struct { - num int32 + num int32 // dense numbering objNum int16 // register number from cmd/internal/obj/$ARCH name string } -func (r *Register) Name() string { +func (r *Register) String() string { return r.name } -// A LocalSlot is a location in the stack frame. -// It is (possibly a subpiece of) a PPARAM, PPARAMOUT, or PAUTO ONAME node. 
-type LocalSlot struct { - N GCNode // an ONAME *gc.Node representing a variable on the stack - Type *types.Type // type of slot - Off int64 // offset of slot in N +// ObjNum returns the register number from cmd/internal/obj/$ARCH that +// corresponds to this register. +func (r *Register) ObjNum() int16 { + return r.objNum } -func (s LocalSlot) Name() string { +// A LocalSlot is a location in the stack frame, which identifies and stores +// part or all of a PPARAM, PPARAMOUT, or PAUTO ONAME node. +// It can represent a whole variable, part of a larger stack slot, or part of a +// variable that has been decomposed into multiple stack slots. +// As an example, a string could have the following configurations: +// +// stack layout LocalSlots +// +// Optimizations are disabled. s is on the stack and represented in its entirety. +// [ ------- s string ---- ] { N: s, Type: string, Off: 0 } +// +// s was not decomposed, but the SSA operates on its parts individually, so +// there is a LocalSlot for each of its fields that points into the single stack slot. +// [ ------- s string ---- ] { N: s, Type: *uint8, Off: 0 }, {N: s, Type: int, Off: 8} +// +// s was decomposed. Each of its fields is in its own stack slot and has its own LocalSLot. +// [ ptr *uint8 ] [ len int] { N: ptr, Type: *uint8, Off: 0, SplitOf: parent, SplitOffset: 0}, +// { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8} +// parent = &{N: s, Type: string} +type LocalSlot struct { + N GCNode // an ONAME *gc.Node representing a stack location. + Type *types.Type // type of slot + Off int64 // offset of slot in N + + SplitOf *LocalSlot // slot is a decomposition of SplitOf + SplitOffset int64 // .. at this offset. 
+} + +func (s LocalSlot) String() string { if s.Off == 0 { return fmt.Sprintf("%v[%v]", s.N, s.Type) } @@ -43,13 +69,13 @@ func (s LocalSlot) Name() string { type LocPair [2]Location -func (t LocPair) Name() string { +func (t LocPair) String() string { n0, n1 := "nil", "nil" if t[0] != nil { - n0 = t[0].Name() + n0 = t[0].String() } if t[1] != nil { - n1 = t[1].Name() + n1 = t[1].String() } return fmt.Sprintf("<%s,%s>", n0, n1) } diff --git a/src/cmd/compile/internal/ssa/loopreschedchecks.go b/src/cmd/compile/internal/ssa/loopreschedchecks.go index 4222bf81c56..c9c04ef41ce 100644 --- a/src/cmd/compile/internal/ssa/loopreschedchecks.go +++ b/src/cmd/compile/internal/ssa/loopreschedchecks.go @@ -17,7 +17,7 @@ type edgeMem struct { m *Value // phi for memory at dest of e } -// a rewriteTarget is a a value-argindex pair indicating +// a rewriteTarget is a value-argindex pair indicating // where a rewrite is applied. Note that this is for values, // not for block controls, because block controls are not targets // for the rewrites performed in inserting rescheduling checks. 
@@ -267,8 +267,6 @@ func insertLoopReschedChecks(f *Func) { sdom = newSparseTree(f, f.Idom()) fmt.Printf("after %s = %s\n", f.Name, sdom.treestructure(f.Entry)) } - - return } // newPhiFor inserts a new Phi function into b, diff --git a/src/cmd/compile/internal/ssa/looprotate.go b/src/cmd/compile/internal/ssa/looprotate.go index c5b768264db..2e5e421df7f 100644 --- a/src/cmd/compile/internal/ssa/looprotate.go +++ b/src/cmd/compile/internal/ssa/looprotate.go @@ -23,6 +23,9 @@ package ssa // JLT loop func loopRotate(f *Func) { loopnest := f.loopnest() + if loopnest.hasIrreducible { + return + } if len(loopnest.loops) == 0 { return } @@ -62,7 +65,7 @@ func loopRotate(f *Func) { break } nextb := f.Blocks[nextIdx] - if nextb == p { // original loop precedessor is next + if nextb == p { // original loop predecessor is next break } if loopnest.b2l[nextb.ID] != loop { // about to leave loop diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go index d01edcc77d5..b107f8a836c 100644 --- a/src/cmd/compile/internal/ssa/nilcheck.go +++ b/src/cmd/compile/internal/ssa/nilcheck.go @@ -126,7 +126,7 @@ func nilcheckelim(f *Func) { f.Warnl(v.Pos, "removed nil check") } v.reset(OpUnknown) - // TODO: f.freeValue(v) + f.freeValue(v) i-- continue } @@ -168,6 +168,8 @@ func nilcheckelim2(f *Func) { // input pointer is nil. Remove nil checks on those pointers, as the // faulting instruction effectively does the nil check for free. 
unnecessary.clear() + // Optimization: keep track of removed nilcheck with smallest index + firstToRemove := len(b.Values) for i := len(b.Values) - 1; i >= 0; i-- { v := b.Values[i] if opcodeTable[v.Op].nilCheck && unnecessary.contains(v.Args[0].ID) { @@ -175,6 +177,7 @@ func nilcheckelim2(f *Func) { f.Warnl(v.Pos, "removed nil check") } v.reset(OpUnknown) + firstToRemove = i continue } if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() { @@ -224,8 +227,9 @@ func nilcheckelim2(f *Func) { } } // Remove values we've clobbered with OpUnknown. - i := 0 - for _, v := range b.Values { + i := firstToRemove + for j := i; j < len(b.Values); j++ { + v := b.Values[j] if v.Op != OpUnknown { b.Values[i] = v i++ diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go index 37c24ee4cf4..92560cdffb7 100644 --- a/src/cmd/compile/internal/ssa/op.go +++ b/src/cmd/compile/internal/ssa/op.go @@ -67,7 +67,7 @@ const ( auxFloat32 // auxInt is a float32 (encoded with math.Float64bits) auxFloat64 // auxInt is a float64 (encoded with math.Float64bits) auxString // aux is a string - auxSym // aux is a symbol + auxSym // aux is a symbol (a *gc.Node for locals or an *obj.LSym for globals) auxSymOff // aux is a symbol, auxInt is an offset auxSymValAndOff // aux is a symbol, auxInt is a ValAndOff auxTyp // aux is a type diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 763a1cbd4dd..0beabb0b842 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -70,6 +70,8 @@ const ( BlockARM64NZ BlockARM64ZW BlockARM64NZW + BlockARM64TBZ + BlockARM64TBNZ BlockMIPSEQ BlockMIPSNE @@ -162,20 +164,22 @@ var blockString = [...]string{ BlockARMUGT: "UGT", BlockARMUGE: "UGE", - BlockARM64EQ: "EQ", - BlockARM64NE: "NE", - BlockARM64LT: "LT", - BlockARM64LE: "LE", - BlockARM64GT: "GT", - BlockARM64GE: "GE", - BlockARM64ULT: "ULT", - BlockARM64ULE: "ULE", - BlockARM64UGT: 
"UGT", - BlockARM64UGE: "UGE", - BlockARM64Z: "Z", - BlockARM64NZ: "NZ", - BlockARM64ZW: "ZW", - BlockARM64NZW: "NZW", + BlockARM64EQ: "EQ", + BlockARM64NE: "NE", + BlockARM64LT: "LT", + BlockARM64LE: "LE", + BlockARM64GT: "GT", + BlockARM64GE: "GE", + BlockARM64ULT: "ULT", + BlockARM64ULE: "ULE", + BlockARM64UGT: "UGT", + BlockARM64UGE: "UGE", + BlockARM64Z: "Z", + BlockARM64NZ: "NZ", + BlockARM64ZW: "ZW", + BlockARM64NZW: "NZW", + BlockARM64TBZ: "TBZ", + BlockARM64TBNZ: "TBNZ", BlockMIPSEQ: "EQ", BlockMIPSNE: "NE", @@ -392,6 +396,8 @@ const ( Op386InvertFlags Op386LoweredGetG Op386LoweredGetClosurePtr + Op386LoweredGetCallerPC + Op386LoweredGetCallerSP Op386LoweredNilCheck Op386MOVLconvert Op386FlagEQ @@ -427,8 +433,8 @@ const ( OpAMD64MOVSSstoreidx4 OpAMD64MOVSDstoreidx1 OpAMD64MOVSDstoreidx8 - OpAMD64ADDSDmem OpAMD64ADDSSmem + OpAMD64ADDSDmem OpAMD64SUBSSmem OpAMD64SUBSDmem OpAMD64MULSSmem @@ -437,6 +443,8 @@ const ( OpAMD64ADDL OpAMD64ADDQconst OpAMD64ADDLconst + OpAMD64ADDQconstmem + OpAMD64ADDLconstmem OpAMD64SUBQ OpAMD64SUBL OpAMD64SUBQconst @@ -549,6 +557,7 @@ const ( OpAMD64POPCNTQ OpAMD64POPCNTL OpAMD64SQRTSD + OpAMD64ROUNDSD OpAMD64SBBQcarrymask OpAMD64SBBLcarrymask OpAMD64SETEQ @@ -561,6 +570,16 @@ const ( OpAMD64SETBE OpAMD64SETA OpAMD64SETAE + OpAMD64SETEQmem + OpAMD64SETNEmem + OpAMD64SETLmem + OpAMD64SETLEmem + OpAMD64SETGmem + OpAMD64SETGEmem + OpAMD64SETBmem + OpAMD64SETBEmem + OpAMD64SETAmem + OpAMD64SETAEmem OpAMD64SETEQF OpAMD64SETNEF OpAMD64SETORD @@ -585,6 +604,10 @@ const ( OpAMD64CVTSQ2SD OpAMD64CVTSD2SS OpAMD64CVTSS2SD + OpAMD64MOVQi2f + OpAMD64MOVQf2i + OpAMD64MOVLi2f + OpAMD64MOVLf2i OpAMD64PXOR OpAMD64LEAQ OpAMD64LEAQ1 @@ -610,6 +633,7 @@ const ( OpAMD64MOVWloadidx2 OpAMD64MOVLloadidx1 OpAMD64MOVLloadidx4 + OpAMD64MOVLloadidx8 OpAMD64MOVQloadidx1 OpAMD64MOVQloadidx8 OpAMD64MOVBstoreidx1 @@ -617,6 +641,7 @@ const ( OpAMD64MOVWstoreidx2 OpAMD64MOVLstoreidx1 OpAMD64MOVLstoreidx4 + OpAMD64MOVLstoreidx8 OpAMD64MOVQstoreidx1 
OpAMD64MOVQstoreidx8 OpAMD64MOVBstoreconst @@ -641,7 +666,10 @@ const ( OpAMD64InvertFlags OpAMD64LoweredGetG OpAMD64LoweredGetClosurePtr + OpAMD64LoweredGetCallerPC + OpAMD64LoweredGetCallerSP OpAMD64LoweredNilCheck + OpAMD64LoweredWB OpAMD64MOVQconvert OpAMD64MOVLconvert OpAMD64FlagEQ @@ -684,14 +712,21 @@ const ( OpARMRSCconst OpARMMULLU OpARMMULA + OpARMMULS OpARMADDF OpARMADDD OpARMSUBF OpARMSUBD OpARMMULF OpARMMULD + OpARMNMULF + OpARMNMULD OpARMDIVF OpARMDIVD + OpARMMULAF + OpARMMULAD + OpARMMULSF + OpARMMULSD OpARMAND OpARMANDconst OpARMOR @@ -700,6 +735,8 @@ const ( OpARMXORconst OpARMBIC OpARMBICconst + OpARMBFX + OpARMBFXU OpARMMVN OpARMNEGF OpARMNEGD @@ -812,9 +849,27 @@ const ( OpARMCMPshiftLL OpARMCMPshiftRL OpARMCMPshiftRA + OpARMCMNshiftLL + OpARMCMNshiftRL + OpARMCMNshiftRA + OpARMTSTshiftLL + OpARMTSTshiftRL + OpARMTSTshiftRA + OpARMTEQshiftLL + OpARMTEQshiftRL + OpARMTEQshiftRA OpARMCMPshiftLLreg OpARMCMPshiftRLreg OpARMCMPshiftRAreg + OpARMCMNshiftLLreg + OpARMCMNshiftRLreg + OpARMCMNshiftRAreg + OpARMTSTshiftLLreg + OpARMTSTshiftRLreg + OpARMTSTshiftRAreg + OpARMTEQshiftLLreg + OpARMTEQshiftRLreg + OpARMTEQshiftRAreg OpARMCMPF0 OpARMCMPD0 OpARMMOVWconst @@ -837,10 +892,16 @@ const ( OpARMMOVWloadshiftLL OpARMMOVWloadshiftRL OpARMMOVWloadshiftRA + OpARMMOVBUloadidx + OpARMMOVBloadidx + OpARMMOVHUloadidx + OpARMMOVHloadidx OpARMMOVWstoreidx OpARMMOVWstoreshiftLL OpARMMOVWstoreshiftRL OpARMMOVWstoreshiftRA + OpARMMOVBstoreidx + OpARMMOVHstoreidx OpARMMOVBreg OpARMMOVBUreg OpARMMOVHreg @@ -879,6 +940,7 @@ const ( OpARMLoweredZero OpARMLoweredMove OpARMLoweredGetClosurePtr + OpARMLoweredGetCallerSP OpARMMOVWconvert OpARMFlagEQ OpARMFlagLT_ULT @@ -989,12 +1051,14 @@ const ( OpARM64MOVHstore OpARM64MOVWstore OpARM64MOVDstore + OpARM64STP OpARM64FMOVSstore OpARM64FMOVDstore OpARM64MOVBstorezero OpARM64MOVHstorezero OpARM64MOVWstorezero OpARM64MOVDstorezero + OpARM64MOVQstorezero OpARM64MOVBreg OpARM64MOVBUreg OpARM64MOVHreg @@ -1042,6 +1106,7 @@ const 
( OpARM64DUFFCOPY OpARM64LoweredMove OpARM64LoweredGetClosurePtr + OpARM64LoweredGetCallerSP OpARM64MOVDconvert OpARM64FlagEQ OpARM64FlagLT_ULT @@ -1161,6 +1226,7 @@ const ( OpMIPSFPFlagTrue OpMIPSFPFlagFalse OpMIPSLoweredGetClosurePtr + OpMIPSLoweredGetCallerSP OpMIPSMOVWconvert OpMIPS64ADDV @@ -1253,10 +1319,25 @@ const ( OpMIPS64DUFFZERO OpMIPS64LoweredZero OpMIPS64LoweredMove + OpMIPS64LoweredAtomicLoad32 + OpMIPS64LoweredAtomicLoad64 + OpMIPS64LoweredAtomicStore32 + OpMIPS64LoweredAtomicStore64 + OpMIPS64LoweredAtomicStorezero32 + OpMIPS64LoweredAtomicStorezero64 + OpMIPS64LoweredAtomicExchange32 + OpMIPS64LoweredAtomicExchange64 + OpMIPS64LoweredAtomicAdd32 + OpMIPS64LoweredAtomicAdd64 + OpMIPS64LoweredAtomicAddconst32 + OpMIPS64LoweredAtomicAddconst64 + OpMIPS64LoweredAtomicCas32 + OpMIPS64LoweredAtomicCas64 OpMIPS64LoweredNilCheck OpMIPS64FPFlagTrue OpMIPS64FPFlagFalse OpMIPS64LoweredGetClosurePtr + OpMIPS64LoweredGetCallerSP OpMIPS64MOVVconvert OpPPC64ADD @@ -1284,6 +1365,8 @@ const ( OpPPC64SRW OpPPC64SLD OpPPC64SLW + OpPPC64ROTL + OpPPC64ROTLW OpPPC64ADDconstForCarry OpPPC64MaskIfNotCarry OpPPC64SRADconst @@ -1308,9 +1391,10 @@ const ( OpPPC64FCTIDZ OpPPC64FCTIWZ OpPPC64FCFID + OpPPC64FCFIDS OpPPC64FRSP - OpPPC64Xf2i64 - OpPPC64Xi2f64 + OpPPC64MFVSRD + OpPPC64MTVSRD OpPPC64AND OpPPC64ANDN OpPPC64OR @@ -1322,6 +1406,12 @@ const ( OpPPC64FNEG OpPPC64FSQRT OpPPC64FSQRTS + OpPPC64FFLOOR + OpPPC64FCEIL + OpPPC64FTRUNC + OpPPC64FABS + OpPPC64FNABS + OpPPC64FCPSGN OpPPC64ORconst OpPPC64XORconst OpPPC64ANDconst @@ -1374,6 +1464,7 @@ const ( OpPPC64GreaterEqual OpPPC64FGreaterEqual OpPPC64LoweredGetClosurePtr + OpPPC64LoweredGetCallerSP OpPPC64LoweredNilCheck OpPPC64LoweredRound32F OpPPC64LoweredRound64F @@ -1415,6 +1506,10 @@ const ( OpS390XFMADD OpS390XFMSUBS OpS390XFMSUB + OpS390XLPDFR + OpS390XLNDFR + OpS390XCPSDR + OpS390XFIDBR OpS390XFMOVSload OpS390XFMOVDload OpS390XFMOVSconst @@ -1519,6 +1614,8 @@ const ( OpS390XMOVDreg OpS390XMOVDnop OpS390XMOVDconst + 
OpS390XLDGR + OpS390XLGDR OpS390XCFDBRA OpS390XCGDBRA OpS390XCFEBRA @@ -1552,8 +1649,11 @@ const ( OpS390XMOVDBRstore OpS390XMVC OpS390XMOVBZloadidx + OpS390XMOVBloadidx OpS390XMOVHZloadidx + OpS390XMOVHloadidx OpS390XMOVWZloadidx + OpS390XMOVWloadidx OpS390XMOVDloadidx OpS390XMOVHBRloadidx OpS390XMOVWBRloadidx @@ -1576,6 +1676,7 @@ const ( OpS390XInvertFlags OpS390XLoweredGetG OpS390XLoweredGetClosurePtr + OpS390XLoweredGetCallerSP OpS390XLoweredNilCheck OpS390XLoweredRound32F OpS390XLoweredRound64F @@ -1800,6 +1901,13 @@ const ( OpPopCount32 OpPopCount64 OpSqrt + OpFloor + OpCeil + OpTrunc + OpRound + OpRoundToEven + OpAbs + OpCopysign OpPhi OpCopy OpConvert @@ -1826,6 +1934,7 @@ const ( OpStoreWB OpMoveWB OpZeroWB + OpWB OpClosureCall OpStaticCall OpInterCall @@ -1865,6 +1974,8 @@ const ( OpNilCheck OpGetG OpGetClosurePtr + OpGetCallerPC + OpGetCallerSP OpPtrIndex OpOffPtr OpSliceMake @@ -1897,6 +2008,7 @@ const ( OpVarKill OpVarLive OpKeepAlive + OpRegKill OpInt64Make OpInt64Hi OpInt64Lo @@ -4245,6 +4357,26 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 239}, // AX CX DX BX BP SI DI + }, + }, + }, { name: "LoweredNilCheck", argLen: 2, @@ -4258,9 +4390,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVLconvert", - argLen: 2, - asm: x86.AMOVL, + name: "MOVLconvert", + argLen: 2, + resultInArg0: true, + asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ {0, 239}, // AX CX DX BX BP SI DI @@ -4683,13 +4816,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ADDSDmem", + name: "ADDSSmem", auxType: auxSymOff, argLen: 3, resultInArg0: true, faultOnNilArg1: true, symEffect: SymRead, - asm: x86.AADDSD, + asm: x86.AADDSS, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // X0 X1 X2 
X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 @@ -4701,13 +4834,13 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ADDSSmem", + name: "ADDSDmem", auxType: auxSymOff, argLen: 3, resultInArg0: true, faultOnNilArg1: true, symEffect: SymRead, - asm: x86.AADDSS, + asm: x86.AADDSD, reg: regInfo{ inputs: []inputInfo{ {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 @@ -4852,6 +4985,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "ADDQconstmem", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AADDQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "ADDLconstmem", + auxType: auxSymValAndOff, + argLen: 2, + clobberFlags: true, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.AADDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, { name: "SUBQ", argLen: 2, @@ -6559,6 +6720,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "ROUNDSD", + auxType: auxInt8, + argLen: 1, + asm: x86.AROUNDSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + }, + }, { name: "SBBQcarrymask", argLen: 1, @@ -6679,6 +6854,136 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SETEQmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SETNEmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETNE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI 
R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SETLmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETLT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SETLEmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETLE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SETGmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETGT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SETGEmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETGE, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SETBmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETCS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SETBEmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETLS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SETAmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETHI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, + { + name: "SETAEmem", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: x86.ASETCC, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, { name: "SETEQF", argLen: 1, @@ -6975,6 +7280,54 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MOVQi2f", + argLen: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + }, + }, + { + name: "MOVQf2i", + argLen: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "MOVLi2f", + argLen: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + }, + }, + { + name: "MOVLf2i", + argLen: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + outputs: []outputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, { name: "PXOR", argLen: 2, @@ -7365,6 +7718,22 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MOVLloadidx8", + auxType: auxSymOff, + argLen: 3, + symEffect: SymRead, + asm: x86.AMOVL, + reg: regInfo{ + inputs: []inputInfo{ + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + outputs: []outputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, { name: "MOVQloadidx1", auxType: auxSymOff, @@ -7468,6 +7837,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MOVLstoreidx8", + auxType: auxSymOff, + argLen: 4, + symEffect: SymWrite, + asm: x86.AMOVL, + reg: regInfo{ + inputs: 
[]inputInfo{ + {1, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {2, 65535}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB + }, + }, + }, { name: "MOVQstoreidx1", auxType: auxSymOff, @@ -7643,7 +8026,6 @@ var opcodeTable = [...]opInfo{ name: "DUFFZERO", auxType: auxInt64, argLen: 3, - clobberFlags: true, faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ @@ -7767,6 +8149,26 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 + }, + }, + }, { name: "LoweredNilCheck", argLen: 2, @@ -7780,9 +8182,24 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVQconvert", - argLen: 2, - asm: x86.AMOVQ, + name: "LoweredWB", + auxType: auxSym, + argLen: 3, + clobberFlags: true, + symEffect: SymNone, + reg: regInfo{ + inputs: []inputInfo{ + {0, 128}, // DI + {1, 1}, // AX + }, + clobbers: 4294901760, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 + }, + }, + { + name: "MOVQconvert", + argLen: 2, + resultInArg0: true, + asm: x86.AMOVQ, reg: regInfo{ inputs: []inputInfo{ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -7793,9 +8210,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVLconvert", - argLen: 2, - asm: x86.AMOVL, + name: "MOVLconvert", + argLen: 2, + resultInArg0: true, + asm: x86.AMOVL, reg: regInfo{ inputs: []inputInfo{ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 @@ -8351,6 +8769,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MULS", + argLen: 3, + asm: arm.AMULS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 
R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, { name: "ADDF", argLen: 2, @@ -8439,6 +8872,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "NMULF", + argLen: 2, + commutative: true, + asm: arm.ANMULF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "NMULD", + argLen: 2, + commutative: true, + asm: arm.ANMULD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, { name: "DIVF", argLen: 2, @@ -8467,6 +8930,70 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MULAF", + argLen: 3, + resultInArg0: true, + asm: arm.AMULAF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MULAD", + argLen: 3, + resultInArg0: true, + asm: arm.AMULAD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MULSF", + argLen: 3, + resultInArg0: true, + asm: arm.AMULSF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "MULSD", + argLen: 3, + resultInArg0: true, + asm: arm.AMULSD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, { name: "AND", argLen: 2, @@ -8582,6 +9109,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "BFX", + auxType: auxInt32, + argLen: 1, + asm: arm.ABFX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "BFXU", + auxType: auxInt32, + argLen: 1, + asm: arm.ABFXU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, { name: "MVN", argLen: 1, @@ -10080,9 +10635,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "CMN", - argLen: 2, - asm: arm.ACMN, + name: "CMN", + argLen: 2, + commutative: true, + asm: arm.ACMN, reg: regInfo{ inputs: []inputInfo{ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 @@ -10205,6 +10761,114 @@ var 
opcodeTable = [...]opInfo{ }, }, }, + { + name: "CMNshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "CMNshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "CMNshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TSTshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TSTshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TSTshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TEQshiftLL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { + name: "TEQshiftRL", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, + { 
+ name: "TEQshiftRA", + auxType: auxInt32, + argLen: 2, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + }, + }, + }, { name: "CMPshiftLLreg", argLen: 3, @@ -10241,6 +10905,114 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "CMNshiftLLreg", + argLen: 3, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "CMNshiftRLreg", + argLen: 3, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "CMNshiftRAreg", + argLen: 3, + asm: arm.ACMN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "TSTshiftLLreg", + argLen: 3, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "TSTshiftRLreg", + argLen: 3, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "TSTshiftRAreg", + argLen: 3, + asm: arm.ATST, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + 
}, + }, + }, + { + name: "TEQshiftLLreg", + argLen: 3, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "TEQshiftRLreg", + argLen: 3, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "TEQshiftRAreg", + argLen: 3, + asm: arm.ATEQ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, { name: "CMPF0", argLen: 1, @@ -10554,6 +11326,62 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MOVBUloadidx", + argLen: 3, + asm: arm.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVBloadidx", + argLen: 3, + asm: arm.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVHUloadidx", + argLen: 3, + asm: arm.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, + { + name: "MOVHloadidx", + argLen: 3, + asm: arm.AMOVH, + reg: regInfo{ + inputs: 
[]inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, { name: "MOVWstoreidx", argLen: 4, @@ -10605,6 +11433,30 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MOVBstoreidx", + argLen: 4, + asm: arm.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + }, + }, + { + name: "MOVHstoreidx", + argLen: 4, + asm: arm.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 + {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB + }, + }, + }, { name: "MOVBreg", argLen: 1, @@ -11071,6 +11923,16 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "LoweredGetCallerSP", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, { name: "MOVWconvert", argLen: 2, @@ -12541,6 +13403,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "STP", + auxType: auxSymOff, + argLen: 4, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.ASTP, + reg: regInfo{ + inputs: []inputInfo{ + {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, { name: "FMOVSstore", auxType: auxSymOff, @@ -12621,6 +13498,19 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MOVQstorezero", + 
auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: arm64.ASTP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB + }, + }, + }, { name: "MOVBreg", argLen: 1, @@ -13132,7 +14022,7 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + {0, 65536}, // R16 }, clobbers: 536936448, // R16 R30 }, @@ -13188,6 +14078,16 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "LoweredGetCallerSP", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 + }, + }, + }, { name: "MOVDconvert", argLen: 2, @@ -14766,6 +15666,16 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "LoweredGetCallerSP", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31 + }, + }, + }, { name: "MOVWconvert", argLen: 2, @@ -16021,6 +16931,208 @@ var opcodeTable = [...]opInfo{ clobbers: 6, // R1 R2 }, }, + { + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicLoad64", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 
R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "LoweredAtomicStore64", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "LoweredAtomicStorezero32", + argLen: 2, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "LoweredAtomicStorezero64", + argLen: 2, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + }, + }, + { + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 
167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicAddconst32", + auxType: auxInt32, + argLen: 2, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicAddconst64", + auxType: auxInt64, + argLen: 2, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, + { + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 + {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB + }, + outputs: []outputInfo{ + {0, 167772158}, // R1 
R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, { name: "LoweredNilCheck", argLen: 2, @@ -16059,6 +17171,16 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "LoweredGetCallerSP", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31 + }, + }, + }, { name: "MOVVconvert", argLen: 2, @@ -16089,11 +17211,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ADDconst", - auxType: auxSymOff, - argLen: 1, - symEffect: SymAddr, - asm: ppc64.AADD, + name: "ADDconst", + auxType: auxInt64, + argLen: 1, + asm: ppc64.AADD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -16439,6 +17560,34 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "ROTL", + argLen: 2, + asm: ppc64.AROTL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, + { + name: "ROTLW", + argLen: 2, + asm: ppc64.AROTLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, { name: "ADDconstForCarry", auxType: auxInt16, 
@@ -16763,6 +17912,19 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "FCFIDS", + argLen: 1, + asm: ppc64.AFCFIDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 + }, + outputs: []outputInfo{ + {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 + }, + }, + }, { name: "FRSP", argLen: 1, @@ -16777,8 +17939,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "Xf2i64", + name: "MFVSRD", argLen: 1, + asm: ppc64.AMFVSRD, reg: regInfo{ inputs: []inputInfo{ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 @@ -16789,8 +17952,9 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "Xi2f64", + name: "MTVSRD", argLen: 1, + asm: ppc64.AMTVSRD, reg: regInfo{ inputs: []inputInfo{ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 @@ -16955,6 +18119,85 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "FFLOOR", + argLen: 1, + asm: ppc64.AFRIM, + reg: regInfo{ + inputs: []inputInfo{ + {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 + }, + outputs: []outputInfo{ + {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 + }, + }, + }, + { + name: "FCEIL", + argLen: 1, + asm: ppc64.AFRIP, + reg: regInfo{ + inputs: []inputInfo{ + {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 + }, + outputs: []outputInfo{ + {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 + }, + }, + }, + { + name: "FTRUNC", + argLen: 1, + asm: ppc64.AFRIZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 
576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 + }, + outputs: []outputInfo{ + {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 + }, + }, + }, + { + name: "FABS", + argLen: 1, + asm: ppc64.AFABS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 + }, + outputs: []outputInfo{ + {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 + }, + }, + }, + { + name: "FNABS", + argLen: 1, + asm: ppc64.AFNABS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 + }, + outputs: []outputInfo{ + {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 + }, + }, + }, + { + name: "FCPSGN", + argLen: 2, + asm: ppc64.AFCPSGN, + reg: regInfo{ + inputs: []inputInfo{ + {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 + {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 + }, + outputs: []outputInfo{ + {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 + }, + }, + }, { name: "ORconst", auxType: auxInt64, @@ -17601,6 +18844,16 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "LoweredGetCallerSP", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + }, + }, + }, { name: "LoweredNilCheck", argLen: 2, @@ -17672,8 +18925,8 @@ var 
opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {1, 2048}, // R11 - {0, 1073733626}, // SP R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4096}, // R12 + {1, 2048}, // R11 }, clobbers: 576460745860964344, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 }, @@ -17686,7 +18939,7 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 + {0, 4096}, // R12 }, clobbers: 576460745860964344, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 }, @@ -18156,6 +19409,60 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "LPDFR", + argLen: 1, + asm: s390x.ALPDFR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LNDFR", + argLen: 1, + asm: s390x.ALNDFR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "CPSDR", + argLen: 2, + asm: s390x.ACPSDR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "FIDBR", + 
auxType: auxInt8, + argLen: 1, + asm: s390x.AFIDBR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, { name: "FMOVSload", auxType: auxSymOff, @@ -18334,7 +19641,7 @@ var opcodeTable = [...]opInfo{ }, { name: "ADDconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, clobberFlags: true, asm: s390x.AADD, @@ -18432,7 +19739,7 @@ var opcodeTable = [...]opInfo{ }, { name: "SUBconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -18536,7 +19843,7 @@ var opcodeTable = [...]opInfo{ }, { name: "MULLDconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -19118,7 +20425,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CMPconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: s390x.ACMP, reg: regInfo{ @@ -19140,7 +20447,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CMPUconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: s390x.ACMPU, reg: regInfo{ @@ -19707,6 +21014,32 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "LDGR", + argLen: 1, + asm: s390x.ALDGR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + outputs: []outputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + }, + }, + { + name: "LGDR", + argLen: 1, + asm: s390x.ALGDR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, { name: "CFDBRA", argLen: 1, @@ -19842,7 +21175,6 @@ var opcodeTable = [...]opInfo{ auxType: auxSymOff, argLen: 1, rematerializeable: true, - clobberFlags: true, symEffect: SymRead, reg: regInfo{ inputs: []inputInfo{ @@ -19854,11 
+21186,10 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "MOVDaddridx", - auxType: auxSymOff, - argLen: 2, - clobberFlags: true, - symEffect: SymRead, + name: "MOVDaddridx", + auxType: auxSymOff, + argLen: 2, + symEffect: SymRead, reg: regInfo{ inputs: []inputInfo{ {0, 4295000064}, // SP SB @@ -20204,6 +21535,24 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MOVBloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + clobberFlags: true, + symEffect: SymRead, + asm: s390x.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP + {0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, { name: "MOVHZloadidx", auxType: auxSymOff, @@ -20222,6 +21571,24 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MOVHloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + clobberFlags: true, + symEffect: SymRead, + asm: s390x.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP + {0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, { name: "MOVWZloadidx", auxType: auxSymOff, @@ -20240,6 +21607,24 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MOVWloadidx", + auxType: auxSymOff, + argLen: 3, + commutative: true, + clobberFlags: true, + symEffect: SymRead, + asm: s390x.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 54270}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP + {0, 4295021566}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 SP SB + }, + outputs: []outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, { name: "MOVDloadidx", auxType: auxSymOff, @@ -20551,6 +21936,16 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "LoweredGetCallerSP", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: 
[]outputInfo{ + {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14 + }, + }, + }, { name: "LoweredNilCheck", argLen: 2, @@ -21976,6 +23371,41 @@ var opcodeTable = [...]opInfo{ argLen: 1, generic: true, }, + { + name: "Floor", + argLen: 1, + generic: true, + }, + { + name: "Ceil", + argLen: 1, + generic: true, + }, + { + name: "Trunc", + argLen: 1, + generic: true, + }, + { + name: "Round", + argLen: 1, + generic: true, + }, + { + name: "RoundToEven", + argLen: 1, + generic: true, + }, + { + name: "Abs", + argLen: 1, + generic: true, + }, + { + name: "Copysign", + argLen: 2, + generic: true, + }, { name: "Phi", argLen: -1, @@ -22063,7 +23493,7 @@ var opcodeTable = [...]opInfo{ name: "Arg", auxType: auxSymOff, argLen: 0, - symEffect: SymNone, + symEffect: SymRead, generic: true, }, { @@ -22124,6 +23554,13 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "WB", + auxType: auxSym, + argLen: 3, + symEffect: SymNone, + generic: true, + }, { name: "ClosureCall", auxType: auxInt64, @@ -22326,6 +23763,16 @@ var opcodeTable = [...]opInfo{ argLen: 0, generic: true, }, + { + name: "GetCallerPC", + argLen: 0, + generic: true, + }, + { + name: "GetCallerSP", + argLen: 0, + generic: true, + }, { name: "PtrIndex", argLen: 2, @@ -22489,7 +23936,7 @@ var opcodeTable = [...]opInfo{ name: "VarLive", auxType: auxSym, argLen: 1, - symEffect: SymNone, + symEffect: SymRead, generic: true, }, { @@ -22497,6 +23944,11 @@ var opcodeTable = [...]opInfo{ argLen: 2, generic: true, }, + { + name: "RegKill", + argLen: 0, + generic: true, + }, { name: "Int64Make", argLen: 2, diff --git a/src/cmd/compile/internal/ssa/opt.go b/src/cmd/compile/internal/ssa/opt.go index 670b535a338..6e91fd7da35 100644 --- a/src/cmd/compile/internal/ssa/opt.go +++ b/src/cmd/compile/internal/ssa/opt.go @@ -8,10 +8,3 @@ package ssa func opt(f *Func) { applyRewrite(f, rewriteBlockgeneric, rewriteValuegeneric) } - -func dec(f *Func) { - applyRewrite(f, rewriteBlockdec, rewriteValuedec) - if 
f.Config.RegSize == 4 { - applyRewrite(f, rewriteBlockdec64, rewriteValuedec64) - } -} diff --git a/src/cmd/compile/internal/ssa/print.go b/src/cmd/compile/internal/ssa/print.go index d2a87eb6153..d66530a3738 100644 --- a/src/cmd/compile/internal/ssa/print.go +++ b/src/cmd/compile/internal/ssa/print.go @@ -78,7 +78,7 @@ func (p stringFuncPrinter) startDepCycle() { func (p stringFuncPrinter) endDepCycle() {} func (p stringFuncPrinter) named(n LocalSlot, vals []*Value) { - fmt.Fprintf(p.w, "name %s: %v\n", n.Name(), vals) + fmt.Fprintf(p.w, "name %s: %v\n", n, vals) } func fprintFunc(p funcPrinter, f *Func) { diff --git a/src/cmd/compile/internal/ssa/redblack32.go b/src/cmd/compile/internal/ssa/redblack32.go index ae1ec352e78..fc9cc71ba03 100644 --- a/src/cmd/compile/internal/ssa/redblack32.go +++ b/src/cmd/compile/internal/ssa/redblack32.go @@ -244,7 +244,7 @@ func (t *node32) max() *node32 { } func (t *node32) glb(key int32, allow_eq bool) *node32 { - var best *node32 = nil + var best *node32 for t != nil { if key <= t.key { if key == t.key && allow_eq { @@ -262,7 +262,7 @@ func (t *node32) glb(key int32, allow_eq bool) *node32 { } func (t *node32) lub(key int32, allow_eq bool) *node32 { - var best *node32 = nil + var best *node32 for t != nil { if key >= t.key { if key == t.key && allow_eq { diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index 137e5fc4c2e..bc0a972da46 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -242,6 +242,9 @@ type regAllocState struct { // current state of each (preregalloc) Value values []valState + // names associated with each Value + valueNames [][]LocalSlot + // ID of SP, SB values sp, sb ID @@ -300,6 +303,13 @@ type startReg struct { // freeReg frees up register r. Any current user of r is kicked out. func (s *regAllocState) freeReg(r register) { + s.freeOrResetReg(r, false) +} + +// freeOrResetReg frees up register r. 
Any current user of r is kicked out. +// resetting indicates that the operation is only for bookkeeping, +// e.g. when clearing out state upon entry to a new block. +func (s *regAllocState) freeOrResetReg(r register, resetting bool) { v := s.regs[r].v if v == nil { s.f.Fatalf("tried to free an already free register %d\n", r) @@ -307,7 +317,17 @@ func (s *regAllocState) freeReg(r register) { // Mark r as unused. if s.f.pass.debug > regDebug { - fmt.Printf("freeReg %s (dump %s/%s)\n", s.registers[r].Name(), v, s.regs[r].c) + fmt.Printf("freeReg %s (dump %s/%s)\n", &s.registers[r], v, s.regs[r].c) + } + if !resetting && s.f.Config.ctxt.Flag_locationlists && len(s.valueNames[v.ID]) != 0 { + kill := s.curBlock.NewValue0(src.NoXPos, OpRegKill, types.TypeVoid) + for int(kill.ID) >= len(s.orig) { + s.orig = append(s.orig, nil) + } + for _, name := range s.valueNames[v.ID] { + s.f.NamedValues[name] = append(s.f.NamedValues[name], kill) + } + s.f.setHome(kill, &s.registers[r]) } s.regs[r] = regState{} s.values[v.ID].regs &^= regMask(1) << r @@ -337,7 +357,7 @@ func (s *regAllocState) setOrig(c *Value, v *Value) { // r must be unused. 
func (s *regAllocState) assignReg(r register, v *Value, c *Value) { if s.f.pass.debug > regDebug { - fmt.Printf("assignReg %s %s/%s\n", s.registers[r].Name(), v, c) + fmt.Printf("assignReg %s %s/%s\n", &s.registers[r], v, c) } if s.regs[r].v != nil { s.f.Fatalf("tried to assign register %d to %s/%s but it is already used by %s", r, v, c, s.regs[r].v) @@ -402,7 +422,7 @@ func (s *regAllocState) allocReg(mask regMask, v *Value) register { c := s.curBlock.NewValue1(v2.Pos, OpCopy, v2.Type, s.regs[r].c) s.copies[c] = false if s.f.pass.debug > regDebug { - fmt.Printf("copy %s to %s : %s\n", v2, c, s.registers[r2].Name()) + fmt.Printf("copy %s to %s : %s\n", v2, c, &s.registers[r2]) } s.setOrig(c, v2) s.assignReg(r2, v2, c) @@ -468,7 +488,7 @@ func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, pos c = s.curBlock.NewValue1(pos, OpCopy, v.Type, s.regs[r2].c) } else if v.rematerializeable() { // Rematerialize instead of loading from the spill location. - c = v.copyIntoNoXPos(s.curBlock) + c = v.copyIntoWithXPos(s.curBlock, pos) } else { // Load v from its spill location. spill := s.makeSpill(v, s.curBlock) @@ -511,7 +531,7 @@ func (s *regAllocState) init(f *Func) { s.SBReg = noRegister s.GReg = noRegister for r := register(0); r < s.numRegs; r++ { - switch s.registers[r].Name() { + switch s.registers[r].String() { case "SP": s.SPReg = r case "SB": @@ -542,12 +562,6 @@ func (s *regAllocState) init(f *Func) { if s.f.Config.ctxt.Framepointer_enabled && s.f.Config.FPReg >= 0 { s.allocatable &^= 1 << uint(s.f.Config.FPReg) } - if s.f.Config.ctxt.Flag_shared { - switch s.f.Config.arch { - case "ppc64le": // R2 already reserved. - s.allocatable &^= 1 << 12 // R12 - } - } if s.f.Config.LinkReg != -1 { if isLeaf(f) { // Leaf functions don't save/restore the link register. @@ -567,7 +581,7 @@ func (s *regAllocState) init(f *Func) { case "arm": s.allocatable &^= 1 << 9 // R9 case "ppc64le": // R2 already reserved. 
- s.allocatable &^= 1 << 12 // R12 + // nothing to do case "arm64": // nothing to do? case "386": @@ -599,6 +613,17 @@ func (s *regAllocState) init(f *Func) { s.values = make([]valState, f.NumValues()) s.orig = make([]*Value, f.NumValues()) s.copies = make(map[*Value]bool) + if s.f.Config.ctxt.Flag_locationlists { + s.valueNames = make([][]LocalSlot, f.NumValues()) + for slot, values := range f.NamedValues { + if isSynthetic(&slot) { + continue + } + for _, value := range values { + s.valueNames[value.ID] = append(s.valueNames[value.ID], slot) + } + } + } for _, b := range f.Blocks { for _, v := range b.Values { if !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && !v.Type.IsTuple() { @@ -692,7 +717,9 @@ func (s *regAllocState) liveAfterCurrentInstruction(v *Value) bool { // Sets the state of the registers to that encoded in regs. func (s *regAllocState) setState(regs []endReg) { - s.freeRegs(s.used) + for s.used != 0 { + s.freeOrResetReg(pickReg(s.used), true) + } for _, x := range regs { s.assignReg(x.r, x.v, x.c) } @@ -735,6 +762,9 @@ func (s *regAllocState) regalloc(f *Func) { } for _, b := range f.Blocks { + if s.f.pass.debug > regDebug { + fmt.Printf("Begin processing block %v\n", b) + } s.curBlock = b // Initialize regValLiveSet and uses fields for this block. @@ -830,9 +860,6 @@ func (s *regAllocState) regalloc(f *Func) { // This is the complicated case. We have more than one predecessor, // which means we may have Phi ops. - // Copy phi ops into new schedule. - b.Values = append(b.Values, phis...) 
- // Start with the final register state of the primary predecessor idx := s.primary[b.ID] if idx < 0 { @@ -844,14 +871,14 @@ func (s *regAllocState) regalloc(f *Func) { if s.f.pass.debug > regDebug { fmt.Printf("starting merge block %s with end state of %s:\n", b, p) for _, x := range s.endRegs[p.ID] { - fmt.Printf(" %s: orig:%s cache:%s\n", s.registers[x.r].Name(), x.v, x.c) + fmt.Printf(" %s: orig:%s cache:%s\n", &s.registers[x.r], x.v, x.c) } } // Decide on registers for phi ops. Use the registers determined // by the primary predecessor if we can. // TODO: pick best of (already processed) predecessors? - // Majority vote? Deepest nesting level? + // Majority vote? Deepest nesting level? phiRegs = phiRegs[:0] var phiUsed regMask for _, v := range phis { @@ -900,7 +927,7 @@ func (s *regAllocState) regalloc(f *Func) { c := p.NewValue1(a.Pos, OpCopy, a.Type, s.regs[r].c) s.copies[c] = false if s.f.pass.debug > regDebug { - fmt.Printf("copy %s to %s : %s\n", a, c, s.registers[r2].Name()) + fmt.Printf("copy %s to %s : %s\n", a, c, &s.registers[r2]) } s.setOrig(c, a) s.assignReg(r2, a, c) @@ -910,6 +937,9 @@ func (s *regAllocState) regalloc(f *Func) { } } + // Copy phi ops into new schedule. + b.Values = append(b.Values, phis...) + // Third pass - pick registers for phis whose inputs // were not in a register. for i, v := range phis { @@ -976,7 +1006,7 @@ func (s *regAllocState) regalloc(f *Func) { if s.f.pass.debug > regDebug { fmt.Printf("after phis\n") for _, x := range s.startRegs[b.ID] { - fmt.Printf(" %s: v%d\n", s.registers[x.r].Name(), x.v.ID) + fmt.Printf(" %s: v%d\n", &s.registers[x.r], x.v.ID) } } } @@ -1005,7 +1035,7 @@ func (s *regAllocState) regalloc(f *Func) { pidx := e.i for _, v := range succ.Values { if v.Op != OpPhi { - break + continue } if !s.values[v.ID].needReg { continue @@ -1103,12 +1133,15 @@ func (s *regAllocState) regalloc(f *Func) { if v.Op == OpKeepAlive { // Make sure the argument to v is still live here. 
s.advanceUses(v) - vi := &s.values[v.Args[0].ID] - if vi.spill != nil { + a := v.Args[0] + vi := &s.values[a.ID] + if vi.regs == 0 && !vi.rematerializeable { // Use the spill location. - v.SetArg(0, vi.spill) + // This forces later liveness analysis to make the + // value live at this point. + v.SetArg(0, s.makeSpill(a, b)) } else { - // No need to keep unspilled values live. + // In-register and rematerializeable values are already live. // These are typically rematerializeable constants like nil, // or values of a variable that were modified since the last call. v.Op = OpCopy @@ -1141,7 +1174,7 @@ func (s *regAllocState) regalloc(f *Func) { fmt.Printf(" out:") for _, r := range dinfo[idx].out { if r != noRegister { - fmt.Printf(" %s", s.registers[r].Name()) + fmt.Printf(" %s", &s.registers[r]) } } fmt.Println() @@ -1149,7 +1182,7 @@ func (s *regAllocState) regalloc(f *Func) { fmt.Printf(" in%d:", i) for _, r := range dinfo[idx].in[i] { if r != noRegister { - fmt.Printf(" %s", s.registers[r].Name()) + fmt.Printf(" %s", &s.registers[r]) } } fmt.Println() @@ -1533,7 +1566,7 @@ func (s *regAllocState) regalloc(f *Func) { if s.f.pass.debug > regDebug { fmt.Printf("delete copied value %s\n", c.LongString()) } - c.Args[0].Uses-- + c.RemoveArg(0) f.freeValue(c) delete(s.copies, c) progress = true @@ -1565,6 +1598,9 @@ func (s *regAllocState) placeSpills() { for _, b := range f.Blocks { var m regMask for _, v := range b.Values { + if v.Op == OpRegKill { + continue + } if v.Op != OpPhi { break } @@ -1675,21 +1711,17 @@ func (s *regAllocState) placeSpills() { for _, b := range f.Blocks { nphi := 0 for _, v := range b.Values { - if v.Op != OpPhi { + if v.Op != OpRegKill && v.Op != OpPhi { break } nphi++ } oldSched = append(oldSched[:0], b.Values[nphi:]...) b.Values = b.Values[:nphi] - for _, v := range start[b.ID] { - b.Values = append(b.Values, v) - } + b.Values = append(b.Values, start[b.ID]...) 
for _, v := range oldSched { b.Values = append(b.Values, v) - for _, w := range after[v.ID] { - b.Values = append(b.Values, w) - } + b.Values = append(b.Values, after[v.ID]...) } } } @@ -1800,6 +1832,9 @@ func (e *edgeState) setup(idx int, srcReg []endReg, dstReg []startReg, stacklive } // Phis need their args to end up in a specific location. for _, v := range e.b.Values { + if v.Op == OpRegKill { + continue + } if v.Op != OpPhi { break } @@ -1815,11 +1850,11 @@ func (e *edgeState) setup(idx int, srcReg []endReg, dstReg []startReg, stacklive for _, vid := range e.cachedVals { a := e.cache[vid] for _, c := range a { - fmt.Printf("src %s: v%d cache=%s\n", e.s.f.getHome(c.ID).Name(), vid, c) + fmt.Printf("src %s: v%d cache=%s\n", e.s.f.getHome(c.ID), vid, c) } } for _, d := range e.destinations { - fmt.Printf("dst %s: v%d\n", d.loc.Name(), d.vid) + fmt.Printf("dst %s: v%d\n", d.loc, d.vid) } } } @@ -1876,8 +1911,9 @@ func (e *edgeState) process() { c := e.contents[loc].c r := e.findRegFor(c.Type) if e.s.f.pass.debug > regDebug { - fmt.Printf("breaking cycle with v%d in %s:%s\n", vid, loc.Name(), c) + fmt.Printf("breaking cycle with v%d in %s:%s\n", vid, loc, c) } + e.erase(r) if _, isReg := loc.(*Register); isReg { c = e.p.NewValue1(d.pos, OpCopy, c.Type, c) } else { @@ -1921,13 +1957,13 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value, pos src.XP var c *Value var src Location if e.s.f.pass.debug > regDebug { - fmt.Printf("moving v%d to %s\n", vid, loc.Name()) + fmt.Printf("moving v%d to %s\n", vid, loc) fmt.Printf("sources of v%d:", vid) } for _, w := range e.cache[vid] { h := e.s.f.getHome(w.ID) if e.s.f.pass.debug > regDebug { - fmt.Printf(" %s:%s", h.Name(), w) + fmt.Printf(" %s:%s", h, w) } _, isreg := h.(*Register) if src == nil || isreg { @@ -1937,12 +1973,24 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value, pos src.XP } if e.s.f.pass.debug > regDebug { if src != nil { - fmt.Printf(" [use %s]\n", src.Name()) + 
fmt.Printf(" [use %s]\n", src) } else { fmt.Printf(" [no source]\n") } } _, dstReg := loc.(*Register) + + // Pre-clobber destination. This avoids the + // following situation: + // - v is currently held in R0 and stacktmp0. + // - We want to copy stacktmp1 to stacktmp0. + // - We choose R0 as the temporary register. + // During the copy, both R0 and stacktmp0 are + // clobbered, losing both copies of v. Oops! + // Erasing the destination early means R0 will not + // be chosen as the temp register, as it will then + // be the last copy of v. + e.erase(loc) var x *Value if c == nil { if !e.s.values[vid].rematerializeable { @@ -1953,9 +2001,9 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value, pos src.XP } else { // Rematerialize into stack slot. Need a free // register to accomplish this. - e.erase(loc) // see pre-clobber comment below r := e.findRegFor(v.Type) - x = v.copyIntoNoXPos(e.p) + e.erase(r) + x = v.copyIntoWithXPos(e.p, pos) e.set(r, vid, x, false, pos) // Make sure we spill with the size of the slot, not the // size of x (which might be wider due to our dropping @@ -1976,20 +2024,8 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value, pos src.XP x = e.p.NewValue1(pos, OpLoadReg, c.Type, c) } else { // mem->mem. Use temp register. - - // Pre-clobber destination. This avoids the - // following situation: - // - v is currently held in R0 and stacktmp0. - // - We want to copy stacktmp1 to stacktmp0. - // - We choose R0 as the temporary register. - // During the copy, both R0 and stacktmp0 are - // clobbered, losing both copies of v. Oops! - // Erasing the destination early means R0 will not - // be chosen as the temp register, as it will then - // be the last copy of v. 
- e.erase(loc) - r := e.findRegFor(c.Type) + e.erase(r) t := e.p.NewValue1(pos, OpLoadReg, c.Type, c) e.set(r, vid, t, false, pos) x = e.p.NewValue1(pos, OpStoreReg, loc.(LocalSlot).Type, t) @@ -2008,7 +2044,6 @@ func (e *edgeState) processDest(loc Location, vid ID, splice **Value, pos src.XP // set changes the contents of location loc to hold the given value and its cached representative. func (e *edgeState) set(loc Location, vid ID, c *Value, final bool, pos src.XPos) { e.s.f.setHome(c, loc) - e.erase(loc) e.contents[loc] = contentRecord{vid, c, final, pos} a := e.cache[vid] if len(a) == 0 { @@ -2032,7 +2067,7 @@ func (e *edgeState) set(loc Location, vid ID, c *Value, final bool, pos src.XPos } if e.s.f.pass.debug > regDebug { fmt.Printf("%s\n", c.LongString()) - fmt.Printf("v%d now available in %s:%s\n", vid, loc.Name(), c) + fmt.Printf("v%d now available in %s:%s\n", vid, loc, c) } } @@ -2056,9 +2091,19 @@ func (e *edgeState) erase(loc Location) { for i, c := range a { if e.s.f.getHome(c.ID) == loc { if e.s.f.pass.debug > regDebug { - fmt.Printf("v%d no longer available in %s:%s\n", vid, loc.Name(), c) + fmt.Printf("v%d no longer available in %s:%s\n", vid, loc, c) } a[i], a = a[len(a)-1], a[:len(a)-1] + if e.s.f.Config.ctxt.Flag_locationlists { + if _, isReg := loc.(*Register); isReg && int(c.ID) < len(e.s.valueNames) && len(e.s.valueNames[c.ID]) != 0 { + kill := e.p.NewValue0(src.NoXPos, OpRegKill, types.TypeVoid) + e.s.f.setHome(kill, loc) + for _, name := range e.s.valueNames[c.ID] { + e.s.f.NamedValues[name] = append(e.s.f.NamedValues[name], kill) + } + } + } + break } } @@ -2118,11 +2163,11 @@ func (e *edgeState) findRegFor(typ *types.Type) Location { // Allocate a temp location to spill a register to. // The type of the slot is immaterial - it will not be live across // any safepoint. Just use a type big enough to hold any register. - t := LocalSlot{e.s.f.fe.Auto(c.Pos, types.Int64), types.Int64, 0} - // TODO: reuse these slots. 
+ t := LocalSlot{N: e.s.f.fe.Auto(c.Pos, types.Int64), Type: types.Int64} + // TODO: reuse these slots. They'll need to be erased first. e.set(t, vid, x, false, c.Pos) if e.s.f.pass.debug > regDebug { - fmt.Printf(" SPILL %s->%s %s\n", r.Name(), t.Name(), x.LongString()) + fmt.Printf(" SPILL %s->%s %s\n", r, t, x.LongString()) } } // r will now be overwritten by the caller. At some point @@ -2137,7 +2182,7 @@ func (e *edgeState) findRegFor(typ *types.Type) Location { for _, vid := range e.cachedVals { a := e.cache[vid] for _, c := range a { - fmt.Printf("v%d: %s %s\n", vid, c, e.s.f.getHome(c.ID).Name()) + fmt.Printf("v%d: %s %s\n", vid, c, e.s.f.getHome(c.ID)) } } e.s.f.Fatalf("can't find empty register on edge %s->%s", e.p, e.b) @@ -2165,12 +2210,6 @@ type liveInfo struct { pos src.XPos // source position of next use } -// dblock contains information about desired & avoid registers at the end of a block. -type dblock struct { - prefers []desiredStateEntry - avoid regMask -} - // computeLive computes a map from block ID to a list of value IDs live at the end // of that block. Together with the value ID is a count of how many instructions // to the next use of that value. The resulting map is stored in s.live. @@ -2360,7 +2399,7 @@ func (s *regAllocState) computeLive() { if !first { fmt.Printf(",") } - fmt.Print(s.registers[r].Name()) + fmt.Print(&s.registers[r]) first = false } fmt.Printf("]") diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index b42d53032c3..4e8eb4d3b66 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -117,10 +117,6 @@ func isSigned(t *types.Type) bool { return t.IsSigned() } -func typeSize(t *types.Type) int64 { - return t.Size() -} - // mergeSym merges two symbolic offsets. There is no real merging of // offsets, we just pick the non-nil one. 
func mergeSym(x, y interface{}) interface{} { @@ -276,18 +272,6 @@ search: return true } -// isArg returns whether s is an arg symbol -func isArg(s interface{}) bool { - _, ok := s.(*ArgSymbol) - return ok -} - -// isAuto returns whether s is an auto symbol -func isAuto(s interface{}) bool { - _, ok := s.(*AutoSymbol) - return ok -} - // isSameSym returns whether sym is the same as the given named symbol func isSameSym(sym interface{}, name string) bool { s, ok := sym.(fmt.Stringer) @@ -305,6 +289,10 @@ func ntz(x int64) int64 { return 64 - nlz(^x&(x-1)) } +func oneBit(x int64) bool { + return nlz(x)+ntz(x) == 63 +} + // nlo returns the number of leading ones. func nlo(x int64) int64 { return nlz(^x) @@ -408,11 +396,11 @@ func uaddOvf(a, b int64) bool { // 'sym' is the symbol for the itab func devirt(v *Value, sym interface{}, offset int64) *obj.LSym { f := v.Block.Func - ext, ok := sym.(*ExternSymbol) + n, ok := sym.(*obj.LSym) if !ok { return nil } - lsym := f.fe.DerefItab(ext.Sym, offset) + lsym := f.fe.DerefItab(n, offset) if f.pass.debug > 0 { if lsym != nil { f.Warnl(v.Pos, "de-virtualizing call") @@ -644,3 +632,50 @@ func overlap(offset1, size1, offset2, size2 int64) bool { } return false } + +// check if value zeroes out upper 32-bit of 64-bit register. +// depth limits recursion depth. In AMD64.rules 3 is used as limit, +// because it catches same amount of cases as 4. 
+func zeroUpper32Bits(x *Value, depth int) bool { + switch x.Op { + case OpAMD64MOVLconst, OpAMD64MOVLload, OpAMD64MOVLQZX, OpAMD64MOVLloadidx1, + OpAMD64MOVWload, OpAMD64MOVWloadidx1, OpAMD64MOVBload, OpAMD64MOVBloadidx1, + OpAMD64MOVLloadidx4, OpAMD64ADDLmem, OpAMD64SUBLmem, OpAMD64ANDLmem, + OpAMD64ORLmem, OpAMD64XORLmem, OpAMD64CVTTSD2SL, + OpAMD64ADDL, OpAMD64ADDLconst, OpAMD64SUBL, OpAMD64SUBLconst, + OpAMD64ANDL, OpAMD64ANDLconst, OpAMD64ORL, OpAMD64ORLconst, + OpAMD64XORL, OpAMD64XORLconst, OpAMD64NEGL, OpAMD64NOTL: + return true + case OpArg, OpSelect0, OpSelect1: + return x.Type.Width == 4 + case OpPhi: + // Phis can use each-other as an arguments, instead of tracking visited values, + // just limit recursion depth. + if depth <= 0 { + return false + } + for i := range x.Args { + if !zeroUpper32Bits(x.Args[i], depth-1) { + return false + } + } + return true + + } + return false +} + +// inlineablememmovesize reports whether the given arch performs OpMove of the given size +// faster than memmove and in a safe way when src and dst overlap. +// This is used as a check for replacing memmove with OpMove. 
+func isInlinableMemmoveSize(sz int64, c *Config) bool { + switch c.arch { + case "amd64", "amd64p32": + return sz <= 16 + case "386", "ppc64", "s390x", "ppc64le": + return sz <= 8 + case "arm", "mips", "mips64", "mipsle", "mips64le": + return sz <= 4 + } + return false +} diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go index f9cddd0f708..3706302d63c 100644 --- a/src/cmd/compile/internal/ssa/rewrite386.go +++ b/src/cmd/compile/internal/ssa/rewrite386.go @@ -329,6 +329,10 @@ func rewriteValue386(v *Value) bool { return rewriteValue386_OpGeq8_0(v) case OpGeq8U: return rewriteValue386_OpGeq8U_0(v) + case OpGetCallerPC: + return rewriteValue386_OpGetCallerPC_0(v) + case OpGetCallerSP: + return rewriteValue386_OpGetCallerSP_0(v) case OpGetClosurePtr: return rewriteValue386_OpGetClosurePtr_0(v) case OpGetG: @@ -8541,20 +8545,6 @@ func rewriteValue386_Op386MULLconst_10(v *Value) bool { return true } // match: (MULLconst [c] x) - // cond: isPowerOfTwo(c) - // result: (SHLLconst [log2(c)] x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c)) { - break - } - v.reset(Op386SHLLconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true - } - // match: (MULLconst [c] x) // cond: isPowerOfTwo(c+1) && c >= 15 // result: (SUBL (SHLLconst [log2(c+1)] x) x) for { @@ -8622,11 +8612,6 @@ func rewriteValue386_Op386MULLconst_10(v *Value) bool { v.AddArg(x) return true } - return false -} -func rewriteValue386_Op386MULLconst_20(v *Value) bool { - b := v.Block - _ = b // match: (MULLconst [c] x) // cond: isPowerOfTwo(c-8) && c >= 136 // result: (LEAL8 (SHLLconst [log2(c-8)] x) x) @@ -8644,6 +8629,11 @@ func rewriteValue386_Op386MULLconst_20(v *Value) bool { v.AddArg(x) return true } + return false +} +func rewriteValue386_Op386MULLconst_20(v *Value) bool { + b := v.Block + _ = b // match: (MULLconst [c] x) // cond: c%3 == 0 && isPowerOfTwo(c/3) // result: (SHLLconst [log2(c/3)] (LEAL2 x x)) @@ -15052,6 +15042,24 @@ func 
rewriteValue386_OpGeq8U_0(v *Value) bool { return true } } +func rewriteValue386_OpGetCallerPC_0(v *Value) bool { + // match: (GetCallerPC) + // cond: + // result: (LoweredGetCallerPC) + for { + v.reset(Op386LoweredGetCallerPC) + return true + } +} +func rewriteValue386_OpGetCallerSP_0(v *Value) bool { + // match: (GetCallerSP) + // cond: + // result: (LoweredGetCallerSP) + for { + v.reset(Op386LoweredGetCallerSP) + return true + } +} func rewriteValue386_OpGetClosurePtr_0(v *Value) bool { // match: (GetClosurePtr) // cond: @@ -18325,6 +18333,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386EQ b.SetControl(cmp) + b.Aux = nil return true } // match: (EQ (FlagEQ) yes no) @@ -18337,6 +18346,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (EQ (FlagLT_ULT) yes no) @@ -18349,6 +18359,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -18362,6 +18373,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -18375,6 +18387,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -18388,6 +18401,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -18403,6 +18417,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386LE b.SetControl(cmp) + b.Aux = nil return true } // match: (GE (FlagEQ) yes no) @@ -18415,6 +18430,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GE (FlagLT_ULT) yes no) @@ -18427,6 +18443,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -18440,6 +18457,7 @@ func 
rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -18453,6 +18471,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GE (FlagGT_UGT) yes no) @@ -18465,6 +18484,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } case Block386GT: @@ -18479,6 +18499,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386LT b.SetControl(cmp) + b.Aux = nil return true } // match: (GT (FlagEQ) yes no) @@ -18491,6 +18512,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -18504,6 +18526,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -18517,6 +18540,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -18530,6 +18554,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GT (FlagGT_UGT) yes no) @@ -18542,6 +18567,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } case BlockIf: @@ -18556,6 +18582,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386LT b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETLE cmp) yes no) @@ -18569,6 +18596,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386LE b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETG cmp) yes no) @@ -18582,6 +18610,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386GT b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETGE cmp) yes no) @@ -18595,6 +18624,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386GE 
b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETEQ cmp) yes no) @@ -18608,6 +18638,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386EQ b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETNE cmp) yes no) @@ -18621,6 +18652,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386NE b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETB cmp) yes no) @@ -18634,6 +18666,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386ULT b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETBE cmp) yes no) @@ -18647,6 +18680,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386ULE b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETA cmp) yes no) @@ -18660,6 +18694,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386UGT b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETAE cmp) yes no) @@ -18673,6 +18708,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386UGE b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETGF cmp) yes no) @@ -18686,6 +18722,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386UGT b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETGEF cmp) yes no) @@ -18699,6 +18736,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386UGE b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETEQF cmp) yes no) @@ -18712,6 +18750,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386EQF b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETNEF cmp) yes no) @@ -18725,6 +18764,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386NEF b.SetControl(cmp) + b.Aux = nil return true } // match: (If cond yes no) @@ -18739,6 +18779,7 @@ func rewriteBlock386(b *Block) bool { v0.AddArg(cond) v0.AddArg(cond) b.SetControl(v0) + b.Aux = nil 
return true } case Block386LE: @@ -18753,6 +18794,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386GE b.SetControl(cmp) + b.Aux = nil return true } // match: (LE (FlagEQ) yes no) @@ -18765,6 +18807,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LE (FlagLT_ULT) yes no) @@ -18777,6 +18820,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LE (FlagLT_UGT) yes no) @@ -18789,6 +18833,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LE (FlagGT_ULT) yes no) @@ -18801,6 +18846,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -18814,6 +18860,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -18829,6 +18876,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386GT b.SetControl(cmp) + b.Aux = nil return true } // match: (LT (FlagEQ) yes no) @@ -18841,6 +18889,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -18854,6 +18903,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LT (FlagLT_UGT) yes no) @@ -18866,6 +18916,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LT (FlagGT_ULT) yes no) @@ -18878,6 +18929,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -18891,6 +18943,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -18918,6 +18971,7 @@ func rewriteBlock386(b 
*Block) bool { } b.Kind = Block386LT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) @@ -18943,6 +18997,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386LT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) @@ -18968,6 +19023,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386LE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) @@ -18993,6 +19049,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386LE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) @@ -19018,6 +19075,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386GT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) @@ -19043,6 +19101,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386GT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) @@ -19068,6 +19127,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386GE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) @@ -19093,6 +19153,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386GE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) @@ -19118,6 +19179,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386EQ b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) @@ -19143,6 +19205,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386EQ b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) @@ -19168,6 +19231,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386NE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) @@ -19193,6 +19257,7 @@ func 
rewriteBlock386(b *Block) bool { } b.Kind = Block386NE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) @@ -19218,6 +19283,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386ULT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) @@ -19243,6 +19309,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386ULT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) @@ -19268,6 +19335,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386ULE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) @@ -19293,6 +19361,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386ULE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) @@ -19318,6 +19387,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386UGT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) @@ -19343,6 +19413,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386UGT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) @@ -19368,6 +19439,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386UGE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) @@ -19393,6 +19465,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386UGE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) @@ -19418,6 +19491,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386UGT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) @@ -19443,6 +19517,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386UGT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) 
@@ -19468,6 +19543,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386UGE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) @@ -19493,6 +19569,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386UGE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) @@ -19518,6 +19595,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386EQF b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) @@ -19543,6 +19621,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386EQF b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) @@ -19568,6 +19647,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386NEF b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) @@ -19593,6 +19673,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = Block386NEF b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (InvertFlags cmp) yes no) @@ -19606,6 +19687,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386NE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (FlagEQ) yes no) @@ -19618,6 +19700,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -19631,6 +19714,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (NE (FlagLT_UGT) yes no) @@ -19643,6 +19727,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (NE (FlagGT_ULT) yes no) @@ -19655,6 +19740,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (NE (FlagGT_UGT) yes no) @@ -19667,6 +19753,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = 
BlockFirst b.SetControl(nil) + b.Aux = nil return true } case Block386UGE: @@ -19681,6 +19768,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386ULE b.SetControl(cmp) + b.Aux = nil return true } // match: (UGE (FlagEQ) yes no) @@ -19693,6 +19781,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (UGE (FlagLT_ULT) yes no) @@ -19705,6 +19794,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -19718,6 +19808,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (UGE (FlagGT_ULT) yes no) @@ -19730,6 +19821,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -19743,6 +19835,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } case Block386UGT: @@ -19757,6 +19850,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386ULT b.SetControl(cmp) + b.Aux = nil return true } // match: (UGT (FlagEQ) yes no) @@ -19769,6 +19863,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -19782,6 +19877,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -19795,6 +19891,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (UGT (FlagGT_ULT) yes no) @@ -19807,6 +19904,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -19820,6 +19918,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } case Block386ULE: @@ -19834,6 +19933,7 @@ func 
rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386UGE b.SetControl(cmp) + b.Aux = nil return true } // match: (ULE (FlagEQ) yes no) @@ -19846,6 +19946,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (ULE (FlagLT_ULT) yes no) @@ -19858,6 +19959,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (ULE (FlagLT_UGT) yes no) @@ -19870,6 +19972,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -19883,6 +19986,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (ULE (FlagGT_UGT) yes no) @@ -19895,6 +19999,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -19910,6 +20015,7 @@ func rewriteBlock386(b *Block) bool { cmp := v.Args[0] b.Kind = Block386UGT b.SetControl(cmp) + b.Aux = nil return true } // match: (ULT (FlagEQ) yes no) @@ -19922,6 +20028,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -19935,6 +20042,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (ULT (FlagLT_UGT) yes no) @@ -19947,6 +20055,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -19960,6 +20069,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (ULT (FlagGT_UGT) yes no) @@ -19972,6 +20082,7 @@ func rewriteBlock386(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go 
b/src/cmd/compile/internal/ssa/rewriteAMD64.go index 9213616f83b..c54949fd9df 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -19,22 +19,38 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64ADDL_0(v) || rewriteValueAMD64_OpAMD64ADDL_10(v) case OpAMD64ADDLconst: return rewriteValueAMD64_OpAMD64ADDLconst_0(v) + case OpAMD64ADDLconstmem: + return rewriteValueAMD64_OpAMD64ADDLconstmem_0(v) + case OpAMD64ADDLmem: + return rewriteValueAMD64_OpAMD64ADDLmem_0(v) case OpAMD64ADDQ: return rewriteValueAMD64_OpAMD64ADDQ_0(v) || rewriteValueAMD64_OpAMD64ADDQ_10(v) || rewriteValueAMD64_OpAMD64ADDQ_20(v) case OpAMD64ADDQconst: return rewriteValueAMD64_OpAMD64ADDQconst_0(v) + case OpAMD64ADDQconstmem: + return rewriteValueAMD64_OpAMD64ADDQconstmem_0(v) + case OpAMD64ADDQmem: + return rewriteValueAMD64_OpAMD64ADDQmem_0(v) case OpAMD64ADDSD: return rewriteValueAMD64_OpAMD64ADDSD_0(v) + case OpAMD64ADDSDmem: + return rewriteValueAMD64_OpAMD64ADDSDmem_0(v) case OpAMD64ADDSS: return rewriteValueAMD64_OpAMD64ADDSS_0(v) + case OpAMD64ADDSSmem: + return rewriteValueAMD64_OpAMD64ADDSSmem_0(v) case OpAMD64ANDL: return rewriteValueAMD64_OpAMD64ANDL_0(v) case OpAMD64ANDLconst: return rewriteValueAMD64_OpAMD64ANDLconst_0(v) + case OpAMD64ANDLmem: + return rewriteValueAMD64_OpAMD64ANDLmem_0(v) case OpAMD64ANDQ: return rewriteValueAMD64_OpAMD64ANDQ_0(v) case OpAMD64ANDQconst: return rewriteValueAMD64_OpAMD64ANDQconst_0(v) + case OpAMD64ANDQmem: + return rewriteValueAMD64_OpAMD64ANDQmem_0(v) case OpAMD64BSFQ: return rewriteValueAMD64_OpAMD64BSFQ_0(v) case OpAMD64BTQconst: @@ -84,7 +100,7 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64MOVBloadidx1: return rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v) case OpAMD64MOVBstore: - return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) + return rewriteValueAMD64_OpAMD64MOVBstore_0(v) || rewriteValueAMD64_OpAMD64MOVBstore_10(v) 
|| rewriteValueAMD64_OpAMD64MOVBstore_20(v) case OpAMD64MOVBstoreconst: return rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v) case OpAMD64MOVBstoreconstidx1: @@ -99,12 +115,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64MOVLQZX_0(v) case OpAMD64MOVLatomicload: return rewriteValueAMD64_OpAMD64MOVLatomicload_0(v) + case OpAMD64MOVLf2i: + return rewriteValueAMD64_OpAMD64MOVLf2i_0(v) + case OpAMD64MOVLi2f: + return rewriteValueAMD64_OpAMD64MOVLi2f_0(v) case OpAMD64MOVLload: return rewriteValueAMD64_OpAMD64MOVLload_0(v) case OpAMD64MOVLloadidx1: return rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v) case OpAMD64MOVLloadidx4: return rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v) + case OpAMD64MOVLloadidx8: + return rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v) case OpAMD64MOVLstore: return rewriteValueAMD64_OpAMD64MOVLstore_0(v) || rewriteValueAMD64_OpAMD64MOVLstore_10(v) case OpAMD64MOVLstoreconst: @@ -117,12 +139,18 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v) case OpAMD64MOVLstoreidx4: return rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v) + case OpAMD64MOVLstoreidx8: + return rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v) case OpAMD64MOVOload: return rewriteValueAMD64_OpAMD64MOVOload_0(v) case OpAMD64MOVOstore: return rewriteValueAMD64_OpAMD64MOVOstore_0(v) case OpAMD64MOVQatomicload: return rewriteValueAMD64_OpAMD64MOVQatomicload_0(v) + case OpAMD64MOVQf2i: + return rewriteValueAMD64_OpAMD64MOVQf2i_0(v) + case OpAMD64MOVQi2f: + return rewriteValueAMD64_OpAMD64MOVQi2f_0(v) case OpAMD64MOVQload: return rewriteValueAMD64_OpAMD64MOVQload_0(v) case OpAMD64MOVQloadidx1: @@ -130,7 +158,7 @@ func rewriteValueAMD64(v *Value) bool { case OpAMD64MOVQloadidx8: return rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v) case OpAMD64MOVQstore: - return rewriteValueAMD64_OpAMD64MOVQstore_0(v) + return rewriteValueAMD64_OpAMD64MOVQstore_0(v) || rewriteValueAMD64_OpAMD64MOVQstore_10(v) case OpAMD64MOVQstoreconst: return 
rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v) case OpAMD64MOVQstoreconstidx1: @@ -199,8 +227,12 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64MULQconst_0(v) || rewriteValueAMD64_OpAMD64MULQconst_10(v) || rewriteValueAMD64_OpAMD64MULQconst_20(v) case OpAMD64MULSD: return rewriteValueAMD64_OpAMD64MULSD_0(v) + case OpAMD64MULSDmem: + return rewriteValueAMD64_OpAMD64MULSDmem_0(v) case OpAMD64MULSS: return rewriteValueAMD64_OpAMD64MULSS_0(v) + case OpAMD64MULSSmem: + return rewriteValueAMD64_OpAMD64MULSSmem_0(v) case OpAMD64NEGL: return rewriteValueAMD64_OpAMD64NEGL_0(v) case OpAMD64NEGQ: @@ -213,10 +245,14 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64ORL_0(v) || rewriteValueAMD64_OpAMD64ORL_10(v) || rewriteValueAMD64_OpAMD64ORL_20(v) || rewriteValueAMD64_OpAMD64ORL_30(v) || rewriteValueAMD64_OpAMD64ORL_40(v) || rewriteValueAMD64_OpAMD64ORL_50(v) || rewriteValueAMD64_OpAMD64ORL_60(v) || rewriteValueAMD64_OpAMD64ORL_70(v) || rewriteValueAMD64_OpAMD64ORL_80(v) || rewriteValueAMD64_OpAMD64ORL_90(v) || rewriteValueAMD64_OpAMD64ORL_100(v) || rewriteValueAMD64_OpAMD64ORL_110(v) || rewriteValueAMD64_OpAMD64ORL_120(v) || rewriteValueAMD64_OpAMD64ORL_130(v) case OpAMD64ORLconst: return rewriteValueAMD64_OpAMD64ORLconst_0(v) + case OpAMD64ORLmem: + return rewriteValueAMD64_OpAMD64ORLmem_0(v) case OpAMD64ORQ: return rewriteValueAMD64_OpAMD64ORQ_0(v) || rewriteValueAMD64_OpAMD64ORQ_10(v) || rewriteValueAMD64_OpAMD64ORQ_20(v) || rewriteValueAMD64_OpAMD64ORQ_30(v) || rewriteValueAMD64_OpAMD64ORQ_40(v) || rewriteValueAMD64_OpAMD64ORQ_50(v) || rewriteValueAMD64_OpAMD64ORQ_60(v) || rewriteValueAMD64_OpAMD64ORQ_70(v) || rewriteValueAMD64_OpAMD64ORQ_80(v) || rewriteValueAMD64_OpAMD64ORQ_90(v) || rewriteValueAMD64_OpAMD64ORQ_100(v) || rewriteValueAMD64_OpAMD64ORQ_110(v) || rewriteValueAMD64_OpAMD64ORQ_120(v) || rewriteValueAMD64_OpAMD64ORQ_130(v) || rewriteValueAMD64_OpAMD64ORQ_140(v) || rewriteValueAMD64_OpAMD64ORQ_150(v) || 
rewriteValueAMD64_OpAMD64ORQ_160(v) case OpAMD64ORQconst: return rewriteValueAMD64_OpAMD64ORQconst_0(v) + case OpAMD64ORQmem: + return rewriteValueAMD64_OpAMD64ORQmem_0(v) case OpAMD64ROLB: return rewriteValueAMD64_OpAMD64ROLB_0(v) case OpAMD64ROLBconst: @@ -265,22 +301,42 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64SETA_0(v) case OpAMD64SETAE: return rewriteValueAMD64_OpAMD64SETAE_0(v) + case OpAMD64SETAEmem: + return rewriteValueAMD64_OpAMD64SETAEmem_0(v) + case OpAMD64SETAmem: + return rewriteValueAMD64_OpAMD64SETAmem_0(v) case OpAMD64SETB: return rewriteValueAMD64_OpAMD64SETB_0(v) case OpAMD64SETBE: return rewriteValueAMD64_OpAMD64SETBE_0(v) + case OpAMD64SETBEmem: + return rewriteValueAMD64_OpAMD64SETBEmem_0(v) + case OpAMD64SETBmem: + return rewriteValueAMD64_OpAMD64SETBmem_0(v) case OpAMD64SETEQ: return rewriteValueAMD64_OpAMD64SETEQ_0(v) || rewriteValueAMD64_OpAMD64SETEQ_10(v) + case OpAMD64SETEQmem: + return rewriteValueAMD64_OpAMD64SETEQmem_0(v) || rewriteValueAMD64_OpAMD64SETEQmem_10(v) case OpAMD64SETG: return rewriteValueAMD64_OpAMD64SETG_0(v) case OpAMD64SETGE: return rewriteValueAMD64_OpAMD64SETGE_0(v) + case OpAMD64SETGEmem: + return rewriteValueAMD64_OpAMD64SETGEmem_0(v) + case OpAMD64SETGmem: + return rewriteValueAMD64_OpAMD64SETGmem_0(v) case OpAMD64SETL: return rewriteValueAMD64_OpAMD64SETL_0(v) case OpAMD64SETLE: return rewriteValueAMD64_OpAMD64SETLE_0(v) + case OpAMD64SETLEmem: + return rewriteValueAMD64_OpAMD64SETLEmem_0(v) + case OpAMD64SETLmem: + return rewriteValueAMD64_OpAMD64SETLmem_0(v) case OpAMD64SETNE: return rewriteValueAMD64_OpAMD64SETNE_0(v) || rewriteValueAMD64_OpAMD64SETNE_10(v) + case OpAMD64SETNEmem: + return rewriteValueAMD64_OpAMD64SETNEmem_0(v) || rewriteValueAMD64_OpAMD64SETNEmem_10(v) case OpAMD64SHLL: return rewriteValueAMD64_OpAMD64SHLL_0(v) case OpAMD64SHLLconst: @@ -309,14 +365,22 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64SUBL_0(v) case OpAMD64SUBLconst: 
return rewriteValueAMD64_OpAMD64SUBLconst_0(v) + case OpAMD64SUBLmem: + return rewriteValueAMD64_OpAMD64SUBLmem_0(v) case OpAMD64SUBQ: return rewriteValueAMD64_OpAMD64SUBQ_0(v) case OpAMD64SUBQconst: return rewriteValueAMD64_OpAMD64SUBQconst_0(v) + case OpAMD64SUBQmem: + return rewriteValueAMD64_OpAMD64SUBQmem_0(v) case OpAMD64SUBSD: return rewriteValueAMD64_OpAMD64SUBSD_0(v) + case OpAMD64SUBSDmem: + return rewriteValueAMD64_OpAMD64SUBSDmem_0(v) case OpAMD64SUBSS: return rewriteValueAMD64_OpAMD64SUBSS_0(v) + case OpAMD64SUBSSmem: + return rewriteValueAMD64_OpAMD64SUBSSmem_0(v) case OpAMD64TESTB: return rewriteValueAMD64_OpAMD64TESTB_0(v) case OpAMD64TESTL: @@ -337,10 +401,14 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpAMD64XORL_0(v) || rewriteValueAMD64_OpAMD64XORL_10(v) case OpAMD64XORLconst: return rewriteValueAMD64_OpAMD64XORLconst_0(v) || rewriteValueAMD64_OpAMD64XORLconst_10(v) + case OpAMD64XORLmem: + return rewriteValueAMD64_OpAMD64XORLmem_0(v) case OpAMD64XORQ: return rewriteValueAMD64_OpAMD64XORQ_0(v) case OpAMD64XORQconst: return rewriteValueAMD64_OpAMD64XORQconst_0(v) + case OpAMD64XORQmem: + return rewriteValueAMD64_OpAMD64XORQmem_0(v) case OpAdd16: return rewriteValueAMD64_OpAdd16_0(v) case OpAdd32: @@ -405,6 +473,8 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpBswap32_0(v) case OpBswap64: return rewriteValueAMD64_OpBswap64_0(v) + case OpCeil: + return rewriteValueAMD64_OpCeil_0(v) case OpClosureCall: return rewriteValueAMD64_OpClosureCall_0(v) case OpCom16: @@ -495,6 +565,8 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpEqB_0(v) case OpEqPtr: return rewriteValueAMD64_OpEqPtr_0(v) + case OpFloor: + return rewriteValueAMD64_OpFloor_0(v) case OpGeq16: return rewriteValueAMD64_OpGeq16_0(v) case OpGeq16U: @@ -515,6 +587,10 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpGeq8_0(v) case OpGeq8U: return rewriteValueAMD64_OpGeq8U_0(v) + case OpGetCallerPC: + return 
rewriteValueAMD64_OpGetCallerPC_0(v) + case OpGetCallerSP: + return rewriteValueAMD64_OpGetCallerSP_0(v) case OpGetClosurePtr: return rewriteValueAMD64_OpGetClosurePtr_0(v) case OpGetG: @@ -719,6 +795,8 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpRound32F_0(v) case OpRound64F: return rewriteValueAMD64_OpRound64F_0(v) + case OpRoundToEven: + return rewriteValueAMD64_OpRoundToEven_0(v) case OpRsh16Ux16: return rewriteValueAMD64_OpRsh16Ux16_0(v) case OpRsh16Ux32: @@ -821,6 +899,8 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpSub8_0(v) case OpSubPtr: return rewriteValueAMD64_OpSubPtr_0(v) + case OpTrunc: + return rewriteValueAMD64_OpTrunc_0(v) case OpTrunc16to8: return rewriteValueAMD64_OpTrunc16to8_0(v) case OpTrunc32to16: @@ -833,6 +913,8 @@ func rewriteValueAMD64(v *Value) bool { return rewriteValueAMD64_OpTrunc64to32_0(v) case OpTrunc64to8: return rewriteValueAMD64_OpTrunc64to8_0(v) + case OpWB: + return rewriteValueAMD64_OpWB_0(v) case OpXor16: return rewriteValueAMD64_OpXor16_0(v) case OpXor32: @@ -842,7 +924,7 @@ func rewriteValueAMD64(v *Value) bool { case OpXor8: return rewriteValueAMD64_OpXor8_0(v) case OpZero: - return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v) + return rewriteValueAMD64_OpZero_0(v) || rewriteValueAMD64_OpZero_10(v) || rewriteValueAMD64_OpZero_20(v) case OpZeroExt16to32: return rewriteValueAMD64_OpZeroExt16to32_0(v) case OpZeroExt16to64: @@ -1214,6 +1296,81 @@ func rewriteValueAMD64_OpAMD64ADDLconst_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64ADDLconstmem_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (ADDLconstmem [valOff] {sym} ptr (MOVSSstore [ValAndOff(valOff).Off()] {sym} ptr x _)) + // cond: + // result: (ADDLconst [ValAndOff(valOff).Val()] (MOVLf2i x)) + for { + valOff := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVSSstore { + break + } + 
if v_1.AuxInt != ValAndOff(valOff).Off() { + break + } + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + if ptr != v_1.Args[0] { + break + } + x := v_1.Args[1] + v.reset(OpAMD64ADDLconst) + v.AuxInt = ValAndOff(valOff).Val() + v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ADDLmem_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (ADDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) + // cond: + // result: (ADDL x (MOVLf2i y)) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64MOVSSstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + if ptr != v_2.Args[0] { + break + } + y := v_2.Args[1] + v.reset(OpAMD64ADDL) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func rewriteValueAMD64_OpAMD64ADDQ_0(v *Value) bool { // match: (ADDQ x (MOVQconst [c])) // cond: is32Bit(c) @@ -1899,6 +2056,81 @@ func rewriteValueAMD64_OpAMD64ADDQconst_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64ADDQconstmem_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (ADDQconstmem [valOff] {sym} ptr (MOVSDstore [ValAndOff(valOff).Off()] {sym} ptr x _)) + // cond: + // result: (ADDQconst [ValAndOff(valOff).Val()] (MOVQf2i x)) + for { + valOff := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVSDstore { + break + } + if v_1.AuxInt != ValAndOff(valOff).Off() { + break + } + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + if ptr != v_1.Args[0] { + break + } + x := v_1.Args[1] + v.reset(OpAMD64ADDQconst) + v.AuxInt = ValAndOff(valOff).Val() + v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) + 
v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64ADDQmem_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (ADDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) + // cond: + // result: (ADDQ x (MOVQf2i y)) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64MOVSDstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + if ptr != v_2.Args[0] { + break + } + y := v_2.Args[1] + v.reset(OpAMD64ADDQ) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool { // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem)) // cond: canMergeLoad(v, l, x) && clobber(l) @@ -1954,6 +2186,44 @@ func rewriteValueAMD64_OpAMD64ADDSD_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64ADDSDmem_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (ADDSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) + // cond: + // result: (ADDSD x (MOVQi2f y)) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64MOVQstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + if ptr != v_2.Args[0] { + break + } + y := v_2.Args[1] + v.reset(OpAMD64ADDSD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool { // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem)) // cond: canMergeLoad(v, l, x) && clobber(l) @@ -2009,6 +2279,44 @@ func rewriteValueAMD64_OpAMD64ADDSS_0(v *Value) bool { } return false } +func 
rewriteValueAMD64_OpAMD64ADDSSmem_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (ADDSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) + // cond: + // result: (ADDSS x (MOVLi2f y)) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64MOVLstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + if ptr != v_2.Args[0] { + break + } + y := v_2.Args[1] + v.reset(OpAMD64ADDSS) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func rewriteValueAMD64_OpAMD64ANDL_0(v *Value) bool { // match: (ANDL x (MOVLconst [c])) // cond: @@ -2193,6 +2501,44 @@ func rewriteValueAMD64_OpAMD64ANDLconst_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64ANDLmem_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (ANDLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) + // cond: + // result: (ANDL x (MOVLf2i y)) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64MOVSSstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + if ptr != v_2.Args[0] { + break + } + y := v_2.Args[1] + v.reset(OpAMD64ANDL) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func rewriteValueAMD64_OpAMD64ANDQ_0(v *Value) bool { // match: (ANDQ x (MOVQconst [c])) // cond: is32Bit(c) @@ -2393,6 +2739,44 @@ func rewriteValueAMD64_OpAMD64ANDQconst_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64ANDQmem_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (ANDQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) 
+ // cond: + // result: (ANDQ x (MOVQf2i y)) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64MOVSDstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + if ptr != v_2.Args[0] { + break + } + y := v_2.Args[1] + v.reset(OpAMD64ANDQ) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func rewriteValueAMD64_OpAMD64BSFQ_0(v *Value) bool { b := v.Block _ = b @@ -4838,6 +5222,259 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { + // match: (MOVBstore [off] {sym} ptr y:(SETL x) mem) + // cond: y.Uses == 1 + // result: (SETLmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETL { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETLmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem) + // cond: y.Uses == 1 + // result: (SETLEmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETLE { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETLEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem) + // cond: y.Uses == 1 + // result: (SETGmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETG { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETGmem) + v.AuxInt 
= off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem) + // cond: y.Uses == 1 + // result: (SETGEmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETGE { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETGEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem) + // cond: y.Uses == 1 + // result: (SETEQmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETEQ { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETEQmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem) + // cond: y.Uses == 1 + // result: (SETNEmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETNE { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETNEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem) + // cond: y.Uses == 1 + // result: (SETBmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETB { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETBmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem) + // cond: y.Uses == 1 + // result: (SETBEmem [off] {sym} ptr x 
mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETBE { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETBEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem) + // cond: y.Uses == 1 + // result: (SETAmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETA { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETAmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem) + // cond: y.Uses == 1 + // result: (SETAEmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + y := v.Args[1] + if y.Op != OpAMD64SETAE { + break + } + x := y.Args[0] + mem := v.Args[2] + if !(y.Uses == 1) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { b := v.Block _ = b // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem) @@ -5355,7 +5992,11 @@ func rewriteValueAMD64_OpAMD64MOVBstore_0(v *Value) bool { } return false } -func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { +func rewriteValueAMD64_OpAMD64MOVBstore_20(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVWstore [i-1] {s} p w mem) @@ -5451,6 +6092,73 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) 
mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem)) + // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) + // result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + p := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpAMD64MOVBload { + break + } + j := x1.AuxInt + s2 := x1.Aux + _ = x1.Args[1] + p2 := x1.Args[0] + mem := x1.Args[1] + mem2 := v.Args[2] + if mem2.Op != OpAMD64MOVBstore { + break + } + if mem2.AuxInt != i-1 { + break + } + if mem2.Aux != s { + break + } + _ = mem2.Args[2] + if p != mem2.Args[0] { + break + } + x2 := mem2.Args[1] + if x2.Op != OpAMD64MOVBload { + break + } + if x2.AuxInt != j-1 { + break + } + if x2.Aux != s2 { + break + } + _ = x2.Args[1] + if p2 != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + if mem != mem2.Args[2] { + break + } + if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) { + break + } + v.reset(OpAMD64MOVWstore) + v.AuxInt = i - 1 + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16) + v0.AuxInt = j - 1 + v0.Aux = s2 + v0.AddArg(p2) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) @@ -6540,6 +7248,19 @@ func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { v0.AddArg(mem) return true } + // match: (MOVLQZX x) + // cond: zeroUpper32Bits(x,3) + // result: x + for { + x := v.Args[0] + if !(zeroUpper32Bits(x, 3)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } // match: (MOVLQZX x:(MOVLloadidx1 [off] {sym} ptr idx mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVLloadidx1 [off] {sym} ptr idx mem) @@ -6704,6 +7425,54 @@ func 
rewriteValueAMD64_OpAMD64MOVLatomicload_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64MOVLf2i_0(v *Value) bool { + b := v.Block + _ = b + // match: (MOVLf2i (Arg [off] {sym})) + // cond: + // result: @b.Func.Entry (Arg [off] {sym}) + for { + t := v.Type + v_0 := v.Args[0] + if v_0.Op != OpArg { + break + } + off := v_0.AuxInt + sym := v_0.Aux + b = b.Func.Entry + v0 := b.NewValue0(v.Pos, OpArg, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVLi2f_0(v *Value) bool { + b := v.Block + _ = b + // match: (MOVLi2f (Arg [off] {sym})) + // cond: + // result: @b.Func.Entry (Arg [off] {sym}) + for { + t := v.Type + v_0 := v.Args[0] + if v_0.Op != OpArg { + break + } + off := v_0.AuxInt + sym := v_0.Aux + b = b.Func.Entry + v0 := b.NewValue0(v.Pos, OpArg, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + return true + } + return false +} func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) @@ -6834,6 +7603,34 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVLload [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLloadidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ8 { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVLloadidx8) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } // match: (MOVLload [off] {sym} (ADDQ ptr idx) mem) // cond: ptr.Op 
!= OpSB // result: (MOVLloadidx1 [off] {sym} ptr idx mem) @@ -6909,6 +7706,33 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) + // cond: + // result: (MOVLf2i val) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVSSstore { + break + } + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + if ptr != v_1.Args[0] { + break + } + val := v_1.Args[1] + v.reset(OpAMD64MOVLf2i) + v.AddArg(val) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { @@ -6962,6 +7786,56 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVLloadidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) + // cond: + // result: (MOVLloadidx8 [c] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLQconst { + break + } + if v_1.AuxInt != 3 { + break + } + idx := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVLloadidx8) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLloadidx1 [c] {sym} (SHLQconst [3] idx) ptr mem) + // cond: + // result: (MOVLloadidx8 [c] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpAMD64SHLQconst { + break + } + if v_0.AuxInt != 3 { + break + } + idx := v_0.Args[0] + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64MOVLloadidx8) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) // cond: is32Bit(c+d) // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) @@ -7123,6 +7997,61 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { } return false } +func 
rewriteValueAMD64_OpAMD64MOVLloadidx8_0(v *Value) bool { + // match: (MOVLloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) + // cond: is32Bit(c+d) + // result: (MOVLloadidx8 [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { + break + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } + v.reset(OpAMD64MOVLloadidx8) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVLloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) + // cond: is32Bit(c+8*d) + // result: (MOVLloadidx8 [c+8*d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { + break + } + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + if !(is32Bit(c + 8*d)) { + break + } + v.reset(OpAMD64MOVLloadidx8) + v.AuxInt = c + 8*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem) // cond: @@ -7305,6 +8234,36 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVLstore [off1] {sym1} (LEAQ8 [off2] {sym2} ptr idx) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVLstoreidx8 [off1+off2] {mergeSym(sym1,sym2)} ptr idx val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpAMD64LEAQ8 { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpAMD64MOVLstoreidx8) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + 
v.AddArg(mem) + return true + } // match: (MOVLstore [off] {sym} (ADDQ ptr idx) val mem) // cond: ptr.Op != OpSB // result: (MOVLstoreidx1 [off] {sym} ptr idx val mem) @@ -7378,6 +8337,13 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { v.AddArg(mem) return true } + return false +} +func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem)) // cond: x.Uses == 1 && clobber(x) // result: (MOVQstore [i-4] {s} p w0 mem) @@ -7428,9 +8394,73 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { + // match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem)) + // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) + // result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + p := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpAMD64MOVLload { + break + } + j := x1.AuxInt + s2 := x1.Aux + _ = x1.Args[1] + p2 := x1.Args[0] + mem := x1.Args[1] + mem2 := v.Args[2] + if mem2.Op != OpAMD64MOVLstore { + break + } + if mem2.AuxInt != i-4 { + break + } + if mem2.Aux != s { + break + } + _ = mem2.Args[2] + if p != mem2.Args[0] { + break + } + x2 := mem2.Args[1] + if x2.Op != OpAMD64MOVLload { + break + } + if x2.AuxInt != j-4 { + break + } + if x2.Aux != s2 { + break + } + _ = x2.Args[1] + if p2 != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + if mem != mem2.Args[2] { + break + } + if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = i - 4 + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Pos, 
OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = j - 4 + v0.Aux = s2 + v0.AddArg(p2) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) @@ -7484,6 +8514,67 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) + // result: (ADDLconstmem {sym} [makeValAndOff(c,off)] ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + a := v.Args[1] + if a.Op != OpAMD64ADDLconst { + break + } + c := a.AuxInt + l := a.Args[0] + if l.Op != OpAMD64MOVLload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + ptr2 := l.Args[0] + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) { + break + } + v.reset(OpAMD64ADDLconstmem) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem) + // cond: + // result: (MOVSSstore [off] {sym} ptr val mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVLf2i { + break + } + val := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVSSstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVLstoreconst_0(v *Value) bool { @@ -7956,6 +9047,33 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVLstoreidx1 [c] {sym} ptr (SHLQconst [3] idx) val mem) + // cond: + 
// result: (MOVLstoreidx8 [c] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64SHLQconst { + break + } + if v_1.AuxInt != 3 { + break + } + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpAMD64MOVLstoreidx8) + v.AuxInt = c + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) // cond: is32Bit(c+d) // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) @@ -8291,6 +9409,65 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64MOVLstoreidx8_0(v *Value) bool { + // match: (MOVLstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) + // cond: is32Bit(c+d) + // result: (MOVLstoreidx8 [c+d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != OpAMD64ADDQconst { + break + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } + v.reset(OpAMD64MOVLstoreidx8) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVLstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) + // cond: is32Bit(c+8*d) + // result: (MOVLstoreidx8 [c+8*d] {sym} ptr idx val mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64ADDQconst { + break + } + d := v_1.AuxInt + idx := v_1.Args[0] + val := v.Args[2] + mem := v.Args[3] + if !(is32Bit(c + 8*d)) { + break + } + v.reset(OpAMD64MOVLstoreidx8) + v.AuxInt = c + 8*d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64MOVOload_0(v *Value) bool { // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem) // cond: 
is32Bit(off1+off2) @@ -8451,6 +9628,54 @@ func rewriteValueAMD64_OpAMD64MOVQatomicload_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64MOVQf2i_0(v *Value) bool { + b := v.Block + _ = b + // match: (MOVQf2i (Arg [off] {sym})) + // cond: + // result: @b.Func.Entry (Arg [off] {sym}) + for { + t := v.Type + v_0 := v.Args[0] + if v_0.Op != OpArg { + break + } + off := v_0.AuxInt + sym := v_0.Aux + b = b.Func.Entry + v0 := b.NewValue0(v.Pos, OpArg, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQi2f_0(v *Value) bool { + b := v.Block + _ = b + // match: (MOVQi2f (Arg [off] {sym})) + // cond: + // result: @b.Func.Entry (Arg [off] {sym}) + for { + t := v.Type + v_0 := v.Args[0] + if v_0.Op != OpArg { + break + } + off := v_0.AuxInt + sym := v_0.Aux + b = b.Func.Entry + v0 := b.NewValue0(v.Pos, OpArg, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + return true + } + return false +} func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) @@ -8657,6 +9882,33 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) + // cond: + // result: (MOVQf2i val) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVSDstore { + break + } + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + if ptr != v_1.Args[0] { + break + } + val := v_1.Args[1] + v.reset(OpAMD64MOVQf2i) + v.AddArg(val) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { @@ -8872,6 +10124,10 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { return false } func 
rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem) // cond: is32Bit(off1+off2) // result: (MOVQstore [off1+off2] {sym} ptr val mem) @@ -9037,6 +10293,73 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVQstore [i] {s} p x1:(MOVQload [j] {s2} p2 mem) mem2:(MOVQstore [i-8] {s} p x2:(MOVQload [j-8] {s2} p2 mem) mem)) + // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && config.useSSE && clobber(x1) && clobber(x2) && clobber(mem2) + // result: (MOVOstore [i-8] {s} p (MOVOload [j-8] {s2} p2 mem) mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + p := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpAMD64MOVQload { + break + } + j := x1.AuxInt + s2 := x1.Aux + _ = x1.Args[1] + p2 := x1.Args[0] + mem := x1.Args[1] + mem2 := v.Args[2] + if mem2.Op != OpAMD64MOVQstore { + break + } + if mem2.AuxInt != i-8 { + break + } + if mem2.Aux != s { + break + } + _ = mem2.Args[2] + if p != mem2.Args[0] { + break + } + x2 := mem2.Args[1] + if x2.Op != OpAMD64MOVQload { + break + } + if x2.AuxInt != j-8 { + break + } + if x2.Aux != s2 { + break + } + _ = x2.Args[1] + if p2 != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + if mem != mem2.Args[2] { + break + } + if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && config.useSSE && clobber(x1) && clobber(x2) && clobber(mem2)) { + break + } + v.reset(OpAMD64MOVOstore) + v.AuxInt = i - 8 + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) + v0.AuxInt = j - 8 + v0.Aux = s2 + v0.AddArg(p2) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) @@ -9090,9 +10413,77 @@ func 
rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) + // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c,off) + // result: (ADDQconstmem {sym} [makeValAndOff(c,off)] ptr mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + a := v.Args[1] + if a.Op != OpAMD64ADDQconst { + break + } + c := a.AuxInt + l := a.Args[0] + if l.Op != OpAMD64MOVQload { + break + } + if l.AuxInt != off { + break + } + if l.Aux != sym { + break + } + _ = l.Args[1] + ptr2 := l.Args[0] + mem := l.Args[1] + if mem != v.Args[2] { + break + } + if !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && validValAndOff(c, off)) { + break + } + v.reset(OpAMD64ADDQconstmem) + v.AuxInt = makeValAndOff(c, off) + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64MOVQstore_10(v *Value) bool { + // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem) + // cond: + // result: (MOVSDstore [off] {sym} ptr val mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVQf2i { + break + } + val := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVSDstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem) // cond: ValAndOff(sc).canAdd(off) // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {s} ptr mem) @@ -9221,6 +10612,40 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconst_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [c2] {s} p mem)) + // cond: config.useSSE && x.Uses == 1 && ValAndOff(c2).Off() + 8 == 
ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x) + // result: (MOVOstore [ValAndOff(c2).Off()] {s} p (MOVOconst [0]) mem) + for { + c := v.AuxInt + s := v.Aux + _ = v.Args[1] + p := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64MOVQstoreconst { + break + } + c2 := x.AuxInt + if x.Aux != s { + break + } + _ = x.Args[1] + if p != x.Args[0] { + break + } + mem := x.Args[1] + if !(config.useSSE && x.Uses == 1 && ValAndOff(c2).Off()+8 == ValAndOff(c).Off() && ValAndOff(c).Val() == 0 && ValAndOff(c2).Val() == 0 && clobber(x)) { + break + } + v.reset(OpAMD64MOVOstore) + v.AuxInt = ValAndOff(c2).Off() + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } // match: (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) // result: (MOVQstoreconst [ValAndOff(sc).add(off)] {mergeSym(sym1, sym2)} ptr mem) @@ -9684,6 +11109,33 @@ func rewriteValueAMD64_OpAMD64MOVSDload_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) + // cond: + // result: (MOVQi2f val) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVQstore { + break + } + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + if ptr != v_1.Args[0] { + break + } + val := v_1.Args[1] + v.reset(OpAMD64MOVQi2f) + v.AddArg(val) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { @@ -9963,6 +11415,28 @@ func rewriteValueAMD64_OpAMD64MOVSDstore_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) + // cond: + // result: (MOVQstore [off] {sym} ptr val mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != 
OpAMD64MOVQi2f { + break + } + val := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVQstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { @@ -10242,6 +11716,33 @@ func rewriteValueAMD64_OpAMD64MOVSSload_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) + // cond: + // result: (MOVLi2f val) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVLstore { + break + } + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + if ptr != v_1.Args[0] { + break + } + val := v_1.Args[1] + v.reset(OpAMD64MOVLi2f) + v.AddArg(val) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { @@ -10521,6 +12022,28 @@ func rewriteValueAMD64_OpAMD64MOVSSstore_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) + // cond: + // result: (MOVLstore [off] {sym} ptr val mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64MOVLi2f { + break + } + val := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64MOVLstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } return false } func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { @@ -11754,6 +13277,77 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { return false } func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem)) + // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2) + // 
result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem) + for { + i := v.AuxInt + s := v.Aux + _ = v.Args[2] + p := v.Args[0] + x1 := v.Args[1] + if x1.Op != OpAMD64MOVWload { + break + } + j := x1.AuxInt + s2 := x1.Aux + _ = x1.Args[1] + p2 := x1.Args[0] + mem := x1.Args[1] + mem2 := v.Args[2] + if mem2.Op != OpAMD64MOVWstore { + break + } + if mem2.AuxInt != i-2 { + break + } + if mem2.Aux != s { + break + } + _ = mem2.Args[2] + if p != mem2.Args[0] { + break + } + x2 := mem2.Args[1] + if x2.Op != OpAMD64MOVWload { + break + } + if x2.AuxInt != j-2 { + break + } + if x2.Aux != s2 { + break + } + _ = x2.Args[1] + if p2 != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + if mem != mem2.Args[2] { + break + } + if !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1) && clobber(x2) && clobber(mem2)) { + break + } + v.reset(OpAMD64MOVLstore) + v.AuxInt = i - 2 + v.Aux = s + v.AddArg(p) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32) + v0.AuxInt = j - 2 + v0.Aux = s2 + v0.AddArg(p2) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) @@ -12934,20 +14528,6 @@ func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { return true } // match: (MULQconst [c] x) - // cond: isPowerOfTwo(c) - // result: (SHLQconst [log2(c)] x) - for { - c := v.AuxInt - x := v.Args[0] - if !(isPowerOfTwo(c)) { - break - } - v.reset(OpAMD64SHLQconst) - v.AuxInt = log2(c) - v.AddArg(x) - return true - } - // match: (MULQconst [c] x) // cond: isPowerOfTwo(c+1) && c >= 15 // result: (SUBQ (SHLQconst [log2(c+1)] x) x) for { @@ -13015,11 +14595,6 @@ func rewriteValueAMD64_OpAMD64MULQconst_10(v *Value) bool { v.AddArg(x) return true } - return false -} -func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { - b := v.Block - _ = b // match: 
(MULQconst [c] x) // cond: isPowerOfTwo(c-8) && c >= 136 // result: (LEAQ8 (SHLQconst [log2(c-8)] x) x) @@ -13037,6 +14612,11 @@ func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { v.AddArg(x) return true } + return false +} +func rewriteValueAMD64_OpAMD64MULQconst_20(v *Value) bool { + b := v.Block + _ = b // match: (MULQconst [c] x) // cond: c%3 == 0 && isPowerOfTwo(c/3) // result: (SHLQconst [log2(c/3)] (LEAQ2 x x)) @@ -13159,6 +14739,44 @@ func rewriteValueAMD64_OpAMD64MULSD_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64MULSDmem_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (MULSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) + // cond: + // result: (MULSD x (MOVQi2f y)) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64MOVQstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + if ptr != v_2.Args[0] { + break + } + y := v_2.Args[1] + v.reset(OpAMD64MULSD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem)) // cond: canMergeLoad(v, l, x) && clobber(l) @@ -13214,6 +14832,44 @@ func rewriteValueAMD64_OpAMD64MULSS_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64MULSSmem_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (MULSSmem x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) + // cond: + // result: (MULSS x (MOVLi2f y)) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64MOVLstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + if ptr != v_2.Args[0] { + 
break + } + y := v_2.Args[1] + v.reset(OpAMD64MULSS) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func rewriteValueAMD64_OpAMD64NEGL_0(v *Value) bool { // match: (NEGL (MOVLconst [c])) // cond: @@ -21815,6 +23471,44 @@ func rewriteValueAMD64_OpAMD64ORLconst_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64ORLmem_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (ORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) + // cond: + // result: ( ORL x (MOVLf2i y)) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64MOVSSstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + if ptr != v_2.Args[0] { + break + } + y := v_2.Args[1] + v.reset(OpAMD64ORL) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func rewriteValueAMD64_OpAMD64ORQ_0(v *Value) bool { // match: (ORQ x (MOVQconst [c])) // cond: is32Bit(c) @@ -32519,6 +34213,44 @@ func rewriteValueAMD64_OpAMD64ORQconst_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64ORQmem_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (ORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) + // cond: + // result: ( ORQ x (MOVQf2i y)) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64MOVSDstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + if ptr != v_2.Args[0] { + break + } + y := v_2.Args[1] + v.reset(OpAMD64ORQ) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func 
rewriteValueAMD64_OpAMD64ROLB_0(v *Value) bool { // match: (ROLB x (NEGQ y)) // cond: @@ -34101,6 +35833,290 @@ func rewriteValueAMD64_OpAMD64SETAE_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64SETAEmem_0(v *Value) bool { + b := v.Block + _ = b + // match: (SETAEmem [off] {sym} ptr (InvertFlags x) mem) + // cond: + // result: (SETBEmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64SETBEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (SETAEmem [off] {sym} ptr x:(FlagEQ) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagEQ { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETAEmem [off] {sym} ptr x:(FlagLT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETAEmem [off] {sym} ptr x:(FlagLT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) 
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETAEmem [off] {sym} ptr x:(FlagGT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETAEmem [off] {sym} ptr x:(FlagGT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETAmem_0(v *Value) bool { + b := v.Block + _ = b + // match: (SETAmem [off] {sym} ptr (InvertFlags x) mem) + // cond: + // result: (SETBmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64SETBmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (SETAmem [off] {sym} ptr x:(FlagEQ) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagEQ { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) 
+ v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETAmem [off] {sym} ptr x:(FlagLT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETAmem [off] {sym} ptr x:(FlagLT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETAmem [off] {sym} ptr x:(FlagGT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETAmem [off] {sym} ptr x:(FlagGT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} 
func rewriteValueAMD64_OpAMD64SETB_0(v *Value) bool { // match: (SETB (InvertFlags x)) // cond: @@ -34253,6 +36269,290 @@ func rewriteValueAMD64_OpAMD64SETBE_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64SETBEmem_0(v *Value) bool { + b := v.Block + _ = b + // match: (SETBEmem [off] {sym} ptr (InvertFlags x) mem) + // cond: + // result: (SETAEmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (SETBEmem [off] {sym} ptr x:(FlagEQ) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagEQ { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETBEmem [off] {sym} ptr x:(FlagLT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETBEmem [off] {sym} ptr x:(FlagLT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + 
v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETBEmem [off] {sym} ptr x:(FlagGT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETBEmem [off] {sym} ptr x:(FlagGT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETBmem_0(v *Value) bool { + b := v.Block + _ = b + // match: (SETBmem [off] {sym} ptr (InvertFlags x) mem) + // cond: + // result: (SETAmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64SETAmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (SETBmem [off] {sym} ptr x:(FlagEQ) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagEQ { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, 
OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETBmem [off] {sym} ptr x:(FlagLT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETBmem [off] {sym} ptr x:(FlagLT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETBmem [off] {sym} ptr x:(FlagGT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETBmem [off] {sym} ptr x:(FlagGT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return 
true + } + return false +} func rewriteValueAMD64_OpAMD64SETEQ_0(v *Value) bool { b := v.Block _ = b @@ -34558,6 +36858,449 @@ func rewriteValueAMD64_OpAMD64SETEQ_10(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64SETEQmem_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (SETEQmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) + // cond: !config.nacl + // result: (SETAEmem [off] {sym} ptr (BTL x y) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTL { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64SHLL { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAMD64MOVLconst { + break + } + if v_1_0_0.AuxInt != 1 { + break + } + x := v_1_0.Args[1] + y := v_1.Args[1] + mem := v.Args[2] + if !(!config.nacl) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem) + // cond: !config.nacl + // result: (SETAEmem [off] {sym} ptr (BTL x y) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTL { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64SHLL { + break + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64MOVLconst { + break + } + if v_1_1_0.AuxInt != 1 { + break + } + x := v_1_1.Args[1] + mem := v.Args[2] + if !(!config.nacl) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr (TESTQ 
(SHLQ (MOVQconst [1]) x) y) mem) + // cond: !config.nacl + // result: (SETAEmem [off] {sym} ptr (BTQ x y) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64SHLQ { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAMD64MOVQconst { + break + } + if v_1_0_0.AuxInt != 1 { + break + } + x := v_1_0.Args[1] + y := v_1.Args[1] + mem := v.Args[2] + if !(!config.nacl) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem) + // cond: !config.nacl + // result: (SETAEmem [off] {sym} ptr (BTQ x y) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64SHLQ { + break + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64MOVQconst { + break + } + if v_1_1_0.AuxInt != 1 { + break + } + x := v_1_1.Args[1] + mem := v.Args[2] + if !(!config.nacl) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr (TESTLconst [c] x) mem) + // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl + // result: (SETAEmem [off] {sym} ptr (BTLconst [log2(c)] x) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTLconst { + break + } + c := v_1.AuxInt + x := v_1.Args[0] + mem := v.Args[2] + if !(isPowerOfTwo(c) && 
log2(c) < 32 && !config.nacl) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr (TESTQconst [c] x) mem) + // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl + // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTQconst { + break + } + c := v_1.AuxInt + x := v_1.Args[0] + mem := v.Args[2] + if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) + // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl + // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64MOVQconst { + break + } + c := v_1_0.AuxInt + x := v_1.Args[1] + mem := v.Args[2] + if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem) + // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl + // result: (SETAEmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := 
v.Args[1] + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64MOVQconst { + break + } + c := v_1_1.AuxInt + mem := v.Args[2] + if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { + break + } + v.reset(OpAMD64SETAEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr (InvertFlags x) mem) + // cond: + // result: (SETEQmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64SETEQmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr x:(FlagEQ) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagEQ { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETEQmem_10(v *Value) bool { + b := v.Block + _ = b + // match: (SETEQmem [off] {sym} ptr x:(FlagLT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr 
x:(FlagLT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr x:(FlagGT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETEQmem [off] {sym} ptr x:(FlagGT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64SETG_0(v *Value) bool { // match: (SETG (InvertFlags x)) // cond: @@ -34710,6 +37453,290 @@ func rewriteValueAMD64_OpAMD64SETGE_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64SETGEmem_0(v *Value) bool { + b := v.Block + _ = b + // match: (SETGEmem [off] {sym} ptr (InvertFlags x) mem) + // cond: + // result: (SETLEmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v.Args[2] + 
v.reset(OpAMD64SETLEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (SETGEmem [off] {sym} ptr x:(FlagEQ) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagEQ { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETGEmem [off] {sym} ptr x:(FlagLT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETGEmem [off] {sym} ptr x:(FlagLT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETGEmem [off] {sym} ptr x:(FlagGT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + 
v.AddArg(mem) + return true + } + // match: (SETGEmem [off] {sym} ptr x:(FlagGT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETGmem_0(v *Value) bool { + b := v.Block + _ = b + // match: (SETGmem [off] {sym} ptr (InvertFlags x) mem) + // cond: + // result: (SETLmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64SETLmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (SETGmem [off] {sym} ptr x:(FlagEQ) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagEQ { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETGmem [off] {sym} ptr x:(FlagLT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETGmem 
[off] {sym} ptr x:(FlagLT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETGmem [off] {sym} ptr x:(FlagGT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETGmem [off] {sym} ptr x:(FlagGT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64SETL_0(v *Value) bool { // match: (SETL (InvertFlags x)) // cond: @@ -34862,6 +37889,290 @@ func rewriteValueAMD64_OpAMD64SETLE_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64SETLEmem_0(v *Value) bool { + b := v.Block + _ = b + // match: (SETLEmem [off] {sym} ptr (InvertFlags x) mem) + // cond: + // result: (SETGEmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := 
v.Args[2] + v.reset(OpAMD64SETGEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (SETLEmem [off] {sym} ptr x:(FlagEQ) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagEQ { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETLEmem [off] {sym} ptr x:(FlagLT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETLEmem [off] {sym} ptr x:(FlagLT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETLEmem [off] {sym} ptr x:(FlagGT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + 
v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETLEmem [off] {sym} ptr x:(FlagGT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETLmem_0(v *Value) bool { + b := v.Block + _ = b + // match: (SETLmem [off] {sym} ptr (InvertFlags x) mem) + // cond: + // result: (SETGmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64SETGmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (SETLmem [off] {sym} ptr x:(FlagEQ) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagEQ { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETLmem [off] {sym} ptr x:(FlagLT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // 
match: (SETLmem [off] {sym} ptr x:(FlagLT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETLmem [off] {sym} ptr x:(FlagGT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETLmem [off] {sym} ptr x:(FlagGT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64SETNE_0(v *Value) bool { b := v.Block _ = b @@ -35167,6 +38478,449 @@ func rewriteValueAMD64_OpAMD64SETNE_10(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64SETNEmem_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (SETNEmem [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) + // cond: !config.nacl + // result: (SETBmem [off] {sym} ptr (BTL x y) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if 
v_1.Op != OpAMD64TESTL { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64SHLL { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAMD64MOVLconst { + break + } + if v_1_0_0.AuxInt != 1 { + break + } + x := v_1_0.Args[1] + y := v_1.Args[1] + mem := v.Args[2] + if !(!config.nacl) { + break + } + v.reset(OpAMD64SETBmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETNEmem [off] {sym} ptr (TESTL y (SHLL (MOVLconst [1]) x)) mem) + // cond: !config.nacl + // result: (SETBmem [off] {sym} ptr (BTL x y) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTL { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64SHLL { + break + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64MOVLconst { + break + } + if v_1_1_0.AuxInt != 1 { + break + } + x := v_1_1.Args[1] + mem := v.Args[2] + if !(!config.nacl) { + break + } + v.reset(OpAMD64SETBmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETNEmem [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) + // cond: !config.nacl + // result: (SETBmem [off] {sym} ptr (BTQ x y) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64SHLQ { + break + } + _ = v_1_0.Args[1] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpAMD64MOVQconst { + break + } + if v_1_0_0.AuxInt != 1 { + break + } + x := v_1_0.Args[1] + y := v_1.Args[1] + mem := v.Args[2] + if !(!config.nacl) { + break + } + 
v.reset(OpAMD64SETBmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETNEmem [off] {sym} ptr (TESTQ y (SHLQ (MOVQconst [1]) x)) mem) + // cond: !config.nacl + // result: (SETBmem [off] {sym} ptr (BTQ x y) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64SHLQ { + break + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpAMD64MOVQconst { + break + } + if v_1_1_0.AuxInt != 1 { + break + } + x := v_1_1.Args[1] + mem := v.Args[2] + if !(!config.nacl) { + break + } + v.reset(OpAMD64SETBmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETNEmem [off] {sym} ptr (TESTLconst [c] x) mem) + // cond: isPowerOfTwo(c) && log2(c) < 32 && !config.nacl + // result: (SETBmem [off] {sym} ptr (BTLconst [log2(c)] x) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTLconst { + break + } + c := v_1.AuxInt + x := v_1.Args[0] + mem := v.Args[2] + if !(isPowerOfTwo(c) && log2(c) < 32 && !config.nacl) { + break + } + v.reset(OpAMD64SETBmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETNEmem [off] {sym} ptr (TESTQconst [c] x) mem) + // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl + // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != 
OpAMD64TESTQconst { + break + } + c := v_1.AuxInt + x := v_1.Args[0] + mem := v.Args[2] + if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { + break + } + v.reset(OpAMD64SETBmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETNEmem [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) + // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl + // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpAMD64MOVQconst { + break + } + c := v_1_0.AuxInt + x := v_1.Args[1] + mem := v.Args[2] + if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { + break + } + v.reset(OpAMD64SETBmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETNEmem [off] {sym} ptr (TESTQ x (MOVQconst [c])) mem) + // cond: isPowerOfTwo(c) && log2(c) < 64 && !config.nacl + // result: (SETBmem [off] {sym} ptr (BTQconst [log2(c)] x) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64TESTQ { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpAMD64MOVQconst { + break + } + c := v_1_1.AuxInt + mem := v.Args[2] + if !(isPowerOfTwo(c) && log2(c) < 64 && !config.nacl) { + break + } + v.reset(OpAMD64SETBmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETNEmem [off] {sym} ptr (InvertFlags x) mem) + // cond: + // result: 
(SETNEmem [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpAMD64InvertFlags { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpAMD64SETNEmem) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } + // match: (SETNEmem [off] {sym} ptr x:(FlagEQ) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagEQ { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueAMD64_OpAMD64SETNEmem_10(v *Value) bool { + b := v.Block + _ = b + // match: (SETNEmem [off] {sym} ptr x:(FlagLT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETNEmem [off] {sym} ptr x:(FlagLT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagLT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETNEmem [off] {sym} ptr x:(FlagGT_ULT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) 
+ for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_ULT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (SETNEmem [off] {sym} ptr x:(FlagGT_UGT) mem) + // cond: + // result: (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + x := v.Args[1] + if x.Op != OpAMD64FlagGT_UGT { + break + } + mem := v.Args[2] + v.reset(OpAMD64MOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, x.Type) + v0.AuxInt = 1 + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} func rewriteValueAMD64_OpAMD64SHLL_0(v *Value) bool { b := v.Block _ = b @@ -36418,6 +40172,44 @@ func rewriteValueAMD64_OpAMD64SUBLconst_0(v *Value) bool { return true } } +func rewriteValueAMD64_OpAMD64SUBLmem_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (SUBLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) + // cond: + // result: (SUBL x (MOVLf2i y)) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64MOVSSstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + if ptr != v_2.Args[0] { + break + } + y := v_2.Args[1] + v.reset(OpAMD64SUBL) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func rewriteValueAMD64_OpAMD64SUBQ_0(v *Value) bool { b := v.Block _ = b @@ -36565,6 +40357,44 @@ func rewriteValueAMD64_OpAMD64SUBQconst_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64SUBQmem_0(v *Value) bool { + b := v.Block + _ = b + typ := 
&b.Func.Config.Types + _ = typ + // match: (SUBQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) + // cond: + // result: (SUBQ x (MOVQf2i y)) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64MOVSDstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + if ptr != v_2.Args[0] { + break + } + y := v_2.Args[1] + v.reset(OpAMD64SUBQ) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool { // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem)) // cond: canMergeLoad(v, l, x) && clobber(l) @@ -36594,6 +40424,44 @@ func rewriteValueAMD64_OpAMD64SUBSD_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64SUBSDmem_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (SUBSDmem x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) + // cond: + // result: (SUBSD x (MOVQi2f y)) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64MOVQstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + if ptr != v_2.Args[0] { + break + } + y := v_2.Args[1] + v.reset(OpAMD64SUBSD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQi2f, typ.Float64) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool { // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem)) // cond: canMergeLoad(v, l, x) && clobber(l) @@ -36623,6 +40491,44 @@ func rewriteValueAMD64_OpAMD64SUBSS_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64SUBSSmem_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (SUBSSmem x [off] {sym} ptr (MOVLstore [off] 
{sym} ptr y _)) + // cond: + // result: (SUBSS x (MOVLi2f y)) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64MOVLstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + if ptr != v_2.Args[0] { + break + } + y := v_2.Args[1] + v.reset(OpAMD64SUBSS) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLi2f, typ.Float32) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func rewriteValueAMD64_OpAMD64TESTB_0(v *Value) bool { // match: (TESTB (MOVLconst [c]) x) // cond: @@ -37418,6 +41324,44 @@ func rewriteValueAMD64_OpAMD64XORLconst_10(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64XORLmem_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (XORLmem x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) + // cond: + // result: (XORL x (MOVLf2i y)) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpAMD64MOVSSstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + if ptr != v_2.Args[0] { + break + } + y := v_2.Args[1] + v.reset(OpAMD64XORL) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64MOVLf2i, typ.UInt32) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func rewriteValueAMD64_OpAMD64XORQ_0(v *Value) bool { // match: (XORQ x (MOVQconst [c])) // cond: is32Bit(c) @@ -37624,6 +41568,44 @@ func rewriteValueAMD64_OpAMD64XORQconst_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpAMD64XORQmem_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (XORQmem x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) + // cond: + // result: (XORQ x (MOVQf2i y)) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr := v.Args[1] + v_2 := v.Args[2] + if 
v_2.Op != OpAMD64MOVSDstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + if ptr != v_2.Args[0] { + break + } + y := v_2.Args[1] + v.reset(OpAMD64XORQ) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQf2i, typ.UInt64) + v0.AddArg(y) + v.AddArg(v0) + return true + } + return false +} func rewriteValueAMD64_OpAdd16_0(v *Value) bool { // match: (Add16 x y) // cond: @@ -38236,6 +42218,18 @@ func rewriteValueAMD64_OpBswap64_0(v *Value) bool { return true } } +func rewriteValueAMD64_OpCeil_0(v *Value) bool { + // match: (Ceil x) + // cond: + // result: (ROUNDSD [2] x) + for { + x := v.Args[0] + v.reset(OpAMD64ROUNDSD) + v.AuxInt = 2 + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpClosureCall_0(v *Value) bool { // match: (ClosureCall [argwid] entry closure mem) // cond: @@ -38984,6 +42978,18 @@ func rewriteValueAMD64_OpEqPtr_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpFloor_0(v *Value) bool { + // match: (Floor x) + // cond: + // result: (ROUNDSD [1] x) + for { + x := v.Args[0] + v.reset(OpAMD64ROUNDSD) + v.AuxInt = 1 + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpGeq16_0(v *Value) bool { b := v.Block _ = b @@ -39164,6 +43170,24 @@ func rewriteValueAMD64_OpGeq8U_0(v *Value) bool { return true } } +func rewriteValueAMD64_OpGetCallerPC_0(v *Value) bool { + // match: (GetCallerPC) + // cond: + // result: (LoweredGetCallerPC) + for { + v.reset(OpAMD64LoweredGetCallerPC) + return true + } +} +func rewriteValueAMD64_OpGetCallerSP_0(v *Value) bool { + // match: (GetCallerSP) + // cond: + // result: (LoweredGetCallerSP) + for { + v.reset(OpAMD64LoweredGetCallerSP) + return true + } +} func rewriteValueAMD64_OpGetClosurePtr_0(v *Value) bool { // match: (GetClosurePtr) // cond: @@ -40601,6 +44625,8 @@ func rewriteValueAMD64_OpMod8u_0(v *Value) bool { func rewriteValueAMD64_OpMove_0(v *Value) bool { b := v.Block _ = b + config := b.Func.Config + _ = config typ := 
&b.Func.Config.Types _ = typ // match: (Move [0] _ _ mem) @@ -40698,7 +44724,7 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool { return true } // match: (Move [16] dst src mem) - // cond: + // cond: config.useSSE // result: (MOVOstore dst (MOVOload src mem) mem) for { if v.AuxInt != 16 { @@ -40708,6 +44734,9 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool { dst := v.Args[0] src := v.Args[1] mem := v.Args[2] + if !(config.useSSE) { + break + } v.reset(OpAMD64MOVOstore) v.AddArg(dst) v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128) @@ -40717,6 +44746,38 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool { v.AddArg(mem) return true } + // match: (Move [16] dst src mem) + // cond: !config.useSSE + // result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)) + for { + if v.AuxInt != 16 { + break + } + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(!config.useSSE) { + break + } + v.reset(OpAMD64MOVQstore) + v.AuxInt = 8 + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v0.AuxInt = 8 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v1.AddArg(dst) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } // match: (Move [3] dst src mem) // cond: // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem)) @@ -40804,6 +44865,15 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool { v.AddArg(v1) return true } + return false +} +func rewriteValueAMD64_OpMove_10(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + typ := &b.Func.Config.Types + _ = typ // match: (Move [7] dst src mem) // cond: // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem)) @@ -40833,15 +44903,6 @@ func rewriteValueAMD64_OpMove_0(v *Value) bool { 
v.AddArg(v1) return true } - return false -} -func rewriteValueAMD64_OpMove_10(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - typ := &b.Func.Config.Types - _ = typ // match: (Move [s] dst src mem) // cond: s > 8 && s < 16 // result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem)) @@ -40905,7 +44966,7 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool { return true } // match: (Move [s] dst src mem) - // cond: s > 16 && s%16 != 0 && s%16 > 8 + // cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVOstore dst (MOVOload src mem) mem)) for { s := v.AuxInt @@ -40913,7 +44974,7 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool { dst := v.Args[0] src := v.Args[1] mem := v.Args[2] - if !(s > 16 && s%16 != 0 && s%16 > 8) { + if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) { break } v.reset(OpMove) @@ -40937,6 +44998,47 @@ func rewriteValueAMD64_OpMove_10(v *Value) bool { return true } // match: (Move [s] dst src mem) + // cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE + // result: (Move [s-s%16] (OffPtr dst [s%16]) (OffPtr src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))) + for { + s := v.AuxInt + _ = v.Args[2] + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) { + break + } + v.reset(OpMove) + v.AuxInt = s - s%16 + v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type) + v0.AuxInt = s % 16 + v0.AddArg(dst) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type) + v1.AuxInt = s % 16 + v1.AddArg(src) + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v2.AuxInt = 8 + v2.AddArg(dst) + v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v3.AuxInt = 8 + v3.AddArg(src) + v3.AddArg(mem) + v2.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v4.AddArg(dst) + v5 := 
b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64) + v5.AddArg(src) + v5.AddArg(mem) + v4.AddArg(v5) + v4.AddArg(mem) + v2.AddArg(v4) + v.AddArg(v2) + return true + } + // match: (Move [s] dst src mem) // cond: s >= 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice // result: (DUFFCOPY [14*(64-s/16)] dst src mem) for { @@ -41552,6 +45654,18 @@ func rewriteValueAMD64_OpRound64F_0(v *Value) bool { return true } } +func rewriteValueAMD64_OpRoundToEven_0(v *Value) bool { + // match: (RoundToEven x) + // cond: + // result: (ROUNDSD [0] x) + for { + x := v.Args[0] + v.reset(OpAMD64ROUNDSD) + v.AuxInt = 0 + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpRsh16Ux16_0(v *Value) bool { b := v.Block _ = b @@ -42815,6 +46929,18 @@ func rewriteValueAMD64_OpSubPtr_0(v *Value) bool { } return false } +func rewriteValueAMD64_OpTrunc_0(v *Value) bool { + // match: (Trunc x) + // cond: + // result: (ROUNDSD [3] x) + for { + x := v.Args[0] + v.reset(OpAMD64ROUNDSD) + v.AuxInt = 3 + v.AddArg(x) + return true + } +} func rewriteValueAMD64_OpTrunc16to8_0(v *Value) bool { // match: (Trunc16to8 x) // cond: @@ -42887,6 +47013,24 @@ func rewriteValueAMD64_OpTrunc64to8_0(v *Value) bool { return true } } +func rewriteValueAMD64_OpWB_0(v *Value) bool { + // match: (WB {fn} destptr srcptr mem) + // cond: + // result: (LoweredWB {fn} destptr srcptr mem) + for { + fn := v.Aux + _ = v.Args[2] + destptr := v.Args[0] + srcptr := v.Args[1] + mem := v.Args[2] + v.reset(OpAMD64LoweredWB) + v.Aux = fn + v.AddArg(destptr) + v.AddArg(srcptr) + v.AddArg(mem) + return true + } +} func rewriteValueAMD64_OpXor16_0(v *Value) bool { // match: (Xor16 x y) // cond: @@ -42946,6 +47090,8 @@ func rewriteValueAMD64_OpXor8_0(v *Value) bool { func rewriteValueAMD64_OpZero_0(v *Value) bool { b := v.Block _ = b + config := b.Func.Config + _ = config // match: (Zero [0] _ mem) // cond: // result: mem @@ -43105,14 +47251,14 @@ func rewriteValueAMD64_OpZero_0(v *Value) bool { return true } // match: (Zero [s] 
destptr mem) - // cond: s%8 != 0 && s > 8 + // cond: s%8 != 0 && s > 8 && !config.useSSE // result: (Zero [s-s%8] (OffPtr destptr [s%8]) (MOVQstoreconst [0] destptr mem)) for { s := v.AuxInt _ = v.Args[1] destptr := v.Args[0] mem := v.Args[1] - if !(s%8 != 0 && s > 8) { + if !(s%8 != 0 && s > 8 && !config.useSSE) { break } v.reset(OpZero) @@ -43135,10 +47281,8 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { _ = b config := b.Func.Config _ = config - typ := &b.Func.Config.Types - _ = typ // match: (Zero [16] destptr mem) - // cond: + // cond: !config.useSSE // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)) for { if v.AuxInt != 16 { @@ -43147,6 +47291,9 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { _ = v.Args[1] destptr := v.Args[0] mem := v.Args[1] + if !(!config.useSSE) { + break + } v.reset(OpAMD64MOVQstoreconst) v.AuxInt = makeValAndOff(0, 8) v.AddArg(destptr) @@ -43158,7 +47305,7 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { return true } // match: (Zero [24] destptr mem) - // cond: + // cond: !config.useSSE // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))) for { if v.AuxInt != 24 { @@ -43167,6 +47314,9 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { _ = v.Args[1] destptr := v.Args[0] mem := v.Args[1] + if !(!config.useSSE) { + break + } v.reset(OpAMD64MOVQstoreconst) v.AuxInt = makeValAndOff(0, 16) v.AddArg(destptr) @@ -43182,7 +47332,7 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { return true } // match: (Zero [32] destptr mem) - // cond: + // cond: !config.useSSE // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))) for { if v.AuxInt != 32 { @@ -43191,6 +47341,9 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { _ = v.Args[1] destptr := v.Args[0] mem := v.Args[1] + if 
!(!config.useSSE) { + break + } v.reset(OpAMD64MOVQstoreconst) v.AuxInt = makeValAndOff(0, 24) v.AddArg(destptr) @@ -43210,25 +47363,46 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { return true } // match: (Zero [s] destptr mem) - // cond: s <= 1024 && s%8 == 0 && s%16 != 0 && !config.noDuffDevice - // result: (Zero [s-8] (OffPtr [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem)) + // cond: s > 8 && s < 16 && config.useSSE + // result: (MOVQstoreconst [makeValAndOff(0,s-8)] destptr (MOVQstoreconst [0] destptr mem)) for { s := v.AuxInt _ = v.Args[1] destptr := v.Args[0] mem := v.Args[1] - if !(s <= 1024 && s%8 == 0 && s%16 != 0 && !config.noDuffDevice) { + if !(s > 8 && s < 16 && config.useSSE) { + break + } + v.reset(OpAMD64MOVQstoreconst) + v.AuxInt = makeValAndOff(0, s-8) + v.AddArg(destptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) + v0.AuxInt = 0 + v0.AddArg(destptr) + v0.AddArg(mem) + v.AddArg(v0) + return true + } + // match: (Zero [s] destptr mem) + // cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE + // result: (Zero [s-s%16] (OffPtr destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem)) + for { + s := v.AuxInt + _ = v.Args[1] + destptr := v.Args[0] + mem := v.Args[1] + if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) { break } v.reset(OpZero) - v.AuxInt = s - 8 + v.AuxInt = s - s%16 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) - v0.AuxInt = 8 + v0.AuxInt = s % 16 v0.AddArg(destptr) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem) + v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) v1.AddArg(destptr) - v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) v2.AuxInt = 0 v1.AddArg(v2) v1.AddArg(mem) @@ -43236,14 +47410,185 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { return true } // match: (Zero [s] destptr mem) - // cond: s <= 1024 && s%16 == 0 && !config.noDuffDevice + // cond: s%16 != 0 && s > 16 && 
s%16 <= 8 && config.useSSE + // result: (Zero [s-s%16] (OffPtr destptr [s%16]) (MOVQstoreconst [0] destptr mem)) + for { + s := v.AuxInt + _ = v.Args[1] + destptr := v.Args[0] + mem := v.Args[1] + if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) { + break + } + v.reset(OpZero) + v.AuxInt = s - s%16 + v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) + v0.AuxInt = s % 16 + v0.AddArg(destptr) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem) + v1.AuxInt = 0 + v1.AddArg(destptr) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Zero [16] destptr mem) + // cond: config.useSSE + // result: (MOVOstore destptr (MOVOconst [0]) mem) + for { + if v.AuxInt != 16 { + break + } + _ = v.Args[1] + destptr := v.Args[0] + mem := v.Args[1] + if !(config.useSSE) { + break + } + v.reset(OpAMD64MOVOstore) + v.AddArg(destptr) + v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Zero [32] destptr mem) + // cond: config.useSSE + // result: (MOVOstore (OffPtr destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)) + for { + if v.AuxInt != 32 { + break + } + _ = v.Args[1] + destptr := v.Args[0] + mem := v.Args[1] + if !(config.useSSE) { + break + } + v.reset(OpAMD64MOVOstore) + v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) + v0.AuxInt = 16 + v0.AddArg(destptr) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) + v1.AuxInt = 0 + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) + v2.AddArg(destptr) + v3 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) + v3.AuxInt = 0 + v2.AddArg(v3) + v2.AddArg(mem) + v.AddArg(v2) + return true + } + // match: (Zero [48] destptr mem) + // cond: config.useSSE + // result: (MOVOstore (OffPtr destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))) + for { + if v.AuxInt != 48 { + break + 
} + _ = v.Args[1] + destptr := v.Args[0] + mem := v.Args[1] + if !(config.useSSE) { + break + } + v.reset(OpAMD64MOVOstore) + v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) + v0.AuxInt = 32 + v0.AddArg(destptr) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) + v1.AuxInt = 0 + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) + v3.AuxInt = 16 + v3.AddArg(destptr) + v2.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) + v4.AuxInt = 0 + v2.AddArg(v4) + v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) + v5.AddArg(destptr) + v6 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) + v6.AuxInt = 0 + v5.AddArg(v6) + v5.AddArg(mem) + v2.AddArg(v5) + v.AddArg(v2) + return true + } + // match: (Zero [64] destptr mem) + // cond: config.useSSE + // result: (MOVOstore (OffPtr destptr [48]) (MOVOconst [0]) (MOVOstore (OffPtr destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)))) + for { + if v.AuxInt != 64 { + break + } + _ = v.Args[1] + destptr := v.Args[0] + mem := v.Args[1] + if !(config.useSSE) { + break + } + v.reset(OpAMD64MOVOstore) + v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) + v0.AuxInt = 48 + v0.AddArg(destptr) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) + v1.AuxInt = 0 + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) + v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) + v3.AuxInt = 32 + v3.AddArg(destptr) + v2.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) + v4.AuxInt = 0 + v2.AddArg(v4) + v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem) + v6 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type) + v6.AuxInt = 16 + v6.AddArg(destptr) + v5.AddArg(v6) + v7 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) + v7.AuxInt = 0 + v5.AddArg(v7) + v8 := b.NewValue0(v.Pos, 
OpAMD64MOVOstore, types.TypeMem) + v8.AddArg(destptr) + v9 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128) + v9.AuxInt = 0 + v8.AddArg(v9) + v8.AddArg(mem) + v5.AddArg(v8) + v2.AddArg(v5) + v.AddArg(v2) + return true + } + return false +} +func rewriteValueAMD64_OpZero_20(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + typ := &b.Func.Config.Types + _ = typ + // match: (Zero [s] destptr mem) + // cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice // result: (DUFFZERO [s] destptr (MOVOconst [0]) mem) for { s := v.AuxInt _ = v.Args[1] destptr := v.Args[0] mem := v.Args[1] - if !(s <= 1024 && s%16 == 0 && !config.noDuffDevice) { + if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) { break } v.reset(OpAMD64DUFFZERO) @@ -43256,14 +47601,14 @@ func rewriteValueAMD64_OpZero_10(v *Value) bool { return true } // match: (Zero [s] destptr mem) - // cond: (s > 1024 || (config.noDuffDevice && s > 32)) && s%8 == 0 + // cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0 // result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem) for { s := v.AuxInt _ = v.Args[1] destptr := v.Args[0] mem := v.Args[1] - if !((s > 1024 || (config.noDuffDevice && s > 32)) && s%8 == 0) { + if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) { break } v.reset(OpAMD64REPSTOSQ) @@ -43385,6 +47730,7 @@ func rewriteBlockAMD64(b *Block) bool { v0.AddArg(x) v0.AddArg(y) b.SetControl(v0) + b.Aux = nil return true } // match: (EQ (TESTL y (SHLL (MOVLconst [1]) x))) @@ -43418,6 +47764,7 @@ func rewriteBlockAMD64(b *Block) bool { v0.AddArg(x) v0.AddArg(y) b.SetControl(v0) + b.Aux = nil return true } // match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y)) @@ -43451,6 +47798,7 @@ func rewriteBlockAMD64(b *Block) bool { v0.AddArg(x) v0.AddArg(y) b.SetControl(v0) + b.Aux = nil return true } // match: (EQ (TESTQ y (SHLQ (MOVQconst [1]) x))) @@ -43484,6 +47832,7 @@ func 
rewriteBlockAMD64(b *Block) bool { v0.AddArg(x) v0.AddArg(y) b.SetControl(v0) + b.Aux = nil return true } // match: (EQ (TESTLconst [c] x)) @@ -43504,6 +47853,7 @@ func rewriteBlockAMD64(b *Block) bool { v0.AuxInt = log2(c) v0.AddArg(x) b.SetControl(v0) + b.Aux = nil return true } // match: (EQ (TESTQconst [c] x)) @@ -43524,6 +47874,7 @@ func rewriteBlockAMD64(b *Block) bool { v0.AuxInt = log2(c) v0.AddArg(x) b.SetControl(v0) + b.Aux = nil return true } // match: (EQ (TESTQ (MOVQconst [c]) x)) @@ -43549,6 +47900,7 @@ func rewriteBlockAMD64(b *Block) bool { v0.AuxInt = log2(c) v0.AddArg(x) b.SetControl(v0) + b.Aux = nil return true } // match: (EQ (TESTQ x (MOVQconst [c]))) @@ -43574,6 +47926,7 @@ func rewriteBlockAMD64(b *Block) bool { v0.AuxInt = log2(c) v0.AddArg(x) b.SetControl(v0) + b.Aux = nil return true } // match: (EQ (InvertFlags cmp) yes no) @@ -43587,6 +47940,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64EQ b.SetControl(cmp) + b.Aux = nil return true } // match: (EQ (FlagEQ) yes no) @@ -43599,6 +47953,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (EQ (FlagLT_ULT) yes no) @@ -43611,6 +47966,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -43624,6 +47980,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -43637,6 +47994,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -43650,6 +48008,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -43665,6 +48024,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64LE b.SetControl(cmp) + b.Aux = nil return true } // match: (GE (FlagEQ) yes no) @@ 
-43677,6 +48037,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GE (FlagLT_ULT) yes no) @@ -43689,6 +48050,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -43702,6 +48064,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -43715,6 +48078,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GE (FlagGT_UGT) yes no) @@ -43727,6 +48091,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } case BlockAMD64GT: @@ -43741,6 +48106,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64LT b.SetControl(cmp) + b.Aux = nil return true } // match: (GT (FlagEQ) yes no) @@ -43753,6 +48119,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -43766,6 +48133,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -43779,6 +48147,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -43792,6 +48161,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GT (FlagGT_UGT) yes no) @@ -43804,6 +48174,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } case BlockIf: @@ -43818,6 +48189,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64LT b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETLE cmp) yes no) @@ -43831,6 +48203,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] 
b.Kind = BlockAMD64LE b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETG cmp) yes no) @@ -43844,6 +48217,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64GT b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETGE cmp) yes no) @@ -43857,6 +48231,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64GE b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETEQ cmp) yes no) @@ -43870,6 +48245,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64EQ b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETNE cmp) yes no) @@ -43883,6 +48259,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64NE b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETB cmp) yes no) @@ -43896,6 +48273,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64ULT b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETBE cmp) yes no) @@ -43909,6 +48287,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64ULE b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETA cmp) yes no) @@ -43922,6 +48301,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64UGT b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETAE cmp) yes no) @@ -43935,6 +48315,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64UGE b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETGF cmp) yes no) @@ -43948,6 +48329,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64UGT b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETGEF cmp) yes no) @@ -43961,6 +48343,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64UGE b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETEQF cmp) yes no) @@ -43974,6 +48357,7 @@ func rewriteBlockAMD64(b *Block) bool 
{ cmp := v.Args[0] b.Kind = BlockAMD64EQF b.SetControl(cmp) + b.Aux = nil return true } // match: (If (SETNEF cmp) yes no) @@ -43987,6 +48371,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64NEF b.SetControl(cmp) + b.Aux = nil return true } // match: (If cond yes no) @@ -44001,6 +48386,7 @@ func rewriteBlockAMD64(b *Block) bool { v0.AddArg(cond) v0.AddArg(cond) b.SetControl(v0) + b.Aux = nil return true } case BlockAMD64LE: @@ -44015,6 +48401,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64GE b.SetControl(cmp) + b.Aux = nil return true } // match: (LE (FlagEQ) yes no) @@ -44027,6 +48414,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LE (FlagLT_ULT) yes no) @@ -44039,6 +48427,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LE (FlagLT_UGT) yes no) @@ -44051,6 +48440,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LE (FlagGT_ULT) yes no) @@ -44063,6 +48453,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -44076,6 +48467,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -44091,6 +48483,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64GT b.SetControl(cmp) + b.Aux = nil return true } // match: (LT (FlagEQ) yes no) @@ -44103,6 +48496,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -44116,6 +48510,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LT (FlagLT_UGT) yes no) @@ -44128,6 +48523,7 @@ func rewriteBlockAMD64(b *Block) 
bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LT (FlagGT_ULT) yes no) @@ -44140,6 +48536,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -44153,6 +48550,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -44180,6 +48578,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64LT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no) @@ -44205,6 +48604,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64LT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) @@ -44230,6 +48630,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64LE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) @@ -44255,6 +48656,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64LE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) @@ -44280,6 +48682,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64GT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no) @@ -44305,6 +48708,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64GT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) @@ -44330,6 +48734,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64GE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) @@ -44355,6 +48760,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64GE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) @@ -44380,6 +48786,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64EQ 
b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) @@ -44405,6 +48812,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64EQ b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) @@ -44430,6 +48838,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64NE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) @@ -44455,6 +48864,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64NE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) @@ -44480,6 +48890,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64ULT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no) @@ -44505,6 +48916,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64ULT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) @@ -44530,6 +48942,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64ULE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) @@ -44555,6 +48968,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64ULE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) @@ -44580,6 +48994,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64UGT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no) @@ -44605,6 +49020,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64UGT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) @@ -44630,6 +49046,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64UGE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) @@ -44655,6 +49072,7 
@@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64UGE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTL (SHLL (MOVLconst [1]) x) y)) @@ -44688,6 +49106,7 @@ func rewriteBlockAMD64(b *Block) bool { v0.AddArg(x) v0.AddArg(y) b.SetControl(v0) + b.Aux = nil return true } // match: (NE (TESTL y (SHLL (MOVLconst [1]) x))) @@ -44721,6 +49140,7 @@ func rewriteBlockAMD64(b *Block) bool { v0.AddArg(x) v0.AddArg(y) b.SetControl(v0) + b.Aux = nil return true } // match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y)) @@ -44754,6 +49174,7 @@ func rewriteBlockAMD64(b *Block) bool { v0.AddArg(x) v0.AddArg(y) b.SetControl(v0) + b.Aux = nil return true } // match: (NE (TESTQ y (SHLQ (MOVQconst [1]) x))) @@ -44787,6 +49208,7 @@ func rewriteBlockAMD64(b *Block) bool { v0.AddArg(x) v0.AddArg(y) b.SetControl(v0) + b.Aux = nil return true } // match: (NE (TESTLconst [c] x)) @@ -44807,6 +49229,7 @@ func rewriteBlockAMD64(b *Block) bool { v0.AuxInt = log2(c) v0.AddArg(x) b.SetControl(v0) + b.Aux = nil return true } // match: (NE (TESTQconst [c] x)) @@ -44827,6 +49250,7 @@ func rewriteBlockAMD64(b *Block) bool { v0.AuxInt = log2(c) v0.AddArg(x) b.SetControl(v0) + b.Aux = nil return true } // match: (NE (TESTQ (MOVQconst [c]) x)) @@ -44852,6 +49276,7 @@ func rewriteBlockAMD64(b *Block) bool { v0.AuxInt = log2(c) v0.AddArg(x) b.SetControl(v0) + b.Aux = nil return true } // match: (NE (TESTQ x (MOVQconst [c]))) @@ -44877,6 +49302,7 @@ func rewriteBlockAMD64(b *Block) bool { v0.AuxInt = log2(c) v0.AddArg(x) b.SetControl(v0) + b.Aux = nil return true } // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) @@ -44902,6 +49328,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64UGT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) @@ -44927,6 +49354,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64UGT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) 
yes no) @@ -44952,6 +49380,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64UGE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) @@ -44977,6 +49406,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64UGE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) @@ -45002,6 +49432,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64EQF b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) @@ -45027,6 +49458,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64EQF b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) @@ -45052,6 +49484,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64NEF b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) @@ -45077,6 +49510,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockAMD64NEF b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (InvertFlags cmp) yes no) @@ -45090,6 +49524,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64NE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (FlagEQ) yes no) @@ -45102,6 +49537,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -45115,6 +49551,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (NE (FlagLT_UGT) yes no) @@ -45127,6 +49564,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (NE (FlagGT_ULT) yes no) @@ -45139,6 +49577,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (NE (FlagGT_UGT) yes no) @@ -45151,6 +49590,7 @@ func 
rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } case BlockAMD64UGE: @@ -45165,6 +49605,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64ULE b.SetControl(cmp) + b.Aux = nil return true } // match: (UGE (FlagEQ) yes no) @@ -45177,6 +49618,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (UGE (FlagLT_ULT) yes no) @@ -45189,6 +49631,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -45202,6 +49645,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (UGE (FlagGT_ULT) yes no) @@ -45214,6 +49658,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -45227,6 +49672,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } case BlockAMD64UGT: @@ -45241,6 +49687,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64ULT b.SetControl(cmp) + b.Aux = nil return true } // match: (UGT (FlagEQ) yes no) @@ -45253,6 +49700,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -45266,6 +49714,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -45279,6 +49728,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (UGT (FlagGT_ULT) yes no) @@ -45291,6 +49741,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -45304,6 +49755,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst 
b.SetControl(nil) + b.Aux = nil return true } case BlockAMD64ULE: @@ -45318,6 +49770,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64UGE b.SetControl(cmp) + b.Aux = nil return true } // match: (ULE (FlagEQ) yes no) @@ -45330,6 +49783,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (ULE (FlagLT_ULT) yes no) @@ -45342,6 +49796,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (ULE (FlagLT_UGT) yes no) @@ -45354,6 +49809,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -45367,6 +49823,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (ULE (FlagGT_UGT) yes no) @@ -45379,6 +49836,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -45394,6 +49852,7 @@ func rewriteBlockAMD64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockAMD64UGT b.SetControl(cmp) + b.Aux = nil return true } // match: (ULT (FlagEQ) yes no) @@ -45406,6 +49865,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -45419,6 +49879,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (ULT (FlagLT_UGT) yes no) @@ -45431,6 +49892,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -45444,6 +49906,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (ULT (FlagGT_UGT) yes no) @@ -45456,6 +49919,7 @@ func rewriteBlockAMD64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil 
b.swapSuccessors() return true } diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index 6bb8da5daa8..058ae72dde1 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -33,6 +33,10 @@ func rewriteValueARM(v *Value) bool { return rewriteValueARM_OpARMADCshiftRLreg_0(v) case OpARMADD: return rewriteValueARM_OpARMADD_0(v) || rewriteValueARM_OpARMADD_10(v) + case OpARMADDD: + return rewriteValueARM_OpARMADDD_0(v) + case OpARMADDF: + return rewriteValueARM_OpARMADDF_0(v) case OpARMADDS: return rewriteValueARM_OpARMADDS_0(v) || rewriteValueARM_OpARMADDS_10(v) case OpARMADDSshiftLL: @@ -77,6 +81,10 @@ func rewriteValueARM(v *Value) bool { return rewriteValueARM_OpARMANDshiftRL_0(v) case OpARMANDshiftRLreg: return rewriteValueARM_OpARMANDshiftRLreg_0(v) + case OpARMBFX: + return rewriteValueARM_OpARMBFX_0(v) + case OpARMBFXU: + return rewriteValueARM_OpARMBFXU_0(v) case OpARMBIC: return rewriteValueARM_OpARMBIC_0(v) case OpARMBICconst: @@ -93,6 +101,22 @@ func rewriteValueARM(v *Value) bool { return rewriteValueARM_OpARMBICshiftRL_0(v) case OpARMBICshiftRLreg: return rewriteValueARM_OpARMBICshiftRLreg_0(v) + case OpARMCMN: + return rewriteValueARM_OpARMCMN_0(v) || rewriteValueARM_OpARMCMN_10(v) + case OpARMCMNconst: + return rewriteValueARM_OpARMCMNconst_0(v) + case OpARMCMNshiftLL: + return rewriteValueARM_OpARMCMNshiftLL_0(v) + case OpARMCMNshiftLLreg: + return rewriteValueARM_OpARMCMNshiftLLreg_0(v) + case OpARMCMNshiftRA: + return rewriteValueARM_OpARMCMNshiftRA_0(v) + case OpARMCMNshiftRAreg: + return rewriteValueARM_OpARMCMNshiftRAreg_0(v) + case OpARMCMNshiftRL: + return rewriteValueARM_OpARMCMNshiftRL_0(v) + case OpARMCMNshiftRLreg: + return rewriteValueARM_OpARMCMNshiftRLreg_0(v) case OpARMCMOVWHSconst: return rewriteValueARM_OpARMCMOVWHSconst_0(v) case OpARMCMOVWLSconst: @@ -137,14 +161,20 @@ func rewriteValueARM(v *Value) bool { return 
rewriteValueARM_OpARMLessThanU_0(v) case OpARMMOVBUload: return rewriteValueARM_OpARMMOVBUload_0(v) + case OpARMMOVBUloadidx: + return rewriteValueARM_OpARMMOVBUloadidx_0(v) case OpARMMOVBUreg: return rewriteValueARM_OpARMMOVBUreg_0(v) case OpARMMOVBload: return rewriteValueARM_OpARMMOVBload_0(v) + case OpARMMOVBloadidx: + return rewriteValueARM_OpARMMOVBloadidx_0(v) case OpARMMOVBreg: return rewriteValueARM_OpARMMOVBreg_0(v) case OpARMMOVBstore: return rewriteValueARM_OpARMMOVBstore_0(v) + case OpARMMOVBstoreidx: + return rewriteValueARM_OpARMMOVBstoreidx_0(v) case OpARMMOVDload: return rewriteValueARM_OpARMMOVDload_0(v) case OpARMMOVDstore: @@ -155,14 +185,20 @@ func rewriteValueARM(v *Value) bool { return rewriteValueARM_OpARMMOVFstore_0(v) case OpARMMOVHUload: return rewriteValueARM_OpARMMOVHUload_0(v) + case OpARMMOVHUloadidx: + return rewriteValueARM_OpARMMOVHUloadidx_0(v) case OpARMMOVHUreg: return rewriteValueARM_OpARMMOVHUreg_0(v) case OpARMMOVHload: return rewriteValueARM_OpARMMOVHload_0(v) + case OpARMMOVHloadidx: + return rewriteValueARM_OpARMMOVHloadidx_0(v) case OpARMMOVHreg: return rewriteValueARM_OpARMMOVHreg_0(v) case OpARMMOVHstore: return rewriteValueARM_OpARMMOVHstore_0(v) + case OpARMMOVHstoreidx: + return rewriteValueARM_OpARMMOVHstoreidx_0(v) case OpARMMOVWload: return rewriteValueARM_OpARMMOVWload_0(v) case OpARMMOVWloadidx: @@ -189,6 +225,12 @@ func rewriteValueARM(v *Value) bool { return rewriteValueARM_OpARMMUL_0(v) || rewriteValueARM_OpARMMUL_10(v) || rewriteValueARM_OpARMMUL_20(v) case OpARMMULA: return rewriteValueARM_OpARMMULA_0(v) || rewriteValueARM_OpARMMULA_10(v) || rewriteValueARM_OpARMMULA_20(v) + case OpARMMULD: + return rewriteValueARM_OpARMMULD_0(v) + case OpARMMULF: + return rewriteValueARM_OpARMMULF_0(v) + case OpARMMULS: + return rewriteValueARM_OpARMMULS_0(v) || rewriteValueARM_OpARMMULS_10(v) || rewriteValueARM_OpARMMULS_20(v) case OpARMMVN: return rewriteValueARM_OpARMMVN_0(v) case OpARMMVNshiftLL: @@ -203,6 +245,14 @@ 
func rewriteValueARM(v *Value) bool { return rewriteValueARM_OpARMMVNshiftRL_0(v) case OpARMMVNshiftRLreg: return rewriteValueARM_OpARMMVNshiftRLreg_0(v) + case OpARMNEGD: + return rewriteValueARM_OpARMNEGD_0(v) + case OpARMNEGF: + return rewriteValueARM_OpARMNEGF_0(v) + case OpARMNMULD: + return rewriteValueARM_OpARMNMULD_0(v) + case OpARMNMULF: + return rewriteValueARM_OpARMNMULF_0(v) case OpARMNotEqual: return rewriteValueARM_OpARMNotEqual_0(v) case OpARMOR: @@ -295,6 +345,10 @@ func rewriteValueARM(v *Value) bool { return rewriteValueARM_OpARMSRLconst_0(v) case OpARMSUB: return rewriteValueARM_OpARMSUB_0(v) || rewriteValueARM_OpARMSUB_10(v) + case OpARMSUBD: + return rewriteValueARM_OpARMSUBD_0(v) + case OpARMSUBF: + return rewriteValueARM_OpARMSUBF_0(v) case OpARMSUBS: return rewriteValueARM_OpARMSUBS_0(v) || rewriteValueARM_OpARMSUBS_10(v) case OpARMSUBSshiftLL: @@ -323,6 +377,38 @@ func rewriteValueARM(v *Value) bool { return rewriteValueARM_OpARMSUBshiftRL_0(v) case OpARMSUBshiftRLreg: return rewriteValueARM_OpARMSUBshiftRLreg_0(v) + case OpARMTEQ: + return rewriteValueARM_OpARMTEQ_0(v) || rewriteValueARM_OpARMTEQ_10(v) + case OpARMTEQconst: + return rewriteValueARM_OpARMTEQconst_0(v) + case OpARMTEQshiftLL: + return rewriteValueARM_OpARMTEQshiftLL_0(v) + case OpARMTEQshiftLLreg: + return rewriteValueARM_OpARMTEQshiftLLreg_0(v) + case OpARMTEQshiftRA: + return rewriteValueARM_OpARMTEQshiftRA_0(v) + case OpARMTEQshiftRAreg: + return rewriteValueARM_OpARMTEQshiftRAreg_0(v) + case OpARMTEQshiftRL: + return rewriteValueARM_OpARMTEQshiftRL_0(v) + case OpARMTEQshiftRLreg: + return rewriteValueARM_OpARMTEQshiftRLreg_0(v) + case OpARMTST: + return rewriteValueARM_OpARMTST_0(v) || rewriteValueARM_OpARMTST_10(v) + case OpARMTSTconst: + return rewriteValueARM_OpARMTSTconst_0(v) + case OpARMTSTshiftLL: + return rewriteValueARM_OpARMTSTshiftLL_0(v) + case OpARMTSTshiftLLreg: + return rewriteValueARM_OpARMTSTshiftLLreg_0(v) + case OpARMTSTshiftRA: + return 
rewriteValueARM_OpARMTSTshiftRA_0(v) + case OpARMTSTshiftRAreg: + return rewriteValueARM_OpARMTSTshiftRAreg_0(v) + case OpARMTSTshiftRL: + return rewriteValueARM_OpARMTSTshiftRL_0(v) + case OpARMTSTshiftRLreg: + return rewriteValueARM_OpARMTSTshiftRLreg_0(v) case OpARMXOR: return rewriteValueARM_OpARMXOR_0(v) || rewriteValueARM_OpARMXOR_10(v) case OpARMXORconst: @@ -465,6 +551,8 @@ func rewriteValueARM(v *Value) bool { return rewriteValueARM_OpGeq8_0(v) case OpGeq8U: return rewriteValueARM_OpGeq8U_0(v) + case OpGetCallerSP: + return rewriteValueARM_OpGetCallerSP_0(v) case OpGetClosurePtr: return rewriteValueARM_OpGetClosurePtr_0(v) case OpGreater16: @@ -1371,7 +1459,7 @@ func rewriteValueARM_OpARMADCshiftLL_0(v *Value) bool { } // match: (ADCshiftLL x (MOVWconst [c]) [d] flags) // cond: - // result: (ADCconst x [int64(uint32(c)<>uint64(d))] flags) + // result: (ADCconst x [int64(int32(uint32(c)>>uint64(d)))] flags) for { d := v.AuxInt _ = v.Args[2] @@ -1569,7 +1657,7 @@ func rewriteValueARM_OpARMADCshiftRL_0(v *Value) bool { c := v_1.AuxInt flags := v.Args[2] v.reset(OpARMADCconst) - v.AuxInt = int64(uint32(c) >> uint64(d)) + v.AuxInt = int64(int32(uint32(c) >> uint64(d))) v.AddArg(x) v.AddArg(flags) return true @@ -2013,6 +2101,188 @@ func rewriteValueARM_OpARMADD_10(v *Value) bool { } return false } +func rewriteValueARM_OpARMADDD_0(v *Value) bool { + // match: (ADDD a (MULD x y)) + // cond: a.Uses == 1 && objabi.GOARM >= 6 + // result: (MULAD a x y) + for { + _ = v.Args[1] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMULD { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + y := v_1.Args[1] + if !(a.Uses == 1 && objabi.GOARM >= 6) { + break + } + v.reset(OpARMMULAD) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADDD (MULD x y) a) + // cond: a.Uses == 1 && objabi.GOARM >= 6 + // result: (MULAD a x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMULD { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := 
v_0.Args[1] + a := v.Args[1] + if !(a.Uses == 1 && objabi.GOARM >= 6) { + break + } + v.reset(OpARMMULAD) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADDD a (NMULD x y)) + // cond: a.Uses == 1 && objabi.GOARM >= 6 + // result: (MULSD a x y) + for { + _ = v.Args[1] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMNMULD { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + y := v_1.Args[1] + if !(a.Uses == 1 && objabi.GOARM >= 6) { + break + } + v.reset(OpARMMULSD) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADDD (NMULD x y) a) + // cond: a.Uses == 1 && objabi.GOARM >= 6 + // result: (MULSD a x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMNMULD { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + a := v.Args[1] + if !(a.Uses == 1 && objabi.GOARM >= 6) { + break + } + v.reset(OpARMMULSD) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM_OpARMADDF_0(v *Value) bool { + // match: (ADDF a (MULF x y)) + // cond: a.Uses == 1 && objabi.GOARM >= 6 + // result: (MULAF a x y) + for { + _ = v.Args[1] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMULF { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + y := v_1.Args[1] + if !(a.Uses == 1 && objabi.GOARM >= 6) { + break + } + v.reset(OpARMMULAF) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADDF (MULF x y) a) + // cond: a.Uses == 1 && objabi.GOARM >= 6 + // result: (MULAF a x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMULF { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + a := v.Args[1] + if !(a.Uses == 1 && objabi.GOARM >= 6) { + break + } + v.reset(OpARMMULAF) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADDF a (NMULF x y)) + // cond: a.Uses == 1 && objabi.GOARM >= 6 + // result: (MULSF a x y) + for { + _ = v.Args[1] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != 
OpARMNMULF { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + y := v_1.Args[1] + if !(a.Uses == 1 && objabi.GOARM >= 6) { + break + } + v.reset(OpARMMULSF) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADDF (NMULF x y) a) + // cond: a.Uses == 1 && objabi.GOARM >= 6 + // result: (MULSF a x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMNMULF { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + a := v.Args[1] + if !(a.Uses == 1 && objabi.GOARM >= 6) { + break + } + v.reset(OpARMMULSF) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} func rewriteValueARM_OpARMADDS_0(v *Value) bool { // match: (ADDS x (MOVWconst [c])) // cond: @@ -2298,7 +2568,7 @@ func rewriteValueARM_OpARMADDSshiftLL_0(v *Value) bool { } // match: (ADDSshiftLL x (MOVWconst [c]) [d]) // cond: - // result: (ADDSconst x [int64(uint32(c)<>uint64(d))]) + // result: (ADDSconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt _ = v.Args[1] @@ -2479,7 +2749,7 @@ func rewriteValueARM_OpARMADDSshiftRL_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpARMADDSconst) - v.AuxInt = int64(uint32(c) >> uint64(d)) + v.AuxInt = int64(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -2663,7 +2933,7 @@ func rewriteValueARM_OpARMADDshiftLL_0(v *Value) bool { } // match: (ADDshiftLL x (MOVWconst [c]) [d]) // cond: - // result: (ADDconst x [int64(uint32(c)<>uint64(d))]) + // result: (ADDconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt _ = v.Args[1] @@ -2866,7 +3136,7 @@ func rewriteValueARM_OpARMADDshiftRL_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpARMADDconst) - v.AuxInt = int64(uint32(c) >> uint64(d)) + v.AuxInt = int64(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -3382,7 +3652,7 @@ func rewriteValueARM_OpARMANDconst_0(v *Value) bool { } // match: (ANDconst [c] x) // cond: !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) - // result: (BICconst [int64(^uint32(c))] x) + // 
result: (BICconst [int64(int32(^uint32(c)))] x) for { c := v.AuxInt x := v.Args[0] @@ -3390,7 +3660,7 @@ func rewriteValueARM_OpARMANDconst_0(v *Value) bool { break } v.reset(OpARMBICconst) - v.AuxInt = int64(^uint32(c)) + v.AuxInt = int64(int32(^uint32(c))) v.AddArg(x) return true } @@ -3451,7 +3721,7 @@ func rewriteValueARM_OpARMANDshiftLL_0(v *Value) bool { } // match: (ANDshiftLL x (MOVWconst [c]) [d]) // cond: - // result: (ANDconst x [int64(uint32(c)<>uint64(d))]) + // result: (ANDconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt _ = v.Args[1] @@ -3678,7 +3948,7 @@ func rewriteValueARM_OpARMANDshiftRL_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpARMANDconst) - v.AuxInt = int64(uint32(c) >> uint64(d)) + v.AuxInt = int64(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -3750,6 +4020,40 @@ func rewriteValueARM_OpARMANDshiftRLreg_0(v *Value) bool { } return false } +func rewriteValueARM_OpARMBFX_0(v *Value) bool { + // match: (BFX [c] (MOVWconst [d])) + // cond: + // result: (MOVWconst [int64(int32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8)))]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + d := v_0.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = int64(int32(d) << (32 - uint32(c&0xff) - uint32(c>>8)) >> (32 - uint32(c>>8))) + return true + } + return false +} +func rewriteValueARM_OpARMBFXU_0(v *Value) bool { + // match: (BFXU [c] (MOVWconst [d])) + // cond: + // result: (MOVWconst [int64(uint32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8)))]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + d := v_0.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = int64(uint32(d) << (32 - uint32(c&0xff) - uint32(c>>8)) >> (32 - uint32(c>>8))) + return true + } + return false +} func rewriteValueARM_OpARMBIC_0(v *Value) bool { // match: (BIC x (MOVWconst [c])) // cond: @@ -3921,7 +4225,7 @@ func rewriteValueARM_OpARMBICconst_0(v *Value) bool { } // 
match: (BICconst [c] x) // cond: !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) - // result: (ANDconst [int64(^uint32(c))] x) + // result: (ANDconst [int64(int32(^uint32(c)))] x) for { c := v.AuxInt x := v.Args[0] @@ -3929,7 +4233,7 @@ func rewriteValueARM_OpARMBICconst_0(v *Value) bool { break } v.reset(OpARMANDconst) - v.AuxInt = int64(^uint32(c)) + v.AuxInt = int64(int32(^uint32(c))) v.AddArg(x) return true } @@ -3968,7 +4272,7 @@ func rewriteValueARM_OpARMBICconst_0(v *Value) bool { func rewriteValueARM_OpARMBICshiftLL_0(v *Value) bool { // match: (BICshiftLL x (MOVWconst [c]) [d]) // cond: - // result: (BICconst x [int64(uint32(c)<>uint64(d))]) + // result: (BICconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt _ = v.Args[1] @@ -4105,7 +4409,7 @@ func rewriteValueARM_OpARMBICshiftRL_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpARMBICconst) - v.AuxInt = int64(uint32(c) >> uint64(d)) + v.AuxInt = int64(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -4154,6 +4458,642 @@ func rewriteValueARM_OpARMBICshiftRLreg_0(v *Value) bool { } return false } +func rewriteValueARM_OpARMCMN_0(v *Value) bool { + // match: (CMN x (MOVWconst [c])) + // cond: + // result: (CMNconst [c] x) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMCMNconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (CMN (MOVWconst [c]) x) + // cond: + // result: (CMNconst [c] x) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMCMNconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (CMN x (SLLconst [c] y)) + // cond: + // result: (CMNshiftLL x y [c]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSLLconst { + break + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMCMNshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } 
+ // match: (CMN (SLLconst [c] y) x) + // cond: + // result: (CMNshiftLL x y [c]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSLLconst { + break + } + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMCMNshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (CMN x (SRLconst [c] y)) + // cond: + // result: (CMNshiftRL x y [c]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSRLconst { + break + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMCMNshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (CMN (SRLconst [c] y) x) + // cond: + // result: (CMNshiftRL x y [c]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRLconst { + break + } + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMCMNshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (CMN x (SRAconst [c] y)) + // cond: + // result: (CMNshiftRA x y [c]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSRAconst { + break + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMCMNshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (CMN (SRAconst [c] y) x) + // cond: + // result: (CMNshiftRA x y [c]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRAconst { + break + } + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMCMNshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (CMN x (SLL y z)) + // cond: + // result: (CMNshiftLLreg x y z) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSLL { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMCMNshiftLLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + // match: (CMN (SLL y z) x) + // cond: + // result: (CMNshiftLLreg x y z) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if 
v_0.Op != OpARMSLL { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpARMCMNshiftLLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + return false +} +func rewriteValueARM_OpARMCMN_10(v *Value) bool { + // match: (CMN x (SRL y z)) + // cond: + // result: (CMNshiftRLreg x y z) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSRL { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMCMNshiftRLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + // match: (CMN (SRL y z) x) + // cond: + // result: (CMNshiftRLreg x y z) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRL { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpARMCMNshiftRLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + // match: (CMN x (SRA y z)) + // cond: + // result: (CMNshiftRAreg x y z) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSRA { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMCMNshiftRAreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + // match: (CMN (SRA y z) x) + // cond: + // result: (CMNshiftRAreg x y z) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRA { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpARMCMNshiftRAreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + // match: (CMN x (RSBconst [0] y)) + // cond: + // result: (CMP x y) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMRSBconst { + break + } + if v_1.AuxInt != 0 { + break + } + y := v_1.Args[0] + v.reset(OpARMCMP) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (CMN (RSBconst [0] y) x) + // cond: + // result: (CMP x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMRSBconst { + break + } + 
if v_0.AuxInt != 0 { + break + } + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMCMP) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM_OpARMCMNconst_0(v *Value) bool { + // match: (CMNconst (MOVWconst [x]) [y]) + // cond: int32(x)==int32(-y) + // result: (FlagEQ) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + if !(int32(x) == int32(-y)) { + break + } + v.reset(OpARMFlagEQ) + return true + } + // match: (CMNconst (MOVWconst [x]) [y]) + // cond: int32(x)uint32(-y) + // result: (FlagLT_UGT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + if !(int32(x) < int32(-y) && uint32(x) > uint32(-y)) { + break + } + v.reset(OpARMFlagLT_UGT) + return true + } + // match: (CMNconst (MOVWconst [x]) [y]) + // cond: int32(x)>int32(-y) && uint32(x) int32(-y) && uint32(x) < uint32(-y)) { + break + } + v.reset(OpARMFlagGT_ULT) + return true + } + // match: (CMNconst (MOVWconst [x]) [y]) + // cond: int32(x)>int32(-y) && uint32(x)>uint32(-y) + // result: (FlagGT_UGT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + if !(int32(x) > int32(-y) && uint32(x) > uint32(-y)) { + break + } + v.reset(OpARMFlagGT_UGT) + return true + } + return false +} +func rewriteValueARM_OpARMCMNshiftLL_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMNshiftLL (MOVWconst [c]) x [d]) + // cond: + // result: (CMNconst [c] (SLLconst x [d])) + for { + d := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMCMNconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMNshiftLL x (MOVWconst [c]) [d]) + // cond: + // result: (CMNconst x [int64(int32(uint32(c)< x y)) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != 
OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMCMNconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (CMNshiftLLreg x y (MOVWconst [c])) + // cond: + // result: (CMNshiftLL x y [c]) + for { + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { + break + } + c := v_2.AuxInt + v.reset(OpARMCMNshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM_OpARMCMNshiftRA_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMNshiftRA (MOVWconst [c]) x [d]) + // cond: + // result: (CMNconst [c] (SRAconst x [d])) + for { + d := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMCMNconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMNshiftRA x (MOVWconst [c]) [d]) + // cond: + // result: (CMNconst x [int64(int32(c)>>uint64(d))]) + for { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMCMNconst) + v.AuxInt = int64(int32(c) >> uint64(d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMCMNshiftRAreg_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMNshiftRAreg (MOVWconst [c]) x y) + // cond: + // result: (CMNconst [c] (SRA x y)) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMCMNconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (CMNshiftRAreg x y (MOVWconst [c])) + // cond: + // result: (CMNshiftRA x y [c]) + for { + _ = v.Args[2] + x 
:= v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { + break + } + c := v_2.AuxInt + v.reset(OpARMCMNshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM_OpARMCMNshiftRL_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMNshiftRL (MOVWconst [c]) x [d]) + // cond: + // result: (CMNconst [c] (SRLconst x [d])) + for { + d := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMCMNconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (CMNshiftRL x (MOVWconst [c]) [d]) + // cond: + // result: (CMNconst x [int64(int32(uint32(c)>>uint64(d)))]) + for { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMCMNconst) + v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMCMNshiftRLreg_0(v *Value) bool { + b := v.Block + _ = b + // match: (CMNshiftRLreg (MOVWconst [c]) x y) + // cond: + // result: (CMNconst [c] (SRL x y)) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMCMNconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (CMNshiftRLreg x y (MOVWconst [c])) + // cond: + // result: (CMNshiftRL x y [c]) + for { + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { + break + } + c := v_2.AuxInt + v.reset(OpARMCMNshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} func rewriteValueARM_OpARMCMOVWHSconst_0(v *Value) bool { // match: (CMOVWHSconst _ (FlagEQ) [c]) // cond: @@ -4616,6 
+5556,25 @@ func rewriteValueARM_OpARMCMP_10(v *Value) bool { v.AddArg(v0) return true } + // match: (CMP x (RSBconst [0] y)) + // cond: + // result: (CMN x y) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMRSBconst { + break + } + if v_1.AuxInt != 0 { + break + } + y := v_1.Args[0] + v.reset(OpARMCMN) + v.AddArg(x) + v.AddArg(y) + return true + } return false } func rewriteValueARM_OpARMCMPD_0(v *Value) bool { @@ -4830,7 +5789,7 @@ func rewriteValueARM_OpARMCMPshiftLL_0(v *Value) bool { } // match: (CMPshiftLL x (MOVWconst [c]) [d]) // cond: - // result: (CMPconst x [int64(uint32(c)<>uint64(d))]) + // result: (CMPconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt _ = v.Args[1] @@ -5019,7 +5978,7 @@ func rewriteValueARM_OpARMCMPshiftRL_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpARMCMPconst) - v.AuxInt = int64(uint32(c) >> uint64(d)) + v.AuxInt = int64(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -5755,6 +6714,10 @@ func rewriteValueARM_OpARMLessThanU_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVBUload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: // result: (MOVBUload [off1+off2] {sym} ptr mem) @@ -5846,6 +6809,95 @@ func rewriteValueARM_OpARMMOVBUload_0(v *Value) bool { v.AddArg(x) return true } + // match: (MOVBUload [0] {sym} (ADD ptr idx) mem) + // cond: sym == nil && !config.nacl + // result: (MOVBUloadidx ptr idx mem) + for { + if v.AuxInt != 0 { + break + } + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMADD { + break + } + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(sym == nil && !config.nacl) { + break + } + v.reset(OpARMMOVBUloadidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVBUloadidx_0(v *Value) bool { + // match: (MOVBUloadidx ptr idx 
(MOVBstoreidx ptr2 idx x _)) + // cond: isSamePtr(ptr, ptr2) + // result: (MOVBUreg x) + for { + _ = v.Args[2] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVBstoreidx { + break + } + _ = v_2.Args[3] + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] { + break + } + x := v_2.Args[2] + if !(isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARMMOVBUreg) + v.AddArg(x) + return true + } + // match: (MOVBUloadidx ptr (MOVWconst [c]) mem) + // cond: + // result: (MOVBUload [c] ptr mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARMMOVBUload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBUloadidx (MOVWconst [c]) ptr mem) + // cond: + // result: (MOVBUload [c] ptr mem) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVBUload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } return false } func rewriteValueARM_OpARMMOVBUreg_0(v *Value) bool { @@ -5905,6 +6957,10 @@ func rewriteValueARM_OpARMMOVBUreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVBload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: // result: (MOVBload [off1+off2] {sym} ptr mem) @@ -5996,6 +7052,95 @@ func rewriteValueARM_OpARMMOVBload_0(v *Value) bool { v.AddArg(x) return true } + // match: (MOVBload [0] {sym} (ADD ptr idx) mem) + // cond: sym == nil && !config.nacl + // result: (MOVBloadidx ptr idx mem) + for { + if v.AuxInt != 0 { + break + } + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMADD { + break + } + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(sym == nil && !config.nacl) { + break + } + v.reset(OpARMMOVBloadidx) + 
v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVBloadidx_0(v *Value) bool { + // match: (MOVBloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) + // cond: isSamePtr(ptr, ptr2) + // result: (MOVBreg x) + for { + _ = v.Args[2] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVBstoreidx { + break + } + _ = v_2.Args[3] + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] { + break + } + x := v_2.Args[2] + if !(isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARMMOVBreg) + v.AddArg(x) + return true + } + // match: (MOVBloadidx ptr (MOVWconst [c]) mem) + // cond: + // result: (MOVBload [c] ptr mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARMMOVBload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVBloadidx (MOVWconst [c]) ptr mem) + // cond: + // result: (MOVBload [c] ptr mem) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVBload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } return false } func rewriteValueARM_OpARMMOVBreg_0(v *Value) bool { @@ -6058,6 +7203,10 @@ func rewriteValueARM_OpARMMOVBreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: // result: (MOVBstore [off1+off2] {sym} ptr val mem) @@ -6219,6 +7368,77 @@ func rewriteValueARM_OpARMMOVBstore_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVBstore [0] {sym} (ADD ptr idx) val mem) + // cond: sym == nil && !config.nacl + // result: (MOVBstoreidx ptr idx val mem) + for { + if v.AuxInt != 0 { + break + } + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != 
OpARMADD { + break + } + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(sym == nil && !config.nacl) { + break + } + v.reset(OpARMMOVBstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVBstoreidx_0(v *Value) bool { + // match: (MOVBstoreidx ptr (MOVWconst [c]) val mem) + // cond: + // result: (MOVBstore [c] ptr val mem) + for { + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARMMOVBstore) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstoreidx (MOVWconst [c]) ptr val mem) + // cond: + // result: (MOVBstore [c] ptr val mem) + for { + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + ptr := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARMMOVBstore) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } return false } func rewriteValueARM_OpARMMOVDload_0(v *Value) bool { @@ -6564,6 +7784,10 @@ func rewriteValueARM_OpARMMOVFstore_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: // result: (MOVHUload [off1+off2] {sym} ptr mem) @@ -6655,6 +7879,95 @@ func rewriteValueARM_OpARMMOVHUload_0(v *Value) bool { v.AddArg(x) return true } + // match: (MOVHUload [0] {sym} (ADD ptr idx) mem) + // cond: sym == nil && !config.nacl + // result: (MOVHUloadidx ptr idx mem) + for { + if v.AuxInt != 0 { + break + } + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMADD { + break + } + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(sym == nil && !config.nacl) { + break + } 
+ v.reset(OpARMMOVHUloadidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVHUloadidx_0(v *Value) bool { + // match: (MOVHUloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) + // cond: isSamePtr(ptr, ptr2) + // result: (MOVHUreg x) + for { + _ = v.Args[2] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVHstoreidx { + break + } + _ = v_2.Args[3] + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] { + break + } + x := v_2.Args[2] + if !(isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARMMOVHUreg) + v.AddArg(x) + return true + } + // match: (MOVHUloadidx ptr (MOVWconst [c]) mem) + // cond: + // result: (MOVHUload [c] ptr mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARMMOVHUload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVHUloadidx (MOVWconst [c]) ptr mem) + // cond: + // result: (MOVHUload [c] ptr mem) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVHUload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } return false } func rewriteValueARM_OpARMMOVHUreg_0(v *Value) bool { @@ -6739,6 +8052,10 @@ func rewriteValueARM_OpARMMOVHUreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVHload_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: // result: (MOVHload [off1+off2] {sym} ptr mem) @@ -6830,6 +8147,95 @@ func rewriteValueARM_OpARMMOVHload_0(v *Value) bool { v.AddArg(x) return true } + // match: (MOVHload [0] {sym} (ADD ptr idx) mem) + // cond: sym == nil && !config.nacl + // result: (MOVHloadidx ptr idx mem) + for { + if v.AuxInt != 0 { + break + } + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] 
+ if v_0.Op != OpARMADD { + break + } + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(sym == nil && !config.nacl) { + break + } + v.reset(OpARMMOVHloadidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVHloadidx_0(v *Value) bool { + // match: (MOVHloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) + // cond: isSamePtr(ptr, ptr2) + // result: (MOVHreg x) + for { + _ = v.Args[2] + ptr := v.Args[0] + idx := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVHstoreidx { + break + } + _ = v_2.Args[3] + ptr2 := v_2.Args[0] + if idx != v_2.Args[1] { + break + } + x := v_2.Args[2] + if !(isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpARMMOVHreg) + v.AddArg(x) + return true + } + // match: (MOVHloadidx ptr (MOVWconst [c]) mem) + // cond: + // result: (MOVHload [c] ptr mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + v.reset(OpARMMOVHload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVHloadidx (MOVWconst [c]) ptr mem) + // cond: + // result: (MOVHload [c] ptr mem) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + ptr := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVHload) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } return false } func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { @@ -6942,6 +8348,10 @@ func rewriteValueARM_OpARMMOVHreg_0(v *Value) bool { return false } func rewriteValueARM_OpARMMOVHstore_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: // result: (MOVHstore [off1+off2] {sym} ptr val mem) @@ -7059,6 +8469,77 @@ func rewriteValueARM_OpARMMOVHstore_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVHstore [0] {sym} (ADD ptr idx) val 
mem) + // cond: sym == nil && !config.nacl + // result: (MOVHstoreidx ptr idx val mem) + for { + if v.AuxInt != 0 { + break + } + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMADD { + break + } + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + val := v.Args[1] + mem := v.Args[2] + if !(sym == nil && !config.nacl) { + break + } + v.reset(OpARMMOVHstoreidx) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM_OpARMMOVHstoreidx_0(v *Value) bool { + // match: (MOVHstoreidx ptr (MOVWconst [c]) val mem) + // cond: + // result: (MOVHstore [c] ptr val mem) + for { + _ = v.Args[3] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARMMOVHstore) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVHstoreidx (MOVWconst [c]) ptr val mem) + // cond: + // result: (MOVHstore [c] ptr val mem) + for { + _ = v.Args[3] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + ptr := v.Args[1] + val := v.Args[2] + mem := v.Args[3] + v.reset(OpARMMOVHstore) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } return false } func rewriteValueARM_OpARMMOVWload_0(v *Value) bool { @@ -9037,6 +10518,594 @@ func rewriteValueARM_OpARMMULA_20(v *Value) bool { } return false } +func rewriteValueARM_OpARMMULD_0(v *Value) bool { + // match: (MULD (NEGD x) y) + // cond: objabi.GOARM >= 6 + // result: (NMULD x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMNEGD { + break + } + x := v_0.Args[0] + y := v.Args[1] + if !(objabi.GOARM >= 6) { + break + } + v.reset(OpARMNMULD) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (MULD y (NEGD x)) + // cond: objabi.GOARM >= 6 + // result: (NMULD x y) + for { + _ = v.Args[1] + y := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMNEGD { + 
break + } + x := v_1.Args[0] + if !(objabi.GOARM >= 6) { + break + } + v.reset(OpARMNMULD) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM_OpARMMULF_0(v *Value) bool { + // match: (MULF (NEGF x) y) + // cond: objabi.GOARM >= 6 + // result: (NMULF x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMNEGF { + break + } + x := v_0.Args[0] + y := v.Args[1] + if !(objabi.GOARM >= 6) { + break + } + v.reset(OpARMNMULF) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (MULF y (NEGF x)) + // cond: objabi.GOARM >= 6 + // result: (NMULF x y) + for { + _ = v.Args[1] + y := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMNEGF { + break + } + x := v_1.Args[0] + if !(objabi.GOARM >= 6) { + break + } + v.reset(OpARMNMULF) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM_OpARMMULS_0(v *Value) bool { + b := v.Block + _ = b + // match: (MULS x (MOVWconst [c]) a) + // cond: int32(c) == -1 + // result: (ADD a x) + for { + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + a := v.Args[2] + if !(int32(c) == -1) { + break + } + v.reset(OpARMADD) + v.AddArg(a) + v.AddArg(x) + return true + } + // match: (MULS _ (MOVWconst [0]) a) + // cond: + // result: a + for { + _ = v.Args[2] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + if v_1.AuxInt != 0 { + break + } + a := v.Args[2] + v.reset(OpCopy) + v.Type = a.Type + v.AddArg(a) + return true + } + // match: (MULS x (MOVWconst [1]) a) + // cond: + // result: (RSB x a) + for { + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + if v_1.AuxInt != 1 { + break + } + a := v.Args[2] + v.reset(OpARMRSB) + v.AddArg(x) + v.AddArg(a) + return true + } + // match: (MULS x (MOVWconst [c]) a) + // cond: isPowerOfTwo(c) + // result: (RSB (SLLconst [log2(c)] x) a) + for { + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != 
OpARMMOVWconst { + break + } + c := v_1.AuxInt + a := v.Args[2] + if !(isPowerOfTwo(c)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(a) + return true + } + // match: (MULS x (MOVWconst [c]) a) + // cond: isPowerOfTwo(c-1) && int32(c) >= 3 + // result: (RSB (ADDshiftLL x x [log2(c-1)]) a) + for { + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + a := v.Args[2] + if !(isPowerOfTwo(c-1) && int32(c) >= 3) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(a) + return true + } + // match: (MULS x (MOVWconst [c]) a) + // cond: isPowerOfTwo(c+1) && int32(c) >= 7 + // result: (RSB (RSBshiftLL x x [log2(c+1)]) a) + for { + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + a := v.Args[2] + if !(isPowerOfTwo(c+1) && int32(c) >= 7) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) + v0.AuxInt = log2(c + 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(a) + return true + } + // match: (MULS x (MOVWconst [c]) a) + // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) + // result: (RSB (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) a) + for { + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + a := v.Args[2] + if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = log2(c / 3) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = 1 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + v.AddArg(a) + return true + } + // match: (MULS x (MOVWconst [c]) a) + // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) + // result: 
(RSB (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) a) + for { + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + a := v.Args[2] + if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = log2(c / 5) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = 2 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + v.AddArg(a) + return true + } + // match: (MULS x (MOVWconst [c]) a) + // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) + // result: (RSB (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) a) + for { + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + a := v.Args[2] + if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = log2(c / 7) + v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) + v1.AuxInt = 3 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + v.AddArg(a) + return true + } + // match: (MULS x (MOVWconst [c]) a) + // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) + // result: (RSB (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) a) + for { + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + a := v.Args[2] + if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = log2(c / 9) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = 3 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + v.AddArg(a) + return true + } + return false +} +func rewriteValueARM_OpARMMULS_10(v *Value) bool { + b := v.Block + _ = b + // match: (MULS (MOVWconst [c]) x a) + // cond: int32(c) == -1 + // result: (ADD a x) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != 
OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + a := v.Args[2] + if !(int32(c) == -1) { + break + } + v.reset(OpARMADD) + v.AddArg(a) + v.AddArg(x) + return true + } + // match: (MULS (MOVWconst [0]) _ a) + // cond: + // result: a + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + if v_0.AuxInt != 0 { + break + } + a := v.Args[2] + v.reset(OpCopy) + v.Type = a.Type + v.AddArg(a) + return true + } + // match: (MULS (MOVWconst [1]) x a) + // cond: + // result: (RSB x a) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + if v_0.AuxInt != 1 { + break + } + x := v.Args[1] + a := v.Args[2] + v.reset(OpARMRSB) + v.AddArg(x) + v.AddArg(a) + return true + } + // match: (MULS (MOVWconst [c]) x a) + // cond: isPowerOfTwo(c) + // result: (RSB (SLLconst [log2(c)] x) a) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + a := v.Args[2] + if !(isPowerOfTwo(c)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(a) + return true + } + // match: (MULS (MOVWconst [c]) x a) + // cond: isPowerOfTwo(c-1) && int32(c) >= 3 + // result: (RSB (ADDshiftLL x x [log2(c-1)]) a) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + a := v.Args[2] + if !(isPowerOfTwo(c-1) && int32(c) >= 3) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v0.AuxInt = log2(c - 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(a) + return true + } + // match: (MULS (MOVWconst [c]) x a) + // cond: isPowerOfTwo(c+1) && int32(c) >= 7 + // result: (RSB (RSBshiftLL x x [log2(c+1)]) a) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + a := v.Args[2] + if !(isPowerOfTwo(c+1) && 
int32(c) >= 7) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) + v0.AuxInt = log2(c + 1) + v0.AddArg(x) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(a) + return true + } + // match: (MULS (MOVWconst [c]) x a) + // cond: c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) + // result: (RSB (SLLconst [log2(c/3)] (ADDshiftLL x x [1])) a) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + a := v.Args[2] + if !(c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = log2(c / 3) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = 1 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + v.AddArg(a) + return true + } + // match: (MULS (MOVWconst [c]) x a) + // cond: c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) + // result: (RSB (SLLconst [log2(c/5)] (ADDshiftLL x x [2])) a) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + a := v.Args[2] + if !(c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = log2(c / 5) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = 2 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + v.AddArg(a) + return true + } + // match: (MULS (MOVWconst [c]) x a) + // cond: c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) + // result: (RSB (SLLconst [log2(c/7)] (RSBshiftLL x x [3])) a) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + a := v.Args[2] + if !(c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = log2(c / 7) + v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type) + v1.AuxInt = 3 + v1.AddArg(x) + 
v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + v.AddArg(a) + return true + } + // match: (MULS (MOVWconst [c]) x a) + // cond: c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) + // result: (RSB (SLLconst [log2(c/9)] (ADDshiftLL x x [3])) a) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + a := v.Args[2] + if !(c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c)) { + break + } + v.reset(OpARMRSB) + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = log2(c / 9) + v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type) + v1.AuxInt = 3 + v1.AddArg(x) + v1.AddArg(x) + v0.AddArg(v1) + v.AddArg(v0) + v.AddArg(a) + return true + } + return false +} +func rewriteValueARM_OpARMMULS_20(v *Value) bool { + // match: (MULS (MOVWconst [c]) (MOVWconst [d]) a) + // cond: + // result: (SUBconst [int64(int32(c*d))] a) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + d := v_1.AuxInt + a := v.Args[2] + v.reset(OpARMSUBconst) + v.AuxInt = int64(int32(c * d)) + v.AddArg(a) + return true + } + return false +} func rewriteValueARM_OpARMMVN_0(v *Value) bool { // match: (MVN (MOVWconst [c])) // cond: @@ -9254,6 +11323,120 @@ func rewriteValueARM_OpARMMVNshiftRLreg_0(v *Value) bool { } return false } +func rewriteValueARM_OpARMNEGD_0(v *Value) bool { + // match: (NEGD (MULD x y)) + // cond: objabi.GOARM >= 6 + // result: (NMULD x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMULD { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + if !(objabi.GOARM >= 6) { + break + } + v.reset(OpARMNMULD) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM_OpARMNEGF_0(v *Value) bool { + // match: (NEGF (MULF x y)) + // cond: objabi.GOARM >= 6 + // result: (NMULF x y) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMULF { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] 
+ y := v_0.Args[1] + if !(objabi.GOARM >= 6) { + break + } + v.reset(OpARMNMULF) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM_OpARMNMULD_0(v *Value) bool { + // match: (NMULD (NEGD x) y) + // cond: + // result: (MULD x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMNEGD { + break + } + x := v_0.Args[0] + y := v.Args[1] + v.reset(OpARMMULD) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (NMULD y (NEGD x)) + // cond: + // result: (MULD x y) + for { + _ = v.Args[1] + y := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMNEGD { + break + } + x := v_1.Args[0] + v.reset(OpARMMULD) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM_OpARMNMULF_0(v *Value) bool { + // match: (NMULF (NEGF x) y) + // cond: + // result: (MULF x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMNEGF { + break + } + x := v_0.Args[0] + y := v.Args[1] + v.reset(OpARMMULF) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (NMULF y (NEGF x)) + // cond: + // result: (MULF x y) + for { + _ = v.Args[1] + y := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMNEGF { + break + } + x := v_1.Args[0] + v.reset(OpARMMULF) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} func rewriteValueARM_OpARMNotEqual_0(v *Value) bool { // match: (NotEqual (FlagEQ)) // cond: @@ -9687,7 +11870,7 @@ func rewriteValueARM_OpARMORshiftLL_0(v *Value) bool { } // match: (ORshiftLL x (MOVWconst [c]) [d]) // cond: - // result: (ORconst x [int64(uint32(c)<>uint64(d))]) + // result: (ORconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt _ = v.Args[1] @@ -9936,7 +12119,7 @@ func rewriteValueARM_OpARMORshiftRL_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpARMORconst) - v.AuxInt = int64(uint32(c) >> uint64(d)) + v.AuxInt = int64(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -10301,6 +12484,28 @@ func rewriteValueARM_OpARMRSB_10(v *Value) bool { v.AuxInt = 0 return 
true } + // match: (RSB (MUL x y) a) + // cond: objabi.GOARM == 7 + // result: (MULS x y a) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMUL { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + a := v.Args[1] + if !(objabi.GOARM == 7) { + break + } + v.reset(OpARMMULS) + v.AddArg(x) + v.AddArg(y) + v.AddArg(a) + return true + } return false } func rewriteValueARM_OpARMRSBSshiftLL_0(v *Value) bool { @@ -10328,7 +12533,7 @@ func rewriteValueARM_OpARMRSBSshiftLL_0(v *Value) bool { } // match: (RSBSshiftLL x (MOVWconst [c]) [d]) // cond: - // result: (RSBSconst x [int64(uint32(c)<>uint64(d))]) + // result: (RSBSconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt _ = v.Args[1] @@ -10509,7 +12714,7 @@ func rewriteValueARM_OpARMRSBSshiftRL_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpARMRSBSconst) - v.AuxInt = int64(uint32(c) >> uint64(d)) + v.AuxInt = int64(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -10648,7 +12853,7 @@ func rewriteValueARM_OpARMRSBshiftLL_0(v *Value) bool { } // match: (RSBshiftLL x (MOVWconst [c]) [d]) // cond: - // result: (RSBconst x [int64(uint32(c)<>uint64(d))]) + // result: (RSBconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt _ = v.Args[1] @@ -10873,7 +13078,7 @@ func rewriteValueARM_OpARMRSBshiftRL_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpARMRSBconst) - v.AuxInt = int64(uint32(c) >> uint64(d)) + v.AuxInt = int64(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -11012,7 +13217,7 @@ func rewriteValueARM_OpARMRSCshiftLL_0(v *Value) bool { } // match: (RSCshiftLL x (MOVWconst [c]) [d] flags) // cond: - // result: (RSCconst x [int64(uint32(c)<>uint64(d))] flags) + // result: (RSCconst x [int64(int32(uint32(c)>>uint64(d)))] flags) for { d := v.AuxInt _ = v.Args[2] @@ -11210,7 +13415,7 @@ func rewriteValueARM_OpARMRSCshiftRL_0(v *Value) bool { c := v_1.AuxInt flags := v.Args[2] v.reset(OpARMRSCconst) - v.AuxInt = int64(uint32(c) >> uint64(d)) + 
v.AuxInt = int64(int32(uint32(c) >> uint64(d))) v.AddArg(x) v.AddArg(flags) return true @@ -11620,7 +13825,7 @@ func rewriteValueARM_OpARMSBCshiftLL_0(v *Value) bool { } // match: (SBCshiftLL x (MOVWconst [c]) [d] flags) // cond: - // result: (SBCconst x [int64(uint32(c)<>uint64(d))] flags) + // result: (SBCconst x [int64(int32(uint32(c)>>uint64(d)))] flags) for { d := v.AuxInt _ = v.Args[2] @@ -11818,7 +14023,7 @@ func rewriteValueARM_OpARMSBCshiftRL_0(v *Value) bool { c := v_1.AuxInt flags := v.Args[2] v.reset(OpARMSBCconst) - v.AuxInt = int64(uint32(c) >> uint64(d)) + v.AuxInt = int64(int32(uint32(c) >> uint64(d))) v.AddArg(x) v.AddArg(flags) return true @@ -12022,6 +14227,25 @@ func rewriteValueARM_OpARMSRAconst_0(v *Value) bool { v.AuxInt = int64(int32(d) >> uint64(c)) return true } + // match: (SRAconst (SLLconst x [c]) [d]) + // cond: objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 + // result: (BFX [(d-c)|(32-d)<<8] x) + for { + d := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMSLLconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if !(objabi.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) { + break + } + v.reset(OpARMBFX) + v.AuxInt = (d - c) | (32-d)<<8 + v.AddArg(x) + return true + } return false } func rewriteValueARM_OpARMSRL_0(v *Value) bool { @@ -12058,6 +14282,25 @@ func rewriteValueARM_OpARMSRLconst_0(v *Value) bool { v.AuxInt = int64(uint32(d) >> uint64(c)) return true } + // match: (SRLconst (SLLconst x [c]) [d]) + // cond: objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 + // result: (BFXU [(d-c)|(32-d)<<8] x) + for { + d := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMSLLconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if !(objabi.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) { + break + } + v.reset(OpARMBFXU) + v.AuxInt = (d - c) | (32-d)<<8 + v.AddArg(x) + return true + } return false } func rewriteValueARM_OpARMSUB_0(v *Value) bool { @@ -12331,6 +14574,122 @@ func 
rewriteValueARM_OpARMSUB_10(v *Value) bool { v.AuxInt = 0 return true } + // match: (SUB a (MUL x y)) + // cond: objabi.GOARM == 7 + // result: (MULS x y a) + for { + _ = v.Args[1] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMUL { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + y := v_1.Args[1] + if !(objabi.GOARM == 7) { + break + } + v.reset(OpARMMULS) + v.AddArg(x) + v.AddArg(y) + v.AddArg(a) + return true + } + return false +} +func rewriteValueARM_OpARMSUBD_0(v *Value) bool { + // match: (SUBD a (MULD x y)) + // cond: a.Uses == 1 && objabi.GOARM >= 6 + // result: (MULSD a x y) + for { + _ = v.Args[1] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMULD { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + y := v_1.Args[1] + if !(a.Uses == 1 && objabi.GOARM >= 6) { + break + } + v.reset(OpARMMULSD) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (SUBD a (NMULD x y)) + // cond: a.Uses == 1 && objabi.GOARM >= 6 + // result: (MULAD a x y) + for { + _ = v.Args[1] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMNMULD { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + y := v_1.Args[1] + if !(a.Uses == 1 && objabi.GOARM >= 6) { + break + } + v.reset(OpARMMULAD) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM_OpARMSUBF_0(v *Value) bool { + // match: (SUBF a (MULF x y)) + // cond: a.Uses == 1 && objabi.GOARM >= 6 + // result: (MULSF a x y) + for { + _ = v.Args[1] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMULF { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + y := v_1.Args[1] + if !(a.Uses == 1 && objabi.GOARM >= 6) { + break + } + v.reset(OpARMMULSF) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (SUBF a (NMULF x y)) + // cond: a.Uses == 1 && objabi.GOARM >= 6 + // result: (MULAF a x y) + for { + _ = v.Args[1] + a := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMNMULF { + break + } + _ = v_1.Args[1] + x := v_1.Args[0] + 
y := v_1.Args[1] + if !(a.Uses == 1 && objabi.GOARM >= 6) { + break + } + v.reset(OpARMMULAF) + v.AddArg(a) + v.AddArg(x) + v.AddArg(y) + return true + } return false } func rewriteValueARM_OpARMSUBS_0(v *Value) bool { @@ -12602,7 +14961,7 @@ func rewriteValueARM_OpARMSUBSshiftLL_0(v *Value) bool { } // match: (SUBSshiftLL x (MOVWconst [c]) [d]) // cond: - // result: (SUBSconst x [int64(uint32(c)<>uint64(d))]) + // result: (SUBSconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt _ = v.Args[1] @@ -12783,7 +15142,7 @@ func rewriteValueARM_OpARMSUBSshiftRL_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpARMSUBSconst) - v.AuxInt = int64(uint32(c) >> uint64(d)) + v.AuxInt = int64(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -12967,7 +15326,7 @@ func rewriteValueARM_OpARMSUBshiftLL_0(v *Value) bool { } // match: (SUBshiftLL x (MOVWconst [c]) [d]) // cond: - // result: (SUBconst x [int64(uint32(c)<>uint64(d))]) + // result: (SUBconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt _ = v.Args[1] @@ -13192,7 +15551,7 @@ func rewriteValueARM_OpARMSUBshiftRL_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpARMSUBconst) - v.AuxInt = int64(uint32(c) >> uint64(d)) + v.AuxInt = int64(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -13263,6 +15622,1138 @@ func rewriteValueARM_OpARMSUBshiftRLreg_0(v *Value) bool { } return false } +func rewriteValueARM_OpARMTEQ_0(v *Value) bool { + // match: (TEQ x (MOVWconst [c])) + // cond: + // result: (TEQconst [c] x) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMTEQconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (TEQ (MOVWconst [c]) x) + // cond: + // result: (TEQconst [c] x) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMTEQconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (TEQ x 
(SLLconst [c] y)) + // cond: + // result: (TEQshiftLL x y [c]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSLLconst { + break + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMTEQshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (TEQ (SLLconst [c] y) x) + // cond: + // result: (TEQshiftLL x y [c]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSLLconst { + break + } + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMTEQshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (TEQ x (SRLconst [c] y)) + // cond: + // result: (TEQshiftRL x y [c]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSRLconst { + break + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMTEQshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (TEQ (SRLconst [c] y) x) + // cond: + // result: (TEQshiftRL x y [c]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRLconst { + break + } + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMTEQshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (TEQ x (SRAconst [c] y)) + // cond: + // result: (TEQshiftRA x y [c]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSRAconst { + break + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMTEQshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (TEQ (SRAconst [c] y) x) + // cond: + // result: (TEQshiftRA x y [c]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRAconst { + break + } + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMTEQshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (TEQ x (SLL y z)) + // cond: + // result: (TEQshiftLLreg x y z) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != 
OpARMSLL { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMTEQshiftLLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + // match: (TEQ (SLL y z) x) + // cond: + // result: (TEQshiftLLreg x y z) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSLL { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpARMTEQshiftLLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + return false +} +func rewriteValueARM_OpARMTEQ_10(v *Value) bool { + // match: (TEQ x (SRL y z)) + // cond: + // result: (TEQshiftRLreg x y z) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSRL { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMTEQshiftRLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + // match: (TEQ (SRL y z) x) + // cond: + // result: (TEQshiftRLreg x y z) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRL { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpARMTEQshiftRLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + // match: (TEQ x (SRA y z)) + // cond: + // result: (TEQshiftRAreg x y z) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSRA { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMTEQshiftRAreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + // match: (TEQ (SRA y z) x) + // cond: + // result: (TEQshiftRAreg x y z) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRA { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpARMTEQshiftRAreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + return false +} +func rewriteValueARM_OpARMTEQconst_0(v *Value) bool { + // match: (TEQconst (MOVWconst [x]) [y]) + // cond: int32(x^y)==0 + // result: 
(FlagEQ) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + if !(int32(x^y) == 0) { + break + } + v.reset(OpARMFlagEQ) + return true + } + // match: (TEQconst (MOVWconst [x]) [y]) + // cond: int32(x^y)<0 + // result: (FlagLT_UGT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + if !(int32(x^y) < 0) { + break + } + v.reset(OpARMFlagLT_UGT) + return true + } + // match: (TEQconst (MOVWconst [x]) [y]) + // cond: int32(x^y)>0 + // result: (FlagGT_UGT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + if !(int32(x^y) > 0) { + break + } + v.reset(OpARMFlagGT_UGT) + return true + } + return false +} +func rewriteValueARM_OpARMTEQshiftLL_0(v *Value) bool { + b := v.Block + _ = b + // match: (TEQshiftLL (MOVWconst [c]) x [d]) + // cond: + // result: (TEQconst [c] (SLLconst x [d])) + for { + d := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMTEQconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (TEQshiftLL x (MOVWconst [c]) [d]) + // cond: + // result: (TEQconst x [int64(int32(uint32(c)< x y)) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMTEQconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (TEQshiftLLreg x y (MOVWconst [c])) + // cond: + // result: (TEQshiftLL x y [c]) + for { + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { + break + } + c := v_2.AuxInt + v.reset(OpARMTEQshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func 
rewriteValueARM_OpARMTEQshiftRA_0(v *Value) bool { + b := v.Block + _ = b + // match: (TEQshiftRA (MOVWconst [c]) x [d]) + // cond: + // result: (TEQconst [c] (SRAconst x [d])) + for { + d := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMTEQconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (TEQshiftRA x (MOVWconst [c]) [d]) + // cond: + // result: (TEQconst x [int64(int32(c)>>uint64(d))]) + for { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMTEQconst) + v.AuxInt = int64(int32(c) >> uint64(d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMTEQshiftRAreg_0(v *Value) bool { + b := v.Block + _ = b + // match: (TEQshiftRAreg (MOVWconst [c]) x y) + // cond: + // result: (TEQconst [c] (SRA x y)) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMTEQconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (TEQshiftRAreg x y (MOVWconst [c])) + // cond: + // result: (TEQshiftRA x y [c]) + for { + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { + break + } + c := v_2.AuxInt + v.reset(OpARMTEQshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM_OpARMTEQshiftRL_0(v *Value) bool { + b := v.Block + _ = b + // match: (TEQshiftRL (MOVWconst [c]) x [d]) + // cond: + // result: (TEQconst [c] (SRLconst x [d])) + for { + d := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMTEQconst) + v.AuxInt 
= c + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (TEQshiftRL x (MOVWconst [c]) [d]) + // cond: + // result: (TEQconst x [int64(int32(uint32(c)>>uint64(d)))]) + for { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMTEQconst) + v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMTEQshiftRLreg_0(v *Value) bool { + b := v.Block + _ = b + // match: (TEQshiftRLreg (MOVWconst [c]) x y) + // cond: + // result: (TEQconst [c] (SRL x y)) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMTEQconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (TEQshiftRLreg x y (MOVWconst [c])) + // cond: + // result: (TEQshiftRL x y [c]) + for { + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { + break + } + c := v_2.AuxInt + v.reset(OpARMTEQshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM_OpARMTST_0(v *Value) bool { + // match: (TST x (MOVWconst [c])) + // cond: + // result: (TSTconst [c] x) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMTSTconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (TST (MOVWconst [c]) x) + // cond: + // result: (TSTconst [c] x) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMTSTconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (TST x (SLLconst [c] y)) + // cond: + // result: (TSTshiftLL x y [c]) + for { + _ = 
v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSLLconst { + break + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMTSTshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (TST (SLLconst [c] y) x) + // cond: + // result: (TSTshiftLL x y [c]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSLLconst { + break + } + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMTSTshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (TST x (SRLconst [c] y)) + // cond: + // result: (TSTshiftRL x y [c]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSRLconst { + break + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMTSTshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (TST (SRLconst [c] y) x) + // cond: + // result: (TSTshiftRL x y [c]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRLconst { + break + } + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMTSTshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (TST x (SRAconst [c] y)) + // cond: + // result: (TSTshiftRA x y [c]) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSRAconst { + break + } + c := v_1.AuxInt + y := v_1.Args[0] + v.reset(OpARMTSTshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (TST (SRAconst [c] y) x) + // cond: + // result: (TSTshiftRA x y [c]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRAconst { + break + } + c := v_0.AuxInt + y := v_0.Args[0] + x := v.Args[1] + v.reset(OpARMTSTshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (TST x (SLL y z)) + // cond: + // result: (TSTshiftLLreg x y z) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSLL { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + 
v.reset(OpARMTSTshiftLLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + // match: (TST (SLL y z) x) + // cond: + // result: (TSTshiftLLreg x y z) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSLL { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpARMTSTshiftLLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + return false +} +func rewriteValueARM_OpARMTST_10(v *Value) bool { + // match: (TST x (SRL y z)) + // cond: + // result: (TSTshiftRLreg x y z) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSRL { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMTSTshiftRLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + // match: (TST (SRL y z) x) + // cond: + // result: (TSTshiftRLreg x y z) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRL { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpARMTSTshiftRLreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + // match: (TST x (SRA y z)) + // cond: + // result: (TSTshiftRAreg x y z) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMSRA { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + z := v_1.Args[1] + v.reset(OpARMTSTshiftRAreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + // match: (TST (SRA y z) x) + // cond: + // result: (TSTshiftRAreg x y z) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMSRA { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + z := v_0.Args[1] + x := v.Args[1] + v.reset(OpARMTSTshiftRAreg) + v.AddArg(x) + v.AddArg(y) + v.AddArg(z) + return true + } + return false +} +func rewriteValueARM_OpARMTSTconst_0(v *Value) bool { + // match: (TSTconst (MOVWconst [x]) [y]) + // cond: int32(x&y)==0 + // result: (FlagEQ) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + 
break + } + x := v_0.AuxInt + if !(int32(x&y) == 0) { + break + } + v.reset(OpARMFlagEQ) + return true + } + // match: (TSTconst (MOVWconst [x]) [y]) + // cond: int32(x&y)<0 + // result: (FlagLT_UGT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + if !(int32(x&y) < 0) { + break + } + v.reset(OpARMFlagLT_UGT) + return true + } + // match: (TSTconst (MOVWconst [x]) [y]) + // cond: int32(x&y)>0 + // result: (FlagGT_UGT) + for { + y := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + if !(int32(x&y) > 0) { + break + } + v.reset(OpARMFlagGT_UGT) + return true + } + return false +} +func rewriteValueARM_OpARMTSTshiftLL_0(v *Value) bool { + b := v.Block + _ = b + // match: (TSTshiftLL (MOVWconst [c]) x [d]) + // cond: + // result: (TSTconst [c] (SLLconst x [d])) + for { + d := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMTSTconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (TSTshiftLL x (MOVWconst [c]) [d]) + // cond: + // result: (TSTconst x [int64(int32(uint32(c)< x y)) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMTSTconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (TSTshiftLLreg x y (MOVWconst [c])) + // cond: + // result: (TSTshiftLL x y [c]) + for { + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { + break + } + c := v_2.AuxInt + v.reset(OpARMTSTshiftLL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM_OpARMTSTshiftRA_0(v *Value) bool { + b := v.Block + _ = b + // match: 
(TSTshiftRA (MOVWconst [c]) x [d]) + // cond: + // result: (TSTconst [c] (SRAconst x [d])) + for { + d := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMTSTconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (TSTshiftRA x (MOVWconst [c]) [d]) + // cond: + // result: (TSTconst x [int64(int32(c)>>uint64(d))]) + for { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMTSTconst) + v.AuxInt = int64(int32(c) >> uint64(d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMTSTshiftRAreg_0(v *Value) bool { + b := v.Block + _ = b + // match: (TSTshiftRAreg (MOVWconst [c]) x y) + // cond: + // result: (TSTconst [c] (SRA x y)) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMTSTconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (TSTshiftRAreg x y (MOVWconst [c])) + // cond: + // result: (TSTshiftRA x y [c]) + for { + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { + break + } + c := v_2.AuxInt + v.reset(OpARMTSTshiftRA) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM_OpARMTSTshiftRL_0(v *Value) bool { + b := v.Block + _ = b + // match: (TSTshiftRL (MOVWconst [c]) x [d]) + // cond: + // result: (TSTconst [c] (SRLconst x [d])) + for { + d := v.AuxInt + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMTSTconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type) + v0.AuxInt = d + v0.AddArg(x) + 
v.AddArg(v0) + return true + } + // match: (TSTshiftRL x (MOVWconst [c]) [d]) + // cond: + // result: (TSTconst x [int64(int32(uint32(c)>>uint64(d)))]) + for { + d := v.AuxInt + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMTSTconst) + v.AuxInt = int64(int32(uint32(c) >> uint64(d))) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMTSTshiftRLreg_0(v *Value) bool { + b := v.Block + _ = b + // match: (TSTshiftRLreg (MOVWconst [c]) x y) + // cond: + // result: (TSTconst [c] (SRL x y)) + for { + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + y := v.Args[2] + v.reset(OpARMTSTconst) + v.AuxInt = c + v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (TSTshiftRLreg x y (MOVWconst [c])) + // cond: + // result: (TSTshiftRL x y [c]) + for { + _ = v.Args[2] + x := v.Args[0] + y := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpARMMOVWconst { + break + } + c := v_2.AuxInt + v.reset(OpARMTSTshiftRL) + v.AuxInt = c + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} func rewriteValueARM_OpARMXOR_0(v *Value) bool { // match: (XOR x (MOVWconst [c])) // cond: @@ -13643,7 +17134,7 @@ func rewriteValueARM_OpARMXORshiftLL_0(v *Value) bool { } // match: (XORshiftLL x (MOVWconst [c]) [d]) // cond: - // result: (XORconst x [int64(uint32(c)<>uint64(d))]) + // result: (XORconst x [int64(int32(uint32(c)>>uint64(d)))]) for { d := v.AuxInt _ = v.Args[1] @@ -13890,7 +17381,7 @@ func rewriteValueARM_OpARMXORshiftRL_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpARMXORconst) - v.AuxInt = int64(uint32(c) >> uint64(d)) + v.AuxInt = int64(int32(uint32(c) >> uint64(d))) v.AddArg(x) return true } @@ -14008,7 +17499,7 @@ func rewriteValueARM_OpARMXORshiftRR_0(v *Value) bool { } // match: (XORshiftRR x (MOVWconst [c]) [d]) // cond: - // result: (XORconst 
x [int64(uint32(c)>>uint64(d)|uint32(c)<>uint64(d)|uint32(c)<>uint64(d) | uint32(c)<>uint64(d) | uint32(c)< x y) // cond: - // result: (CSELULT (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (CSELULT (SLL x (ZeroExt16to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -12622,7 +12799,7 @@ func rewriteValueARM64_OpLsh16x16_0(v *Value) bool { v1.AddArg(y) v0.AddArg(v1) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -12641,7 +12818,7 @@ func rewriteValueARM64_OpLsh16x32_0(v *Value) bool { _ = typ // match: (Lsh16x32 x y) // cond: - // result: (CSELULT (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (CSELULT (SLL x (ZeroExt32to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -12654,7 +12831,7 @@ func rewriteValueARM64_OpLsh16x32_0(v *Value) bool { v1.AddArg(y) v0.AddArg(v1) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -12669,45 +12846,9 @@ func rewriteValueARM64_OpLsh16x32_0(v *Value) bool { func rewriteValueARM64_OpLsh16x64_0(v *Value) bool { b := v.Block _ = b - // match: (Lsh16x64 x (MOVDconst [c])) - // cond: uint64(c) < 16 - // result: (SLLconst x [c]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) < 16) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Lsh16x64 _ (MOVDconst [c])) - // cond: uint64(c) >= 16 - // result: (MOVDconst [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 16) { - 
break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } // match: (Lsh16x64 x y) // cond: - // result: (CSELULT (SLL x y) (Const64 [0]) (CMPconst [64] y)) + // result: (CSELULT (SLL x y) (MOVDconst [0]) (CMPconst [64] y)) for { t := v.Type _ = v.Args[1] @@ -12718,7 +12859,7 @@ func rewriteValueARM64_OpLsh16x64_0(v *Value) bool { v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpConst64, t) + v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v1.AuxInt = 0 v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -12735,7 +12876,7 @@ func rewriteValueARM64_OpLsh16x8_0(v *Value) bool { _ = typ // match: (Lsh16x8 x y) // cond: - // result: (CSELULT (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (CSELULT (SLL x (ZeroExt8to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -12748,7 +12889,7 @@ func rewriteValueARM64_OpLsh16x8_0(v *Value) bool { v1.AddArg(y) v0.AddArg(v1) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -12767,7 +12908,7 @@ func rewriteValueARM64_OpLsh32x16_0(v *Value) bool { _ = typ // match: (Lsh32x16 x y) // cond: - // result: (CSELULT (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (CSELULT (SLL x (ZeroExt16to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -12780,7 +12921,7 @@ func rewriteValueARM64_OpLsh32x16_0(v *Value) bool { v1.AddArg(y) v0.AddArg(v1) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -12799,7 +12940,7 @@ func rewriteValueARM64_OpLsh32x32_0(v *Value) bool { _ = typ // match: (Lsh32x32 x y) // cond: - // result: (CSELULT (SLL x (ZeroExt32to64 y)) 
(Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (CSELULT (SLL x (ZeroExt32to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -12812,7 +12953,7 @@ func rewriteValueARM64_OpLsh32x32_0(v *Value) bool { v1.AddArg(y) v0.AddArg(v1) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -12827,45 +12968,9 @@ func rewriteValueARM64_OpLsh32x32_0(v *Value) bool { func rewriteValueARM64_OpLsh32x64_0(v *Value) bool { b := v.Block _ = b - // match: (Lsh32x64 x (MOVDconst [c])) - // cond: uint64(c) < 32 - // result: (SLLconst x [c]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) < 32) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Lsh32x64 _ (MOVDconst [c])) - // cond: uint64(c) >= 32 - // result: (MOVDconst [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 32) { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } // match: (Lsh32x64 x y) // cond: - // result: (CSELULT (SLL x y) (Const64 [0]) (CMPconst [64] y)) + // result: (CSELULT (SLL x y) (MOVDconst [0]) (CMPconst [64] y)) for { t := v.Type _ = v.Args[1] @@ -12876,7 +12981,7 @@ func rewriteValueARM64_OpLsh32x64_0(v *Value) bool { v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpConst64, t) + v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v1.AuxInt = 0 v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -12893,7 +12998,7 @@ func rewriteValueARM64_OpLsh32x8_0(v *Value) bool { _ = typ // match: (Lsh32x8 x y) // cond: - // result: (CSELULT (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (CSELULT (SLL x 
(ZeroExt8to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -12906,7 +13011,7 @@ func rewriteValueARM64_OpLsh32x8_0(v *Value) bool { v1.AddArg(y) v0.AddArg(v1) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -12925,7 +13030,7 @@ func rewriteValueARM64_OpLsh64x16_0(v *Value) bool { _ = typ // match: (Lsh64x16 x y) // cond: - // result: (CSELULT (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (CSELULT (SLL x (ZeroExt16to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -12938,7 +13043,7 @@ func rewriteValueARM64_OpLsh64x16_0(v *Value) bool { v1.AddArg(y) v0.AddArg(v1) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -12957,7 +13062,7 @@ func rewriteValueARM64_OpLsh64x32_0(v *Value) bool { _ = typ // match: (Lsh64x32 x y) // cond: - // result: (CSELULT (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (CSELULT (SLL x (ZeroExt32to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -12970,7 +13075,7 @@ func rewriteValueARM64_OpLsh64x32_0(v *Value) bool { v1.AddArg(y) v0.AddArg(v1) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -12985,45 +13090,9 @@ func rewriteValueARM64_OpLsh64x32_0(v *Value) bool { func rewriteValueARM64_OpLsh64x64_0(v *Value) bool { b := v.Block _ = b - // match: (Lsh64x64 x (MOVDconst [c])) - // cond: uint64(c) < 64 - // result: (SLLconst x [c]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != 
OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) < 64) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Lsh64x64 _ (MOVDconst [c])) - // cond: uint64(c) >= 64 - // result: (MOVDconst [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 64) { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } // match: (Lsh64x64 x y) // cond: - // result: (CSELULT (SLL x y) (Const64 [0]) (CMPconst [64] y)) + // result: (CSELULT (SLL x y) (MOVDconst [0]) (CMPconst [64] y)) for { t := v.Type _ = v.Args[1] @@ -13034,7 +13103,7 @@ func rewriteValueARM64_OpLsh64x64_0(v *Value) bool { v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpConst64, t) + v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v1.AuxInt = 0 v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -13051,7 +13120,7 @@ func rewriteValueARM64_OpLsh64x8_0(v *Value) bool { _ = typ // match: (Lsh64x8 x y) // cond: - // result: (CSELULT (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (CSELULT (SLL x (ZeroExt8to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -13064,7 +13133,7 @@ func rewriteValueARM64_OpLsh64x8_0(v *Value) bool { v1.AddArg(y) v0.AddArg(v1) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -13083,7 +13152,7 @@ func rewriteValueARM64_OpLsh8x16_0(v *Value) bool { _ = typ // match: (Lsh8x16 x y) // cond: - // result: (CSELULT (SLL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (CSELULT (SLL x (ZeroExt16to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -13096,7 +13165,7 @@ func rewriteValueARM64_OpLsh8x16_0(v *Value) 
bool { v1.AddArg(y) v0.AddArg(v1) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -13115,7 +13184,7 @@ func rewriteValueARM64_OpLsh8x32_0(v *Value) bool { _ = typ // match: (Lsh8x32 x y) // cond: - // result: (CSELULT (SLL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (CSELULT (SLL x (ZeroExt32to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -13128,7 +13197,7 @@ func rewriteValueARM64_OpLsh8x32_0(v *Value) bool { v1.AddArg(y) v0.AddArg(v1) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -13143,45 +13212,9 @@ func rewriteValueARM64_OpLsh8x32_0(v *Value) bool { func rewriteValueARM64_OpLsh8x64_0(v *Value) bool { b := v.Block _ = b - // match: (Lsh8x64 x (MOVDconst [c])) - // cond: uint64(c) < 8 - // result: (SLLconst x [c]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) < 8) { - break - } - v.reset(OpARM64SLLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Lsh8x64 _ (MOVDconst [c])) - // cond: uint64(c) >= 8 - // result: (MOVDconst [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 8) { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } // match: (Lsh8x64 x y) // cond: - // result: (CSELULT (SLL x y) (Const64 [0]) (CMPconst [64] y)) + // result: (CSELULT (SLL x y) (MOVDconst [0]) (CMPconst [64] y)) for { t := v.Type _ = v.Args[1] @@ -13192,7 +13225,7 @@ func rewriteValueARM64_OpLsh8x64_0(v *Value) bool { v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpConst64, t) + v1 := 
b.NewValue0(v.Pos, OpARM64MOVDconst, t) v1.AuxInt = 0 v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -13209,7 +13242,7 @@ func rewriteValueARM64_OpLsh8x8_0(v *Value) bool { _ = typ // match: (Lsh8x8 x y) // cond: - // result: (CSELULT (SLL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (CSELULT (SLL x (ZeroExt8to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -13222,7 +13255,7 @@ func rewriteValueARM64_OpLsh8x8_0(v *Value) bool { v1.AddArg(y) v0.AddArg(v1) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14242,7 +14275,7 @@ func rewriteValueARM64_OpRsh16Ux16_0(v *Value) bool { _ = typ // match: (Rsh16Ux16 x y) // cond: - // result: (CSELULT (SRL (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (CSELULT (SRL (ZeroExt16to64 x) (ZeroExt16to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -14257,7 +14290,7 @@ func rewriteValueARM64_OpRsh16Ux16_0(v *Value) bool { v2.AddArg(y) v0.AddArg(v2) v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst64, t) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v3.AuxInt = 0 v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14276,7 +14309,7 @@ func rewriteValueARM64_OpRsh16Ux32_0(v *Value) bool { _ = typ // match: (Rsh16Ux32 x y) // cond: - // result: (CSELULT (SRL (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (CSELULT (SRL (ZeroExt16to64 x) (ZeroExt32to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -14291,7 +14324,7 @@ func rewriteValueARM64_OpRsh16Ux32_0(v *Value) bool { v2.AddArg(y) v0.AddArg(v2) v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst64, t) + v3 := b.NewValue0(v.Pos, 
OpARM64MOVDconst, t) v3.AuxInt = 0 v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14308,47 +14341,9 @@ func rewriteValueARM64_OpRsh16Ux64_0(v *Value) bool { _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh16Ux64 x (MOVDconst [c])) - // cond: uint64(c) < 16 - // result: (SRLconst (ZeroExt16to64 x) [c]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) < 16) { - break - } - v.reset(OpARM64SRLconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Rsh16Ux64 _ (MOVDconst [c])) - // cond: uint64(c) >= 16 - // result: (MOVDconst [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 16) { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } // match: (Rsh16Ux64 x y) // cond: - // result: (CSELULT (SRL (ZeroExt16to64 x) y) (Const64 [0]) (CMPconst [64] y)) + // result: (CSELULT (SRL (ZeroExt16to64 x) y) (MOVDconst [0]) (CMPconst [64] y)) for { t := v.Type _ = v.Args[1] @@ -14361,7 +14356,7 @@ func rewriteValueARM64_OpRsh16Ux64_0(v *Value) bool { v0.AddArg(v1) v0.AddArg(y) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14378,7 +14373,7 @@ func rewriteValueARM64_OpRsh16Ux8_0(v *Value) bool { _ = typ // match: (Rsh16Ux8 x y) // cond: - // result: (CSELULT (SRL (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (CSELULT (SRL (ZeroExt16to64 x) (ZeroExt8to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -14393,7 +14388,7 @@ func rewriteValueARM64_OpRsh16Ux8_0(v *Value) bool { v2.AddArg(y) v0.AddArg(v2) v.AddArg(v0) - v3 := b.NewValue0(v.Pos, 
OpConst64, t) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v3.AuxInt = 0 v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14412,7 +14407,7 @@ func rewriteValueARM64_OpRsh16x16_0(v *Value) bool { _ = typ // match: (Rsh16x16 x y) // cond: - // result: (SRA (SignExt16to64 x) (CSELULT (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + // result: (SRA (SignExt16to64 x) (CSELULT (ZeroExt16to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt16to64 y)))) for { _ = v.Args[1] x := v.Args[0] @@ -14425,7 +14420,7 @@ func rewriteValueARM64_OpRsh16x16_0(v *Value) bool { v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, y.Type) v3.AuxInt = 63 v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14445,7 +14440,7 @@ func rewriteValueARM64_OpRsh16x32_0(v *Value) bool { _ = typ // match: (Rsh16x32 x y) // cond: - // result: (SRA (SignExt16to64 x) (CSELULT (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + // result: (SRA (SignExt16to64 x) (CSELULT (ZeroExt32to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt32to64 y)))) for { _ = v.Args[1] x := v.Args[0] @@ -14458,7 +14453,7 @@ func rewriteValueARM64_OpRsh16x32_0(v *Value) bool { v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, y.Type) v3.AuxInt = 63 v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14476,51 +14471,9 @@ func rewriteValueARM64_OpRsh16x64_0(v *Value) bool { _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh16x64 x (MOVDconst [c])) - // cond: uint64(c) < 16 - // result: (SRAconst (SignExt16to64 x) [c]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) < 16) { - break - } 
- v.reset(OpARM64SRAconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Rsh16x64 x (MOVDconst [c])) - // cond: uint64(c) >= 16 - // result: (SRAconst (SignExt16to64 x) [63]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 16) { - break - } - v.reset(OpARM64SRAconst) - v.AuxInt = 63 - v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (Rsh16x64 x y) // cond: - // result: (SRA (SignExt16to64 x) (CSELULT y (Const64 [63]) (CMPconst [64] y))) + // result: (SRA (SignExt16to64 x) (CSELULT y (MOVDconst [63]) (CMPconst [64] y))) for { _ = v.Args[1] x := v.Args[0] @@ -14531,7 +14484,7 @@ func rewriteValueARM64_OpRsh16x64_0(v *Value) bool { v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type) v1.AddArg(y) - v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, y.Type) v2.AuxInt = 63 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14549,7 +14502,7 @@ func rewriteValueARM64_OpRsh16x8_0(v *Value) bool { _ = typ // match: (Rsh16x8 x y) // cond: - // result: (SRA (SignExt16to64 x) (CSELULT (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + // result: (SRA (SignExt16to64 x) (CSELULT (ZeroExt8to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { _ = v.Args[1] x := v.Args[0] @@ -14562,7 +14515,7 @@ func rewriteValueARM64_OpRsh16x8_0(v *Value) bool { v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, y.Type) v3.AuxInt = 63 v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14582,7 +14535,7 @@ func rewriteValueARM64_OpRsh32Ux16_0(v *Value) bool { _ = typ // match: (Rsh32Ux16 x y) // cond: - // result: 
(CSELULT (SRL (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (CSELULT (SRL (ZeroExt32to64 x) (ZeroExt16to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -14597,7 +14550,7 @@ func rewriteValueARM64_OpRsh32Ux16_0(v *Value) bool { v2.AddArg(y) v0.AddArg(v2) v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst64, t) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v3.AuxInt = 0 v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14616,7 +14569,7 @@ func rewriteValueARM64_OpRsh32Ux32_0(v *Value) bool { _ = typ // match: (Rsh32Ux32 x y) // cond: - // result: (CSELULT (SRL (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (CSELULT (SRL (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -14631,7 +14584,7 @@ func rewriteValueARM64_OpRsh32Ux32_0(v *Value) bool { v2.AddArg(y) v0.AddArg(v2) v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst64, t) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v3.AuxInt = 0 v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14648,47 +14601,9 @@ func rewriteValueARM64_OpRsh32Ux64_0(v *Value) bool { _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh32Ux64 x (MOVDconst [c])) - // cond: uint64(c) < 32 - // result: (SRLconst (ZeroExt32to64 x) [c]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) < 32) { - break - } - v.reset(OpARM64SRLconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Rsh32Ux64 _ (MOVDconst [c])) - // cond: uint64(c) >= 32 - // result: (MOVDconst [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 32) { - break - } 
- v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } // match: (Rsh32Ux64 x y) // cond: - // result: (CSELULT (SRL (ZeroExt32to64 x) y) (Const64 [0]) (CMPconst [64] y)) + // result: (CSELULT (SRL (ZeroExt32to64 x) y) (MOVDconst [0]) (CMPconst [64] y)) for { t := v.Type _ = v.Args[1] @@ -14701,7 +14616,7 @@ func rewriteValueARM64_OpRsh32Ux64_0(v *Value) bool { v0.AddArg(v1) v0.AddArg(y) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14718,7 +14633,7 @@ func rewriteValueARM64_OpRsh32Ux8_0(v *Value) bool { _ = typ // match: (Rsh32Ux8 x y) // cond: - // result: (CSELULT (SRL (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (CSELULT (SRL (ZeroExt32to64 x) (ZeroExt8to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -14733,7 +14648,7 @@ func rewriteValueARM64_OpRsh32Ux8_0(v *Value) bool { v2.AddArg(y) v0.AddArg(v2) v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst64, t) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v3.AuxInt = 0 v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14752,7 +14667,7 @@ func rewriteValueARM64_OpRsh32x16_0(v *Value) bool { _ = typ // match: (Rsh32x16 x y) // cond: - // result: (SRA (SignExt32to64 x) (CSELULT (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + // result: (SRA (SignExt32to64 x) (CSELULT (ZeroExt16to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt16to64 y)))) for { _ = v.Args[1] x := v.Args[0] @@ -14765,7 +14680,7 @@ func rewriteValueARM64_OpRsh32x16_0(v *Value) bool { v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, y.Type) v3.AuxInt = 63 v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14785,7 
+14700,7 @@ func rewriteValueARM64_OpRsh32x32_0(v *Value) bool { _ = typ // match: (Rsh32x32 x y) // cond: - // result: (SRA (SignExt32to64 x) (CSELULT (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + // result: (SRA (SignExt32to64 x) (CSELULT (ZeroExt32to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt32to64 y)))) for { _ = v.Args[1] x := v.Args[0] @@ -14798,7 +14713,7 @@ func rewriteValueARM64_OpRsh32x32_0(v *Value) bool { v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, y.Type) v3.AuxInt = 63 v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14816,51 +14731,9 @@ func rewriteValueARM64_OpRsh32x64_0(v *Value) bool { _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh32x64 x (MOVDconst [c])) - // cond: uint64(c) < 32 - // result: (SRAconst (SignExt32to64 x) [c]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) < 32) { - break - } - v.reset(OpARM64SRAconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Rsh32x64 x (MOVDconst [c])) - // cond: uint64(c) >= 32 - // result: (SRAconst (SignExt32to64 x) [63]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 32) { - break - } - v.reset(OpARM64SRAconst) - v.AuxInt = 63 - v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (Rsh32x64 x y) // cond: - // result: (SRA (SignExt32to64 x) (CSELULT y (Const64 [63]) (CMPconst [64] y))) + // result: (SRA (SignExt32to64 x) (CSELULT y (MOVDconst [63]) (CMPconst [64] y))) for { _ = v.Args[1] x := v.Args[0] @@ -14871,7 +14744,7 @@ func rewriteValueARM64_OpRsh32x64_0(v *Value) bool { 
v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type) v1.AddArg(y) - v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, y.Type) v2.AuxInt = 63 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14889,7 +14762,7 @@ func rewriteValueARM64_OpRsh32x8_0(v *Value) bool { _ = typ // match: (Rsh32x8 x y) // cond: - // result: (SRA (SignExt32to64 x) (CSELULT (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + // result: (SRA (SignExt32to64 x) (CSELULT (ZeroExt8to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { _ = v.Args[1] x := v.Args[0] @@ -14902,7 +14775,7 @@ func rewriteValueARM64_OpRsh32x8_0(v *Value) bool { v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, y.Type) v3.AuxInt = 63 v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14922,7 +14795,7 @@ func rewriteValueARM64_OpRsh64Ux16_0(v *Value) bool { _ = typ // match: (Rsh64Ux16 x y) // cond: - // result: (CSELULT (SRL x (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (CSELULT (SRL x (ZeroExt16to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -14935,7 +14808,7 @@ func rewriteValueARM64_OpRsh64Ux16_0(v *Value) bool { v1.AddArg(y) v0.AddArg(v1) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14954,7 +14827,7 @@ func rewriteValueARM64_OpRsh64Ux32_0(v *Value) bool { _ = typ // match: (Rsh64Ux32 x y) // cond: - // result: (CSELULT (SRL x (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (CSELULT (SRL x (ZeroExt32to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -14967,7 +14840,7 @@ 
func rewriteValueARM64_OpRsh64Ux32_0(v *Value) bool { v1.AddArg(y) v0.AddArg(v1) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -14982,45 +14855,9 @@ func rewriteValueARM64_OpRsh64Ux32_0(v *Value) bool { func rewriteValueARM64_OpRsh64Ux64_0(v *Value) bool { b := v.Block _ = b - // match: (Rsh64Ux64 x (MOVDconst [c])) - // cond: uint64(c) < 64 - // result: (SRLconst x [c]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) < 64) { - break - } - v.reset(OpARM64SRLconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Rsh64Ux64 _ (MOVDconst [c])) - // cond: uint64(c) >= 64 - // result: (MOVDconst [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 64) { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } // match: (Rsh64Ux64 x y) // cond: - // result: (CSELULT (SRL x y) (Const64 [0]) (CMPconst [64] y)) + // result: (CSELULT (SRL x y) (MOVDconst [0]) (CMPconst [64] y)) for { t := v.Type _ = v.Args[1] @@ -15031,7 +14868,7 @@ func rewriteValueARM64_OpRsh64Ux64_0(v *Value) bool { v0.AddArg(x) v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpConst64, t) + v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v1.AuxInt = 0 v.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -15048,7 +14885,7 @@ func rewriteValueARM64_OpRsh64Ux8_0(v *Value) bool { _ = typ // match: (Rsh64Ux8 x y) // cond: - // result: (CSELULT (SRL x (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (CSELULT (SRL x (ZeroExt8to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -15061,7 +14898,7 @@ func rewriteValueARM64_OpRsh64Ux8_0(v *Value) bool { v1.AddArg(y) v0.AddArg(v1) 
v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -15080,7 +14917,7 @@ func rewriteValueARM64_OpRsh64x16_0(v *Value) bool { _ = typ // match: (Rsh64x16 x y) // cond: - // result: (SRA x (CSELULT (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + // result: (SRA x (CSELULT (ZeroExt16to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt16to64 y)))) for { _ = v.Args[1] x := v.Args[0] @@ -15091,7 +14928,7 @@ func rewriteValueARM64_OpRsh64x16_0(v *Value) bool { v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v1.AddArg(y) v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, y.Type) v2.AuxInt = 63 v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -15111,7 +14948,7 @@ func rewriteValueARM64_OpRsh64x32_0(v *Value) bool { _ = typ // match: (Rsh64x32 x y) // cond: - // result: (SRA x (CSELULT (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + // result: (SRA x (CSELULT (ZeroExt32to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt32to64 y)))) for { _ = v.Args[1] x := v.Args[0] @@ -15122,7 +14959,7 @@ func rewriteValueARM64_OpRsh64x32_0(v *Value) bool { v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v1.AddArg(y) v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, y.Type) v2.AuxInt = 63 v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -15138,47 +14975,9 @@ func rewriteValueARM64_OpRsh64x32_0(v *Value) bool { func rewriteValueARM64_OpRsh64x64_0(v *Value) bool { b := v.Block _ = b - // match: (Rsh64x64 x (MOVDconst [c])) - // cond: uint64(c) < 64 - // result: (SRAconst x [c]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) < 64) { - break - } - 
v.reset(OpARM64SRAconst) - v.AuxInt = c - v.AddArg(x) - return true - } - // match: (Rsh64x64 x (MOVDconst [c])) - // cond: uint64(c) >= 64 - // result: (SRAconst x [63]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 64) { - break - } - v.reset(OpARM64SRAconst) - v.AuxInt = 63 - v.AddArg(x) - return true - } // match: (Rsh64x64 x y) // cond: - // result: (SRA x (CSELULT y (Const64 [63]) (CMPconst [64] y))) + // result: (SRA x (CSELULT y (MOVDconst [63]) (CMPconst [64] y))) for { _ = v.Args[1] x := v.Args[0] @@ -15187,7 +14986,7 @@ func rewriteValueARM64_OpRsh64x64_0(v *Value) bool { v.AddArg(x) v0 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type) v0.AddArg(y) - v1 := b.NewValue0(v.Pos, OpConst64, y.Type) + v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, y.Type) v1.AuxInt = 63 v0.AddArg(v1) v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -15205,7 +15004,7 @@ func rewriteValueARM64_OpRsh64x8_0(v *Value) bool { _ = typ // match: (Rsh64x8 x y) // cond: - // result: (SRA x (CSELULT (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + // result: (SRA x (CSELULT (ZeroExt8to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { _ = v.Args[1] x := v.Args[0] @@ -15216,7 +15015,7 @@ func rewriteValueARM64_OpRsh64x8_0(v *Value) bool { v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v1.AddArg(y) v0.AddArg(v1) - v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, y.Type) v2.AuxInt = 63 v0.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -15236,7 +15035,7 @@ func rewriteValueARM64_OpRsh8Ux16_0(v *Value) bool { _ = typ // match: (Rsh8Ux16 x y) // cond: - // result: (CSELULT (SRL (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt16to64 y))) + // result: (CSELULT (SRL (ZeroExt8to64 x) (ZeroExt16to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt16to64 y))) for { t := 
v.Type _ = v.Args[1] @@ -15251,7 +15050,7 @@ func rewriteValueARM64_OpRsh8Ux16_0(v *Value) bool { v2.AddArg(y) v0.AddArg(v2) v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst64, t) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v3.AuxInt = 0 v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -15270,7 +15069,7 @@ func rewriteValueARM64_OpRsh8Ux32_0(v *Value) bool { _ = typ // match: (Rsh8Ux32 x y) // cond: - // result: (CSELULT (SRL (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt32to64 y))) + // result: (CSELULT (SRL (ZeroExt8to64 x) (ZeroExt32to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -15285,7 +15084,7 @@ func rewriteValueARM64_OpRsh8Ux32_0(v *Value) bool { v2.AddArg(y) v0.AddArg(v2) v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst64, t) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v3.AuxInt = 0 v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -15302,47 +15101,9 @@ func rewriteValueARM64_OpRsh8Ux64_0(v *Value) bool { _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh8Ux64 x (MOVDconst [c])) - // cond: uint64(c) < 8 - // result: (SRLconst (ZeroExt8to64 x) [c]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) < 8) { - break - } - v.reset(OpARM64SRLconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Rsh8Ux64 _ (MOVDconst [c])) - // cond: uint64(c) >= 8 - // result: (MOVDconst [0]) - for { - _ = v.Args[1] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 8) { - break - } - v.reset(OpARM64MOVDconst) - v.AuxInt = 0 - return true - } // match: (Rsh8Ux64 x y) // cond: - // result: (CSELULT (SRL (ZeroExt8to64 x) y) (Const64 [0]) (CMPconst [64] y)) + // result: (CSELULT (SRL (ZeroExt8to64 x) y) (MOVDconst [0]) 
(CMPconst [64] y)) for { t := v.Type _ = v.Args[1] @@ -15355,7 +15116,7 @@ func rewriteValueARM64_OpRsh8Ux64_0(v *Value) bool { v0.AddArg(v1) v0.AddArg(y) v.AddArg(v0) - v2 := b.NewValue0(v.Pos, OpConst64, t) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v2.AuxInt = 0 v.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -15372,7 +15133,7 @@ func rewriteValueARM64_OpRsh8Ux8_0(v *Value) bool { _ = typ // match: (Rsh8Ux8 x y) // cond: - // result: (CSELULT (SRL (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 [0]) (CMPconst [64] (ZeroExt8to64 y))) + // result: (CSELULT (SRL (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVDconst [0]) (CMPconst [64] (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -15387,7 +15148,7 @@ func rewriteValueARM64_OpRsh8Ux8_0(v *Value) bool { v2.AddArg(y) v0.AddArg(v2) v.AddArg(v0) - v3 := b.NewValue0(v.Pos, OpConst64, t) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, t) v3.AuxInt = 0 v.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -15406,7 +15167,7 @@ func rewriteValueARM64_OpRsh8x16_0(v *Value) bool { _ = typ // match: (Rsh8x16 x y) // cond: - // result: (SRA (SignExt8to64 x) (CSELULT (ZeroExt16to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt16to64 y)))) + // result: (SRA (SignExt8to64 x) (CSELULT (ZeroExt16to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt16to64 y)))) for { _ = v.Args[1] x := v.Args[0] @@ -15419,7 +15180,7 @@ func rewriteValueARM64_OpRsh8x16_0(v *Value) bool { v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v2.AddArg(y) v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, y.Type) v3.AuxInt = 63 v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -15439,7 +15200,7 @@ func rewriteValueARM64_OpRsh8x32_0(v *Value) bool { _ = typ // match: (Rsh8x32 x y) // cond: - // result: (SRA (SignExt8to64 x) (CSELULT (ZeroExt32to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt32to64 y)))) + // result: (SRA (SignExt8to64 x) 
(CSELULT (ZeroExt32to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt32to64 y)))) for { _ = v.Args[1] x := v.Args[0] @@ -15452,7 +15213,7 @@ func rewriteValueARM64_OpRsh8x32_0(v *Value) bool { v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v2.AddArg(y) v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, y.Type) v3.AuxInt = 63 v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -15470,51 +15231,9 @@ func rewriteValueARM64_OpRsh8x64_0(v *Value) bool { _ = b typ := &b.Func.Config.Types _ = typ - // match: (Rsh8x64 x (MOVDconst [c])) - // cond: uint64(c) < 8 - // result: (SRAconst (SignExt8to64 x) [c]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) < 8) { - break - } - v.reset(OpARM64SRAconst) - v.AuxInt = c - v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - return true - } - // match: (Rsh8x64 x (MOVDconst [c])) - // cond: uint64(c) >= 8 - // result: (SRAconst (SignExt8to64 x) [63]) - for { - _ = v.Args[1] - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpARM64MOVDconst { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 8) { - break - } - v.reset(OpARM64SRAconst) - v.AuxInt = 63 - v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) - v0.AddArg(x) - v.AddArg(v0) - return true - } // match: (Rsh8x64 x y) // cond: - // result: (SRA (SignExt8to64 x) (CSELULT y (Const64 [63]) (CMPconst [64] y))) + // result: (SRA (SignExt8to64 x) (CSELULT y (MOVDconst [63]) (CMPconst [64] y))) for { _ = v.Args[1] x := v.Args[0] @@ -15525,7 +15244,7 @@ func rewriteValueARM64_OpRsh8x64_0(v *Value) bool { v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpARM64CSELULT, y.Type) v1.AddArg(y) - v2 := b.NewValue0(v.Pos, OpConst64, y.Type) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, y.Type) v2.AuxInt = 63 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -15543,7 
+15262,7 @@ func rewriteValueARM64_OpRsh8x8_0(v *Value) bool { _ = typ // match: (Rsh8x8 x y) // cond: - // result: (SRA (SignExt8to64 x) (CSELULT (ZeroExt8to64 y) (Const64 [63]) (CMPconst [64] (ZeroExt8to64 y)))) + // result: (SRA (SignExt8to64 x) (CSELULT (ZeroExt8to64 y) (MOVDconst [63]) (CMPconst [64] (ZeroExt8to64 y)))) for { _ = v.Args[1] x := v.Args[0] @@ -15556,7 +15275,7 @@ func rewriteValueARM64_OpRsh8x8_0(v *Value) bool { v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v2.AddArg(y) v1.AddArg(v2) - v3 := b.NewValue0(v.Pos, OpConst64, y.Type) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, y.Type) v3.AuxInt = 63 v1.AddArg(v3) v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags) @@ -16213,6 +15932,95 @@ func rewriteValueARM64_OpZero_0(v *Value) bool { v.AddArg(v1) return true } + // match: (Zero [9] ptr mem) + // cond: + // result: (MOVBstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) + for { + if v.AuxInt != 9 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVBstore) + v.AuxInt = 8 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v2.AuxInt = 0 + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + return false +} +func rewriteValueARM64_OpZero_10(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (Zero [10] ptr mem) + // cond: + // result: (MOVHstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) + for { + if v.AuxInt != 10 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVHstore) + v.AuxInt = 8 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, 
OpARM64MOVDconst, typ.UInt64) + v2.AuxInt = 0 + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Zero [11] ptr mem) + // cond: + // result: (MOVBstore [10] ptr (MOVDconst [0]) (MOVHstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))) + for { + if v.AuxInt != 11 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVBstore) + v.AuxInt = 10 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) + v1.AuxInt = 8 + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v2.AuxInt = 0 + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v3.AddArg(ptr) + v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v4.AuxInt = 0 + v3.AddArg(v4) + v3.AddArg(mem) + v1.AddArg(v3) + v.AddArg(v1) + return true + } // match: (Zero [12] ptr mem) // cond: // result: (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) @@ -16238,57 +16046,23 @@ func rewriteValueARM64_OpZero_0(v *Value) bool { v.AddArg(v1) return true } - return false -} -func rewriteValueARM64_OpZero_10(v *Value) bool { - b := v.Block - _ = b - config := b.Func.Config - _ = config - typ := &b.Func.Config.Types - _ = typ - // match: (Zero [16] ptr mem) + // match: (Zero [13] ptr mem) // cond: - // result: (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)) + // result: (MOVBstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))) for { - if v.AuxInt != 16 { + if v.AuxInt != 13 { break } _ = v.Args[1] ptr := v.Args[0] mem := v.Args[1] - v.reset(OpARM64MOVDstore) - v.AuxInt = 8 + v.reset(OpARM64MOVBstore) + v.AuxInt = 12 v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) v0.AuxInt = 0 v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) - v1.AddArg(ptr) - v2 := 
b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) - return true - } - // match: (Zero [24] ptr mem) - // cond: - // result: (MOVDstore [16] ptr (MOVDconst [0]) (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))) - for { - if v.AuxInt != 24 { - break - } - _ = v.Args[1] - ptr := v.Args[0] - mem := v.Args[1] - v.reset(OpARM64MOVDstore) - v.AuxInt = 16 - v.AddArg(ptr) - v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) - v0.AuxInt = 0 - v.AddArg(v0) - v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) v1.AuxInt = 8 v1.AddArg(ptr) v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) @@ -16304,62 +16078,288 @@ func rewriteValueARM64_OpZero_10(v *Value) bool { v.AddArg(v1) return true } + // match: (Zero [14] ptr mem) + // cond: + // result: (MOVHstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))) + for { + if v.AuxInt != 14 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVHstore) + v.AuxInt = 12 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) + v1.AuxInt = 8 + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v2.AuxInt = 0 + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v3.AddArg(ptr) + v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v4.AuxInt = 0 + v3.AddArg(v4) + v3.AddArg(mem) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Zero [15] ptr mem) + // cond: + // result: (MOVBstore [14] ptr (MOVDconst [0]) (MOVHstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))) + for { + if v.AuxInt != 15 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64MOVBstore) + 
v.AuxInt = 14 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem) + v1.AuxInt = 12 + v1.AddArg(ptr) + v2 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v2.AuxInt = 0 + v1.AddArg(v2) + v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem) + v3.AuxInt = 8 + v3.AddArg(ptr) + v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v4.AuxInt = 0 + v3.AddArg(v4) + v5 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem) + v5.AddArg(ptr) + v6 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v6.AuxInt = 0 + v5.AddArg(v6) + v5.AddArg(mem) + v3.AddArg(v5) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Zero [16] ptr mem) + // cond: + // result: (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem) + for { + if v.AuxInt != 16 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64STP) + v.AuxInt = 0 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v1.AuxInt = 0 + v.AddArg(v1) + v.AddArg(mem) + return true + } + // match: (Zero [32] ptr mem) + // cond: + // result: (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)) + for { + if v.AuxInt != 32 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64STP) + v.AuxInt = 16 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v1.AuxInt = 0 + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v2.AuxInt = 0 + v2.AddArg(ptr) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v3.AuxInt = 0 + v2.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v4.AuxInt = 0 + v2.AddArg(v4) + v2.AddArg(mem) + v.AddArg(v2) + return true + } + // 
match: (Zero [48] ptr mem) + // cond: + // result: (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))) + for { + if v.AuxInt != 48 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64STP) + v.AuxInt = 32 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v1.AuxInt = 0 + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v2.AuxInt = 16 + v2.AddArg(ptr) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v3.AuxInt = 0 + v2.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v4.AuxInt = 0 + v2.AddArg(v4) + v5 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v5.AuxInt = 0 + v5.AddArg(ptr) + v6 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v6.AuxInt = 0 + v5.AddArg(v6) + v7 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v7.AuxInt = 0 + v5.AddArg(v7) + v5.AddArg(mem) + v2.AddArg(v5) + v.AddArg(v2) + return true + } + // match: (Zero [64] ptr mem) + // cond: + // result: (STP [48] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))) + for { + if v.AuxInt != 64 { + break + } + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpARM64STP) + v.AuxInt = 48 + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v1.AuxInt = 0 + v.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v2.AuxInt = 32 + v2.AddArg(ptr) + v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v3.AuxInt = 0 + v2.AddArg(v3) + v4 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v4.AuxInt = 0 + v2.AddArg(v4) + v5 := b.NewValue0(v.Pos, OpARM64STP, 
types.TypeMem) + v5.AuxInt = 16 + v5.AddArg(ptr) + v6 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v6.AuxInt = 0 + v5.AddArg(v6) + v7 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v7.AuxInt = 0 + v5.AddArg(v7) + v8 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem) + v8.AuxInt = 0 + v8.AddArg(ptr) + v9 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v9.AuxInt = 0 + v8.AddArg(v9) + v10 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64) + v10.AuxInt = 0 + v8.AddArg(v10) + v8.AddArg(mem) + v5.AddArg(v8) + v2.AddArg(v5) + v.AddArg(v2) + return true + } + return false +} +func rewriteValueARM64_OpZero_20(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config // match: (Zero [s] ptr mem) - // cond: s%8 != 0 && s > 8 - // result: (Zero [s%8] (OffPtr ptr [s-s%8]) (Zero [s-s%8] ptr mem)) + // cond: s%16 != 0 && s > 16 + // result: (Zero [s-s%16] (OffPtr ptr [s%16]) (Zero [s%16] ptr mem)) for { s := v.AuxInt _ = v.Args[1] ptr := v.Args[0] mem := v.Args[1] - if !(s%8 != 0 && s > 8) { + if !(s%16 != 0 && s > 16) { break } v.reset(OpZero) - v.AuxInt = s % 8 + v.AuxInt = s - s%16 v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type) - v0.AuxInt = s - s%8 + v0.AuxInt = s % 16 v0.AddArg(ptr) v.AddArg(v0) v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem) - v1.AuxInt = s - s%8 + v1.AuxInt = s % 16 v1.AddArg(ptr) v1.AddArg(mem) v.AddArg(v1) return true } // match: (Zero [s] ptr mem) - // cond: s%8 == 0 && s > 24 && s <= 8*128 && !config.noDuffDevice - // result: (DUFFZERO [4 * (128 - int64(s/8))] ptr mem) + // cond: s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice + // result: (DUFFZERO [4 * (64 - int64(s/16))] ptr mem) for { s := v.AuxInt _ = v.Args[1] ptr := v.Args[0] mem := v.Args[1] - if !(s%8 == 0 && s > 24 && s <= 8*128 && !config.noDuffDevice) { + if !(s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice) { break } v.reset(OpARM64DUFFZERO) - v.AuxInt = 4 * (128 - int64(s/8)) + v.AuxInt = 4 * (64 - int64(s/16)) v.AddArg(ptr) 
v.AddArg(mem) return true } // match: (Zero [s] ptr mem) - // cond: s%8 == 0 && (s > 8*128 || config.noDuffDevice) - // result: (LoweredZero ptr (ADDconst [s-8] ptr) mem) + // cond: s%16 == 0 && (s > 16*64 || config.noDuffDevice) + // result: (LoweredZero ptr (ADDconst [s-16] ptr) mem) for { s := v.AuxInt _ = v.Args[1] ptr := v.Args[0] mem := v.Args[1] - if !(s%8 == 0 && (s > 8*128 || config.noDuffDevice)) { + if !(s%16 == 0 && (s > 16*64 || config.noDuffDevice)) { break } v.reset(OpARM64LoweredZero) v.AddArg(ptr) v0 := b.NewValue0(v.Pos, OpARM64ADDconst, ptr.Type) - v0.AuxInt = s - 8 + v0.AuxInt = s - 16 v0.AddArg(ptr) v.AddArg(v0) v.AddArg(mem) @@ -16456,6 +16456,7 @@ func rewriteBlockARM64(b *Block) bool { x := v.Args[0] b.Kind = BlockARM64Z b.SetControl(x) + b.Aux = nil return true } // match: (EQ (CMPWconst [0] x) yes no) @@ -16472,6 +16473,7 @@ func rewriteBlockARM64(b *Block) bool { x := v.Args[0] b.Kind = BlockARM64ZW b.SetControl(x) + b.Aux = nil return true } // match: (EQ (FlagEQ) yes no) @@ -16484,6 +16486,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (EQ (FlagLT_ULT) yes no) @@ -16496,6 +16499,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -16509,6 +16513,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -16522,6 +16527,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -16535,6 +16541,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -16549,6 +16556,7 @@ func rewriteBlockARM64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockARM64EQ b.SetControl(cmp) + b.Aux = nil return true } case BlockARM64GE: @@ -16562,6 +16570,7 @@ func 
rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GE (FlagLT_ULT) yes no) @@ -16574,6 +16583,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -16587,6 +16597,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -16600,6 +16611,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GE (FlagGT_UGT) yes no) @@ -16612,6 +16624,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GE (InvertFlags cmp) yes no) @@ -16625,6 +16638,7 @@ func rewriteBlockARM64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockARM64LE b.SetControl(cmp) + b.Aux = nil return true } case BlockARM64GT: @@ -16638,6 +16652,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -16651,6 +16666,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -16664,6 +16680,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -16677,6 +16694,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GT (FlagGT_UGT) yes no) @@ -16689,6 +16707,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GT (InvertFlags cmp) yes no) @@ -16702,6 +16721,7 @@ func rewriteBlockARM64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockARM64LT b.SetControl(cmp) + b.Aux = nil return true } case BlockIf: @@ -16716,6 +16736,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = 
BlockARM64EQ b.SetControl(cc) + b.Aux = nil return true } // match: (If (NotEqual cc) yes no) @@ -16729,6 +16750,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64NE b.SetControl(cc) + b.Aux = nil return true } // match: (If (LessThan cc) yes no) @@ -16742,6 +16764,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64LT b.SetControl(cc) + b.Aux = nil return true } // match: (If (LessThanU cc) yes no) @@ -16755,6 +16778,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64ULT b.SetControl(cc) + b.Aux = nil return true } // match: (If (LessEqual cc) yes no) @@ -16768,6 +16792,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64LE b.SetControl(cc) + b.Aux = nil return true } // match: (If (LessEqualU cc) yes no) @@ -16781,6 +16806,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64ULE b.SetControl(cc) + b.Aux = nil return true } // match: (If (GreaterThan cc) yes no) @@ -16794,6 +16820,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64GT b.SetControl(cc) + b.Aux = nil return true } // match: (If (GreaterThanU cc) yes no) @@ -16807,6 +16834,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64UGT b.SetControl(cc) + b.Aux = nil return true } // match: (If (GreaterEqual cc) yes no) @@ -16820,6 +16848,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64GE b.SetControl(cc) + b.Aux = nil return true } // match: (If (GreaterEqualU cc) yes no) @@ -16833,6 +16862,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64UGE b.SetControl(cc) + b.Aux = nil return true } // match: (If cond yes no) @@ -16844,6 +16874,7 @@ func rewriteBlockARM64(b *Block) bool { cond := b.Control b.Kind = BlockARM64NZ b.SetControl(cond) + b.Aux = nil return true } case BlockARM64LE: @@ -16857,6 +16888,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind 
= BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LE (FlagLT_ULT) yes no) @@ -16869,6 +16901,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LE (FlagLT_UGT) yes no) @@ -16881,6 +16914,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LE (FlagGT_ULT) yes no) @@ -16893,6 +16927,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -16906,6 +16941,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -16920,6 +16956,7 @@ func rewriteBlockARM64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockARM64GE b.SetControl(cmp) + b.Aux = nil return true } case BlockARM64LT: @@ -16933,6 +16970,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -16946,6 +16984,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LT (FlagLT_UGT) yes no) @@ -16958,6 +16997,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LT (FlagGT_ULT) yes no) @@ -16970,6 +17010,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -16983,6 +17024,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -16997,6 +17039,7 @@ func rewriteBlockARM64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockARM64GT b.SetControl(cmp) + b.Aux = nil return true } case BlockARM64NE: @@ -17014,6 +17057,7 @@ func rewriteBlockARM64(b *Block) bool { x := v.Args[0] b.Kind = BlockARM64NZ b.SetControl(x) + b.Aux = nil return true } 
// match: (NE (CMPWconst [0] x) yes no) @@ -17030,6 +17074,7 @@ func rewriteBlockARM64(b *Block) bool { x := v.Args[0] b.Kind = BlockARM64NZW b.SetControl(x) + b.Aux = nil return true } // match: (NE (FlagEQ) yes no) @@ -17042,6 +17087,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -17055,6 +17101,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (NE (FlagLT_UGT) yes no) @@ -17067,6 +17114,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (NE (FlagGT_ULT) yes no) @@ -17079,6 +17127,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (NE (FlagGT_UGT) yes no) @@ -17091,6 +17140,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (NE (InvertFlags cmp) yes no) @@ -17104,6 +17154,7 @@ func rewriteBlockARM64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockARM64NE b.SetControl(cmp) + b.Aux = nil return true } case BlockARM64NZ: @@ -17118,6 +17169,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64EQ b.SetControl(cc) + b.Aux = nil return true } // match: (NZ (NotEqual cc) yes no) @@ -17131,6 +17183,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64NE b.SetControl(cc) + b.Aux = nil return true } // match: (NZ (LessThan cc) yes no) @@ -17144,6 +17197,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64LT b.SetControl(cc) + b.Aux = nil return true } // match: (NZ (LessThanU cc) yes no) @@ -17157,6 +17211,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64ULT b.SetControl(cc) + b.Aux = nil return true } // match: (NZ (LessEqual cc) yes no) @@ -17170,6 +17225,7 @@ func rewriteBlockARM64(b 
*Block) bool { cc := v.Args[0] b.Kind = BlockARM64LE b.SetControl(cc) + b.Aux = nil return true } // match: (NZ (LessEqualU cc) yes no) @@ -17183,6 +17239,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64ULE b.SetControl(cc) + b.Aux = nil return true } // match: (NZ (GreaterThan cc) yes no) @@ -17196,6 +17253,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64GT b.SetControl(cc) + b.Aux = nil return true } // match: (NZ (GreaterThanU cc) yes no) @@ -17209,6 +17267,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64UGT b.SetControl(cc) + b.Aux = nil return true } // match: (NZ (GreaterEqual cc) yes no) @@ -17222,6 +17281,7 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64GE b.SetControl(cc) + b.Aux = nil return true } // match: (NZ (GreaterEqualU cc) yes no) @@ -17235,6 +17295,25 @@ func rewriteBlockARM64(b *Block) bool { cc := v.Args[0] b.Kind = BlockARM64UGE b.SetControl(cc) + b.Aux = nil + return true + } + // match: (NZ (ANDconst [c] x) yes no) + // cond: oneBit(c) + // result: (TBNZ {ntz(c)} x yes no) + for { + v := b.Control + if v.Op != OpARM64ANDconst { + break + } + c := v.AuxInt + x := v.Args[0] + if !(oneBit(c)) { + break + } + b.Kind = BlockARM64TBNZ + b.SetControl(x) + b.Aux = ntz(c) return true } // match: (NZ (MOVDconst [0]) yes no) @@ -17250,6 +17329,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -17267,9 +17347,28 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } case BlockARM64NZW: + // match: (NZW (ANDconst [c] x) yes no) + // cond: oneBit(int64(uint32(c))) + // result: (TBNZ {ntz(int64(uint32(c)))} x yes no) + for { + v := b.Control + if v.Op != OpARM64ANDconst { + break + } + c := v.AuxInt + x := v.Args[0] + if !(oneBit(int64(uint32(c)))) { + break + } + b.Kind = BlockARM64TBNZ + 
b.SetControl(x) + b.Aux = ntz(int64(uint32(c))) + return true + } // match: (NZW (MOVDconst [c]) yes no) // cond: int32(c) == 0 // result: (First nil no yes) @@ -17284,6 +17383,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -17301,6 +17401,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } case BlockARM64UGE: @@ -17314,6 +17415,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (UGE (FlagLT_ULT) yes no) @@ -17326,6 +17428,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -17339,6 +17442,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (UGE (FlagGT_ULT) yes no) @@ -17351,6 +17455,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -17364,6 +17469,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (UGE (InvertFlags cmp) yes no) @@ -17377,6 +17483,7 @@ func rewriteBlockARM64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockARM64ULE b.SetControl(cmp) + b.Aux = nil return true } case BlockARM64UGT: @@ -17390,6 +17497,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -17403,6 +17511,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -17416,6 +17525,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (UGT (FlagGT_ULT) yes no) @@ -17428,6 +17538,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst 
b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -17441,6 +17552,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (UGT (InvertFlags cmp) yes no) @@ -17454,6 +17566,7 @@ func rewriteBlockARM64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockARM64ULT b.SetControl(cmp) + b.Aux = nil return true } case BlockARM64ULE: @@ -17467,6 +17580,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (ULE (FlagLT_ULT) yes no) @@ -17479,6 +17593,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (ULE (FlagLT_UGT) yes no) @@ -17491,6 +17606,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -17504,6 +17620,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (ULE (FlagGT_UGT) yes no) @@ -17516,6 +17633,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -17530,6 +17648,7 @@ func rewriteBlockARM64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockARM64UGE b.SetControl(cmp) + b.Aux = nil return true } case BlockARM64ULT: @@ -17543,6 +17662,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -17556,6 +17676,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (ULT (FlagLT_UGT) yes no) @@ -17568,6 +17689,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -17581,6 +17703,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: 
(ULT (FlagGT_UGT) yes no) @@ -17593,6 +17716,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -17607,9 +17731,28 @@ func rewriteBlockARM64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockARM64UGT b.SetControl(cmp) + b.Aux = nil return true } case BlockARM64Z: + // match: (Z (ANDconst [c] x) yes no) + // cond: oneBit(c) + // result: (TBZ {ntz(c)} x yes no) + for { + v := b.Control + if v.Op != OpARM64ANDconst { + break + } + c := v.AuxInt + x := v.Args[0] + if !(oneBit(c)) { + break + } + b.Kind = BlockARM64TBZ + b.SetControl(x) + b.Aux = ntz(c) + return true + } // match: (Z (MOVDconst [0]) yes no) // cond: // result: (First nil yes no) @@ -17623,6 +17766,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (Z (MOVDconst [c]) yes no) @@ -17639,10 +17783,29 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } case BlockARM64ZW: + // match: (ZW (ANDconst [c] x) yes no) + // cond: oneBit(int64(uint32(c))) + // result: (TBZ {ntz(int64(uint32(c)))} x yes no) + for { + v := b.Control + if v.Op != OpARM64ANDconst { + break + } + c := v.AuxInt + x := v.Args[0] + if !(oneBit(int64(uint32(c)))) { + break + } + b.Kind = BlockARM64TBZ + b.SetControl(x) + b.Aux = ntz(int64(uint32(c))) + return true + } // match: (ZW (MOVDconst [c]) yes no) // cond: int32(c) == 0 // result: (First nil yes no) @@ -17657,6 +17820,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (ZW (MOVDconst [c]) yes no) @@ -17673,6 +17837,7 @@ func rewriteBlockARM64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go index 08749751e1c..d4f4c03ca3e 100644 --- 
a/src/cmd/compile/internal/ssa/rewriteMIPS.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go @@ -145,6 +145,8 @@ func rewriteValueMIPS(v *Value) bool { return rewriteValueMIPS_OpGeq8_0(v) case OpGeq8U: return rewriteValueMIPS_OpGeq8U_0(v) + case OpGetCallerSP: + return rewriteValueMIPS_OpGetCallerSP_0(v) case OpGetClosurePtr: return rewriteValueMIPS_OpGetClosurePtr_0(v) case OpGreater16: @@ -1759,6 +1761,15 @@ func rewriteValueMIPS_OpGeq8U_0(v *Value) bool { return true } } +func rewriteValueMIPS_OpGetCallerSP_0(v *Value) bool { + // match: (GetCallerSP) + // cond: + // result: (LoweredGetCallerSP) + for { + v.reset(OpMIPSLoweredGetCallerSP) + return true + } +} func rewriteValueMIPS_OpGetClosurePtr_0(v *Value) bool { // match: (GetClosurePtr) // cond: @@ -9608,6 +9619,7 @@ func rewriteBlockMIPS(b *Block) bool { cmp := v.Args[0] b.Kind = BlockMIPSFPF b.SetControl(cmp) + b.Aux = nil return true } // match: (EQ (FPFlagFalse cmp) yes no) @@ -9621,6 +9633,7 @@ func rewriteBlockMIPS(b *Block) bool { cmp := v.Args[0] b.Kind = BlockMIPSFPT b.SetControl(cmp) + b.Aux = nil return true } // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no) @@ -9641,6 +9654,7 @@ func rewriteBlockMIPS(b *Block) bool { _ = cmp.Args[1] b.Kind = BlockMIPSNE b.SetControl(cmp) + b.Aux = nil return true } // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no) @@ -9661,6 +9675,7 @@ func rewriteBlockMIPS(b *Block) bool { _ = cmp.Args[1] b.Kind = BlockMIPSNE b.SetControl(cmp) + b.Aux = nil return true } // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no) @@ -9680,6 +9695,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockMIPSNE b.SetControl(cmp) + b.Aux = nil return true } // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no) @@ -9699,6 +9715,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockMIPSNE b.SetControl(cmp) + b.Aux = nil return true } // match: (EQ (XORconst [1] cmp:(SGTzero _)) yes no) @@ -9718,6 +9735,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockMIPSNE 
b.SetControl(cmp) + b.Aux = nil return true } // match: (EQ (XORconst [1] cmp:(SGTUzero _)) yes no) @@ -9737,6 +9755,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockMIPSNE b.SetControl(cmp) + b.Aux = nil return true } // match: (EQ (SGTUconst [1] x) yes no) @@ -9753,6 +9772,7 @@ func rewriteBlockMIPS(b *Block) bool { x := v.Args[0] b.Kind = BlockMIPSNE b.SetControl(x) + b.Aux = nil return true } // match: (EQ (SGTUzero x) yes no) @@ -9766,6 +9786,7 @@ func rewriteBlockMIPS(b *Block) bool { x := v.Args[0] b.Kind = BlockMIPSEQ b.SetControl(x) + b.Aux = nil return true } // match: (EQ (SGTconst [0] x) yes no) @@ -9782,6 +9803,7 @@ func rewriteBlockMIPS(b *Block) bool { x := v.Args[0] b.Kind = BlockMIPSGEZ b.SetControl(x) + b.Aux = nil return true } // match: (EQ (SGTzero x) yes no) @@ -9795,6 +9817,7 @@ func rewriteBlockMIPS(b *Block) bool { x := v.Args[0] b.Kind = BlockMIPSLEZ b.SetControl(x) + b.Aux = nil return true } // match: (EQ (MOVWconst [0]) yes no) @@ -9810,6 +9833,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (EQ (MOVWconst [c]) yes no) @@ -9826,6 +9850,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -9844,6 +9869,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GEZ (MOVWconst [c]) yes no) @@ -9860,6 +9886,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -9878,6 +9905,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GTZ (MOVWconst [c]) yes no) @@ -9894,6 +9922,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -9907,6 +9936,7 @@ func rewriteBlockMIPS(b *Block) bool { cond := 
b.Control b.Kind = BlockMIPSNE b.SetControl(cond) + b.Aux = nil return true } case BlockMIPSLEZ: @@ -9924,6 +9954,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LEZ (MOVWconst [c]) yes no) @@ -9940,6 +9971,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -9958,6 +9990,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LTZ (MOVWconst [c]) yes no) @@ -9974,6 +10007,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -9989,6 +10023,7 @@ func rewriteBlockMIPS(b *Block) bool { cmp := v.Args[0] b.Kind = BlockMIPSFPT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (FPFlagFalse cmp) yes no) @@ -10002,6 +10037,7 @@ func rewriteBlockMIPS(b *Block) bool { cmp := v.Args[0] b.Kind = BlockMIPSFPF b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no) @@ -10022,6 +10058,7 @@ func rewriteBlockMIPS(b *Block) bool { _ = cmp.Args[1] b.Kind = BlockMIPSEQ b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no) @@ -10042,6 +10079,7 @@ func rewriteBlockMIPS(b *Block) bool { _ = cmp.Args[1] b.Kind = BlockMIPSEQ b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no) @@ -10061,6 +10099,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockMIPSEQ b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no) @@ -10080,6 +10119,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockMIPSEQ b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (XORconst [1] cmp:(SGTzero _)) yes no) @@ -10099,6 +10139,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockMIPSEQ b.SetControl(cmp) + b.Aux = 
nil return true } // match: (NE (XORconst [1] cmp:(SGTUzero _)) yes no) @@ -10118,6 +10159,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockMIPSEQ b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (SGTUconst [1] x) yes no) @@ -10134,6 +10176,7 @@ func rewriteBlockMIPS(b *Block) bool { x := v.Args[0] b.Kind = BlockMIPSEQ b.SetControl(x) + b.Aux = nil return true } // match: (NE (SGTUzero x) yes no) @@ -10147,6 +10190,7 @@ func rewriteBlockMIPS(b *Block) bool { x := v.Args[0] b.Kind = BlockMIPSNE b.SetControl(x) + b.Aux = nil return true } // match: (NE (SGTconst [0] x) yes no) @@ -10163,6 +10207,7 @@ func rewriteBlockMIPS(b *Block) bool { x := v.Args[0] b.Kind = BlockMIPSLTZ b.SetControl(x) + b.Aux = nil return true } // match: (NE (SGTzero x) yes no) @@ -10176,6 +10221,7 @@ func rewriteBlockMIPS(b *Block) bool { x := v.Args[0] b.Kind = BlockMIPSGTZ b.SetControl(x) + b.Aux = nil return true } // match: (NE (MOVWconst [0]) yes no) @@ -10191,6 +10237,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -10208,6 +10255,7 @@ func rewriteBlockMIPS(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } } diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go index 82919a1d31a..21265e3c421 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go @@ -41,6 +41,30 @@ func rewriteValueMIPS64(v *Value) bool { return rewriteValueMIPS64_OpAnd8_0(v) case OpAndB: return rewriteValueMIPS64_OpAndB_0(v) + case OpAtomicAdd32: + return rewriteValueMIPS64_OpAtomicAdd32_0(v) + case OpAtomicAdd64: + return rewriteValueMIPS64_OpAtomicAdd64_0(v) + case OpAtomicCompareAndSwap32: + return rewriteValueMIPS64_OpAtomicCompareAndSwap32_0(v) + case OpAtomicCompareAndSwap64: + return rewriteValueMIPS64_OpAtomicCompareAndSwap64_0(v) + case OpAtomicExchange32: + 
return rewriteValueMIPS64_OpAtomicExchange32_0(v) + case OpAtomicExchange64: + return rewriteValueMIPS64_OpAtomicExchange64_0(v) + case OpAtomicLoad32: + return rewriteValueMIPS64_OpAtomicLoad32_0(v) + case OpAtomicLoad64: + return rewriteValueMIPS64_OpAtomicLoad64_0(v) + case OpAtomicLoadPtr: + return rewriteValueMIPS64_OpAtomicLoadPtr_0(v) + case OpAtomicStore32: + return rewriteValueMIPS64_OpAtomicStore32_0(v) + case OpAtomicStore64: + return rewriteValueMIPS64_OpAtomicStore64_0(v) + case OpAtomicStorePtrNoWB: + return rewriteValueMIPS64_OpAtomicStorePtrNoWB_0(v) case OpAvg64u: return rewriteValueMIPS64_OpAvg64u_0(v) case OpClosureCall: @@ -147,6 +171,8 @@ func rewriteValueMIPS64(v *Value) bool { return rewriteValueMIPS64_OpGeq8_0(v) case OpGeq8U: return rewriteValueMIPS64_OpGeq8U_0(v) + case OpGetCallerSP: + return rewriteValueMIPS64_OpGetCallerSP_0(v) case OpGetClosurePtr: return rewriteValueMIPS64_OpGetClosurePtr_0(v) case OpGreater16: @@ -267,6 +293,14 @@ func rewriteValueMIPS64(v *Value) bool { return rewriteValueMIPS64_OpMIPS64AND_0(v) case OpMIPS64ANDconst: return rewriteValueMIPS64_OpMIPS64ANDconst_0(v) + case OpMIPS64LoweredAtomicAdd32: + return rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd32_0(v) + case OpMIPS64LoweredAtomicAdd64: + return rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd64_0(v) + case OpMIPS64LoweredAtomicStore32: + return rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32_0(v) + case OpMIPS64LoweredAtomicStore64: + return rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64_0(v) case OpMIPS64MOVBUload: return rewriteValueMIPS64_OpMIPS64MOVBUload_0(v) case OpMIPS64MOVBUreg: @@ -753,6 +787,196 @@ func rewriteValueMIPS64_OpAndB_0(v *Value) bool { return true } } +func rewriteValueMIPS64_OpAtomicAdd32_0(v *Value) bool { + // match: (AtomicAdd32 ptr val mem) + // cond: + // result: (LoweredAtomicAdd32 ptr val mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpMIPS64LoweredAtomicAdd32) + v.AddArg(ptr) + 
v.AddArg(val) + v.AddArg(mem) + return true + } +} +func rewriteValueMIPS64_OpAtomicAdd64_0(v *Value) bool { + // match: (AtomicAdd64 ptr val mem) + // cond: + // result: (LoweredAtomicAdd64 ptr val mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpMIPS64LoweredAtomicAdd64) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } +} +func rewriteValueMIPS64_OpAtomicCompareAndSwap32_0(v *Value) bool { + // match: (AtomicCompareAndSwap32 ptr old new_ mem) + // cond: + // result: (LoweredAtomicCas32 ptr old new_ mem) + for { + _ = v.Args[3] + ptr := v.Args[0] + old := v.Args[1] + new_ := v.Args[2] + mem := v.Args[3] + v.reset(OpMIPS64LoweredAtomicCas32) + v.AddArg(ptr) + v.AddArg(old) + v.AddArg(new_) + v.AddArg(mem) + return true + } +} +func rewriteValueMIPS64_OpAtomicCompareAndSwap64_0(v *Value) bool { + // match: (AtomicCompareAndSwap64 ptr old new_ mem) + // cond: + // result: (LoweredAtomicCas64 ptr old new_ mem) + for { + _ = v.Args[3] + ptr := v.Args[0] + old := v.Args[1] + new_ := v.Args[2] + mem := v.Args[3] + v.reset(OpMIPS64LoweredAtomicCas64) + v.AddArg(ptr) + v.AddArg(old) + v.AddArg(new_) + v.AddArg(mem) + return true + } +} +func rewriteValueMIPS64_OpAtomicExchange32_0(v *Value) bool { + // match: (AtomicExchange32 ptr val mem) + // cond: + // result: (LoweredAtomicExchange32 ptr val mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpMIPS64LoweredAtomicExchange32) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } +} +func rewriteValueMIPS64_OpAtomicExchange64_0(v *Value) bool { + // match: (AtomicExchange64 ptr val mem) + // cond: + // result: (LoweredAtomicExchange64 ptr val mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpMIPS64LoweredAtomicExchange64) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } +} +func rewriteValueMIPS64_OpAtomicLoad32_0(v *Value) bool { + // 
match: (AtomicLoad32 ptr mem) + // cond: + // result: (LoweredAtomicLoad32 ptr mem) + for { + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpMIPS64LoweredAtomicLoad32) + v.AddArg(ptr) + v.AddArg(mem) + return true + } +} +func rewriteValueMIPS64_OpAtomicLoad64_0(v *Value) bool { + // match: (AtomicLoad64 ptr mem) + // cond: + // result: (LoweredAtomicLoad64 ptr mem) + for { + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpMIPS64LoweredAtomicLoad64) + v.AddArg(ptr) + v.AddArg(mem) + return true + } +} +func rewriteValueMIPS64_OpAtomicLoadPtr_0(v *Value) bool { + // match: (AtomicLoadPtr ptr mem) + // cond: + // result: (LoweredAtomicLoad64 ptr mem) + for { + _ = v.Args[1] + ptr := v.Args[0] + mem := v.Args[1] + v.reset(OpMIPS64LoweredAtomicLoad64) + v.AddArg(ptr) + v.AddArg(mem) + return true + } +} +func rewriteValueMIPS64_OpAtomicStore32_0(v *Value) bool { + // match: (AtomicStore32 ptr val mem) + // cond: + // result: (LoweredAtomicStore32 ptr val mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpMIPS64LoweredAtomicStore32) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } +} +func rewriteValueMIPS64_OpAtomicStore64_0(v *Value) bool { + // match: (AtomicStore64 ptr val mem) + // cond: + // result: (LoweredAtomicStore64 ptr val mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpMIPS64LoweredAtomicStore64) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } +} +func rewriteValueMIPS64_OpAtomicStorePtrNoWB_0(v *Value) bool { + // match: (AtomicStorePtrNoWB ptr val mem) + // cond: + // result: (LoweredAtomicStore64 ptr val mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpMIPS64LoweredAtomicStore64) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } +} func rewriteValueMIPS64_OpAvg64u_0(v *Value) bool { b := v.Block _ = b @@ -1719,6 +1943,15 @@ func 
rewriteValueMIPS64_OpGeq8U_0(v *Value) bool { return true } } +func rewriteValueMIPS64_OpGetCallerSP_0(v *Value) bool { + // match: (GetCallerSP) + // cond: + // result: (LoweredGetCallerSP) + for { + v.reset(OpMIPS64LoweredGetCallerSP) + return true + } +} func rewriteValueMIPS64_OpGetClosurePtr_0(v *Value) bool { // match: (GetClosurePtr) // cond: @@ -2699,7 +2932,7 @@ func rewriteValueMIPS64_OpLsh16x16_0(v *Value) bool { _ = typ // match: (Lsh16x16 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -2708,7 +2941,7 @@ func rewriteValueMIPS64_OpLsh16x16_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -2732,7 +2965,7 @@ func rewriteValueMIPS64_OpLsh16x32_0(v *Value) bool { _ = typ // match: (Lsh16x32 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -2741,7 +2974,7 @@ func rewriteValueMIPS64_OpLsh16x32_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -2765,7 +2998,7 @@ func rewriteValueMIPS64_OpLsh16x64_0(v *Value) bool { _ = typ // match: (Lsh16x64 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) y)) (SLLV x y)) + // result: (AND (NEGV (SGTU 
(MOVVconst [64]) y)) (SLLV x y)) for { t := v.Type _ = v.Args[1] @@ -2774,7 +3007,7 @@ func rewriteValueMIPS64_OpLsh16x64_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v1.AddArg(y) @@ -2794,7 +3027,7 @@ func rewriteValueMIPS64_OpLsh16x8_0(v *Value) bool { _ = typ // match: (Lsh16x8 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -2803,7 +3036,7 @@ func rewriteValueMIPS64_OpLsh16x8_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -2827,7 +3060,7 @@ func rewriteValueMIPS64_OpLsh32x16_0(v *Value) bool { _ = typ // match: (Lsh32x16 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -2836,7 +3069,7 @@ func rewriteValueMIPS64_OpLsh32x16_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -2860,7 +3093,7 @@ func rewriteValueMIPS64_OpLsh32x32_0(v *Value) bool { _ = typ // match: (Lsh32x32 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) 
(ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -2869,7 +3102,7 @@ func rewriteValueMIPS64_OpLsh32x32_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -2893,7 +3126,7 @@ func rewriteValueMIPS64_OpLsh32x64_0(v *Value) bool { _ = typ // match: (Lsh32x64 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) y)) (SLLV x y)) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) for { t := v.Type _ = v.Args[1] @@ -2902,7 +3135,7 @@ func rewriteValueMIPS64_OpLsh32x64_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v1.AddArg(y) @@ -2922,7 +3155,7 @@ func rewriteValueMIPS64_OpLsh32x8_0(v *Value) bool { _ = typ // match: (Lsh32x8 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -2931,7 +3164,7 @@ func rewriteValueMIPS64_OpLsh32x8_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -2955,7 +3188,7 @@ func rewriteValueMIPS64_OpLsh64x16_0(v *Value) bool { _ = typ // match: (Lsh64x16 x y) // cond: - // 
result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -2964,7 +3197,7 @@ func rewriteValueMIPS64_OpLsh64x16_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -2988,7 +3221,7 @@ func rewriteValueMIPS64_OpLsh64x32_0(v *Value) bool { _ = typ // match: (Lsh64x32 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -2997,7 +3230,7 @@ func rewriteValueMIPS64_OpLsh64x32_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -3021,7 +3254,7 @@ func rewriteValueMIPS64_OpLsh64x64_0(v *Value) bool { _ = typ // match: (Lsh64x64 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) y)) (SLLV x y)) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) for { t := v.Type _ = v.Args[1] @@ -3030,7 +3263,7 @@ func rewriteValueMIPS64_OpLsh64x64_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v1.AddArg(y) @@ -3050,7 +3283,7 @@ func rewriteValueMIPS64_OpLsh64x8_0(v *Value) bool { _ 
= typ // match: (Lsh64x8 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -3059,7 +3292,7 @@ func rewriteValueMIPS64_OpLsh64x8_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -3083,7 +3316,7 @@ func rewriteValueMIPS64_OpLsh8x16_0(v *Value) bool { _ = typ // match: (Lsh8x16 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -3092,7 +3325,7 @@ func rewriteValueMIPS64_OpLsh8x16_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -3116,7 +3349,7 @@ func rewriteValueMIPS64_OpLsh8x32_0(v *Value) bool { _ = typ // match: (Lsh8x32 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -3125,7 +3358,7 @@ func rewriteValueMIPS64_OpLsh8x32_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 
v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -3149,7 +3382,7 @@ func rewriteValueMIPS64_OpLsh8x64_0(v *Value) bool { _ = typ // match: (Lsh8x64 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) y)) (SLLV x y)) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) for { t := v.Type _ = v.Args[1] @@ -3158,7 +3391,7 @@ func rewriteValueMIPS64_OpLsh8x64_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v1.AddArg(y) @@ -3178,7 +3411,7 @@ func rewriteValueMIPS64_OpLsh8x8_0(v *Value) bool { _ = typ // match: (Lsh8x8 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -3187,7 +3420,7 @@ func rewriteValueMIPS64_OpLsh8x8_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -3475,6 +3708,98 @@ func rewriteValueMIPS64_OpMIPS64ANDconst_0(v *Value) bool { } return false } +func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd32_0(v *Value) bool { + // match: (LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) + // cond: is32Bit(c) + // result: (LoweredAtomicAddconst32 [c] ptr mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpMIPS64MOVVconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + if !(is32Bit(c)) { + break + } + v.reset(OpMIPS64LoweredAtomicAddconst32) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func 
rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd64_0(v *Value) bool { + // match: (LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) + // cond: is32Bit(c) + // result: (LoweredAtomicAddconst64 [c] ptr mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpMIPS64MOVVconst { + break + } + c := v_1.AuxInt + mem := v.Args[2] + if !(is32Bit(c)) { + break + } + v.reset(OpMIPS64LoweredAtomicAddconst64) + v.AuxInt = c + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32_0(v *Value) bool { + // match: (LoweredAtomicStore32 ptr (MOVVconst [0]) mem) + // cond: + // result: (LoweredAtomicStorezero32 ptr mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpMIPS64MOVVconst { + break + } + if v_1.AuxInt != 0 { + break + } + mem := v.Args[2] + v.reset(OpMIPS64LoweredAtomicStorezero32) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64_0(v *Value) bool { + // match: (LoweredAtomicStore64 ptr (MOVVconst [0]) mem) + // cond: + // result: (LoweredAtomicStorezero64 ptr mem) + for { + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpMIPS64MOVVconst { + break + } + if v_1.AuxInt != 0 { + break + } + mem := v.Args[2] + v.reset(OpMIPS64LoweredAtomicStorezero64) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} func rewriteValueMIPS64_OpMIPS64MOVBUload_0(v *Value) bool { // match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) // cond: is32Bit(off1+off2) @@ -7521,7 +7846,7 @@ func rewriteValueMIPS64_OpRsh16Ux16_0(v *Value) bool { _ = typ // match: (Rsh16Ux16 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -7530,7 +7855,7 @@ func 
rewriteValueMIPS64_OpRsh16Ux16_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -7556,7 +7881,7 @@ func rewriteValueMIPS64_OpRsh16Ux32_0(v *Value) bool { _ = typ // match: (Rsh16Ux32 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt32to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -7565,7 +7890,7 @@ func rewriteValueMIPS64_OpRsh16Ux32_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -7591,7 +7916,7 @@ func rewriteValueMIPS64_OpRsh16Ux64_0(v *Value) bool { _ = typ // match: (Rsh16Ux64 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) y)) (SRLV (ZeroExt16to64 x) y)) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt16to64 x) y)) for { t := v.Type _ = v.Args[1] @@ -7600,7 +7925,7 @@ func rewriteValueMIPS64_OpRsh16Ux64_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v1.AddArg(y) @@ -7622,7 +7947,7 @@ func rewriteValueMIPS64_OpRsh16Ux8_0(v *Value) bool { _ = typ // match: (Rsh16Ux8 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y))) + // result: (AND 
(NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -7631,7 +7956,7 @@ func rewriteValueMIPS64_OpRsh16Ux8_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -7657,7 +7982,7 @@ func rewriteValueMIPS64_OpRsh16x16_0(v *Value) bool { _ = typ // match: (Rsh16x16 x y) // cond: - // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (Const64 [63]))) (ZeroExt16to64 y))) + // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -7673,7 +7998,7 @@ func rewriteValueMIPS64_OpRsh16x16_0(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 v3.AddArg(v5) v2.AddArg(v3) @@ -7692,7 +8017,7 @@ func rewriteValueMIPS64_OpRsh16x32_0(v *Value) bool { _ = typ // match: (Rsh16x32 x y) // cond: - // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (Const64 [63]))) (ZeroExt32to64 y))) + // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -7708,7 +8033,7 @@ func rewriteValueMIPS64_OpRsh16x32_0(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 v3.AddArg(v5) v2.AddArg(v3) @@ -7727,7 +8052,7 @@ func rewriteValueMIPS64_OpRsh16x64_0(v *Value) bool { _ = typ // match: (Rsh16x64 x y) // cond: - // result: (SRAV 
(SignExt16to64 x) (OR (NEGV (SGTU y (Const64 [63]))) y)) + // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) for { t := v.Type _ = v.Args[1] @@ -7741,7 +8066,7 @@ func rewriteValueMIPS64_OpRsh16x64_0(v *Value) bool { v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v3.AddArg(y) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 63 v3.AddArg(v4) v2.AddArg(v3) @@ -7758,7 +8083,7 @@ func rewriteValueMIPS64_OpRsh16x8_0(v *Value) bool { _ = typ // match: (Rsh16x8 x y) // cond: - // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (Const64 [63]))) (ZeroExt8to64 y))) + // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -7774,7 +8099,7 @@ func rewriteValueMIPS64_OpRsh16x8_0(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 v3.AddArg(v5) v2.AddArg(v3) @@ -7793,7 +8118,7 @@ func rewriteValueMIPS64_OpRsh32Ux16_0(v *Value) bool { _ = typ // match: (Rsh32Ux16 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -7802,7 +8127,7 @@ func rewriteValueMIPS64_OpRsh32Ux16_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -7828,7 +8153,7 @@ func rewriteValueMIPS64_OpRsh32Ux32_0(v *Value) bool { _ = 
typ // match: (Rsh32Ux32 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -7837,7 +8162,7 @@ func rewriteValueMIPS64_OpRsh32Ux32_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -7863,7 +8188,7 @@ func rewriteValueMIPS64_OpRsh32Ux64_0(v *Value) bool { _ = typ // match: (Rsh32Ux64 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) y)) (SRLV (ZeroExt32to64 x) y)) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt32to64 x) y)) for { t := v.Type _ = v.Args[1] @@ -7872,7 +8197,7 @@ func rewriteValueMIPS64_OpRsh32Ux64_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v1.AddArg(y) @@ -7894,7 +8219,7 @@ func rewriteValueMIPS64_OpRsh32Ux8_0(v *Value) bool { _ = typ // match: (Rsh32Ux8 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -7903,7 +8228,7 @@ func rewriteValueMIPS64_OpRsh32Ux8_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 
v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -7929,7 +8254,7 @@ func rewriteValueMIPS64_OpRsh32x16_0(v *Value) bool { _ = typ // match: (Rsh32x16 x y) // cond: - // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (Const64 [63]))) (ZeroExt16to64 y))) + // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -7945,7 +8270,7 @@ func rewriteValueMIPS64_OpRsh32x16_0(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 v3.AddArg(v5) v2.AddArg(v3) @@ -7964,7 +8289,7 @@ func rewriteValueMIPS64_OpRsh32x32_0(v *Value) bool { _ = typ // match: (Rsh32x32 x y) // cond: - // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (Const64 [63]))) (ZeroExt32to64 y))) + // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -7980,7 +8305,7 @@ func rewriteValueMIPS64_OpRsh32x32_0(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 v3.AddArg(v5) v2.AddArg(v3) @@ -7999,7 +8324,7 @@ func rewriteValueMIPS64_OpRsh32x64_0(v *Value) bool { _ = typ // match: (Rsh32x64 x y) // cond: - // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU y (Const64 [63]))) y)) + // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) for { t := v.Type _ = v.Args[1] @@ -8013,7 +8338,7 @@ func rewriteValueMIPS64_OpRsh32x64_0(v *Value) bool { v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v3.AddArg(y) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, 
typ.UInt64) v4.AuxInt = 63 v3.AddArg(v4) v2.AddArg(v3) @@ -8030,7 +8355,7 @@ func rewriteValueMIPS64_OpRsh32x8_0(v *Value) bool { _ = typ // match: (Rsh32x8 x y) // cond: - // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (Const64 [63]))) (ZeroExt8to64 y))) + // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -8046,7 +8371,7 @@ func rewriteValueMIPS64_OpRsh32x8_0(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 v3.AddArg(v5) v2.AddArg(v3) @@ -8065,7 +8390,7 @@ func rewriteValueMIPS64_OpRsh64Ux16_0(v *Value) bool { _ = typ // match: (Rsh64Ux16 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt16to64 y))) (SRLV x (ZeroExt16to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV x (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -8074,7 +8399,7 @@ func rewriteValueMIPS64_OpRsh64Ux16_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -8098,7 +8423,7 @@ func rewriteValueMIPS64_OpRsh64Ux32_0(v *Value) bool { _ = typ // match: (Rsh64Ux32 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt32to64 y))) (SRLV x (ZeroExt32to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV x (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -8107,7 +8432,7 @@ func rewriteValueMIPS64_OpRsh64Ux32_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, 
OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -8131,7 +8456,7 @@ func rewriteValueMIPS64_OpRsh64Ux64_0(v *Value) bool { _ = typ // match: (Rsh64Ux64 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) y)) (SRLV x y)) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV x y)) for { t := v.Type _ = v.Args[1] @@ -8140,7 +8465,7 @@ func rewriteValueMIPS64_OpRsh64Ux64_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v1.AddArg(y) @@ -8160,7 +8485,7 @@ func rewriteValueMIPS64_OpRsh64Ux8_0(v *Value) bool { _ = typ // match: (Rsh64Ux8 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SRLV x (ZeroExt8to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV x (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -8169,7 +8494,7 @@ func rewriteValueMIPS64_OpRsh64Ux8_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -8193,7 +8518,7 @@ func rewriteValueMIPS64_OpRsh64x16_0(v *Value) bool { _ = typ // match: (Rsh64x16 x y) // cond: - // result: (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (Const64 [63]))) (ZeroExt16to64 y))) + // result: (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -8207,7 +8532,7 @@ func rewriteValueMIPS64_OpRsh64x16_0(v *Value) bool { v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v4 := 
b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 63 v2.AddArg(v4) v1.AddArg(v2) @@ -8226,7 +8551,7 @@ func rewriteValueMIPS64_OpRsh64x32_0(v *Value) bool { _ = typ // match: (Rsh64x32 x y) // cond: - // result: (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (Const64 [63]))) (ZeroExt32to64 y))) + // result: (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -8240,7 +8565,7 @@ func rewriteValueMIPS64_OpRsh64x32_0(v *Value) bool { v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 63 v2.AddArg(v4) v1.AddArg(v2) @@ -8259,7 +8584,7 @@ func rewriteValueMIPS64_OpRsh64x64_0(v *Value) bool { _ = typ // match: (Rsh64x64 x y) // cond: - // result: (SRAV x (OR (NEGV (SGTU y (Const64 [63]))) y)) + // result: (SRAV x (OR (NEGV (SGTU y (MOVVconst [63]))) y)) for { t := v.Type _ = v.Args[1] @@ -8271,7 +8596,7 @@ func rewriteValueMIPS64_OpRsh64x64_0(v *Value) bool { v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v2.AddArg(y) - v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v3.AuxInt = 63 v2.AddArg(v3) v1.AddArg(v2) @@ -8288,7 +8613,7 @@ func rewriteValueMIPS64_OpRsh64x8_0(v *Value) bool { _ = typ // match: (Rsh64x8 x y) // cond: - // result: (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (Const64 [63]))) (ZeroExt8to64 y))) + // result: (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -8302,7 +8627,7 @@ func rewriteValueMIPS64_OpRsh64x8_0(v *Value) bool { v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v3.AddArg(y) v2.AddArg(v3) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 63 
v2.AddArg(v4) v1.AddArg(v2) @@ -8321,7 +8646,7 @@ func rewriteValueMIPS64_OpRsh8Ux16_0(v *Value) bool { _ = typ // match: (Rsh8Ux16 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -8330,7 +8655,7 @@ func rewriteValueMIPS64_OpRsh8Ux16_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) @@ -8356,7 +8681,7 @@ func rewriteValueMIPS64_OpRsh8Ux32_0(v *Value) bool { _ = typ // match: (Rsh8Ux32 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt32to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -8365,7 +8690,7 @@ func rewriteValueMIPS64_OpRsh8Ux32_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) @@ -8391,7 +8716,7 @@ func rewriteValueMIPS64_OpRsh8Ux64_0(v *Value) bool { _ = typ // match: (Rsh8Ux64 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) y)) (SRLV (ZeroExt8to64 x) y)) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt8to64 x) y)) for { t := v.Type _ = v.Args[1] @@ -8400,7 +8725,7 @@ func rewriteValueMIPS64_OpRsh8Ux64_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, 
OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v1.AddArg(y) @@ -8422,7 +8747,7 @@ func rewriteValueMIPS64_OpRsh8Ux8_0(v *Value) bool { _ = typ // match: (Rsh8Ux8 x y) // cond: - // result: (AND (NEGV (SGTU (Const64 [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y))) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -8431,7 +8756,7 @@ func rewriteValueMIPS64_OpRsh8Ux8_0(v *Value) bool { v.reset(OpMIPS64AND) v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) - v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v2.AuxInt = 64 v1.AddArg(v2) v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) @@ -8457,7 +8782,7 @@ func rewriteValueMIPS64_OpRsh8x16_0(v *Value) bool { _ = typ // match: (Rsh8x16 x y) // cond: - // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (Const64 [63]))) (ZeroExt16to64 y))) + // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) for { t := v.Type _ = v.Args[1] @@ -8473,7 +8798,7 @@ func rewriteValueMIPS64_OpRsh8x16_0(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 v3.AddArg(v5) v2.AddArg(v3) @@ -8492,7 +8817,7 @@ func rewriteValueMIPS64_OpRsh8x32_0(v *Value) bool { _ = typ // match: (Rsh8x32 x y) // cond: - // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (Const64 [63]))) (ZeroExt32to64 y))) + // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) for { t := v.Type _ = v.Args[1] @@ -8508,7 +8833,7 @@ func 
rewriteValueMIPS64_OpRsh8x32_0(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 v3.AddArg(v5) v2.AddArg(v3) @@ -8527,7 +8852,7 @@ func rewriteValueMIPS64_OpRsh8x64_0(v *Value) bool { _ = typ // match: (Rsh8x64 x y) // cond: - // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU y (Const64 [63]))) y)) + // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) for { t := v.Type _ = v.Args[1] @@ -8541,7 +8866,7 @@ func rewriteValueMIPS64_OpRsh8x64_0(v *Value) bool { v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t) v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool) v3.AddArg(y) - v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v4.AuxInt = 63 v3.AddArg(v4) v2.AddArg(v3) @@ -8558,7 +8883,7 @@ func rewriteValueMIPS64_OpRsh8x8_0(v *Value) bool { _ = typ // match: (Rsh8x8 x y) // cond: - // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (Const64 [63]))) (ZeroExt8to64 y))) + // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) for { t := v.Type _ = v.Args[1] @@ -8574,7 +8899,7 @@ func rewriteValueMIPS64_OpRsh8x8_0(v *Value) bool { v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) v4.AddArg(y) v3.AddArg(v4) - v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64) v5.AuxInt = 63 v3.AddArg(v5) v2.AddArg(v3) @@ -10187,6 +10512,7 @@ func rewriteBlockMIPS64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockMIPS64FPF b.SetControl(cmp) + b.Aux = nil return true } // match: (EQ (FPFlagFalse cmp) yes no) @@ -10200,6 +10526,7 @@ func rewriteBlockMIPS64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockMIPS64FPT b.SetControl(cmp) + b.Aux = nil return true } // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no) @@ -10220,6 +10547,7 @@ func 
rewriteBlockMIPS64(b *Block) bool { _ = cmp.Args[1] b.Kind = BlockMIPS64NE b.SetControl(cmp) + b.Aux = nil return true } // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no) @@ -10240,6 +10568,7 @@ func rewriteBlockMIPS64(b *Block) bool { _ = cmp.Args[1] b.Kind = BlockMIPS64NE b.SetControl(cmp) + b.Aux = nil return true } // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no) @@ -10259,6 +10588,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockMIPS64NE b.SetControl(cmp) + b.Aux = nil return true } // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no) @@ -10278,6 +10608,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockMIPS64NE b.SetControl(cmp) + b.Aux = nil return true } // match: (EQ (SGTUconst [1] x) yes no) @@ -10294,6 +10625,7 @@ func rewriteBlockMIPS64(b *Block) bool { x := v.Args[0] b.Kind = BlockMIPS64NE b.SetControl(x) + b.Aux = nil return true } // match: (EQ (SGTU x (MOVVconst [0])) yes no) @@ -10315,6 +10647,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockMIPS64EQ b.SetControl(x) + b.Aux = nil return true } // match: (EQ (SGTconst [0] x) yes no) @@ -10331,6 +10664,7 @@ func rewriteBlockMIPS64(b *Block) bool { x := v.Args[0] b.Kind = BlockMIPS64GEZ b.SetControl(x) + b.Aux = nil return true } // match: (EQ (SGT x (MOVVconst [0])) yes no) @@ -10352,6 +10686,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockMIPS64LEZ b.SetControl(x) + b.Aux = nil return true } // match: (EQ (MOVVconst [0]) yes no) @@ -10367,6 +10702,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (EQ (MOVVconst [c]) yes no) @@ -10383,6 +10719,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -10401,6 +10738,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GEZ (MOVVconst [c]) yes no) @@ -10417,6 +10755,7 @@ 
func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -10435,6 +10774,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GTZ (MOVVconst [c]) yes no) @@ -10451,6 +10791,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -10464,6 +10805,7 @@ func rewriteBlockMIPS64(b *Block) bool { cond := b.Control b.Kind = BlockMIPS64NE b.SetControl(cond) + b.Aux = nil return true } case BlockMIPS64LEZ: @@ -10481,6 +10823,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LEZ (MOVVconst [c]) yes no) @@ -10497,6 +10840,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -10515,6 +10859,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LTZ (MOVVconst [c]) yes no) @@ -10531,6 +10876,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -10546,6 +10892,7 @@ func rewriteBlockMIPS64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockMIPS64FPT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (FPFlagFalse cmp) yes no) @@ -10559,6 +10906,7 @@ func rewriteBlockMIPS64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockMIPS64FPF b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no) @@ -10579,6 +10927,7 @@ func rewriteBlockMIPS64(b *Block) bool { _ = cmp.Args[1] b.Kind = BlockMIPS64EQ b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no) @@ -10599,6 +10948,7 @@ func rewriteBlockMIPS64(b *Block) bool { _ = cmp.Args[1] b.Kind = BlockMIPS64EQ b.SetControl(cmp) + 
b.Aux = nil return true } // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no) @@ -10618,6 +10968,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockMIPS64EQ b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no) @@ -10637,6 +10988,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockMIPS64EQ b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (SGTUconst [1] x) yes no) @@ -10653,6 +11005,7 @@ func rewriteBlockMIPS64(b *Block) bool { x := v.Args[0] b.Kind = BlockMIPS64EQ b.SetControl(x) + b.Aux = nil return true } // match: (NE (SGTU x (MOVVconst [0])) yes no) @@ -10674,6 +11027,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockMIPS64NE b.SetControl(x) + b.Aux = nil return true } // match: (NE (SGTconst [0] x) yes no) @@ -10690,6 +11044,7 @@ func rewriteBlockMIPS64(b *Block) bool { x := v.Args[0] b.Kind = BlockMIPS64LTZ b.SetControl(x) + b.Aux = nil return true } // match: (NE (SGT x (MOVVconst [0])) yes no) @@ -10711,6 +11066,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockMIPS64GTZ b.SetControl(x) + b.Aux = nil return true } // match: (NE (MOVVconst [0]) yes no) @@ -10726,6 +11082,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -10743,6 +11100,7 @@ func rewriteBlockMIPS64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } } diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 75bf763d12e..6a000f44311 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -15,6 +15,8 @@ var _ = types.TypeMem // in case not otherwise used func rewriteValuePPC64(v *Value) bool { switch v.Op { + case OpAbs: + return rewriteValuePPC64_OpAbs_0(v) case OpAdd16: return rewriteValuePPC64_OpAdd16_0(v) case OpAdd32: @@ -73,6 +75,8 @@ func rewriteValuePPC64(v 
*Value) bool { return rewriteValuePPC64_OpBitLen32_0(v) case OpBitLen64: return rewriteValuePPC64_OpBitLen64_0(v) + case OpCeil: + return rewriteValuePPC64_OpCeil_0(v) case OpClosureCall: return rewriteValuePPC64_OpClosureCall_0(v) case OpCom16: @@ -101,6 +105,8 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpConstNil_0(v) case OpConvert: return rewriteValuePPC64_OpConvert_0(v) + case OpCopysign: + return rewriteValuePPC64_OpCopysign_0(v) case OpCtz32: return rewriteValuePPC64_OpCtz32_0(v) case OpCtz64: @@ -161,6 +167,8 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpEqB_0(v) case OpEqPtr: return rewriteValuePPC64_OpEqPtr_0(v) + case OpFloor: + return rewriteValuePPC64_OpFloor_0(v) case OpGeq16: return rewriteValuePPC64_OpGeq16_0(v) case OpGeq16U: @@ -181,6 +189,8 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpGeq8_0(v) case OpGeq8U: return rewriteValuePPC64_OpGeq8U_0(v) + case OpGetCallerSP: + return rewriteValuePPC64_OpGetCallerSP_0(v) case OpGetClosurePtr: return rewriteValuePPC64_OpGetClosurePtr_0(v) case OpGreater16: @@ -393,10 +403,16 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64CMPconst_0(v) case OpPPC64Equal: return rewriteValuePPC64_OpPPC64Equal_0(v) + case OpPPC64FABS: + return rewriteValuePPC64_OpPPC64FABS_0(v) case OpPPC64FADD: return rewriteValuePPC64_OpPPC64FADD_0(v) case OpPPC64FADDS: return rewriteValuePPC64_OpPPC64FADDS_0(v) + case OpPPC64FCEIL: + return rewriteValuePPC64_OpPPC64FCEIL_0(v) + case OpPPC64FFLOOR: + return rewriteValuePPC64_OpPPC64FFLOOR_0(v) case OpPPC64FMOVDload: return rewriteValuePPC64_OpPPC64FMOVDload_0(v) case OpPPC64FMOVDstore: @@ -405,10 +421,16 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64FMOVSload_0(v) case OpPPC64FMOVSstore: return rewriteValuePPC64_OpPPC64FMOVSstore_0(v) + case OpPPC64FNEG: + return rewriteValuePPC64_OpPPC64FNEG_0(v) + case OpPPC64FSQRT: + return rewriteValuePPC64_OpPPC64FSQRT_0(v) 
case OpPPC64FSUB: return rewriteValuePPC64_OpPPC64FSUB_0(v) case OpPPC64FSUBS: return rewriteValuePPC64_OpPPC64FSUBS_0(v) + case OpPPC64FTRUNC: + return rewriteValuePPC64_OpPPC64FTRUNC_0(v) case OpPPC64GreaterEqual: return rewriteValuePPC64_OpPPC64GreaterEqual_0(v) case OpPPC64GreaterThan: @@ -417,6 +439,8 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64LessEqual_0(v) case OpPPC64LessThan: return rewriteValuePPC64_OpPPC64LessThan_0(v) + case OpPPC64MFVSRD: + return rewriteValuePPC64_OpPPC64MFVSRD_0(v) case OpPPC64MOVBZload: return rewriteValuePPC64_OpPPC64MOVBZload_0(v) case OpPPC64MOVBZreg: @@ -457,12 +481,14 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpPPC64MOVWstore_0(v) case OpPPC64MOVWstorezero: return rewriteValuePPC64_OpPPC64MOVWstorezero_0(v) + case OpPPC64MTVSRD: + return rewriteValuePPC64_OpPPC64MTVSRD_0(v) case OpPPC64MaskIfNotCarry: return rewriteValuePPC64_OpPPC64MaskIfNotCarry_0(v) case OpPPC64NotEqual: return rewriteValuePPC64_OpPPC64NotEqual_0(v) case OpPPC64OR: - return rewriteValuePPC64_OpPPC64OR_0(v) + return rewriteValuePPC64_OpPPC64OR_0(v) || rewriteValuePPC64_OpPPC64OR_10(v) || rewriteValuePPC64_OpPPC64OR_20(v) || rewriteValuePPC64_OpPPC64OR_30(v) || rewriteValuePPC64_OpPPC64OR_40(v) || rewriteValuePPC64_OpPPC64OR_50(v) || rewriteValuePPC64_OpPPC64OR_60(v) || rewriteValuePPC64_OpPPC64OR_70(v) || rewriteValuePPC64_OpPPC64OR_80(v) || rewriteValuePPC64_OpPPC64OR_90(v) || rewriteValuePPC64_OpPPC64OR_100(v) || rewriteValuePPC64_OpPPC64OR_110(v) || rewriteValuePPC64_OpPPC64OR_120(v) || rewriteValuePPC64_OpPPC64OR_130(v) || rewriteValuePPC64_OpPPC64OR_140(v) case OpPPC64ORN: return rewriteValuePPC64_OpPPC64ORN_0(v) case OpPPC64ORconst: @@ -470,7 +496,7 @@ func rewriteValuePPC64(v *Value) bool { case OpPPC64SUB: return rewriteValuePPC64_OpPPC64SUB_0(v) case OpPPC64XOR: - return rewriteValuePPC64_OpPPC64XOR_0(v) + return rewriteValuePPC64_OpPPC64XOR_0(v) || rewriteValuePPC64_OpPPC64XOR_10(v) case 
OpPPC64XORconst: return rewriteValuePPC64_OpPPC64XORconst_0(v) case OpPopCount16: @@ -583,6 +609,8 @@ func rewriteValuePPC64(v *Value) bool { return rewriteValuePPC64_OpSub8_0(v) case OpSubPtr: return rewriteValuePPC64_OpSubPtr_0(v) + case OpTrunc: + return rewriteValuePPC64_OpTrunc_0(v) case OpTrunc16to8: return rewriteValuePPC64_OpTrunc16to8_0(v) case OpTrunc32to16: @@ -620,6 +648,17 @@ func rewriteValuePPC64(v *Value) bool { } return false } +func rewriteValuePPC64_OpAbs_0(v *Value) bool { + // match: (Abs x) + // cond: + // result: (FABS x) + for { + x := v.Args[0] + v.reset(OpPPC64FABS) + v.AddArg(x) + return true + } +} func rewriteValuePPC64_OpAdd16_0(v *Value) bool { // match: (Add16 x y) // cond: @@ -1070,6 +1109,17 @@ func rewriteValuePPC64_OpBitLen64_0(v *Value) bool { return true } } +func rewriteValuePPC64_OpCeil_0(v *Value) bool { + // match: (Ceil x) + // cond: + // result: (FCEIL x) + for { + x := v.Args[0] + v.reset(OpPPC64FCEIL) + v.AddArg(x) + return true + } +} func rewriteValuePPC64_OpClosureCall_0(v *Value) bool { // match: (ClosureCall [argwid] entry closure mem) // cond: @@ -1239,6 +1289,20 @@ func rewriteValuePPC64_OpConvert_0(v *Value) bool { return true } } +func rewriteValuePPC64_OpCopysign_0(v *Value) bool { + // match: (Copysign x y) + // cond: + // result: (FCPSGN y x) + for { + _ = v.Args[1] + x := v.Args[0] + y := v.Args[1] + v.reset(OpPPC64FCPSGN) + v.AddArg(y) + v.AddArg(x) + return true + } +} func rewriteValuePPC64_OpCtz32_0(v *Value) bool { b := v.Block _ = b @@ -1290,10 +1354,10 @@ func rewriteValuePPC64_OpCvt32Fto32_0(v *Value) bool { _ = typ // match: (Cvt32Fto32 x) // cond: - // result: (Xf2i64 (FCTIWZ x)) + // result: (MFVSRD (FCTIWZ x)) for { x := v.Args[0] - v.reset(OpPPC64Xf2i64) + v.reset(OpPPC64MFVSRD) v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, typ.Float64) v0.AddArg(x) v.AddArg(v0) @@ -1307,10 +1371,10 @@ func rewriteValuePPC64_OpCvt32Fto64_0(v *Value) bool { _ = typ // match: (Cvt32Fto64 x) // cond: - // result: 
(Xf2i64 (FCTIDZ x)) + // result: (MFVSRD (FCTIDZ x)) for { x := v.Args[0] - v.reset(OpPPC64Xf2i64) + v.reset(OpPPC64MFVSRD) v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, typ.Float64) v0.AddArg(x) v.AddArg(v0) @@ -1336,15 +1400,13 @@ func rewriteValuePPC64_OpCvt32to32F_0(v *Value) bool { _ = typ // match: (Cvt32to32F x) // cond: - // result: (FRSP (FCFID (Xi2f64 (SignExt32to64 x)))) + // result: (FCFIDS (MTVSRD (SignExt32to64 x))) for { x := v.Args[0] - v.reset(OpPPC64FRSP) - v0 := b.NewValue0(v.Pos, OpPPC64FCFID, typ.Float64) - v1 := b.NewValue0(v.Pos, OpPPC64Xi2f64, typ.Float64) - v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) - v2.AddArg(x) - v1.AddArg(v2) + v.reset(OpPPC64FCFIDS) + v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(x) v0.AddArg(v1) v.AddArg(v0) return true @@ -1357,11 +1419,11 @@ func rewriteValuePPC64_OpCvt32to64F_0(v *Value) bool { _ = typ // match: (Cvt32to64F x) // cond: - // result: (FCFID (Xi2f64 (SignExt32to64 x))) + // result: (FCFID (MTVSRD (SignExt32to64 x))) for { x := v.Args[0] v.reset(OpPPC64FCFID) - v0 := b.NewValue0(v.Pos, OpPPC64Xi2f64, typ.Float64) + v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64) v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) v1.AddArg(x) v0.AddArg(v1) @@ -1376,10 +1438,10 @@ func rewriteValuePPC64_OpCvt64Fto32_0(v *Value) bool { _ = typ // match: (Cvt64Fto32 x) // cond: - // result: (Xf2i64 (FCTIWZ x)) + // result: (MFVSRD (FCTIWZ x)) for { x := v.Args[0] - v.reset(OpPPC64Xf2i64) + v.reset(OpPPC64MFVSRD) v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, typ.Float64) v0.AddArg(x) v.AddArg(v0) @@ -1404,10 +1466,10 @@ func rewriteValuePPC64_OpCvt64Fto64_0(v *Value) bool { _ = typ // match: (Cvt64Fto64 x) // cond: - // result: (Xf2i64 (FCTIDZ x)) + // result: (MFVSRD (FCTIDZ x)) for { x := v.Args[0] - v.reset(OpPPC64Xf2i64) + v.reset(OpPPC64MFVSRD) v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, typ.Float64) v0.AddArg(x) v.AddArg(v0) @@ -1421,14 
+1483,12 @@ func rewriteValuePPC64_OpCvt64to32F_0(v *Value) bool { _ = typ // match: (Cvt64to32F x) // cond: - // result: (FRSP (FCFID (Xi2f64 x))) + // result: (FCFIDS (MTVSRD x)) for { x := v.Args[0] - v.reset(OpPPC64FRSP) - v0 := b.NewValue0(v.Pos, OpPPC64FCFID, typ.Float64) - v1 := b.NewValue0(v.Pos, OpPPC64Xi2f64, typ.Float64) - v1.AddArg(x) - v0.AddArg(v1) + v.reset(OpPPC64FCFIDS) + v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64) + v0.AddArg(x) v.AddArg(v0) return true } @@ -1440,11 +1500,11 @@ func rewriteValuePPC64_OpCvt64to64F_0(v *Value) bool { _ = typ // match: (Cvt64to64F x) // cond: - // result: (FCFID (Xi2f64 x)) + // result: (FCFID (MTVSRD x)) for { x := v.Args[0] v.reset(OpPPC64FCFID) - v0 := b.NewValue0(v.Pos, OpPPC64Xi2f64, typ.Float64) + v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64) v0.AddArg(x) v.AddArg(v0) return true @@ -1823,6 +1883,17 @@ func rewriteValuePPC64_OpEqPtr_0(v *Value) bool { return true } } +func rewriteValuePPC64_OpFloor_0(v *Value) bool { + // match: (Floor x) + // cond: + // result: (FFLOOR x) + for { + x := v.Args[0] + v.reset(OpPPC64FFLOOR) + v.AddArg(x) + return true + } +} func rewriteValuePPC64_OpGeq16_0(v *Value) bool { b := v.Block _ = b @@ -2027,6 +2098,15 @@ func rewriteValuePPC64_OpGeq8U_0(v *Value) bool { return true } } +func rewriteValuePPC64_OpGetCallerSP_0(v *Value) bool { + // match: (GetCallerSP) + // cond: + // result: (LoweredGetCallerSP) + for { + v.reset(OpPPC64LoweredGetCallerSP) + return true + } +} func rewriteValuePPC64_OpGetClosurePtr_0(v *Value) bool { // match: (GetClosurePtr) // cond: @@ -3298,6 +3378,85 @@ func rewriteValuePPC64_OpLsh32x64_0(v *Value) bool { v.AddArg(x) return true } + // match: (Lsh32x64 x (AND y (MOVDconst [31]))) + // cond: + // result: (SLW x (ANDconst [31] y)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64AND { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64MOVDconst { + break + } 
+ if v_1_1.AuxInt != 31 { + break + } + v.reset(OpPPC64SLW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) + v0.AuxInt = 31 + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Lsh32x64 x (AND (MOVDconst [31]) y)) + // cond: + // result: (SLW x (ANDconst [31] y)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64AND { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 31 { + break + } + y := v_1.Args[1] + v.reset(OpPPC64SLW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) + v0.AuxInt = 31 + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Lsh32x64 x (ANDconst [31] y)) + // cond: + // result: (SLW x (ANDconst [31] y)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64ANDconst { + break + } + if v_1.Type != typ.Int32 { + break + } + if v_1.AuxInt != 31 { + break + } + y := v_1.Args[0] + v.reset(OpPPC64SLW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) + v0.AuxInt = 31 + v0.AddArg(y) + v.AddArg(v0) + return true + } // match: (Lsh32x64 x y) // cond: // result: (SLW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] y)))) @@ -3501,6 +3660,85 @@ func rewriteValuePPC64_OpLsh64x64_0(v *Value) bool { v.AddArg(x) return true } + // match: (Lsh64x64 x (AND y (MOVDconst [63]))) + // cond: + // result: (SLD x (ANDconst [63] y)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64AND { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64MOVDconst { + break + } + if v_1_1.AuxInt != 63 { + break + } + v.reset(OpPPC64SLD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) + v0.AuxInt = 63 + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Lsh64x64 x (AND (MOVDconst [63]) y)) + // cond: + // result: (SLD x (ANDconst [63] y)) + for { + _ = v.Args[1] + x := v.Args[0] + 
v_1 := v.Args[1] + if v_1.Op != OpPPC64AND { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 63 { + break + } + y := v_1.Args[1] + v.reset(OpPPC64SLD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) + v0.AuxInt = 63 + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Lsh64x64 x (ANDconst [63] y)) + // cond: + // result: (SLD x (ANDconst [63] y)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64ANDconst { + break + } + if v_1.Type != typ.Int64 { + break + } + if v_1.AuxInt != 63 { + break + } + y := v_1.Args[0] + v.reset(OpPPC64SLD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) + v0.AuxInt = 63 + v0.AddArg(y) + v.AddArg(v0) + return true + } // match: (Lsh64x64 x y) // cond: // result: (SLD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] y)))) @@ -4678,6 +4916,10 @@ func rewriteValuePPC64_OpOrB_0(v *Value) bool { } } func rewriteValuePPC64_OpPPC64ADD_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (ADD (SLDconst x [c]) (SRDconst x [d])) // cond: d == 64-c // result: (ROTLconst [c] x) @@ -4786,6 +5028,258 @@ func rewriteValuePPC64_OpPPC64ADD_0(v *Value) bool { v.AddArg(x) return true } + // match: (ADD (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) + // cond: + // result: (ROTL x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpPPC64SLD { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64ANDconst { + break + } + if v_0_1.Type != typ.Int64 { + break + } + if v_0_1.AuxInt != 63 { + break + } + y := v_0_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SRD { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUB { + break + } + if v_1_1.Type != typ.UInt { + break + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if 
v_1_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_1_0.AuxInt != 64 { + break + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpPPC64ANDconst { + break + } + if v_1_1_1.Type != typ.UInt { + break + } + if v_1_1_1.AuxInt != 63 { + break + } + if y != v_1_1_1.Args[0] { + break + } + v.reset(OpPPC64ROTL) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADD (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) (SLD x (ANDconst [63] y))) + // cond: + // result: (ROTL x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRD { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64SUB { + break + } + if v_0_1.Type != typ.UInt { + break + } + _ = v_0_1.Args[1] + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpPPC64MOVDconst { + break + } + if v_0_1_0.AuxInt != 64 { + break + } + v_0_1_1 := v_0_1.Args[1] + if v_0_1_1.Op != OpPPC64ANDconst { + break + } + if v_0_1_1.Type != typ.UInt { + break + } + if v_0_1_1.AuxInt != 63 { + break + } + y := v_0_1_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SLD { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64ANDconst { + break + } + if v_1_1.Type != typ.Int64 { + break + } + if v_1_1.AuxInt != 63 { + break + } + if y != v_1_1.Args[0] { + break + } + v.reset(OpPPC64ROTL) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADD (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) + // cond: + // result: (ROTLW x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpPPC64SLW { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64ANDconst { + break + } + if v_0_1.Type != typ.Int32 { + break + } + if v_0_1.AuxInt != 31 { + break + } + y := v_0_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SRW { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUB { + break + } 
+ if v_1_1.Type != typ.UInt { + break + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_1_0.AuxInt != 32 { + break + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpPPC64ANDconst { + break + } + if v_1_1_1.Type != typ.UInt { + break + } + if v_1_1_1.AuxInt != 31 { + break + } + if y != v_1_1_1.Args[0] { + break + } + v.reset(OpPPC64ROTLW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (ADD (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) (SLW x (ANDconst [31] y))) + // cond: + // result: (ROTLW x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRW { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64SUB { + break + } + if v_0_1.Type != typ.UInt { + break + } + _ = v_0_1.Args[1] + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpPPC64MOVDconst { + break + } + if v_0_1_0.AuxInt != 32 { + break + } + v_0_1_1 := v_0_1.Args[1] + if v_0_1_1.Op != OpPPC64ANDconst { + break + } + if v_0_1_1.Type != typ.UInt { + break + } + if v_0_1_1.AuxInt != 31 { + break + } + y := v_0_1_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SLW { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64ANDconst { + break + } + if v_1_1.Type != typ.Int32 { + break + } + if v_1_1.AuxInt != 31 { + break + } + if y != v_1_1.Args[0] { + break + } + v.reset(OpPPC64ROTLW) + v.AddArg(x) + v.AddArg(y) + return true + } // match: (ADD x (MOVDconst [c])) // cond: is32Bit(c) // result: (ADDconst [c] x) @@ -5717,6 +6211,22 @@ func rewriteValuePPC64_OpPPC64Equal_0(v *Value) bool { } return false } +func rewriteValuePPC64_OpPPC64FABS_0(v *Value) bool { + // match: (FABS (FMOVDconst [x])) + // cond: + // result: (FMOVDconst [f2i(math.Abs(i2f(x)))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64FMOVDconst { + break + } + x := v_0.AuxInt + v.reset(OpPPC64FMOVDconst) + v.AuxInt = f2i(math.Abs(i2f(x))) + return true + } 
+ return false +} func rewriteValuePPC64_OpPPC64FADD_0(v *Value) bool { // match: (FADD (FMUL x y) z) // cond: @@ -5799,23 +6309,82 @@ func rewriteValuePPC64_OpPPC64FADDS_0(v *Value) bool { } return false } +func rewriteValuePPC64_OpPPC64FCEIL_0(v *Value) bool { + // match: (FCEIL (FMOVDconst [x])) + // cond: + // result: (FMOVDconst [f2i(math.Ceil(i2f(x)))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64FMOVDconst { + break + } + x := v_0.AuxInt + v.reset(OpPPC64FMOVDconst) + v.AuxInt = f2i(math.Ceil(i2f(x))) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64FFLOOR_0(v *Value) bool { + // match: (FFLOOR (FMOVDconst [x])) + // cond: + // result: (FMOVDconst [f2i(math.Floor(i2f(x)))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64FMOVDconst { + break + } + x := v_0.AuxInt + v.reset(OpPPC64FMOVDconst) + v.AuxInt = f2i(math.Floor(i2f(x))) + return true + } + return false +} func rewriteValuePPC64_OpPPC64FMOVDload_0(v *Value) bool { - // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) + // match: (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) + // cond: + // result: (MTVSRD x) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MOVDstore { + break + } + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + if ptr != v_1.Args[0] { + break + } + x := v_1.Args[1] + v.reset(OpPPC64MTVSRD) + v.AddArg(x) + return true + } + // match: (FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] + off2 := p.AuxInt + sym2 := p.Aux + ptr := 
p.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64FMOVDload) @@ -5852,6 +6421,28 @@ func rewriteValuePPC64_OpPPC64FMOVDload_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64FMOVDstore_0(v *Value) bool { + // match: (FMOVDstore [off] {sym} ptr (MTVSRD x) mem) + // cond: + // result: (MOVDstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MTVSRD { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpPPC64MOVDstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: is16Bit(off1+off2) // result: (FMOVDstore [off1+off2] {sym} ptr val mem) @@ -5878,23 +6469,23 @@ func rewriteValuePPC64_OpPPC64FMOVDstore_0(v *Value) bool { v.AddArg(mem) return true } - // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) + // match: (FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] + off2 := p.AuxInt + sym2 := p.Aux + ptr := p.Args[0] val := v.Args[1] mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64FMOVDstore) @@ -5908,22 +6499,22 @@ func rewriteValuePPC64_OpPPC64FMOVDstore_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64FMOVSload_0(v *Value) bool { - // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // 
cond: canMergeSym(sym1,sym2) + // match: (FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] + off2 := p.AuxInt + sym2 := p.Aux + ptr := p.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64FMOVSload) @@ -5986,23 +6577,23 @@ func rewriteValuePPC64_OpPPC64FMOVSstore_0(v *Value) bool { v.AddArg(mem) return true } - // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) + // match: (FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] + off2 := p.AuxInt + sym2 := p.Aux + ptr := p.Args[0] val := v.Args[1] mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64FMOVSstore) @@ -6015,6 +6606,51 @@ func rewriteValuePPC64_OpPPC64FMOVSstore_0(v *Value) bool { } return false } +func rewriteValuePPC64_OpPPC64FNEG_0(v *Value) bool { + // match: (FNEG (FABS x)) + // cond: + // result: (FNABS x) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64FABS { + break + } + x := v_0.Args[0] + v.reset(OpPPC64FNABS) + v.AddArg(x) + return true + } + // match: (FNEG (FNABS x)) + // cond: + // result: (FABS x) + for { + v_0 := v.Args[0] + 
if v_0.Op != OpPPC64FNABS { + break + } + x := v_0.Args[0] + v.reset(OpPPC64FABS) + v.AddArg(x) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64FSQRT_0(v *Value) bool { + // match: (FSQRT (FMOVDconst [x])) + // cond: + // result: (FMOVDconst [f2i(math.Sqrt(i2f(x)))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64FMOVDconst { + break + } + x := v_0.AuxInt + v.reset(OpPPC64FMOVDconst) + v.AuxInt = f2i(math.Sqrt(i2f(x))) + return true + } + return false +} func rewriteValuePPC64_OpPPC64FSUB_0(v *Value) bool { // match: (FSUB (FMUL x y) z) // cond: @@ -6059,6 +6695,22 @@ func rewriteValuePPC64_OpPPC64FSUBS_0(v *Value) bool { } return false } +func rewriteValuePPC64_OpPPC64FTRUNC_0(v *Value) bool { + // match: (FTRUNC (FMOVDconst [x])) + // cond: + // result: (FMOVDconst [f2i(math.Trunc(i2f(x)))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64FMOVDconst { + break + } + x := v_0.AuxInt + v.reset(OpPPC64FMOVDconst) + v.AuxInt = f2i(math.Trunc(i2f(x))) + return true + } + return false +} func rewriteValuePPC64_OpPPC64GreaterEqual_0(v *Value) bool { // match: (GreaterEqual (FlagEQ)) // cond: @@ -6267,23 +6919,69 @@ func rewriteValuePPC64_OpPPC64LessThan_0(v *Value) bool { } return false } +func rewriteValuePPC64_OpPPC64MFVSRD_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (MFVSRD (FMOVDconst [c])) + // cond: + // result: (MOVDconst [c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64FMOVDconst { + break + } + c := v_0.AuxInt + v.reset(OpPPC64MOVDconst) + v.AuxInt = c + return true + } + // match: (MFVSRD x:(FMOVDload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVDload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpPPC64FMOVDload { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[1] + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, typ.Int64) + 
v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + return false +} func rewriteValuePPC64_OpPPC64MOVBZload_0(v *Value) bool { - // match: (MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) + // match: (MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] + off2 := p.AuxInt + sym2 := p.Aux + ptr := p.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVBZload) @@ -6452,6 +7150,10 @@ func rewriteValuePPC64_OpPPC64MOVBreg_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config // match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) // cond: is16Bit(off1+off2) // result: (MOVBstore [off1+off2] {sym} x val mem) @@ -6478,23 +7180,23 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) + // match: (MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] + off2 := p.AuxInt + sym2 := p.Aux + ptr := p.Args[0] 
val := v.Args[1] mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVBstore) @@ -6573,6 +7275,315 @@ func rewriteValuePPC64_OpPPC64MOVBstore_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVBstore [i1] {s} p (SRWconst (MOVHZreg w) [8]) x0:(MOVBstore [i0] {s} p w mem)) + // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0) + // result: (MOVHstore [i0] {s} p w mem) + for { + i1 := v.AuxInt + s := v.Aux + _ = v.Args[2] + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SRWconst { + break + } + if v_1.AuxInt != 8 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVHZreg { + break + } + w := v_1_0.Args[0] + x0 := v.Args[2] + if x0.Op != OpPPC64MOVBstore { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[2] + if p != x0.Args[0] { + break + } + if w != x0.Args[1] { + break + } + mem := x0.Args[2] + if !(!config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)) { + break + } + v.reset(OpPPC64MOVHstore) + v.AuxInt = i0 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstore [i3] {s} p (SRWconst w [24]) x0:(MOVBstore [i2] {s} p (SRWconst w [16]) x1:(MOVBstore [i1] {s} p (SRWconst w [8]) x2:(MOVBstore [i0] {s} p w mem)))) + // cond: !config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && clobber(x0) && clobber(x1) && clobber(x2) + // result: (MOVWstore [i0] {s} p w mem) + for { + i3 := v.AuxInt + s := v.Aux + _ = v.Args[2] + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SRWconst { + break + } + if v_1.AuxInt != 24 { + break + } + w := v_1.Args[0] + x0 := v.Args[2] + if x0.Op != OpPPC64MOVBstore { + break + } + i2 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[2] + if p != x0.Args[0] { + break + } + x0_1 := x0.Args[1] + if x0_1.Op != OpPPC64SRWconst { + break + } + if x0_1.AuxInt != 16 { 
+ break + } + if w != x0_1.Args[0] { + break + } + x1 := x0.Args[2] + if x1.Op != OpPPC64MOVBstore { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[2] + if p != x1.Args[0] { + break + } + x1_1 := x1.Args[1] + if x1_1.Op != OpPPC64SRWconst { + break + } + if x1_1.AuxInt != 8 { + break + } + if w != x1_1.Args[0] { + break + } + x2 := x1.Args[2] + if x2.Op != OpPPC64MOVBstore { + break + } + i0 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[2] + if p != x2.Args[0] { + break + } + if w != x2.Args[1] { + break + } + mem := x2.Args[2] + if !(!config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && clobber(x0) && clobber(x1) && clobber(x2)) { + break + } + v.reset(OpPPC64MOVWstore) + v.AuxInt = i0 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) + return true + } + // match: (MOVBstore [i7] {s} p (SRDconst w [56]) x0:(MOVBstore [i6] {s} p (SRDconst w [48]) x1:(MOVBstore [i5] {s} p (SRDconst w [40]) x2:(MOVBstore [i4] {s} p (SRDconst w [32]) x3:(MOVBstore [i3] {s} p (SRDconst w [24]) x4:(MOVBstore [i2] {s} p (SRDconst w [16]) x5:(MOVBstore [i1] {s} p (SRDconst w [8]) x6:(MOVBstore [i0] {s} p w mem)))))))) + // cond: !config.BigEndian && i0%4 == 0 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) + // result: (MOVDstore [i0] {s} p w mem) + for { + i7 := v.AuxInt + s := v.Aux + _ = v.Args[2] + p := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SRDconst { + break + } + if v_1.AuxInt != 56 { + break + } + w := v_1.Args[0] + x0 := v.Args[2] + if x0.Op != OpPPC64MOVBstore { + break + } + i6 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[2] + if p != x0.Args[0] { + break + } + x0_1 := x0.Args[1] + if 
x0_1.Op != OpPPC64SRDconst { + break + } + if x0_1.AuxInt != 48 { + break + } + if w != x0_1.Args[0] { + break + } + x1 := x0.Args[2] + if x1.Op != OpPPC64MOVBstore { + break + } + i5 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[2] + if p != x1.Args[0] { + break + } + x1_1 := x1.Args[1] + if x1_1.Op != OpPPC64SRDconst { + break + } + if x1_1.AuxInt != 40 { + break + } + if w != x1_1.Args[0] { + break + } + x2 := x1.Args[2] + if x2.Op != OpPPC64MOVBstore { + break + } + i4 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[2] + if p != x2.Args[0] { + break + } + x2_1 := x2.Args[1] + if x2_1.Op != OpPPC64SRDconst { + break + } + if x2_1.AuxInt != 32 { + break + } + if w != x2_1.Args[0] { + break + } + x3 := x2.Args[2] + if x3.Op != OpPPC64MOVBstore { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[2] + if p != x3.Args[0] { + break + } + x3_1 := x3.Args[1] + if x3_1.Op != OpPPC64SRDconst { + break + } + if x3_1.AuxInt != 24 { + break + } + if w != x3_1.Args[0] { + break + } + x4 := x3.Args[2] + if x4.Op != OpPPC64MOVBstore { + break + } + i2 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[2] + if p != x4.Args[0] { + break + } + x4_1 := x4.Args[1] + if x4_1.Op != OpPPC64SRDconst { + break + } + if x4_1.AuxInt != 16 { + break + } + if w != x4_1.Args[0] { + break + } + x5 := x4.Args[2] + if x5.Op != OpPPC64MOVBstore { + break + } + i1 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[2] + if p != x5.Args[0] { + break + } + x5_1 := x5.Args[1] + if x5_1.Op != OpPPC64SRDconst { + break + } + if x5_1.AuxInt != 8 { + break + } + if w != x5_1.Args[0] { + break + } + x6 := x5.Args[2] + if x6.Op != OpPPC64MOVBstore { + break + } + i0 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[2] + if p != x6.Args[0] { + break + } + if w != x6.Args[1] { + break + } + mem := x6.Args[2] + if !(!config.BigEndian && i0%4 == 0 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 
&& x6.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6)) { + break + } + v.reset(OpPPC64MOVDstore) + v.AuxInt = i0 + v.Aux = s + v.AddArg(p) + v.AddArg(w) + v.AddArg(mem) + return true + } return false } func rewriteValuePPC64_OpPPC64MOVBstorezero_0(v *Value) bool { @@ -6600,22 +7611,22 @@ func rewriteValuePPC64_OpPPC64MOVBstorezero_0(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) - // cond: canMergeSym(sym1,sym2) + // match: (MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) + // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1) // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - x := v_0.Args[0] + off2 := p.AuxInt + sym2 := p.Aux + x := p.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVBstorezero) @@ -6628,22 +7639,49 @@ func rewriteValuePPC64_OpPPC64MOVBstorezero_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVDload_0(v *Value) bool { - // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) + // match: (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) + // cond: + // result: (MFVSRD x) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64FMOVDstore { + break + } + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + if ptr != v_1.Args[0] { + break + } + x := v_1.Args[1] + v.reset(OpPPC64MFVSRD) + v.AddArg(x) + return true + 
} + // match: (MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] + off2 := p.AuxInt + sym2 := p.Aux + ptr := p.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVDload) @@ -6680,6 +7718,28 @@ func rewriteValuePPC64_OpPPC64MOVDload_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool { + // match: (MOVDstore [off] {sym} ptr (MFVSRD x) mem) + // cond: + // result: (FMOVDstore [off] {sym} ptr x mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64MFVSRD { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpPPC64FMOVDstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(x) + v.AddArg(mem) + return true + } // match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) // cond: is16Bit(off1+off2) // result: (MOVDstore [off1+off2] {sym} x val mem) @@ -6706,23 +7766,23 @@ func rewriteValuePPC64_OpPPC64MOVDstore_0(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) + // match: (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] + 
off2 := p.AuxInt + sym2 := p.Aux + ptr := p.Args[0] val := v.Args[1] mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVDstore) @@ -6784,22 +7844,22 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero_0(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) - // cond: canMergeSym(sym1,sym2) + // match: (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) + // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1) // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - x := v_0.Args[0] + off2 := p.AuxInt + sym2 := p.Aux + x := p.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVDstorezero) @@ -6812,22 +7872,22 @@ func rewriteValuePPC64_OpPPC64MOVDstorezero_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVHZload_0(v *Value) bool { - // match: (MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) + // match: (MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] + off2 := p.AuxInt + sym2 := p.Aux + ptr := p.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVHZload) @@ -6950,22 +8010,22 
@@ func rewriteValuePPC64_OpPPC64MOVHZreg_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVHload_0(v *Value) bool { - // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) + // match: (MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] + off2 := p.AuxInt + sym2 := p.Aux + ptr := p.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVHload) @@ -7114,23 +8174,23 @@ func rewriteValuePPC64_OpPPC64MOVHstore_0(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) + // match: (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] + off2 := p.AuxInt + sym2 := p.Aux + ptr := p.Args[0] val := v.Args[1] mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVHstore) @@ -7236,22 +8296,22 @@ func rewriteValuePPC64_OpPPC64MOVHstorezero_0(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) - // cond: canMergeSym(sym1,sym2) + // match: (MOVHstorezero 
[off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) + // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1) // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - x := v_0.Args[0] + off2 := p.AuxInt + sym2 := p.Aux + x := p.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVHstorezero) @@ -7264,22 +8324,22 @@ func rewriteValuePPC64_OpPPC64MOVHstorezero_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVWZload_0(v *Value) bool { - // match: (MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) + // match: (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] + off2 := p.AuxInt + sym2 := p.Aux + ptr := p.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVWZload) @@ -7432,22 +8492,22 @@ func rewriteValuePPC64_OpPPC64MOVWZreg_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64MOVWload_0(v *Value) bool { - // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) + // match: (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { off1 := v.AuxInt sym1 
:= v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] + off2 := p.AuxInt + sym2 := p.Aux + ptr := p.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVWload) @@ -7626,23 +8686,23 @@ func rewriteValuePPC64_OpPPC64MOVWstore_0(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) + // match: (MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && (ptr.Op != OpSB || p.Uses == 1) // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[2] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] + off2 := p.AuxInt + sym2 := p.Aux + ptr := p.Args[0] val := v.Args[1] mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (ptr.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVWstore) @@ -7748,22 +8808,22 @@ func rewriteValuePPC64_OpPPC64MOVWstorezero_0(v *Value) bool { v.AddArg(mem) return true } - // match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} x) mem) - // cond: canMergeSym(sym1,sym2) + // match: (MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) + // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1) // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem) for { off1 := v.AuxInt sym1 := v.Aux _ = v.Args[1] - v_0 := v.Args[0] - if v_0.Op != OpPPC64MOVDaddr { + p := v.Args[0] + if p.Op != OpPPC64MOVDaddr { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - x := v_0.Args[0] + off2 := p.AuxInt + sym2 := p.Aux + x := p.Args[0] mem := v.Args[1] - if 
!(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) { break } v.reset(OpPPC64MOVWstorezero) @@ -7775,6 +8835,52 @@ func rewriteValuePPC64_OpPPC64MOVWstorezero_0(v *Value) bool { } return false } +func rewriteValuePPC64_OpPPC64MTVSRD_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ + // match: (MTVSRD (MOVDconst [c])) + // cond: + // result: (FMOVDconst [c]) + for { + v_0 := v.Args[0] + if v_0.Op != OpPPC64MOVDconst { + break + } + c := v_0.AuxInt + v.reset(OpPPC64FMOVDconst) + v.AuxInt = c + return true + } + // match: (MTVSRD x:(MOVDload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (FMOVDload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpPPC64MOVDload { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[1] + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpPPC64FMOVDload, typ.Float64) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + return false +} func rewriteValuePPC64_OpPPC64MaskIfNotCarry_0(v *Value) bool { // match: (MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) // cond: c < 0 && d > 0 && c + d < 0 @@ -7852,6 +8958,10 @@ func rewriteValuePPC64_OpPPC64NotEqual_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (OR (SLDconst x [c]) (SRDconst x [d])) // cond: d == 64-c // result: (ROTLconst [c] x) @@ -7960,6 +9070,258 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { v.AddArg(x) return true } + // match: (OR (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) + // cond: + // result: (ROTL x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpPPC64SLD { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if 
v_0_1.Op != OpPPC64ANDconst { + break + } + if v_0_1.Type != typ.Int64 { + break + } + if v_0_1.AuxInt != 63 { + break + } + y := v_0_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SRD { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUB { + break + } + if v_1_1.Type != typ.UInt { + break + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_1_0.AuxInt != 64 { + break + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpPPC64ANDconst { + break + } + if v_1_1_1.Type != typ.UInt { + break + } + if v_1_1_1.AuxInt != 63 { + break + } + if y != v_1_1_1.Args[0] { + break + } + v.reset(OpPPC64ROTL) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (OR (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) (SLD x (ANDconst [63] y))) + // cond: + // result: (ROTL x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRD { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64SUB { + break + } + if v_0_1.Type != typ.UInt { + break + } + _ = v_0_1.Args[1] + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpPPC64MOVDconst { + break + } + if v_0_1_0.AuxInt != 64 { + break + } + v_0_1_1 := v_0_1.Args[1] + if v_0_1_1.Op != OpPPC64ANDconst { + break + } + if v_0_1_1.Type != typ.UInt { + break + } + if v_0_1_1.AuxInt != 63 { + break + } + y := v_0_1_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SLD { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64ANDconst { + break + } + if v_1_1.Type != typ.Int64 { + break + } + if v_1_1.AuxInt != 63 { + break + } + if y != v_1_1.Args[0] { + break + } + v.reset(OpPPC64ROTL) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (OR (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) + // cond: + // result: (ROTLW x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op 
!= OpPPC64SLW { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64ANDconst { + break + } + if v_0_1.Type != typ.Int32 { + break + } + if v_0_1.AuxInt != 31 { + break + } + y := v_0_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SRW { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUB { + break + } + if v_1_1.Type != typ.UInt { + break + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_1_0.AuxInt != 32 { + break + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpPPC64ANDconst { + break + } + if v_1_1_1.Type != typ.UInt { + break + } + if v_1_1_1.AuxInt != 31 { + break + } + if y != v_1_1_1.Args[0] { + break + } + v.reset(OpPPC64ROTLW) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (OR (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) (SLW x (ANDconst [31] y))) + // cond: + // result: (ROTLW x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRW { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64SUB { + break + } + if v_0_1.Type != typ.UInt { + break + } + _ = v_0_1.Args[1] + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpPPC64MOVDconst { + break + } + if v_0_1_0.AuxInt != 32 { + break + } + v_0_1_1 := v_0_1.Args[1] + if v_0_1_1.Op != OpPPC64ANDconst { + break + } + if v_0_1_1.Type != typ.UInt { + break + } + if v_0_1_1.AuxInt != 31 { + break + } + y := v_0_1_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SLW { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64ANDconst { + break + } + if v_1_1.Type != typ.Int32 { + break + } + if v_1_1.AuxInt != 31 { + break + } + if y != v_1_1.Args[0] { + break + } + v.reset(OpPPC64ROTLW) + v.AddArg(x) + v.AddArg(y) + return true + } // match: (OR (MOVDconst [c]) (MOVDconst [d])) // cond: // result: (MOVDconst [c|d]) @@ 
-7998,6 +9360,13 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { v.AuxInt = c | d return true } + return false +} +func rewriteValuePPC64_OpPPC64OR_10(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config // match: (OR x (MOVDconst [c])) // cond: isU32Bit(c) // result: (ORconst [c] x) @@ -8036,6 +9405,29957 @@ func rewriteValuePPC64_OpPPC64OR_0(v *Value) bool { v.AddArg(x) return true } + // match: (OR x0:(MOVBZload [i0] {s} p mem) o1:(SLWconst x1:(MOVBZload [i1] {s} p mem) [8])) + // cond: !config.BigEndian && i1 == i0+1 && x0.Uses ==1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1) + // result: @mergePoint(b,x0,x1) (MOVHZload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + x0 := v.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + s := x0.Aux + _ = x0.Args[1] + p := x0.Args[0] + mem := x0.Args[1] + o1 := v.Args[1] + if o1.Op != OpPPC64SLWconst { + break + } + if o1.AuxInt != 8 { + break + } + x1 := o1.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + if !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1)) { + break + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o1:(SLWconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) + // cond: !config.BigEndian && i1 == i0+1 && x0.Uses ==1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1) + // result: @mergePoint(b,x0,x1) (MOVHZload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o1 := v.Args[0] + if o1.Op != 
OpPPC64SLWconst { + break + } + if o1.AuxInt != 8 { + break + } + x1 := o1.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + s := x1.Aux + _ = x1.Args[1] + p := x1.Args[0] + mem := x1.Args[1] + x0 := v.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + if !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0) && clobber(x1) && clobber(o1)) { + break + } + b = mergePoint(b, x0, x1) + v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s1:(SLWconst x2:(MOVBZload [i3] {s} p mem) [24]) o0:(OR s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [16]) x0:(MOVHZload [i0] {s} p mem))) + // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses ==1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (MOVWZload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s1 := v.Args[0] + if s1.Op != OpPPC64SLWconst { + break + } + if s1.AuxInt != 24 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i3 := x2.AuxInt + s := x2.Aux + _ = x2.Args[1] + p := x2.Args[0] + mem := x2.Args[1] + o0 := v.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLWconst { + break + } + if s0.AuxInt != 16 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i2 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op 
!= OpPPC64MOVHZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + if !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + break + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s1:(SLWconst x2:(MOVBZload [i3] {s} p mem) [24]) o0:(OR x0:(MOVHZload [i0] {s} p mem) s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [16]))) + // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses ==1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (MOVWZload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s1 := v.Args[0] + if s1.Op != OpPPC64SLWconst { + break + } + if s1.AuxInt != 24 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i3 := x2.AuxInt + s := x2.Aux + _ = x2.Args[1] + p := x2.Args[0] + mem := x2.Args[1] + o0 := v.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVHZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLWconst { + break + } + if s0.AuxInt != 16 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i2 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + 
break + } + if mem != x1.Args[1] { + break + } + if !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + break + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o0:(OR s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [16]) x0:(MOVHZload [i0] {s} p mem)) s1:(SLWconst x2:(MOVBZload [i3] {s} p mem) [24])) + // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses ==1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (MOVWZload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o0 := v.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLWconst { + break + } + if s0.AuxInt != 16 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i2 := x1.AuxInt + s := x1.Aux + _ = x1.Args[1] + p := x1.Args[0] + mem := x1.Args[1] + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVHZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := v.Args[1] + if s1.Op != OpPPC64SLWconst { + break + } + if s1.AuxInt != 24 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i3 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + if !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses 
== 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + break + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o0:(OR x0:(MOVHZload [i0] {s} p mem) s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [16])) s1:(SLWconst x2:(MOVBZload [i3] {s} p mem) [24])) + // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses ==1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0) + // result: @mergePoint(b,x0,x1,x2) (MOVWZload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o0 := v.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVHZload { + break + } + i0 := x0.AuxInt + s := x0.Aux + _ = x0.Args[1] + p := x0.Args[0] + mem := x0.Args[1] + s0 := o0.Args[1] + if s0.Op != OpPPC64SLWconst { + break + } + if s0.AuxInt != 16 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i2 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := v.Args[1] + if s1.Op != OpPPC64SLWconst { + break + } + if s1.AuxInt != 24 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i3 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + if !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0) && 
clobber(x1) && clobber(x2) && clobber(s0) && clobber(s1) && clobber(o0)) { + break + } + b = mergePoint(b, x0, x1, x2) + v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)))))))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != 
t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR 
{ + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload 
[i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])))))))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break 
+ } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 
&& i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64OR_20(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))))))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses 
== 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := 
o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) 
&& clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))))))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + 
if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != 
OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] 
{s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])))))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] 
+ s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := 
x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])))))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 
&& s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p 
!= x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, 
x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])))))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != 
OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] 
{ + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR 
s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])))))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] 
+ if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + 
break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 
1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != 
OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses 
== 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload 
{s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if 
mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return 
true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != 
x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s3 
:= o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && 
x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if 
o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 
1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64OR_30(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && 
clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + 
if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + 
break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if 
s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + 
break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] 
{s} p mem) [32]))))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + 
break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 
== i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) 
&& clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem 
!= x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && 
clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break 
+ } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = 
x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) 
s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if 
s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == 
i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && 
clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } 
+ o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && 
clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] 
+ p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = 
x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) 
o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + 
} + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + 
} + if mem != x5.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses 
== 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := 
s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && 
clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64OR_40(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + 
_ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + 
s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst 
x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != 
x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != 
OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && 
x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { 
+ break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 
1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: 
@mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } 
+ _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + 
v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if 
x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + 
break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && 
x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + 
} + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 
&& o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) 
&& clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != 
OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := 
b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break 
+ } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } 
+ i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR o2:(OR o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == 
i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := 
o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 
&& x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR o2:(OR o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && 
clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != 
OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && 
clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64OR_50(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)))))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := 
x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != 
OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR 
s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])))))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + 
} + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + 
} + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses 
== 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = 
x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) 
&& clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != 
OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != 
OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst 
x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { 
+ break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } 
+ i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && 
o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := 
o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, 
x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op 
!= OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != 
OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) 
[56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { 
+ break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if 
s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && 
x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] 
+ if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 
1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) 
(MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { 
+ break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) 
+ return true + } + return false +} +func rewriteValuePPC64_OpPPC64OR_60(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = 
o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + 
i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 
&& i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if 
x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && 
x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) 
&& clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { 
+ break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + 
break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if 
o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + 
break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] 
{s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + 
break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == 
i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && 
clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != 
x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && 
clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break 
+ } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s4 := 
o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))))) 
s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type 
!= t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == 
i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && 
clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + 
break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && 
clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] 
+ p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = 
x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64OR_70(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload 
[i4] {s} p mem) [32]) o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload 
{ + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op 
!= OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && 
o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != 
x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses 
== 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = 
v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 
:= o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst 
x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } 
+ if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != 
OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && 
x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + 
if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 
1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: 
@mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + 
_ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + 
v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + 
break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + 
break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && 
x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = 
o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && 
o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && 
clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op 
!= OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := 
b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = 
o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + 
i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64OR_80(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR o2:(OR o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst 
x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + 
if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == 
i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR o4:(OR o3:(OR o2:(OR o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]))) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && 
clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + s6 := v.Args[0] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + s := x7.Aux + _ = x7.Args[1] + p := x7.Args[0] + mem := x7.Args[1] + o5 := v.Args[1] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + 
break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && 
clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))))))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + 
if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != 
OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst 
x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))))))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = 
o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if 
mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])))))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 
&& s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = 
x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) 
&& clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])))))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if 
s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != 
OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload 
[i4] {s} p mem) [32]) o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + 
o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := 
x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 
&& s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + 
if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, 
x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR 
{ + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + 
break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR 
s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = 
x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + 
break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64OR_90(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && 
x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break 
+ } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 
1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) 
&& clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != 
OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := 
b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ 
= o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } 
+ i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 
&& i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + 
if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && 
x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) 
&& clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] 
{ + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + 
break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if 
o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + 
break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])))) s6:(SLDconst x7:(MOVBZload [i7] 
{s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + 
break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == 
i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && 
clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != 
x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && 
clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + 
_ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s4 := 
o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))))) 
s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type 
!= t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 
&& i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64OR_100(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 
1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if 
o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && 
clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { 
+ break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + 
break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] 
{s} p mem) [32]) o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + 
break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != 
OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && 
o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != 
x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses 
== 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = 
v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 
:= o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst 
x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if 
s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst 
{ + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && 
x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s 
{ + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 
1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: 
@mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } 
+ _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + 
v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + 
break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + 
break + } + if mem != x5.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && 
x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = 
o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && 
o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64OR_110(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && 
clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + 
break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && 
clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] 
+ p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op 
!= OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR o2:(OR o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst 
x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + 
if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == 
i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR o3:(OR o2:(OR o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]))) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && 
clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + s5 := o5.Args[0] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + s := x6.Aux + _ = x6.Args[1] + p := x6.Args[0] + mem := x6.Args[1] + o4 := o5.Args[1] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + 
break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && 
clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)))))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := 
o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + s := x5.Aux + _ = x5.Args[1] + p := x5.Args[0] + mem := x5.Args[1] + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if 
p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR 
x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])))))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + s := x5.Aux + _ = x5.Args[1] + p := x5.Args[0] + mem := x5.Args[1] + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { 
+ break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if 
mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 
&& s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + s := x5.Aux + _ = x5.Args[1] + p := x5.Args[0] + mem := x5.Args[1] + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := 
o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && 
clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op 
!= OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + s := x5.Aux + _ = x5.Args[1] + p := x5.Args[0] + mem := x5.Args[1] + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { 
+ break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] 
{s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + s := x5.Aux + _ = x5.Args[1] + p := x5.Args[0] + mem := x5.Args[1] + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ 
= x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s 
{ + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses 
== 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + s := x5.Aux + _ = x5.Args[1] + p := x5.Args[0] + mem := x5.Args[1] + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + 
break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && 
clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64OR_120(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: 
@mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + s := x5.Aux + _ = x5.Args[1] + p := x5.Args[0] + mem := x5.Args[1] + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } 
+ _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + 
v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + s := x5.Aux + _ = x5.Args[1] + p := x5.Args[0] + mem := x5.Args[1] + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + 
break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + 
break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && 
x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + s := x5.Aux + _ = x5.Args[1] + p := x5.Args[0] + mem := x5.Args[1] + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + 
x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 
1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) 
&& clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + s := x5.Aux + _ = x5.Args[1] + p := x5.Args[0] + mem := x5.Args[1] + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != 
OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := 
b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + s := x5.Aux + _ = x5.Args[1] + p := x5.Args[0] + mem 
:= x5.Args[1] + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + 
} + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == 
i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + s := x5.Aux + _ = x5.Args[1] + p := x5.Args[0] + mem := x5.Args[1] + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := 
o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 
&& x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && 
clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + s := x5.Aux + _ = x5.Args[1] + p := x5.Args[0] + mem := x5.Args[1] + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != 
OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && 
clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { 
+ break + } + i5 := x5.AuxInt + s := x5.Aux + _ = x5.Args[1] + p := x5.Args[0] + mem := x5.Args[1] + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if 
s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) 
s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + s := x5.Aux + _ = x5.Args[1] + p := x5.Args[0] + mem := x5.Args[1] + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 
:= s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 
== i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR o2:(OR o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]))) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) 
&& clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + s4 := o4.Args[0] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + s := x5.Aux + _ = x5.Args[1] + p := x5.Args[0] + mem := x5.Args[1] + o3 := o4.Args[1] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != 
x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) 
&& clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64OR_130(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (OR o5:(OR o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if 
o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + s := x4.Aux + _ = x4.Args[1] + p := x4.Args[0] + mem := x4.Args[1] + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != 
OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst 
x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + s := x4.Aux + _ = x4.Args[1] + p := x4.Args[0] + mem := x4.Args[1] + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + 
break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := 
x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 
&& s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + s := x4.Aux + _ = x4.Args[1] + p := x4.Args[0] + mem := x4.Args[1] + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = 
x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, 
x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR 
{ + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + s := x4.Aux + _ = x4.Args[1] + p := x4.Args[0] + mem := x4.Args[1] + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + 
break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) 
[32]) o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + s := x4.Aux + _ = x4.Args[1] + p := x4.Args[0] + mem := x4.Args[1] + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := 
o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + 
} + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && 
o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + s := x4.Aux + _ = x4.Args[1] + p := x4.Args[0] + mem := x4.Args[1] + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p 
!= x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses 
== 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + 
for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + s := x4.Aux + _ = x4.Args[1] + p := x4.Args[0] + mem := x4.Args[1] + o2 := o3.Args[1] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != 
x3.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // 
match: (OR o5:(OR o4:(OR o3:(OR s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) o2:(OR o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]))) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + s3 := o3.Args[0] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + s := x4.Aux + _ = x4.Args[1] + p := x4.Args[0] + mem := x4.Args[1] + o2 := o3.Args[1] + if o2.Op != 
OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := 
v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 
1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + s := x3.Aux + _ = x3.Args[1] + p := x3.Args[0] + mem := x3.Args[1] + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + 
break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && 
o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && 
clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + s := x3.Aux + _ = x3.Args[1] + p := x3.Args[0] + mem := x3.Args[1] + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if 
x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + 
v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValuePPC64_OpPPC64OR_140(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (OR o5:(OR o4:(OR o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + 
} + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + s := x3.Aux + _ = x3.Args[1] + p := x3.Args[0] + mem := x3.Args[1] + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { 
+ break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR o3:(OR o2:(OR s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24]) o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]))) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload 
[i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + s2 := o2.Args[0] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + s := x3.Aux + _ = x3.Args[1] + p := x3.Args[0] + mem := x3.Args[1] + o1 := o2.Args[1] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] 
+ if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 
== i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR o3:(OR o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) 
&& clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + s := x2.Aux + _ = x2.Args[1] + p := x2.Args[0] + mem := x2.Args[1] + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem 
!= x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && 
clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR o3:(OR o2:(OR o1:(OR s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16]) o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } 
+ _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + s1 := o1.Args[0] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + s := x2.Aux + _ = x2.Args[1] + p := x2.Args[0] + mem := x2.Args[1] + o0 := o1.Args[1] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := 
o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR o3:(OR o2:(OR o1:(OR o0:(OR s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]) x0:(MOVBZload [i0] {s} p mem)) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) 
s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + s0 := o0.Args[0] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + s := x1.Aux + _ = x1.Args[1] + p := x1.Args[0] + mem := x1.Args[1] + x0 := o0.Args[1] + if x0.Op != OpPPC64MOVBZload { + 
break + } + i0 := x0.AuxInt + if x0.Aux != s { + break + } + _ = x0.Args[1] + if p != x0.Args[0] { + break + } + if mem != x0.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 
&& i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } + // match: (OR o5:(OR o4:(OR o3:(OR o2:(OR o1:(OR o0:(OR x0:(MOVBZload [i0] {s} p mem) s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8])) s1:(SLDconst x2:(MOVBZload [i2] {s} p mem) [16])) s2:(SLDconst x3:(MOVBZload [i3] {s} p mem) [24])) s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])) s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])) s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])) s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])) + // cond: !config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && 
clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber (s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5) + // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload {s} [i0] p mem) + for { + t := v.Type + _ = v.Args[1] + o5 := v.Args[0] + if o5.Op != OpPPC64OR { + break + } + if o5.Type != t { + break + } + _ = o5.Args[1] + o4 := o5.Args[0] + if o4.Op != OpPPC64OR { + break + } + if o4.Type != t { + break + } + _ = o4.Args[1] + o3 := o4.Args[0] + if o3.Op != OpPPC64OR { + break + } + if o3.Type != t { + break + } + _ = o3.Args[1] + o2 := o3.Args[0] + if o2.Op != OpPPC64OR { + break + } + if o2.Type != t { + break + } + _ = o2.Args[1] + o1 := o2.Args[0] + if o1.Op != OpPPC64OR { + break + } + if o1.Type != t { + break + } + _ = o1.Args[1] + o0 := o1.Args[0] + if o0.Op != OpPPC64OR { + break + } + if o0.Type != t { + break + } + _ = o0.Args[1] + x0 := o0.Args[0] + if x0.Op != OpPPC64MOVBZload { + break + } + i0 := x0.AuxInt + s := x0.Aux + _ = x0.Args[1] + p := x0.Args[0] + mem := x0.Args[1] + s0 := o0.Args[1] + if s0.Op != OpPPC64SLDconst { + break + } + if s0.AuxInt != 8 { + break + } + x1 := s0.Args[0] + if x1.Op != OpPPC64MOVBZload { + break + } + i1 := x1.AuxInt + if x1.Aux != s { + break + } + _ = x1.Args[1] + if p != x1.Args[0] { + break + } + if mem != x1.Args[1] { + break + } + s1 := o1.Args[1] + if s1.Op != OpPPC64SLDconst { + break + } + if s1.AuxInt != 16 { + break + } + x2 := s1.Args[0] + if x2.Op != OpPPC64MOVBZload { + break + } + i2 := x2.AuxInt + if x2.Aux != s { + break + } + _ = x2.Args[1] + if p != x2.Args[0] { + break + } + if mem != x2.Args[1] { + break + } + s2 := o2.Args[1] + if s2.Op != OpPPC64SLDconst { + break + } + if s2.AuxInt != 24 { + break + } + x3 := s2.Args[0] + if x3.Op != OpPPC64MOVBZload { + break + } + i3 := x3.AuxInt + if 
x3.Aux != s { + break + } + _ = x3.Args[1] + if p != x3.Args[0] { + break + } + if mem != x3.Args[1] { + break + } + s3 := o3.Args[1] + if s3.Op != OpPPC64SLDconst { + break + } + if s3.AuxInt != 32 { + break + } + x4 := s3.Args[0] + if x4.Op != OpPPC64MOVBZload { + break + } + i4 := x4.AuxInt + if x4.Aux != s { + break + } + _ = x4.Args[1] + if p != x4.Args[0] { + break + } + if mem != x4.Args[1] { + break + } + s4 := o4.Args[1] + if s4.Op != OpPPC64SLDconst { + break + } + if s4.AuxInt != 40 { + break + } + x5 := s4.Args[0] + if x5.Op != OpPPC64MOVBZload { + break + } + i5 := x5.AuxInt + if x5.Aux != s { + break + } + _ = x5.Args[1] + if p != x5.Args[0] { + break + } + if mem != x5.Args[1] { + break + } + s5 := o5.Args[1] + if s5.Op != OpPPC64SLDconst { + break + } + if s5.AuxInt != 48 { + break + } + x6 := s5.Args[0] + if x6.Op != OpPPC64MOVBZload { + break + } + i6 := x6.AuxInt + if x6.Aux != s { + break + } + _ = x6.Args[1] + if p != x6.Args[0] { + break + } + if mem != x6.Args[1] { + break + } + s6 := v.Args[1] + if s6.Op != OpPPC64SLDconst { + break + } + if s6.AuxInt != 56 { + break + } + x7 := s6.Args[0] + if x7.Op != OpPPC64MOVBZload { + break + } + i7 := x7.AuxInt + if x7.Aux != s { + break + } + _ = x7.Args[1] + if p != x7.Args[0] { + break + } + if mem != x7.Args[1] { + break + } + if !(!config.BigEndian && i0%4 == 0 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0) && clobber(x1) && clobber(x2) && clobber(x3) && clobber(x4) && clobber(x5) && clobber(x6) && clobber(x7) && clobber(s0) && clobber(s1) && 
clobber(s2) && clobber(s3) && clobber(s4) && clobber(s5) && clobber(s6) && clobber(o0) && clobber(o1) && clobber(o2) && clobber(o3) && clobber(o4) && clobber(o5)) { + break + } + b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = i0 + v0.Aux = s + v0.AddArg(p) + v0.AddArg(mem) + return true + } return false } func rewriteValuePPC64_OpPPC64ORN_0(v *Value) bool { @@ -8125,6 +39445,10 @@ func rewriteValuePPC64_OpPPC64SUB_0(v *Value) bool { return false } func rewriteValuePPC64_OpPPC64XOR_0(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (XOR (SLDconst x [c]) (SRDconst x [d])) // cond: d == 64-c // result: (ROTLconst [c] x) @@ -8233,6 +39557,258 @@ func rewriteValuePPC64_OpPPC64XOR_0(v *Value) bool { v.AddArg(x) return true } + // match: (XOR (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) + // cond: + // result: (ROTL x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpPPC64SLD { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64ANDconst { + break + } + if v_0_1.Type != typ.Int64 { + break + } + if v_0_1.AuxInt != 63 { + break + } + y := v_0_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SRD { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUB { + break + } + if v_1_1.Type != typ.UInt { + break + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_1_0.AuxInt != 64 { + break + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpPPC64ANDconst { + break + } + if v_1_1_1.Type != typ.UInt { + break + } + if v_1_1_1.AuxInt != 63 { + break + } + if y != v_1_1_1.Args[0] { + break + } + v.reset(OpPPC64ROTL) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (XOR (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) (SLD x (ANDconst 
[63] y))) + // cond: + // result: (ROTL x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRD { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64SUB { + break + } + if v_0_1.Type != typ.UInt { + break + } + _ = v_0_1.Args[1] + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpPPC64MOVDconst { + break + } + if v_0_1_0.AuxInt != 64 { + break + } + v_0_1_1 := v_0_1.Args[1] + if v_0_1_1.Op != OpPPC64ANDconst { + break + } + if v_0_1_1.Type != typ.UInt { + break + } + if v_0_1_1.AuxInt != 63 { + break + } + y := v_0_1_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SLD { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64ANDconst { + break + } + if v_1_1.Type != typ.Int64 { + break + } + if v_1_1.AuxInt != 63 { + break + } + if y != v_1_1.Args[0] { + break + } + v.reset(OpPPC64ROTL) + v.AddArg(x) + v.AddArg(y) + return true + } + // match: (XOR (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) + // cond: + // result: (ROTLW x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpPPC64SLW { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64ANDconst { + break + } + if v_0_1.Type != typ.Int32 { + break + } + if v_0_1.AuxInt != 31 { + break + } + y := v_0_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SRW { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64SUB { + break + } + if v_1_1.Type != typ.UInt { + break + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_1_0.AuxInt != 32 { + break + } + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpPPC64ANDconst { + break + } + if v_1_1_1.Type != typ.UInt { + break + } + if v_1_1_1.AuxInt != 31 { + break + } + if y != v_1_1_1.Args[0] { + break + } + v.reset(OpPPC64ROTLW) + v.AddArg(x) + v.AddArg(y) + 
return true + } + // match: (XOR (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) (SLW x (ANDconst [31] y))) + // cond: + // result: (ROTLW x y) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpPPC64SRW { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpPPC64SUB { + break + } + if v_0_1.Type != typ.UInt { + break + } + _ = v_0_1.Args[1] + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpPPC64MOVDconst { + break + } + if v_0_1_0.AuxInt != 32 { + break + } + v_0_1_1 := v_0_1.Args[1] + if v_0_1_1.Op != OpPPC64ANDconst { + break + } + if v_0_1_1.Type != typ.UInt { + break + } + if v_0_1_1.AuxInt != 31 { + break + } + y := v_0_1_1.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SLW { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64ANDconst { + break + } + if v_1_1.Type != typ.Int32 { + break + } + if v_1_1.AuxInt != 31 { + break + } + if y != v_1_1.Args[0] { + break + } + v.reset(OpPPC64ROTLW) + v.AddArg(x) + v.AddArg(y) + return true + } // match: (XOR (MOVDconst [c]) (MOVDconst [d])) // cond: // result: (MOVDconst [c^d]) @@ -8271,6 +39847,9 @@ func rewriteValuePPC64_OpPPC64XOR_0(v *Value) bool { v.AuxInt = c ^ d return true } + return false +} +func rewriteValuePPC64_OpPPC64XOR_10(v *Value) bool { // match: (XOR x (MOVDconst [c])) // cond: isU32Bit(c) // result: (XORconst [c] x) @@ -9023,6 +40602,230 @@ func rewriteValuePPC64_OpRsh32Ux64_0(v *Value) bool { v.AddArg(x) return true } + // match: (Rsh32Ux64 x (AND y (MOVDconst [31]))) + // cond: + // result: (SRW x (ANDconst [31] y)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64AND { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64MOVDconst { + break + } + if v_1_1.AuxInt != 31 { + break + } + v.reset(OpPPC64SRW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) + v0.AuxInt = 31 + v0.AddArg(y) + 
v.AddArg(v0) + return true + } + // match: (Rsh32Ux64 x (AND (MOVDconst [31]) y)) + // cond: + // result: (SRW x (ANDconst [31] y)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64AND { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 31 { + break + } + y := v_1.Args[1] + v.reset(OpPPC64SRW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) + v0.AuxInt = 31 + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Rsh32Ux64 x (ANDconst [31] y)) + // cond: + // result: (SRW x (ANDconst [31] y)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64ANDconst { + break + } + if v_1.Type != typ.UInt { + break + } + if v_1.AuxInt != 31 { + break + } + y := v_1.Args[0] + v.reset(OpPPC64SRW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v0.AuxInt = 31 + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Rsh32Ux64 x (SUB (MOVDconst [32]) (ANDconst [31] y))) + // cond: + // result: (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SUB { + break + } + if v_1.Type != typ.UInt { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 32 { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64ANDconst { + break + } + if v_1_1.Type != typ.UInt { + break + } + if v_1_1.AuxInt != 31 { + break + } + y := v_1_1.Args[0] + v.reset(OpPPC64SRW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = 32 + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v2.AuxInt = 31 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } + // match: (Rsh32Ux64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) + // cond: + // result: (SRW x (SUB (MOVDconst [32]) 
(ANDconst [31] y))) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SUB { + break + } + if v_1.Type != typ.UInt { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 32 { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64AND { + break + } + if v_1_1.Type != typ.UInt { + break + } + _ = v_1_1.Args[1] + y := v_1_1.Args[0] + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpPPC64MOVDconst { + break + } + if v_1_1_1.AuxInt != 31 { + break + } + v.reset(OpPPC64SRW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = 32 + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v2.AuxInt = 31 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } + // match: (Rsh32Ux64 x (SUB (MOVDconst [32]) (AND (MOVDconst [31]) y))) + // cond: + // result: (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SUB { + break + } + if v_1.Type != typ.UInt { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 32 { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64AND { + break + } + if v_1_1.Type != typ.UInt { + break + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_1_0.AuxInt != 31 { + break + } + y := v_1_1.Args[1] + v.reset(OpPPC64SRW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = 32 + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v2.AuxInt = 31 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } // match: (Rsh32Ux64 x y) // cond: // result: (SRW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] y)))) @@ -9228,6 +41031,230 @@ func 
rewriteValuePPC64_OpRsh32x64_0(v *Value) bool { v.AddArg(x) return true } + // match: (Rsh32x64 x (AND y (MOVDconst [31]))) + // cond: + // result: (SRAW x (ANDconst [31] y)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64AND { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64MOVDconst { + break + } + if v_1_1.AuxInt != 31 { + break + } + v.reset(OpPPC64SRAW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) + v0.AuxInt = 31 + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Rsh32x64 x (AND (MOVDconst [31]) y)) + // cond: + // result: (SRAW x (ANDconst [31] y)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64AND { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 31 { + break + } + y := v_1.Args[1] + v.reset(OpPPC64SRAW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) + v0.AuxInt = 31 + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Rsh32x64 x (ANDconst [31] y)) + // cond: + // result: (SRAW x (ANDconst [31] y)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64ANDconst { + break + } + if v_1.Type != typ.UInt { + break + } + if v_1.AuxInt != 31 { + break + } + y := v_1.Args[0] + v.reset(OpPPC64SRAW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v0.AuxInt = 31 + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Rsh32x64 x (SUB (MOVDconst [32]) (ANDconst [31] y))) + // cond: + // result: (SRAW x (SUB (MOVDconst [32]) (ANDconst [31] y))) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SUB { + break + } + if v_1.Type != typ.UInt { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 32 { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64ANDconst { + 
break + } + if v_1_1.Type != typ.UInt { + break + } + if v_1_1.AuxInt != 31 { + break + } + y := v_1_1.Args[0] + v.reset(OpPPC64SRAW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = 32 + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v2.AuxInt = 31 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } + // match: (Rsh32x64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) + // cond: + // result: (SRAW x (SUB (MOVDconst [32]) (ANDconst [31] y))) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SUB { + break + } + if v_1.Type != typ.UInt { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 32 { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64AND { + break + } + if v_1_1.Type != typ.UInt { + break + } + _ = v_1_1.Args[1] + y := v_1_1.Args[0] + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpPPC64MOVDconst { + break + } + if v_1_1_1.AuxInt != 31 { + break + } + v.reset(OpPPC64SRAW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = 32 + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v2.AuxInt = 31 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } + // match: (Rsh32x64 x (SUB (MOVDconst [32]) (AND (MOVDconst [31]) y))) + // cond: + // result: (SRAW x (SUB (MOVDconst [32]) (ANDconst [31] y))) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SUB { + break + } + if v_1.Type != typ.UInt { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 32 { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64AND { + break + } + if v_1_1.Type != typ.UInt { + break + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != 
OpPPC64MOVDconst { + break + } + if v_1_1_0.AuxInt != 31 { + break + } + y := v_1_1.Args[1] + v.reset(OpPPC64SRAW) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = 32 + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v2.AuxInt = 31 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } // match: (Rsh32x64 x y) // cond: // result: (SRAW x (ORN y (MaskIfNotCarry (ADDconstForCarry [-32] y)))) @@ -9431,6 +41458,230 @@ func rewriteValuePPC64_OpRsh64Ux64_0(v *Value) bool { v.AddArg(x) return true } + // match: (Rsh64Ux64 x (AND y (MOVDconst [63]))) + // cond: + // result: (SRD x (ANDconst [63] y)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64AND { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64MOVDconst { + break + } + if v_1_1.AuxInt != 63 { + break + } + v.reset(OpPPC64SRD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) + v0.AuxInt = 63 + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Rsh64Ux64 x (AND (MOVDconst [63]) y)) + // cond: + // result: (SRD x (ANDconst [63] y)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64AND { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 63 { + break + } + y := v_1.Args[1] + v.reset(OpPPC64SRD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) + v0.AuxInt = 63 + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Rsh64Ux64 x (ANDconst [63] y)) + // cond: + // result: (SRD x (ANDconst [63] y)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64ANDconst { + break + } + if v_1.Type != typ.UInt { + break + } + if v_1.AuxInt != 63 { + break + } + y := v_1.Args[0] + v.reset(OpPPC64SRD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, 
typ.UInt) + v0.AuxInt = 63 + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Rsh64Ux64 x (SUB (MOVDconst [64]) (ANDconst [63] y))) + // cond: + // result: (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SUB { + break + } + if v_1.Type != typ.UInt { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 64 { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64ANDconst { + break + } + if v_1_1.Type != typ.UInt { + break + } + if v_1_1.AuxInt != 63 { + break + } + y := v_1_1.Args[0] + v.reset(OpPPC64SRD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = 64 + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v2.AuxInt = 63 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } + // match: (Rsh64Ux64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) + // cond: + // result: (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SUB { + break + } + if v_1.Type != typ.UInt { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 64 { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64AND { + break + } + if v_1_1.Type != typ.UInt { + break + } + _ = v_1_1.Args[1] + y := v_1_1.Args[0] + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpPPC64MOVDconst { + break + } + if v_1_1_1.AuxInt != 63 { + break + } + v.reset(OpPPC64SRD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = 64 + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v2.AuxInt = 63 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } + // match: (Rsh64Ux64 x (SUB (MOVDconst [64]) 
(AND (MOVDconst [63]) y))) + // cond: + // result: (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SUB { + break + } + if v_1.Type != typ.UInt { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 64 { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64AND { + break + } + if v_1_1.Type != typ.UInt { + break + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_1_0.AuxInt != 63 { + break + } + y := v_1_1.Args[1] + v.reset(OpPPC64SRD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = 64 + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v2.AuxInt = 63 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } // match: (Rsh64Ux64 x y) // cond: // result: (SRD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] y)))) @@ -9636,6 +41887,230 @@ func rewriteValuePPC64_OpRsh64x64_0(v *Value) bool { v.AddArg(x) return true } + // match: (Rsh64x64 x (AND y (MOVDconst [63]))) + // cond: + // result: (SRAD x (ANDconst [63] y)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64AND { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64MOVDconst { + break + } + if v_1_1.AuxInt != 63 { + break + } + v.reset(OpPPC64SRAD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) + v0.AuxInt = 63 + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Rsh64x64 x (AND (MOVDconst [63]) y)) + // cond: + // result: (SRAD x (ANDconst [63] y)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64AND { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 63 { + break + } + y := 
v_1.Args[1] + v.reset(OpPPC64SRAD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) + v0.AuxInt = 63 + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Rsh64x64 x (ANDconst [63] y)) + // cond: + // result: (SRAD x (ANDconst [63] y)) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64ANDconst { + break + } + if v_1.Type != typ.UInt { + break + } + if v_1.AuxInt != 63 { + break + } + y := v_1.Args[0] + v.reset(OpPPC64SRAD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v0.AuxInt = 63 + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (Rsh64x64 x (SUB (MOVDconst [64]) (ANDconst [63] y))) + // cond: + // result: (SRAD x (SUB (MOVDconst [64]) (ANDconst [63] y))) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SUB { + break + } + if v_1.Type != typ.UInt { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 64 { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64ANDconst { + break + } + if v_1_1.Type != typ.UInt { + break + } + if v_1_1.AuxInt != 63 { + break + } + y := v_1_1.Args[0] + v.reset(OpPPC64SRAD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = 64 + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v2.AuxInt = 63 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } + // match: (Rsh64x64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) + // cond: + // result: (SRAD x (SUB (MOVDconst [64]) (ANDconst [63] y))) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SUB { + break + } + if v_1.Type != typ.UInt { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 64 { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64AND { + break + } + if 
v_1_1.Type != typ.UInt { + break + } + _ = v_1_1.Args[1] + y := v_1_1.Args[0] + v_1_1_1 := v_1_1.Args[1] + if v_1_1_1.Op != OpPPC64MOVDconst { + break + } + if v_1_1_1.AuxInt != 63 { + break + } + v.reset(OpPPC64SRAD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = 64 + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v2.AuxInt = 63 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } + // match: (Rsh64x64 x (SUB (MOVDconst [64]) (AND (MOVDconst [63]) y))) + // cond: + // result: (SRAD x (SUB (MOVDconst [64]) (ANDconst [63] y))) + for { + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpPPC64SUB { + break + } + if v_1.Type != typ.UInt { + break + } + _ = v_1.Args[1] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_0.AuxInt != 64 { + break + } + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpPPC64AND { + break + } + if v_1_1.Type != typ.UInt { + break + } + _ = v_1_1.Args[1] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64MOVDconst { + break + } + if v_1_1_0.AuxInt != 63 { + break + } + y := v_1_1.Args[1] + v.reset(OpPPC64SRAD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) + v1.AuxInt = 64 + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) + v2.AuxInt = 63 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } // match: (Rsh64x64 x y) // cond: // result: (SRAD x (ORN y (MaskIfNotCarry (ADDconstForCarry [-64] y)))) @@ -10463,6 +42938,17 @@ func rewriteValuePPC64_OpSubPtr_0(v *Value) bool { return true } } +func rewriteValuePPC64_OpTrunc_0(v *Value) bool { + // match: (Trunc x) + // cond: + // result: (FTRUNC x) + for { + x := v.Args[0] + v.reset(OpPPC64FTRUNC) + v.AddArg(x) + return true + } +} func rewriteValuePPC64_OpTrunc16to8_0(v *Value) bool { // match: (Trunc16to8 x) // cond: @@ -10991,6 +43477,7 @@ 
func rewriteBlockPPC64(b *Block) bool { v0.AuxInt = c v0.AddArg(x) b.SetControl(v0) + b.Aux = nil return true } // match: (EQ (CMPWconst [0] (ANDconst [c] x)) yes no) @@ -11015,6 +43502,7 @@ func rewriteBlockPPC64(b *Block) bool { v0.AuxInt = c v0.AddArg(x) b.SetControl(v0) + b.Aux = nil return true } // match: (EQ (FlagEQ) yes no) @@ -11027,6 +43515,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (EQ (FlagLT) yes no) @@ -11039,6 +43528,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -11052,6 +43542,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -11066,6 +43557,7 @@ func rewriteBlockPPC64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockPPC64EQ b.SetControl(cmp) + b.Aux = nil return true } case BlockPPC64GE: @@ -11079,6 +43571,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GE (FlagLT) yes no) @@ -11091,6 +43584,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -11104,6 +43598,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GE (InvertFlags cmp) yes no) @@ -11117,6 +43612,7 @@ func rewriteBlockPPC64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockPPC64LE b.SetControl(cmp) + b.Aux = nil return true } case BlockPPC64GT: @@ -11130,6 +43626,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -11143,6 +43640,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -11156,6 +43654,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Kind = 
BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GT (InvertFlags cmp) yes no) @@ -11169,6 +43668,7 @@ func rewriteBlockPPC64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockPPC64LT b.SetControl(cmp) + b.Aux = nil return true } case BlockIf: @@ -11183,6 +43683,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v.Args[0] b.Kind = BlockPPC64EQ b.SetControl(cc) + b.Aux = nil return true } // match: (If (NotEqual cc) yes no) @@ -11196,6 +43697,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v.Args[0] b.Kind = BlockPPC64NE b.SetControl(cc) + b.Aux = nil return true } // match: (If (LessThan cc) yes no) @@ -11209,6 +43711,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v.Args[0] b.Kind = BlockPPC64LT b.SetControl(cc) + b.Aux = nil return true } // match: (If (LessEqual cc) yes no) @@ -11222,6 +43725,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v.Args[0] b.Kind = BlockPPC64LE b.SetControl(cc) + b.Aux = nil return true } // match: (If (GreaterThan cc) yes no) @@ -11235,6 +43739,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v.Args[0] b.Kind = BlockPPC64GT b.SetControl(cc) + b.Aux = nil return true } // match: (If (GreaterEqual cc) yes no) @@ -11248,6 +43753,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v.Args[0] b.Kind = BlockPPC64GE b.SetControl(cc) + b.Aux = nil return true } // match: (If (FLessThan cc) yes no) @@ -11261,6 +43767,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v.Args[0] b.Kind = BlockPPC64FLT b.SetControl(cc) + b.Aux = nil return true } // match: (If (FLessEqual cc) yes no) @@ -11274,6 +43781,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v.Args[0] b.Kind = BlockPPC64FLE b.SetControl(cc) + b.Aux = nil return true } // match: (If (FGreaterThan cc) yes no) @@ -11287,6 +43795,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v.Args[0] b.Kind = BlockPPC64FGT b.SetControl(cc) + b.Aux = nil return true } // match: (If (FGreaterEqual cc) yes no) @@ -11300,6 +43809,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := 
v.Args[0] b.Kind = BlockPPC64FGE b.SetControl(cc) + b.Aux = nil return true } // match: (If cond yes no) @@ -11314,6 +43824,7 @@ func rewriteBlockPPC64(b *Block) bool { v0.AuxInt = 0 v0.AddArg(cond) b.SetControl(v0) + b.Aux = nil return true } case BlockPPC64LE: @@ -11327,6 +43838,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LE (FlagLT) yes no) @@ -11339,6 +43851,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LE (FlagGT) yes no) @@ -11351,6 +43864,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -11365,6 +43879,7 @@ func rewriteBlockPPC64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockPPC64GE b.SetControl(cmp) + b.Aux = nil return true } case BlockPPC64LT: @@ -11378,6 +43893,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -11391,6 +43907,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LT (FlagGT) yes no) @@ -11403,6 +43920,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -11417,6 +43935,7 @@ func rewriteBlockPPC64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockPPC64GT b.SetControl(cmp) + b.Aux = nil return true } case BlockPPC64NE: @@ -11438,6 +43957,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v_0.Args[0] b.Kind = BlockPPC64EQ b.SetControl(cc) + b.Aux = nil return true } // match: (NE (CMPWconst [0] (NotEqual cc)) yes no) @@ -11458,6 +43978,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v_0.Args[0] b.Kind = BlockPPC64NE b.SetControl(cc) + b.Aux = nil return true } // match: (NE (CMPWconst [0] (LessThan cc)) yes no) @@ -11478,6 +43999,7 @@ func rewriteBlockPPC64(b *Block) 
bool { cc := v_0.Args[0] b.Kind = BlockPPC64LT b.SetControl(cc) + b.Aux = nil return true } // match: (NE (CMPWconst [0] (LessEqual cc)) yes no) @@ -11498,6 +44020,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v_0.Args[0] b.Kind = BlockPPC64LE b.SetControl(cc) + b.Aux = nil return true } // match: (NE (CMPWconst [0] (GreaterThan cc)) yes no) @@ -11518,6 +44041,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v_0.Args[0] b.Kind = BlockPPC64GT b.SetControl(cc) + b.Aux = nil return true } // match: (NE (CMPWconst [0] (GreaterEqual cc)) yes no) @@ -11538,6 +44062,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v_0.Args[0] b.Kind = BlockPPC64GE b.SetControl(cc) + b.Aux = nil return true } // match: (NE (CMPWconst [0] (FLessThan cc)) yes no) @@ -11558,6 +44083,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v_0.Args[0] b.Kind = BlockPPC64FLT b.SetControl(cc) + b.Aux = nil return true } // match: (NE (CMPWconst [0] (FLessEqual cc)) yes no) @@ -11578,6 +44104,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v_0.Args[0] b.Kind = BlockPPC64FLE b.SetControl(cc) + b.Aux = nil return true } // match: (NE (CMPWconst [0] (FGreaterThan cc)) yes no) @@ -11598,6 +44125,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v_0.Args[0] b.Kind = BlockPPC64FGT b.SetControl(cc) + b.Aux = nil return true } // match: (NE (CMPWconst [0] (FGreaterEqual cc)) yes no) @@ -11618,6 +44146,7 @@ func rewriteBlockPPC64(b *Block) bool { cc := v_0.Args[0] b.Kind = BlockPPC64FGE b.SetControl(cc) + b.Aux = nil return true } // match: (NE (CMPconst [0] (ANDconst [c] x)) yes no) @@ -11642,6 +44171,7 @@ func rewriteBlockPPC64(b *Block) bool { v0.AuxInt = c v0.AddArg(x) b.SetControl(v0) + b.Aux = nil return true } // match: (NE (CMPWconst [0] (ANDconst [c] x)) yes no) @@ -11666,6 +44196,7 @@ func rewriteBlockPPC64(b *Block) bool { v0.AuxInt = c v0.AddArg(x) b.SetControl(v0) + b.Aux = nil return true } // match: (NE (FlagEQ) yes no) @@ -11678,6 +44209,7 @@ func rewriteBlockPPC64(b *Block) bool { 
} b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -11691,6 +44223,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (NE (FlagGT) yes no) @@ -11703,6 +44236,7 @@ func rewriteBlockPPC64(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (NE (InvertFlags cmp) yes no) @@ -11716,6 +44250,7 @@ func rewriteBlockPPC64(b *Block) bool { cmp := v.Args[0] b.Kind = BlockPPC64NE b.SetControl(cmp) + b.Aux = nil return true } } diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go index e84cb5b10c7..fe9b2bd001b 100644 --- a/src/cmd/compile/internal/ssa/rewriteS390X.go +++ b/src/cmd/compile/internal/ssa/rewriteS390X.go @@ -73,6 +73,8 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpBswap32_0(v) case OpBswap64: return rewriteValueS390X_OpBswap64_0(v) + case OpCeil: + return rewriteValueS390X_OpCeil_0(v) case OpClosureCall: return rewriteValueS390X_OpClosureCall_0(v) case OpCom16: @@ -161,6 +163,8 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpEqB_0(v) case OpEqPtr: return rewriteValueS390X_OpEqPtr_0(v) + case OpFloor: + return rewriteValueS390X_OpFloor_0(v) case OpGeq16: return rewriteValueS390X_OpGeq16_0(v) case OpGeq16U: @@ -181,6 +185,8 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpGeq8_0(v) case OpGeq8U: return rewriteValueS390X_OpGeq8U_0(v) + case OpGetCallerSP: + return rewriteValueS390X_OpGetCallerSP_0(v) case OpGetClosurePtr: return rewriteValueS390X_OpGetClosurePtr_0(v) case OpGetG: @@ -371,10 +377,14 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpOr8_0(v) case OpOrB: return rewriteValueS390X_OpOrB_0(v) + case OpRound: + return rewriteValueS390X_OpRound_0(v) case OpRound32F: return rewriteValueS390X_OpRound32F_0(v) case OpRound64F: return rewriteValueS390X_OpRound64F_0(v) + case 
OpRoundToEven: + return rewriteValueS390X_OpRoundToEven_0(v) case OpRsh16Ux16: return rewriteValueS390X_OpRsh16Ux16_0(v) case OpRsh16Ux32: @@ -445,16 +455,24 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpS390XADDW_0(v) || rewriteValueS390X_OpS390XADDW_10(v) case OpS390XADDWconst: return rewriteValueS390X_OpS390XADDWconst_0(v) + case OpS390XADDWload: + return rewriteValueS390X_OpS390XADDWload_0(v) case OpS390XADDconst: return rewriteValueS390X_OpS390XADDconst_0(v) + case OpS390XADDload: + return rewriteValueS390X_OpS390XADDload_0(v) case OpS390XAND: return rewriteValueS390X_OpS390XAND_0(v) || rewriteValueS390X_OpS390XAND_10(v) case OpS390XANDW: return rewriteValueS390X_OpS390XANDW_0(v) || rewriteValueS390X_OpS390XANDW_10(v) case OpS390XANDWconst: return rewriteValueS390X_OpS390XANDWconst_0(v) + case OpS390XANDWload: + return rewriteValueS390X_OpS390XANDWload_0(v) case OpS390XANDconst: return rewriteValueS390X_OpS390XANDconst_0(v) + case OpS390XANDload: + return rewriteValueS390X_OpS390XANDload_0(v) case OpS390XCMP: return rewriteValueS390X_OpS390XCMP_0(v) case OpS390XCMPU: @@ -471,6 +489,8 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpS390XCMPWconst_0(v) case OpS390XCMPconst: return rewriteValueS390X_OpS390XCMPconst_0(v) + case OpS390XCPSDR: + return rewriteValueS390X_OpS390XCPSDR_0(v) case OpS390XFADD: return rewriteValueS390X_OpS390XFADD_0(v) case OpS390XFADDS: @@ -491,10 +511,20 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpS390XFMOVSstore_0(v) case OpS390XFMOVSstoreidx: return rewriteValueS390X_OpS390XFMOVSstoreidx_0(v) + case OpS390XFNEG: + return rewriteValueS390X_OpS390XFNEG_0(v) + case OpS390XFNEGS: + return rewriteValueS390X_OpS390XFNEGS_0(v) case OpS390XFSUB: return rewriteValueS390X_OpS390XFSUB_0(v) case OpS390XFSUBS: return rewriteValueS390X_OpS390XFSUBS_0(v) + case OpS390XLDGR: + return rewriteValueS390X_OpS390XLDGR_0(v) + case OpS390XLEDBR: + return 
rewriteValueS390X_OpS390XLEDBR_0(v) + case OpS390XLGDR: + return rewriteValueS390X_OpS390XLGDR_0(v) case OpS390XLoweredRound32F: return rewriteValueS390X_OpS390XLoweredRound32F_0(v) case OpS390XLoweredRound64F: @@ -507,6 +537,8 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpS390XMOVBZreg_0(v) || rewriteValueS390X_OpS390XMOVBZreg_10(v) case OpS390XMOVBload: return rewriteValueS390X_OpS390XMOVBload_0(v) + case OpS390XMOVBloadidx: + return rewriteValueS390X_OpS390XMOVBloadidx_0(v) case OpS390XMOVBreg: return rewriteValueS390X_OpS390XMOVBreg_0(v) case OpS390XMOVBstore: @@ -555,8 +587,10 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpS390XMOVHZreg_0(v) case OpS390XMOVHload: return rewriteValueS390X_OpS390XMOVHload_0(v) + case OpS390XMOVHloadidx: + return rewriteValueS390X_OpS390XMOVHloadidx_0(v) case OpS390XMOVHreg: - return rewriteValueS390X_OpS390XMOVHreg_0(v) + return rewriteValueS390X_OpS390XMOVHreg_0(v) || rewriteValueS390X_OpS390XMOVHreg_10(v) case OpS390XMOVHstore: return rewriteValueS390X_OpS390XMOVHstore_0(v) || rewriteValueS390X_OpS390XMOVHstore_10(v) case OpS390XMOVHstoreconst: @@ -572,9 +606,11 @@ func rewriteValueS390X(v *Value) bool { case OpS390XMOVWZloadidx: return rewriteValueS390X_OpS390XMOVWZloadidx_0(v) case OpS390XMOVWZreg: - return rewriteValueS390X_OpS390XMOVWZreg_0(v) + return rewriteValueS390X_OpS390XMOVWZreg_0(v) || rewriteValueS390X_OpS390XMOVWZreg_10(v) case OpS390XMOVWload: return rewriteValueS390X_OpS390XMOVWload_0(v) + case OpS390XMOVWloadidx: + return rewriteValueS390X_OpS390XMOVWloadidx_0(v) case OpS390XMOVWreg: return rewriteValueS390X_OpS390XMOVWreg_0(v) || rewriteValueS390X_OpS390XMOVWreg_10(v) case OpS390XMOVWstore: @@ -587,10 +623,14 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpS390XMULLD_0(v) case OpS390XMULLDconst: return rewriteValueS390X_OpS390XMULLDconst_0(v) + case OpS390XMULLDload: + return rewriteValueS390X_OpS390XMULLDload_0(v) case OpS390XMULLW: return 
rewriteValueS390X_OpS390XMULLW_0(v) case OpS390XMULLWconst: return rewriteValueS390X_OpS390XMULLWconst_0(v) + case OpS390XMULLWload: + return rewriteValueS390X_OpS390XMULLWload_0(v) case OpS390XNEG: return rewriteValueS390X_OpS390XNEG_0(v) case OpS390XNEGW: @@ -605,8 +645,12 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpS390XORW_0(v) || rewriteValueS390X_OpS390XORW_10(v) || rewriteValueS390X_OpS390XORW_20(v) || rewriteValueS390X_OpS390XORW_30(v) || rewriteValueS390X_OpS390XORW_40(v) || rewriteValueS390X_OpS390XORW_50(v) || rewriteValueS390X_OpS390XORW_60(v) || rewriteValueS390X_OpS390XORW_70(v) || rewriteValueS390X_OpS390XORW_80(v) || rewriteValueS390X_OpS390XORW_90(v) case OpS390XORWconst: return rewriteValueS390X_OpS390XORWconst_0(v) + case OpS390XORWload: + return rewriteValueS390X_OpS390XORWload_0(v) case OpS390XORconst: return rewriteValueS390X_OpS390XORconst_0(v) + case OpS390XORload: + return rewriteValueS390X_OpS390XORload_0(v) case OpS390XSLD: return rewriteValueS390X_OpS390XSLD_0(v) case OpS390XSLW: @@ -621,6 +665,8 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpS390XSRAWconst_0(v) case OpS390XSRD: return rewriteValueS390X_OpS390XSRD_0(v) + case OpS390XSRDconst: + return rewriteValueS390X_OpS390XSRDconst_0(v) case OpS390XSRW: return rewriteValueS390X_OpS390XSRW_0(v) case OpS390XSTM2: @@ -637,16 +683,24 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpS390XSUBW_0(v) case OpS390XSUBWconst: return rewriteValueS390X_OpS390XSUBWconst_0(v) + case OpS390XSUBWload: + return rewriteValueS390X_OpS390XSUBWload_0(v) case OpS390XSUBconst: return rewriteValueS390X_OpS390XSUBconst_0(v) + case OpS390XSUBload: + return rewriteValueS390X_OpS390XSUBload_0(v) case OpS390XXOR: return rewriteValueS390X_OpS390XXOR_0(v) || rewriteValueS390X_OpS390XXOR_10(v) case OpS390XXORW: return rewriteValueS390X_OpS390XXORW_0(v) || rewriteValueS390X_OpS390XXORW_10(v) case OpS390XXORWconst: return 
rewriteValueS390X_OpS390XXORWconst_0(v) + case OpS390XXORWload: + return rewriteValueS390X_OpS390XXORWload_0(v) case OpS390XXORconst: return rewriteValueS390X_OpS390XXORconst_0(v) + case OpS390XXORload: + return rewriteValueS390X_OpS390XXORload_0(v) case OpSelect0: return rewriteValueS390X_OpSelect0_0(v) case OpSelect1: @@ -685,6 +739,8 @@ func rewriteValueS390X(v *Value) bool { return rewriteValueS390X_OpSub8_0(v) case OpSubPtr: return rewriteValueS390X_OpSubPtr_0(v) + case OpTrunc: + return rewriteValueS390X_OpTrunc_0(v) case OpTrunc16to8: return rewriteValueS390X_OpTrunc16to8_0(v) case OpTrunc32to16: @@ -1172,6 +1228,18 @@ func rewriteValueS390X_OpBswap64_0(v *Value) bool { return true } } +func rewriteValueS390X_OpCeil_0(v *Value) bool { + // match: (Ceil x) + // cond: + // result: (FIDBR [6] x) + for { + x := v.Args[0] + v.reset(OpS390XFIDBR) + v.AuxInt = 6 + v.AddArg(x) + return true + } +} func rewriteValueS390X_OpClosureCall_0(v *Value) bool { // match: (ClosureCall [argwid] entry closure mem) // cond: @@ -1911,6 +1979,18 @@ func rewriteValueS390X_OpEqPtr_0(v *Value) bool { return true } } +func rewriteValueS390X_OpFloor_0(v *Value) bool { + // match: (Floor x) + // cond: + // result: (FIDBR [7] x) + for { + x := v.Args[0] + v.reset(OpS390XFIDBR) + v.AuxInt = 7 + v.AddArg(x) + return true + } +} func rewriteValueS390X_OpGeq16_0(v *Value) bool { b := v.Block _ = b @@ -2187,6 +2267,15 @@ func rewriteValueS390X_OpGeq8U_0(v *Value) bool { return true } } +func rewriteValueS390X_OpGetCallerSP_0(v *Value) bool { + // match: (GetCallerSP) + // cond: + // result: (LoweredGetCallerSP) + for { + v.reset(OpS390XLoweredGetCallerSP) + return true + } +} func rewriteValueS390X_OpGetClosurePtr_0(v *Value) bool { // match: (GetClosurePtr) // cond: @@ -4913,6 +5002,18 @@ func rewriteValueS390X_OpOrB_0(v *Value) bool { return true } } +func rewriteValueS390X_OpRound_0(v *Value) bool { + // match: (Round x) + // cond: + // result: (FIDBR [1] x) + for { + x := v.Args[0] + 
v.reset(OpS390XFIDBR) + v.AuxInt = 1 + v.AddArg(x) + return true + } +} func rewriteValueS390X_OpRound32F_0(v *Value) bool { // match: (Round32F x) // cond: @@ -4935,6 +5036,18 @@ func rewriteValueS390X_OpRound64F_0(v *Value) bool { return true } } +func rewriteValueS390X_OpRoundToEven_0(v *Value) bool { + // match: (RoundToEven x) + // cond: + // result: (FIDBR [4] x) + for { + x := v.Args[0] + v.reset(OpS390XFIDBR) + v.AuxInt = 4 + v.AddArg(x) + return true + } +} func rewriteValueS390X_OpRsh16Ux16_0(v *Value) bool { b := v.Block _ = b @@ -6186,7 +6299,7 @@ func rewriteValueS390X_OpS390XADD_10(v *Value) bool { func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { // match: (ADDW x (MOVDconst [c])) // cond: - // result: (ADDWconst [c] x) + // result: (ADDWconst [int64(int32(c))] x) for { _ = v.Args[1] x := v.Args[0] @@ -6196,13 +6309,13 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpS390XADDWconst) - v.AuxInt = c + v.AuxInt = int64(int32(c)) v.AddArg(x) return true } // match: (ADDW (MOVDconst [c]) x) // cond: - // result: (ADDWconst [c] x) + // result: (ADDWconst [int64(int32(c))] x) for { _ = v.Args[1] v_0 := v.Args[0] @@ -6212,7 +6325,7 @@ func rewriteValueS390X_OpS390XADDW_0(v *Value) bool { c := v_0.AuxInt x := v.Args[1] v.reset(OpS390XADDWconst) - v.AuxInt = c + v.AuxInt = int64(int32(c)) v.AddArg(x) return true } @@ -6578,6 +6691,62 @@ func rewriteValueS390X_OpS390XADDWconst_0(v *Value) bool { } return false } +func rewriteValueS390X_OpS390XADDWload_0(v *Value) bool { + // match: (ADDWload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // result: (ADDWload [off1+off2] {sym} x ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + off2 := v_1.AuxInt + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + break + } + v.reset(OpS390XADDWload) + v.AuxInt 
= off1 + off2 + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) + // result: (ADDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := v.AuxInt + s1 := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := v_1.AuxInt + s2 := v_1.Aux + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XADDWload) + v.AuxInt = o1 + o2 + v.Aux = mergeSym(s1, s2) + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} func rewriteValueS390X_OpS390XADDconst_0(v *Value) bool { // match: (ADDconst [c] (MOVDaddr [d] {s} x:(SB))) // cond: ((c+d)&1 == 0) && is32Bit(c+d) @@ -6696,6 +6865,97 @@ func rewriteValueS390X_OpS390XADDconst_0(v *Value) bool { } return false } +func rewriteValueS390X_OpS390XADDload_0(v *Value) bool { + b := v.Block + _ = b + // match: (ADDload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (ADD x (LGDR y)) + for { + t := v.Type + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr1 := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFMOVDstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + ptr2 := v_2.Args[0] + y := v_2.Args[1] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XADD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpS390XLGDR, t) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (ADDload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // result: (ADDload [off1+off2] {sym} x ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + off2 := v_1.AuxInt + ptr 
:= v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + break + } + v.reset(OpS390XADDload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) + // result: (ADDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := v.AuxInt + s1 := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := v_1.AuxInt + s2 := v_1.Aux + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XADDload) + v.AuxInt = o1 + o2 + v.Aux = mergeSym(s1, s2) + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} func rewriteValueS390X_OpS390XAND_0(v *Value) bool { // match: (AND x (MOVDconst [c])) // cond: is32Bit(c) && c < 0 @@ -7009,7 +7269,7 @@ func rewriteValueS390X_OpS390XAND_10(v *Value) bool { func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { // match: (ANDW x (MOVDconst [c])) // cond: - // result: (ANDWconst [c] x) + // result: (ANDWconst [int64(int32(c))] x) for { _ = v.Args[1] x := v.Args[0] @@ -7019,13 +7279,13 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpS390XANDWconst) - v.AuxInt = c + v.AuxInt = int64(int32(c)) v.AddArg(x) return true } // match: (ANDW (MOVDconst [c]) x) // cond: - // result: (ANDWconst [c] x) + // result: (ANDWconst [int64(int32(c))] x) for { _ = v.Args[1] v_0 := v.Args[0] @@ -7035,7 +7295,7 @@ func rewriteValueS390X_OpS390XANDW_0(v *Value) bool { c := v_0.AuxInt x := v.Args[1] v.reset(OpS390XANDWconst) - v.AuxInt = c + v.AuxInt = int64(int32(c)) v.AddArg(x) return true } @@ -7365,6 +7625,62 @@ func rewriteValueS390X_OpS390XANDWconst_0(v *Value) bool { } return false } +func rewriteValueS390X_OpS390XANDWload_0(v *Value) bool { + // match: (ANDWload [off1] 
{sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // result: (ANDWload [off1+off2] {sym} x ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + off2 := v_1.AuxInt + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + break + } + v.reset(OpS390XANDWload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) + // result: (ANDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := v.AuxInt + s1 := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := v_1.AuxInt + s2 := v_1.Aux + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XANDWload) + v.AuxInt = o1 + o2 + v.Aux = mergeSym(s1, s2) + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} func rewriteValueS390X_OpS390XANDconst_0(v *Value) bool { // match: (ANDconst [c] (ANDconst [d] x)) // cond: @@ -7422,6 +7738,97 @@ func rewriteValueS390X_OpS390XANDconst_0(v *Value) bool { } return false } +func rewriteValueS390X_OpS390XANDload_0(v *Value) bool { + b := v.Block + _ = b + // match: (ANDload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (AND x (LGDR y)) + for { + t := v.Type + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr1 := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFMOVDstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + ptr2 := v_2.Args[0] + y := v_2.Args[1] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XAND) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, 
OpS390XLGDR, t) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (ANDload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // result: (ANDload [off1+off2] {sym} x ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + off2 := v_1.AuxInt + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + break + } + v.reset(OpS390XANDload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) + // result: (ANDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := v.AuxInt + s1 := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := v_1.AuxInt + s2 := v_1.Aux + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XANDload) + v.AuxInt = o1 + o2 + v.Aux = mergeSym(s1, s2) + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} func rewriteValueS390X_OpS390XCMP_0(v *Value) bool { b := v.Block _ = b @@ -7472,7 +7879,7 @@ func rewriteValueS390X_OpS390XCMPU_0(v *Value) bool { _ = b // match: (CMPU x (MOVDconst [c])) // cond: isU32Bit(c) - // result: (CMPUconst x [int64(uint32(c))]) + // result: (CMPUconst x [int64(int32(c))]) for { _ = v.Args[1] x := v.Args[0] @@ -7485,13 +7892,13 @@ func rewriteValueS390X_OpS390XCMPU_0(v *Value) bool { break } v.reset(OpS390XCMPUconst) - v.AuxInt = int64(uint32(c)) + v.AuxInt = int64(int32(c)) v.AddArg(x) return true } // match: (CMPU (MOVDconst [c]) x) // cond: isU32Bit(c) - // result: (InvertFlags (CMPUconst x [int64(uint32(c))])) + // result: (InvertFlags (CMPUconst x [int64(int32(c))])) for { _ = v.Args[1] v_0 := v.Args[0] @@ 
-7505,7 +7912,7 @@ func rewriteValueS390X_OpS390XCMPU_0(v *Value) bool { } v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags) - v0.AuxInt = int64(uint32(c)) + v0.AuxInt = int64(int32(c)) v0.AddArg(x) v.AddArg(v0) return true @@ -7568,7 +7975,7 @@ func rewriteValueS390X_OpS390XCMPW_0(v *Value) bool { _ = b // match: (CMPW x (MOVDconst [c])) // cond: - // result: (CMPWconst x [c]) + // result: (CMPWconst x [int64(int32(c))]) for { _ = v.Args[1] x := v.Args[0] @@ -7578,13 +7985,13 @@ func rewriteValueS390X_OpS390XCMPW_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpS390XCMPWconst) - v.AuxInt = c + v.AuxInt = int64(int32(c)) v.AddArg(x) return true } // match: (CMPW (MOVDconst [c]) x) // cond: - // result: (InvertFlags (CMPWconst x [c])) + // result: (InvertFlags (CMPWconst x [int64(int32(c))])) for { _ = v.Args[1] v_0 := v.Args[0] @@ -7595,7 +8002,7 @@ func rewriteValueS390X_OpS390XCMPW_0(v *Value) bool { x := v.Args[1] v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XCMPWconst, types.TypeFlags) - v0.AuxInt = c + v0.AuxInt = int64(int32(c)) v0.AddArg(x) v.AddArg(v0) return true @@ -7607,7 +8014,7 @@ func rewriteValueS390X_OpS390XCMPWU_0(v *Value) bool { _ = b // match: (CMPWU x (MOVDconst [c])) // cond: - // result: (CMPWUconst x [int64(uint32(c))]) + // result: (CMPWUconst x [int64(int32(c))]) for { _ = v.Args[1] x := v.Args[0] @@ -7617,13 +8024,13 @@ func rewriteValueS390X_OpS390XCMPWU_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpS390XCMPWUconst) - v.AuxInt = int64(uint32(c)) + v.AuxInt = int64(int32(c)) v.AddArg(x) return true } // match: (CMPWU (MOVDconst [c]) x) // cond: - // result: (InvertFlags (CMPWUconst x [int64(uint32(c))])) + // result: (InvertFlags (CMPWUconst x [int64(int32(c))])) for { _ = v.Args[1] v_0 := v.Args[0] @@ -7634,7 +8041,7 @@ func rewriteValueS390X_OpS390XCMPWU_0(v *Value) bool { x := v.Args[1] v.reset(OpS390XInvertFlags) v0 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags) - v0.AuxInt 
= int64(uint32(c)) + v0.AuxInt = int64(int32(c)) v0.AddArg(x) v.AddArg(v0) return true @@ -7903,6 +8310,45 @@ func rewriteValueS390X_OpS390XCMPconst_0(v *Value) bool { } return false } +func rewriteValueS390X_OpS390XCPSDR_0(v *Value) bool { + // match: (CPSDR y (FMOVDconst [c])) + // cond: c & -1<<63 == 0 + // result: (LPDFR y) + for { + _ = v.Args[1] + y := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XFMOVDconst { + break + } + c := v_1.AuxInt + if !(c&-1<<63 == 0) { + break + } + v.reset(OpS390XLPDFR) + v.AddArg(y) + return true + } + // match: (CPSDR y (FMOVDconst [c])) + // cond: c & -1<<63 != 0 + // result: (LNDFR y) + for { + _ = v.Args[1] + y := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XFMOVDconst { + break + } + c := v_1.AuxInt + if !(c&-1<<63 != 0) { + break + } + v.reset(OpS390XLNDFR) + v.AddArg(y) + return true + } + return false +} func rewriteValueS390X_OpS390XFADD_0(v *Value) bool { // match: (FADD (FMUL y z) x) // cond: @@ -7986,6 +8432,63 @@ func rewriteValueS390X_OpS390XFADDS_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XFMOVDload_0(v *Value) bool { + // match: (FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (LDGR x) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr1 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDstore { + break + } + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XLDGR) + v.AddArg(x) + return true + } + // match: (FMOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: x + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr1 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XFMOVDstore { + break + } + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + ptr2 := v_1.Args[0] + x := 
v_1.Args[1] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is20Bit(off1+off2) // result: (FMOVDload [off1+off2] {sym} ptr mem) @@ -8093,7 +8596,7 @@ func rewriteValueS390X_OpS390XFMOVDload_0(v *Value) bool { } func rewriteValueS390X_OpS390XFMOVDloadidx_0(v *Value) bool { // match: (FMOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: + // cond: is20Bit(c+d) // result: (FMOVDloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8107,6 +8610,9 @@ func rewriteValueS390X_OpS390XFMOVDloadidx_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XFMOVDloadidx) v.AuxInt = c + d v.Aux = sym @@ -8116,7 +8622,7 @@ func rewriteValueS390X_OpS390XFMOVDloadidx_0(v *Value) bool { return true } // match: (FMOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: + // cond: is20Bit(c+d) // result: (FMOVDloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8130,6 +8636,9 @@ func rewriteValueS390X_OpS390XFMOVDloadidx_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XFMOVDloadidx) v.AuxInt = c + d v.Aux = sym @@ -8256,7 +8765,7 @@ func rewriteValueS390X_OpS390XFMOVDstore_0(v *Value) bool { } func rewriteValueS390X_OpS390XFMOVDstoreidx_0(v *Value) bool { // match: (FMOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: + // cond: is20Bit(c+d) // result: (FMOVDstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -8271,6 +8780,9 @@ func rewriteValueS390X_OpS390XFMOVDstoreidx_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XFMOVDstoreidx) v.AuxInt = c + d v.Aux = sym @@ -8281,7 +8793,7 @@ func rewriteValueS390X_OpS390XFMOVDstoreidx_0(v *Value) bool { return true } // match: (FMOVDstoreidx [c] {sym} ptr 
(ADDconst [d] idx) val mem) - // cond: + // cond: is20Bit(c+d) // result: (FMOVDstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -8296,6 +8808,9 @@ func rewriteValueS390X_OpS390XFMOVDstoreidx_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XFMOVDstoreidx) v.AuxInt = c + d v.Aux = sym @@ -8308,6 +8823,35 @@ func rewriteValueS390X_OpS390XFMOVDstoreidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XFMOVSload_0(v *Value) bool { + // match: (FMOVSload [off] {sym} ptr1 (FMOVSstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: x + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr1 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XFMOVSstore { + break + } + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is20Bit(off1+off2) // result: (FMOVSload [off1+off2] {sym} ptr mem) @@ -8415,7 +8959,7 @@ func rewriteValueS390X_OpS390XFMOVSload_0(v *Value) bool { } func rewriteValueS390X_OpS390XFMOVSloadidx_0(v *Value) bool { // match: (FMOVSloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: + // cond: is20Bit(c+d) // result: (FMOVSloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8429,6 +8973,9 @@ func rewriteValueS390X_OpS390XFMOVSloadidx_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XFMOVSloadidx) v.AuxInt = c + d v.Aux = sym @@ -8438,7 +8985,7 @@ func rewriteValueS390X_OpS390XFMOVSloadidx_0(v *Value) bool { return true } // match: (FMOVSloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: + // cond: is20Bit(c+d) // result: (FMOVSloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8452,6 
+8999,9 @@ func rewriteValueS390X_OpS390XFMOVSloadidx_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XFMOVSloadidx) v.AuxInt = c + d v.Aux = sym @@ -8578,7 +9128,7 @@ func rewriteValueS390X_OpS390XFMOVSstore_0(v *Value) bool { } func rewriteValueS390X_OpS390XFMOVSstoreidx_0(v *Value) bool { // match: (FMOVSstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: + // cond: is20Bit(c+d) // result: (FMOVSstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -8593,6 +9143,9 @@ func rewriteValueS390X_OpS390XFMOVSstoreidx_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XFMOVSstoreidx) v.AuxInt = c + d v.Aux = sym @@ -8603,7 +9156,7 @@ func rewriteValueS390X_OpS390XFMOVSstoreidx_0(v *Value) bool { return true } // match: (FMOVSstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: + // cond: is20Bit(c+d) // result: (FMOVSstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -8618,6 +9171,9 @@ func rewriteValueS390X_OpS390XFMOVSstoreidx_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XFMOVSstoreidx) v.AuxInt = c + d v.Aux = sym @@ -8629,6 +9185,64 @@ func rewriteValueS390X_OpS390XFMOVSstoreidx_0(v *Value) bool { } return false } +func rewriteValueS390X_OpS390XFNEG_0(v *Value) bool { + // match: (FNEG (LPDFR x)) + // cond: + // result: (LNDFR x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XLPDFR { + break + } + x := v_0.Args[0] + v.reset(OpS390XLNDFR) + v.AddArg(x) + return true + } + // match: (FNEG (LNDFR x)) + // cond: + // result: (LPDFR x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XLNDFR { + break + } + x := v_0.Args[0] + v.reset(OpS390XLPDFR) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XFNEGS_0(v *Value) bool { + // match: (FNEGS (LPDFR x)) + // cond: + // result: (LNDFR x) + 
for { + v_0 := v.Args[0] + if v_0.Op != OpS390XLPDFR { + break + } + x := v_0.Args[0] + v.reset(OpS390XLNDFR) + v.AddArg(x) + return true + } + // match: (FNEGS (LNDFR x)) + // cond: + // result: (LPDFR x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XLNDFR { + break + } + x := v_0.Args[0] + v.reset(OpS390XLPDFR) + v.AddArg(x) + return true + } + return false +} func rewriteValueS390X_OpS390XFSUB_0(v *Value) bool { // match: (FSUB (FMUL y z) x) // cond: @@ -8673,6 +9287,191 @@ func rewriteValueS390X_OpS390XFSUBS_0(v *Value) bool { } return false } +func rewriteValueS390X_OpS390XLDGR_0(v *Value) bool { + b := v.Block + _ = b + // match: (LDGR (SRDconst [1] (SLDconst [1] x))) + // cond: + // result: (LPDFR (LDGR x)) + for { + t := v.Type + v_0 := v.Args[0] + if v_0.Op != OpS390XSRDconst { + break + } + if v_0.AuxInt != 1 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XSLDconst { + break + } + if v_0_0.AuxInt != 1 { + break + } + x := v_0_0.Args[0] + v.reset(OpS390XLPDFR) + v0 := b.NewValue0(v.Pos, OpS390XLDGR, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (LDGR (OR (MOVDconst [-1<<63]) x)) + // cond: + // result: (LNDFR (LDGR x)) + for { + t := v.Type + v_0 := v.Args[0] + if v_0.Op != OpS390XOR { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XMOVDconst { + break + } + if v_0_0.AuxInt != -1<<63 { + break + } + x := v_0.Args[1] + v.reset(OpS390XLNDFR) + v0 := b.NewValue0(v.Pos, OpS390XLDGR, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (LDGR (OR x (MOVDconst [-1<<63]))) + // cond: + // result: (LNDFR (LDGR x)) + for { + t := v.Type + v_0 := v.Args[0] + if v_0.Op != OpS390XOR { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpS390XMOVDconst { + break + } + if v_0_1.AuxInt != -1<<63 { + break + } + v.reset(OpS390XLNDFR) + v0 := b.NewValue0(v.Pos, OpS390XLDGR, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (LDGR x:(ORload [off] {sym} 
(MOVDconst [-1<<63]) ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (LNDFR (LDGR (MOVDload [off] {sym} ptr mem))) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XORload { + break + } + t1 := x.Type + off := x.AuxInt + sym := x.Aux + _ = x.Args[2] + x_0 := x.Args[0] + if x_0.Op != OpS390XMOVDconst { + break + } + if x_0.AuxInt != -1<<63 { + break + } + ptr := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XLNDFR, t) + v.reset(OpCopy) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpS390XLDGR, t) + v2 := b.NewValue0(v.Pos, OpS390XMOVDload, t1) + v2.AuxInt = off + v2.Aux = sym + v2.AddArg(ptr) + v2.AddArg(mem) + v1.AddArg(v2) + v0.AddArg(v1) + return true + } + // match: (LDGR (LGDR x)) + // cond: + // result: x + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XLGDR { + break + } + x := v_0.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XLEDBR_0(v *Value) bool { + // match: (LEDBR (LPDFR (LDEBR x))) + // cond: + // result: (LPDFR x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XLPDFR { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XLDEBR { + break + } + x := v_0_0.Args[0] + v.reset(OpS390XLPDFR) + v.AddArg(x) + return true + } + // match: (LEDBR (LNDFR (LDEBR x))) + // cond: + // result: (LNDFR x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XLNDFR { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XLDEBR { + break + } + x := v_0_0.Args[0] + v.reset(OpS390XLNDFR) + v.AddArg(x) + return true + } + return false +} +func rewriteValueS390X_OpS390XLGDR_0(v *Value) bool { + // match: (LGDR (LDGR x)) + // cond: + // result: (MOVDreg x) + for { + v_0 := v.Args[0] + if v_0.Op != OpS390XLDGR { + break + } + x := v_0.Args[0] + v.reset(OpS390XMOVDreg) + v.AddArg(x) + return true + } + return false +} func rewriteValueS390X_OpS390XLoweredRound32F_0(v *Value) bool { // match: 
(LoweredRound32F x:(FMOVSconst)) // cond: @@ -8706,24 +9505,28 @@ func rewriteValueS390X_OpS390XLoweredRound64F_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVBZload_0(v *Value) bool { - // match: (MOVBZload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // match: (MOVBZload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) // result: (MOVBZreg x) for { off := v.AuxInt sym := v.Aux _ = v.Args[1] - ptr := v.Args[0] + ptr1 := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpS390XMOVBstore { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } _ = v_1.Args[2] ptr2 := v_1.Args[0] x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + if !(isSamePtr(ptr1, ptr2)) { break } v.reset(OpS390XMOVBZreg) @@ -8837,7 +9640,7 @@ func rewriteValueS390X_OpS390XMOVBZload_0(v *Value) bool { } func rewriteValueS390X_OpS390XMOVBZloadidx_0(v *Value) bool { // match: (MOVBZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: + // cond: is20Bit(c+d) // result: (MOVBZloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8851,6 +9654,9 @@ func rewriteValueS390X_OpS390XMOVBZloadidx_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVBZloadidx) v.AuxInt = c + d v.Aux = sym @@ -8860,7 +9666,7 @@ func rewriteValueS390X_OpS390XMOVBZloadidx_0(v *Value) bool { return true } // match: (MOVBZloadidx [c] {sym} idx (ADDconst [d] ptr) mem) - // cond: + // cond: is20Bit(c+d) // result: (MOVBZloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8874,6 +9680,9 @@ func rewriteValueS390X_OpS390XMOVBZloadidx_0(v *Value) bool { d := v_1.AuxInt ptr := v_1.Args[0] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVBZloadidx) v.AuxInt = c + d v.Aux = sym @@ -8883,7 +9692,7 @@ func 
rewriteValueS390X_OpS390XMOVBZloadidx_0(v *Value) bool { return true } // match: (MOVBZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: + // cond: is20Bit(c+d) // result: (MOVBZloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8897,6 +9706,9 @@ func rewriteValueS390X_OpS390XMOVBZloadidx_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVBZloadidx) v.AuxInt = c + d v.Aux = sym @@ -8906,7 +9718,7 @@ func rewriteValueS390X_OpS390XMOVBZloadidx_0(v *Value) bool { return true } // match: (MOVBZloadidx [c] {sym} (ADDconst [d] idx) ptr mem) - // cond: + // cond: is20Bit(c+d) // result: (MOVBZloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8920,6 +9732,9 @@ func rewriteValueS390X_OpS390XMOVBZloadidx_0(v *Value) bool { idx := v_0.Args[0] ptr := v.Args[1] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVBZloadidx) v.AuxInt = c + d v.Aux = sym @@ -9224,6 +10039,32 @@ func rewriteValueS390X_OpS390XMOVBZreg_10(v *Value) bool { v0.AddArg(mem) return true } + // match: (MOVBZreg x:(MOVBload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBZload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBload { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[1] + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } // match: (MOVBZreg x:(MOVBZloadidx [off] {sym} ptr idx mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVBZloadidx [off] {sym} ptr idx mem) @@ -9252,9 +10093,65 @@ func rewriteValueS390X_OpS390XMOVBZreg_10(v *Value) bool { v0.AddArg(mem) return true } + // match: (MOVBZreg x:(MOVBloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: 
@x.Block (MOVBZloadidx [off] {sym} ptr idx mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBloadidx { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVBZloadidx, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } return false } func rewriteValueS390X_OpS390XMOVBload_0(v *Value) bool { + // match: (MOVBload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (MOVBreg x) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr1 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVBstore { + break + } + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XMOVBreg) + v.AddArg(x) + return true + } // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is20Bit(off1+off2) // result: (MOVBload [off1+off2] {sym} ptr mem) @@ -9304,6 +10201,167 @@ func rewriteValueS390X_OpS390XMOVBload_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVBload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVBloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVBloadidx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVBload 
[off] {sym} (ADD ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVBloadidx [off] {sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpS390XADD { + break + } + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVBloadidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVBloadidx_0(v *Value) bool { + // match: (MOVBloadidx [c] {sym} (ADDconst [d] ptr) idx mem) + // cond: is20Bit(c+d) + // result: (MOVBloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } + v.reset(OpS390XMOVBloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVBloadidx [c] {sym} idx (ADDconst [d] ptr) mem) + // cond: is20Bit(c+d) + // result: (MOVBloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + idx := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + d := v_1.AuxInt + ptr := v_1.Args[0] + mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } + v.reset(OpS390XMOVBloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVBloadidx [c] {sym} ptr (ADDconst [d] idx) mem) + // cond: is20Bit(c+d) + // result: (MOVBloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } + v.reset(OpS390XMOVBloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + 
v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVBloadidx [c] {sym} (ADDconst [d] idx) ptr mem) + // cond: is20Bit(c+d) + // result: (MOVBloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + d := v_0.AuxInt + idx := v_0.Args[0] + ptr := v.Args[1] + mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } + v.reset(OpS390XMOVBloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } return false } func rewriteValueS390X_OpS390XMOVBreg_0(v *Value) bool { @@ -9365,7 +10423,7 @@ func rewriteValueS390X_OpS390XMOVBreg_0(v *Value) bool { } // match: (MOVBreg x:(MOVBZload [off] {sym} ptr mem)) // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVBload [off] {sym} ptr mem) + // result: @x.Block (MOVBload [off] {sym} ptr mem) for { x := v.Args[0] if x.Op != OpS390XMOVBZload { @@ -9389,6 +10447,88 @@ func rewriteValueS390X_OpS390XMOVBreg_0(v *Value) bool { v0.AddArg(mem) return true } + // match: (MOVBreg x:(MOVBload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBload { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[1] + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVBload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVBreg x:(MOVBZloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBloadidx [off] {sym} ptr idx mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBZloadidx { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := 
b.NewValue0(v.Pos, OpS390XMOVBloadidx, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + // match: (MOVBreg x:(MOVBloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBloadidx [off] {sym} ptr idx mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVBloadidx { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVBloadidx, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } return false } func rewriteValueS390X_OpS390XMOVBstore_0(v *Value) bool { @@ -9545,7 +10685,7 @@ func rewriteValueS390X_OpS390XMOVBstore_0(v *Value) bool { } // match: (MOVBstore [off] {sym} (ADD ptr idx) val mem) // cond: ptr.Op != OpSB - // result: (MOVBstoreidx [off] {sym} ptr idx val mem) + // result: (MOVBstoreidx [off] {sym} ptr idx val mem) for { off := v.AuxInt sym := v.Aux @@ -10041,8 +11181,8 @@ func rewriteValueS390X_OpS390XMOVBstoreconst_0(v *Value) bool { } func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { // match: (MOVBstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: - // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) + // cond: is20Bit(c+d) + // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -10056,6 +11196,9 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVBstoreidx) v.AuxInt = c + d v.Aux = sym @@ -10066,8 +11209,8 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { return true } // match: (MOVBstoreidx [c] {sym} idx (ADDconst [d] ptr) val mem) - // cond: - // result: (MOVBstoreidx [c+d] 
{sym} ptr idx val mem) + // cond: is20Bit(c+d) + // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -10081,6 +11224,9 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { ptr := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVBstoreidx) v.AuxInt = c + d v.Aux = sym @@ -10091,8 +11237,8 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { return true } // match: (MOVBstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: - // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) + // cond: is20Bit(c+d) + // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -10106,6 +11252,9 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVBstoreidx) v.AuxInt = c + d v.Aux = sym @@ -10116,8 +11265,8 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { return true } // match: (MOVBstoreidx [c] {sym} (ADDconst [d] idx) ptr val mem) - // cond: - // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) + // cond: is20Bit(c+d) + // result: (MOVBstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -10131,6 +11280,9 @@ func rewriteValueS390X_OpS390XMOVBstoreidx_0(v *Value) bool { ptr := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVBstoreidx) v.AuxInt = c + d v.Aux = sym @@ -12329,30 +13481,62 @@ func rewriteValueS390X_OpS390XMOVDaddridx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVDload_0(v *Value) bool { - // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // match: (MOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) // result: (MOVDreg x) for { off := v.AuxInt sym := v.Aux _ = 
v.Args[1] - ptr := v.Args[0] + ptr1 := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpS390XMOVDstore { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } _ = v_1.Args[2] ptr2 := v_1.Args[0] x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + if !(isSamePtr(ptr1, ptr2)) { break } v.reset(OpS390XMOVDreg) v.AddArg(x) return true } + // match: (MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (LGDR x) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr1 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XFMOVDstore { + break + } + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XLGDR) + v.AddArg(x) + return true + } // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is20Bit(off1+off2) // result: (MOVDload [off1+off2] {sym} ptr mem) @@ -12433,7 +13617,7 @@ func rewriteValueS390X_OpS390XMOVDload_0(v *Value) bool { } // match: (MOVDload [off] {sym} (ADD ptr idx) mem) // cond: ptr.Op != OpSB - // result: (MOVDloadidx [off] {sym} ptr idx mem) + // result: (MOVDloadidx [off] {sym} ptr idx mem) for { off := v.AuxInt sym := v.Aux @@ -12461,8 +13645,8 @@ func rewriteValueS390X_OpS390XMOVDload_0(v *Value) bool { } func rewriteValueS390X_OpS390XMOVDloadidx_0(v *Value) bool { // match: (MOVDloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: - // result: (MOVDloadidx [c+d] {sym} ptr idx mem) + // cond: is20Bit(c+d) + // result: (MOVDloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux @@ -12475,6 +13659,9 @@ func rewriteValueS390X_OpS390XMOVDloadidx_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVDloadidx) v.AuxInt = c + d v.Aux = sym @@ -12484,8 +13671,8 @@ func 
rewriteValueS390X_OpS390XMOVDloadidx_0(v *Value) bool { return true } // match: (MOVDloadidx [c] {sym} idx (ADDconst [d] ptr) mem) - // cond: - // result: (MOVDloadidx [c+d] {sym} ptr idx mem) + // cond: is20Bit(c+d) + // result: (MOVDloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux @@ -12498,6 +13685,9 @@ func rewriteValueS390X_OpS390XMOVDloadidx_0(v *Value) bool { d := v_1.AuxInt ptr := v_1.Args[0] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVDloadidx) v.AuxInt = c + d v.Aux = sym @@ -12507,8 +13697,8 @@ func rewriteValueS390X_OpS390XMOVDloadidx_0(v *Value) bool { return true } // match: (MOVDloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: - // result: (MOVDloadidx [c+d] {sym} ptr idx mem) + // cond: is20Bit(c+d) + // result: (MOVDloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux @@ -12521,6 +13711,9 @@ func rewriteValueS390X_OpS390XMOVDloadidx_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVDloadidx) v.AuxInt = c + d v.Aux = sym @@ -12530,8 +13723,8 @@ func rewriteValueS390X_OpS390XMOVDloadidx_0(v *Value) bool { return true } // match: (MOVDloadidx [c] {sym} (ADDconst [d] idx) ptr mem) - // cond: - // result: (MOVDloadidx [c+d] {sym} ptr idx mem) + // cond: is20Bit(c+d) + // result: (MOVDloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt sym := v.Aux @@ -12544,6 +13737,9 @@ func rewriteValueS390X_OpS390XMOVDloadidx_0(v *Value) bool { idx := v_0.Args[0] ptr := v.Args[1] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVDloadidx) v.AuxInt = c + d v.Aux = sym @@ -12807,6 +14003,35 @@ func rewriteValueS390X_OpS390XMOVDnop_0(v *Value) bool { func rewriteValueS390X_OpS390XMOVDnop_10(v *Value) bool { b := v.Block _ = b + // match: (MOVDnop x:(MOVBloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBloadidx [off] {sym} ptr idx mem) + for { + t := v.Type + x 
:= v.Args[0] + if x.Op != OpS390XMOVBloadidx { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVBloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } // match: (MOVDnop x:(MOVHZloadidx [off] {sym} ptr idx mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVHZloadidx [off] {sym} ptr idx mem) @@ -12836,6 +14061,35 @@ func rewriteValueS390X_OpS390XMOVDnop_10(v *Value) bool { v0.AddArg(mem) return true } + // match: (MOVDnop x:(MOVHloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHloadidx [off] {sym} ptr idx mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVHloadidx { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVHloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } // match: (MOVDnop x:(MOVWZloadidx [off] {sym} ptr idx mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWZloadidx [off] {sym} ptr idx mem) @@ -12865,6 +14119,35 @@ func rewriteValueS390X_OpS390XMOVDnop_10(v *Value) bool { v0.AddArg(mem) return true } + // match: (MOVDnop x:(MOVWloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWloadidx [off] {sym} ptr idx mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVWloadidx { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, 
OpS390XMOVWloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } // match: (MOVDnop x:(MOVDloadidx [off] {sym} ptr idx mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVDloadidx [off] {sym} ptr idx mem) @@ -13161,6 +14444,35 @@ func rewriteValueS390X_OpS390XMOVDreg_10(v *Value) bool { v0.AddArg(mem) return true } + // match: (MOVDreg x:(MOVBloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVBloadidx [off] {sym} ptr idx mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVBloadidx { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVBloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } // match: (MOVDreg x:(MOVHZloadidx [off] {sym} ptr idx mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVHZloadidx [off] {sym} ptr idx mem) @@ -13190,6 +14502,35 @@ func rewriteValueS390X_OpS390XMOVDreg_10(v *Value) bool { v0.AddArg(mem) return true } + // match: (MOVDreg x:(MOVHloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHloadidx [off] {sym} ptr idx mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVHloadidx { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVHloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } // match: (MOVDreg x:(MOVWZloadidx [off] {sym} ptr idx mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block 
(MOVWZloadidx [off] {sym} ptr idx mem) @@ -13219,6 +14560,35 @@ func rewriteValueS390X_OpS390XMOVDreg_10(v *Value) bool { v0.AddArg(mem) return true } + // match: (MOVDreg x:(MOVWloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWloadidx [off] {sym} ptr idx mem) + for { + t := v.Type + x := v.Args[0] + if x.Op != OpS390XMOVWloadidx { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVWloadidx, t) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } // match: (MOVDreg x:(MOVDloadidx [off] {sym} ptr idx mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVDloadidx [off] {sym} ptr idx mem) @@ -13361,7 +14731,7 @@ func rewriteValueS390X_OpS390XMOVDstore_0(v *Value) bool { } // match: (MOVDstore [off] {sym} (ADD ptr idx) val mem) // cond: ptr.Op != OpSB - // result: (MOVDstoreidx [off] {sym} ptr idx val mem) + // result: (MOVDstoreidx [off] {sym} ptr idx val mem) for { off := v.AuxInt sym := v.Aux @@ -13560,8 +14930,8 @@ func rewriteValueS390X_OpS390XMOVDstoreconst_0(v *Value) bool { } func rewriteValueS390X_OpS390XMOVDstoreidx_0(v *Value) bool { // match: (MOVDstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: - // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) + // cond: is20Bit(c+d) + // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -13575,6 +14945,9 @@ func rewriteValueS390X_OpS390XMOVDstoreidx_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVDstoreidx) v.AuxInt = c + d v.Aux = sym @@ -13585,8 +14958,8 @@ func rewriteValueS390X_OpS390XMOVDstoreidx_0(v *Value) bool { return true } // match: (MOVDstoreidx [c] {sym} idx (ADDconst [d] ptr) 
val mem) - // cond: - // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) + // cond: is20Bit(c+d) + // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -13600,6 +14973,9 @@ func rewriteValueS390X_OpS390XMOVDstoreidx_0(v *Value) bool { ptr := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVDstoreidx) v.AuxInt = c + d v.Aux = sym @@ -13610,8 +14986,8 @@ func rewriteValueS390X_OpS390XMOVDstoreidx_0(v *Value) bool { return true } // match: (MOVDstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: - // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) + // cond: is20Bit(c+d) + // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -13625,6 +15001,9 @@ func rewriteValueS390X_OpS390XMOVDstoreidx_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVDstoreidx) v.AuxInt = c + d v.Aux = sym @@ -13635,8 +15014,8 @@ func rewriteValueS390X_OpS390XMOVDstoreidx_0(v *Value) bool { return true } // match: (MOVDstoreidx [c] {sym} (ADDconst [d] idx) ptr val mem) - // cond: - // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) + // cond: is20Bit(c+d) + // result: (MOVDstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -13650,6 +15029,9 @@ func rewriteValueS390X_OpS390XMOVDstoreidx_0(v *Value) bool { ptr := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVDstoreidx) v.AuxInt = c + d v.Aux = sym @@ -14701,24 +16083,28 @@ func rewriteValueS390X_OpS390XMOVHBRstoreidx_10(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVHZload_0(v *Value) bool { - // match: (MOVHZload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // match: (MOVHZload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) // 
result: (MOVHZreg x) for { off := v.AuxInt sym := v.Aux _ = v.Args[1] - ptr := v.Args[0] + ptr1 := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpS390XMOVHstore { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } _ = v_1.Args[2] ptr2 := v_1.Args[0] x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + if !(isSamePtr(ptr1, ptr2)) { break } v.reset(OpS390XMOVHZreg) @@ -14833,7 +16219,7 @@ func rewriteValueS390X_OpS390XMOVHZload_0(v *Value) bool { } func rewriteValueS390X_OpS390XMOVHZloadidx_0(v *Value) bool { // match: (MOVHZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: + // cond: is20Bit(c+d) // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -14847,6 +16233,9 @@ func rewriteValueS390X_OpS390XMOVHZloadidx_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVHZloadidx) v.AuxInt = c + d v.Aux = sym @@ -14856,7 +16245,7 @@ func rewriteValueS390X_OpS390XMOVHZloadidx_0(v *Value) bool { return true } // match: (MOVHZloadidx [c] {sym} idx (ADDconst [d] ptr) mem) - // cond: + // cond: is20Bit(c+d) // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -14870,6 +16259,9 @@ func rewriteValueS390X_OpS390XMOVHZloadidx_0(v *Value) bool { d := v_1.AuxInt ptr := v_1.Args[0] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVHZloadidx) v.AuxInt = c + d v.Aux = sym @@ -14879,7 +16271,7 @@ func rewriteValueS390X_OpS390XMOVHZloadidx_0(v *Value) bool { return true } // match: (MOVHZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: + // cond: is20Bit(c+d) // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -14893,6 +16285,9 @@ func rewriteValueS390X_OpS390XMOVHZloadidx_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVHZloadidx) v.AuxInt = c + d v.Aux = 
sym @@ -14902,7 +16297,7 @@ func rewriteValueS390X_OpS390XMOVHZloadidx_0(v *Value) bool { return true } // match: (MOVHZloadidx [c] {sym} (ADDconst [d] idx) ptr mem) - // cond: + // cond: is20Bit(c+d) // result: (MOVHZloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -14916,6 +16311,9 @@ func rewriteValueS390X_OpS390XMOVHZloadidx_0(v *Value) bool { idx := v_0.Args[0] ptr := v.Args[1] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVHZloadidx) v.AuxInt = c + d v.Aux = sym @@ -15034,6 +16432,32 @@ func rewriteValueS390X_OpS390XMOVHZreg_0(v *Value) bool { v0.AddArg(mem) return true } + // match: (MOVHZreg x:(MOVHload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHZload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVHload { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[1] + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } // match: (MOVHZreg x:(MOVHZloadidx [off] {sym} ptr idx mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVHZloadidx [off] {sym} ptr idx mem) @@ -15062,9 +16486,65 @@ func rewriteValueS390X_OpS390XMOVHZreg_0(v *Value) bool { v0.AddArg(mem) return true } + // match: (MOVHZreg x:(MOVHloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHZloadidx [off] {sym} ptr idx mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVHloadidx { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVHZloadidx, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + 
return true + } return false } func rewriteValueS390X_OpS390XMOVHload_0(v *Value) bool { + // match: (MOVHload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (MOVHreg x) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr1 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVHstore { + break + } + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XMOVHreg) + v.AddArg(x) + return true + } // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is20Bit(off1+off2) // result: (MOVHload [off1+off2] {sym} ptr mem) @@ -15115,6 +16595,167 @@ func rewriteValueS390X_OpS390XMOVHload_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVHload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVHloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVHloadidx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVHload [off] {sym} (ADD ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVHloadidx [off] {sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpS390XADD { + break + } + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVHloadidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + 
} + return false +} +func rewriteValueS390X_OpS390XMOVHloadidx_0(v *Value) bool { + // match: (MOVHloadidx [c] {sym} (ADDconst [d] ptr) idx mem) + // cond: is20Bit(c+d) + // result: (MOVHloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } + v.reset(OpS390XMOVHloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVHloadidx [c] {sym} idx (ADDconst [d] ptr) mem) + // cond: is20Bit(c+d) + // result: (MOVHloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + idx := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + d := v_1.AuxInt + ptr := v_1.Args[0] + mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } + v.reset(OpS390XMOVHloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVHloadidx [c] {sym} ptr (ADDconst [d] idx) mem) + // cond: is20Bit(c+d) + // result: (MOVHloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } + v.reset(OpS390XMOVHloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVHloadidx [c] {sym} (ADDconst [d] idx) ptr mem) + // cond: is20Bit(c+d) + // result: (MOVHloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + d := v_0.AuxInt + idx := v_0.Args[0] + ptr := v.Args[1] + mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } + v.reset(OpS390XMOVHloadidx) + v.AuxInt = c + 
d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } return false } func rewriteValueS390X_OpS390XMOVHreg_0(v *Value) bool { @@ -15226,7 +16867,7 @@ func rewriteValueS390X_OpS390XMOVHreg_0(v *Value) bool { } // match: (MOVHreg x:(MOVHZload [off] {sym} ptr mem)) // cond: x.Uses == 1 && clobber(x) - // result: @x.Block (MOVHload [off] {sym} ptr mem) + // result: @x.Block (MOVHload [off] {sym} ptr mem) for { x := v.Args[0] if x.Op != OpS390XMOVHZload { @@ -15250,6 +16891,93 @@ func rewriteValueS390X_OpS390XMOVHreg_0(v *Value) bool { v0.AddArg(mem) return true } + // match: (MOVHreg x:(MOVHload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVHload { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[1] + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVHload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVHreg_10(v *Value) bool { + b := v.Block + _ = b + // match: (MOVHreg x:(MOVHZloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHloadidx [off] {sym} ptr idx mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVHZloadidx { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVHloadidx, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + // match: (MOVHreg x:(MOVHloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVHloadidx [off] {sym} ptr idx mem) + for { + x := v.Args[0] 
+ if x.Op != OpS390XMOVHloadidx { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVHloadidx, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } return false } func rewriteValueS390X_OpS390XMOVHstore_0(v *Value) bool { @@ -15407,7 +17135,7 @@ func rewriteValueS390X_OpS390XMOVHstore_0(v *Value) bool { } // match: (MOVHstore [off] {sym} (ADD ptr idx) val mem) // cond: ptr.Op != OpSB - // result: (MOVHstoreidx [off] {sym} ptr idx val mem) + // result: (MOVHstoreidx [off] {sym} ptr idx val mem) for { off := v.AuxInt sym := v.Aux @@ -15720,8 +17448,8 @@ func rewriteValueS390X_OpS390XMOVHstoreconst_0(v *Value) bool { } func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { // match: (MOVHstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: - // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) + // cond: is20Bit(c+d) + // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -15735,6 +17463,9 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVHstoreidx) v.AuxInt = c + d v.Aux = sym @@ -15745,8 +17476,8 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { return true } // match: (MOVHstoreidx [c] {sym} idx (ADDconst [d] ptr) val mem) - // cond: - // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) + // cond: is20Bit(c+d) + // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -15760,6 +17491,9 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { ptr := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVHstoreidx) v.AuxInt = c + d v.Aux = sym 
@@ -15770,8 +17504,8 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { return true } // match: (MOVHstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: - // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) + // cond: is20Bit(c+d) + // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -15785,6 +17519,9 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVHstoreidx) v.AuxInt = c + d v.Aux = sym @@ -15795,8 +17532,8 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { return true } // match: (MOVHstoreidx [c] {sym} (ADDconst [d] idx) ptr val mem) - // cond: - // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) + // cond: is20Bit(c+d) + // result: (MOVHstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -15810,6 +17547,9 @@ func rewriteValueS390X_OpS390XMOVHstoreidx_0(v *Value) bool { ptr := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVHstoreidx) v.AuxInt = c + d v.Aux = sym @@ -17186,24 +18926,28 @@ func rewriteValueS390X_OpS390XMOVWBRstoreidx_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XMOVWZload_0(v *Value) bool { - // match: (MOVWZload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) - // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // match: (MOVWZload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) // result: (MOVWZreg x) for { off := v.AuxInt sym := v.Aux _ = v.Args[1] - ptr := v.Args[0] + ptr1 := v.Args[0] v_1 := v.Args[1] if v_1.Op != OpS390XMOVWstore { break } - off2 := v_1.AuxInt - sym2 := v_1.Aux + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } _ = v_1.Args[2] ptr2 := v_1.Args[0] x := v_1.Args[1] - if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + if !(isSamePtr(ptr1, ptr2)) { break } 
v.reset(OpS390XMOVWZreg) @@ -17318,7 +19062,7 @@ func rewriteValueS390X_OpS390XMOVWZload_0(v *Value) bool { } func rewriteValueS390X_OpS390XMOVWZloadidx_0(v *Value) bool { // match: (MOVWZloadidx [c] {sym} (ADDconst [d] ptr) idx mem) - // cond: + // cond: is20Bit(c+d) // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -17332,6 +19076,9 @@ func rewriteValueS390X_OpS390XMOVWZloadidx_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVWZloadidx) v.AuxInt = c + d v.Aux = sym @@ -17341,7 +19088,7 @@ func rewriteValueS390X_OpS390XMOVWZloadidx_0(v *Value) bool { return true } // match: (MOVWZloadidx [c] {sym} idx (ADDconst [d] ptr) mem) - // cond: + // cond: is20Bit(c+d) // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -17355,6 +19102,9 @@ func rewriteValueS390X_OpS390XMOVWZloadidx_0(v *Value) bool { d := v_1.AuxInt ptr := v_1.Args[0] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVWZloadidx) v.AuxInt = c + d v.Aux = sym @@ -17364,7 +19114,7 @@ func rewriteValueS390X_OpS390XMOVWZloadidx_0(v *Value) bool { return true } // match: (MOVWZloadidx [c] {sym} ptr (ADDconst [d] idx) mem) - // cond: + // cond: is20Bit(c+d) // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -17378,6 +19128,9 @@ func rewriteValueS390X_OpS390XMOVWZloadidx_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVWZloadidx) v.AuxInt = c + d v.Aux = sym @@ -17387,7 +19140,7 @@ func rewriteValueS390X_OpS390XMOVWZloadidx_0(v *Value) bool { return true } // match: (MOVWZloadidx [c] {sym} (ADDconst [d] idx) ptr mem) - // cond: + // cond: is20Bit(c+d) // result: (MOVWZloadidx [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -17401,6 +19154,9 @@ func rewriteValueS390X_OpS390XMOVWZloadidx_0(v *Value) bool { idx := v_0.Args[0] ptr := v.Args[1] mem := v.Args[2] + if !(is20Bit(c + 
d)) { + break + } v.reset(OpS390XMOVWZloadidx) v.AuxInt = c + d v.Aux = sym @@ -17544,6 +19300,37 @@ func rewriteValueS390X_OpS390XMOVWZreg_0(v *Value) bool { v0.AddArg(mem) return true } + // match: (MOVWZreg x:(MOVWload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWZload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVWload { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[1] + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWZreg_10(v *Value) bool { + b := v.Block + _ = b // match: (MOVWZreg x:(MOVWZloadidx [off] {sym} ptr idx mem)) // cond: x.Uses == 1 && clobber(x) // result: @x.Block (MOVWZloadidx [off] {sym} ptr idx mem) @@ -17572,9 +19359,65 @@ func rewriteValueS390X_OpS390XMOVWZreg_0(v *Value) bool { v0.AddArg(mem) return true } + // match: (MOVWZreg x:(MOVWloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWZloadidx [off] {sym} ptr idx mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVWloadidx { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVWZloadidx, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } return false } func rewriteValueS390X_OpS390XMOVWload_0(v *Value) bool { + // match: (MOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (MOVWreg x) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + ptr1 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != 
OpS390XMOVWstore { + break + } + if v_1.AuxInt != off { + break + } + if v_1.Aux != sym { + break + } + _ = v_1.Args[2] + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XMOVWreg) + v.AddArg(x) + return true + } // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: is20Bit(off1+off2) // result: (MOVWload [off1+off2] {sym} ptr mem) @@ -17625,6 +19468,167 @@ func rewriteValueS390X_OpS390XMOVWload_0(v *Value) bool { v.AddArg(mem) return true } + // match: (MOVWload [off1] {sym1} (MOVDaddridx [off2] {sym2} ptr idx) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWloadidx [off1+off2] {mergeSym(sym1,sym2)} ptr idx mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDaddridx { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpS390XMOVWloadidx) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWload [off] {sym} (ADD ptr idx) mem) + // cond: ptr.Op != OpSB + // result: (MOVWloadidx [off] {sym} ptr idx mem) + for { + off := v.AuxInt + sym := v.Aux + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpS390XADD { + break + } + _ = v_0.Args[1] + ptr := v_0.Args[0] + idx := v_0.Args[1] + mem := v.Args[1] + if !(ptr.Op != OpSB) { + break + } + v.reset(OpS390XMOVWloadidx) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueS390X_OpS390XMOVWloadidx_0(v *Value) bool { + // match: (MOVWloadidx [c] {sym} (ADDconst [d] ptr) idx mem) + // cond: is20Bit(c+d) + // result: (MOVWloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + 
break + } + d := v_0.AuxInt + ptr := v_0.Args[0] + idx := v.Args[1] + mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } + v.reset(OpS390XMOVWloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWloadidx [c] {sym} idx (ADDconst [d] ptr) mem) + // cond: is20Bit(c+d) + // result: (MOVWloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + idx := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + d := v_1.AuxInt + ptr := v_1.Args[0] + mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } + v.reset(OpS390XMOVWloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWloadidx [c] {sym} ptr (ADDconst [d] idx) mem) + // cond: is20Bit(c+d) + // result: (MOVWloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + d := v_1.AuxInt + idx := v_1.Args[0] + mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } + v.reset(OpS390XMOVWloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } + // match: (MOVWloadidx [c] {sym} (ADDconst [d] idx) ptr mem) + // cond: is20Bit(c+d) + // result: (MOVWloadidx [c+d] {sym} ptr idx mem) + for { + c := v.AuxInt + sym := v.Aux + _ = v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpS390XADDconst { + break + } + d := v_0.AuxInt + idx := v_0.Args[0] + ptr := v.Args[1] + mem := v.Args[2] + if !(is20Bit(c + d)) { + break + } + v.reset(OpS390XMOVWloadidx) + v.AuxInt = c + d + v.Aux = sym + v.AddArg(ptr) + v.AddArg(idx) + v.AddArg(mem) + return true + } return false } func rewriteValueS390X_OpS390XMOVWreg_0(v *Value) bool { @@ -17789,7 +19793,7 @@ func rewriteValueS390X_OpS390XMOVWreg_10(v *Value) bool { } // match: (MOVWreg x:(MOVWZload [off] {sym} ptr mem)) // cond: x.Uses == 1 && clobber(x) - 
// result: @x.Block (MOVWload [off] {sym} ptr mem) + // result: @x.Block (MOVWload [off] {sym} ptr mem) for { x := v.Args[0] if x.Op != OpS390XMOVWZload { @@ -17813,6 +19817,88 @@ func rewriteValueS390X_OpS390XMOVWreg_10(v *Value) bool { v0.AddArg(mem) return true } + // match: (MOVWreg x:(MOVWload [off] {sym} ptr mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWload [off] {sym} ptr mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVWload { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[1] + ptr := x.Args[0] + mem := x.Args[1] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVWload, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(mem) + return true + } + // match: (MOVWreg x:(MOVWZloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWloadidx [off] {sym} ptr idx mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVWZloadidx { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVWloadidx, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } + // match: (MOVWreg x:(MOVWloadidx [off] {sym} ptr idx mem)) + // cond: x.Uses == 1 && clobber(x) + // result: @x.Block (MOVWloadidx [off] {sym} ptr idx mem) + for { + x := v.Args[0] + if x.Op != OpS390XMOVWloadidx { + break + } + off := x.AuxInt + sym := x.Aux + _ = x.Args[2] + ptr := x.Args[0] + idx := x.Args[1] + mem := x.Args[2] + if !(x.Uses == 1 && clobber(x)) { + break + } + b = x.Block + v0 := b.NewValue0(v.Pos, OpS390XMOVWloadidx, v.Type) + v.reset(OpCopy) + v.AddArg(v0) + v0.AuxInt = off + v0.Aux = sym + v0.AddArg(ptr) + v0.AddArg(idx) + v0.AddArg(mem) + return true + } return false } func 
rewriteValueS390X_OpS390XMOVWstore_0(v *Value) bool { @@ -17970,7 +20056,7 @@ func rewriteValueS390X_OpS390XMOVWstore_0(v *Value) bool { } // match: (MOVWstore [off] {sym} (ADD ptr idx) val mem) // cond: ptr.Op != OpSB - // result: (MOVWstoreidx [off] {sym} ptr idx val mem) + // result: (MOVWstoreidx [off] {sym} ptr idx val mem) for { off := v.AuxInt sym := v.Aux @@ -18305,8 +20391,8 @@ func rewriteValueS390X_OpS390XMOVWstoreconst_0(v *Value) bool { } func rewriteValueS390X_OpS390XMOVWstoreidx_0(v *Value) bool { // match: (MOVWstoreidx [c] {sym} (ADDconst [d] ptr) idx val mem) - // cond: - // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) + // cond: is20Bit(c+d) + // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -18320,6 +20406,9 @@ func rewriteValueS390X_OpS390XMOVWstoreidx_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVWstoreidx) v.AuxInt = c + d v.Aux = sym @@ -18330,8 +20419,8 @@ func rewriteValueS390X_OpS390XMOVWstoreidx_0(v *Value) bool { return true } // match: (MOVWstoreidx [c] {sym} idx (ADDconst [d] ptr) val mem) - // cond: - // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) + // cond: is20Bit(c+d) + // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -18345,6 +20434,9 @@ func rewriteValueS390X_OpS390XMOVWstoreidx_0(v *Value) bool { ptr := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVWstoreidx) v.AuxInt = c + d v.Aux = sym @@ -18355,8 +20447,8 @@ func rewriteValueS390X_OpS390XMOVWstoreidx_0(v *Value) bool { return true } // match: (MOVWstoreidx [c] {sym} ptr (ADDconst [d] idx) val mem) - // cond: - // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) + // cond: is20Bit(c+d) + // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -18370,6 +20462,9 @@ func rewriteValueS390X_OpS390XMOVWstoreidx_0(v 
*Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVWstoreidx) v.AuxInt = c + d v.Aux = sym @@ -18380,8 +20475,8 @@ func rewriteValueS390X_OpS390XMOVWstoreidx_0(v *Value) bool { return true } // match: (MOVWstoreidx [c] {sym} (ADDconst [d] idx) ptr val mem) - // cond: - // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) + // cond: is20Bit(c+d) + // result: (MOVWstoreidx [c+d] {sym} ptr idx val mem) for { c := v.AuxInt sym := v.Aux @@ -18395,6 +20490,9 @@ func rewriteValueS390X_OpS390XMOVWstoreidx_0(v *Value) bool { ptr := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is20Bit(c + d)) { + break + } v.reset(OpS390XMOVWstoreidx) v.AuxInt = c + d v.Aux = sym @@ -19085,10 +21183,101 @@ func rewriteValueS390X_OpS390XMULLDconst_0(v *Value) bool { } return false } +func rewriteValueS390X_OpS390XMULLDload_0(v *Value) bool { + b := v.Block + _ = b + // match: (MULLDload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (MULLD x (LGDR y)) + for { + t := v.Type + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr1 := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFMOVDstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + ptr2 := v_2.Args[0] + y := v_2.Args[1] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XMULLD) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpS390XLGDR, t) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // result: (MULLDload [off1+off2] {sym} x ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + off2 := v_1.AuxInt + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + break + } + v.reset(OpS390XMULLDload) 
+ v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) + // result: (MULLDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := v.AuxInt + s1 := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := v_1.AuxInt + s2 := v_1.Aux + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XMULLDload) + v.AuxInt = o1 + o2 + v.Aux = mergeSym(s1, s2) + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { // match: (MULLW x (MOVDconst [c])) // cond: - // result: (MULLWconst [c] x) + // result: (MULLWconst [int64(int32(c))] x) for { _ = v.Args[1] x := v.Args[0] @@ -19098,13 +21287,13 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpS390XMULLWconst) - v.AuxInt = c + v.AuxInt = int64(int32(c)) v.AddArg(x) return true } // match: (MULLW (MOVDconst [c]) x) // cond: - // result: (MULLWconst [c] x) + // result: (MULLWconst [int64(int32(c))] x) for { _ = v.Args[1] v_0 := v.Args[0] @@ -19114,7 +21303,7 @@ func rewriteValueS390X_OpS390XMULLW_0(v *Value) bool { c := v_0.AuxInt x := v.Args[1] v.reset(OpS390XMULLWconst) - v.AuxInt = c + v.AuxInt = int64(int32(c)) v.AddArg(x) return true } @@ -19447,6 +21636,62 @@ func rewriteValueS390X_OpS390XMULLWconst_0(v *Value) bool { } return false } +func rewriteValueS390X_OpS390XMULLWload_0(v *Value) bool { + // match: (MULLWload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // result: (MULLWload [off1+off2] {sym} x ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + off2 := 
v_1.AuxInt + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + break + } + v.reset(OpS390XMULLWload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) + // result: (MULLWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := v.AuxInt + s1 := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := v_1.AuxInt + s2 := v_1.Aux + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XMULLWload) + v.AuxInt = o1 + o2 + v.Aux = mergeSym(s1, s2) + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} func rewriteValueS390X_OpS390XNEG_0(v *Value) bool { // match: (NEG (MOVDconst [c])) // cond: @@ -19540,6 +21785,8 @@ func rewriteValueS390X_OpS390XNOTW_0(v *Value) bool { return false } func rewriteValueS390X_OpS390XOR_0(v *Value) bool { + b := v.Block + _ = b // match: (OR x (MOVDconst [c])) // cond: isU32Bit(c) // result: (ORconst [c] x) @@ -19632,6 +21879,227 @@ func rewriteValueS390X_OpS390XOR_0(v *Value) bool { v.AddArg(x) return true } + // match: (OR (MOVDconst [-1<<63]) (LGDR x)) + // cond: + // result: (LGDR (LNDFR x)) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + if v_0.AuxInt != -1<<63 { + break + } + v_1 := v.Args[1] + if v_1.Op != OpS390XLGDR { + break + } + t := v_1.Type + x := v_1.Args[0] + v.reset(OpS390XLGDR) + v.Type = t + v0 := b.NewValue0(v.Pos, OpS390XLNDFR, x.Type) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (OR (LGDR x) (MOVDconst [-1<<63])) + // cond: + // result: (LGDR (LNDFR x)) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpS390XLGDR { + break + } + t := v_0.Type + x := v_0.Args[0] + v_1 := v.Args[1] 
+ if v_1.Op != OpS390XMOVDconst { + break + } + if v_1.AuxInt != -1<<63 { + break + } + v.reset(OpS390XLGDR) + v.Type = t + v0 := b.NewValue0(v.Pos, OpS390XLNDFR, x.Type) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (OR (SLDconst [63] (SRDconst [63] (LGDR x))) (LGDR (LPDFR y))) + // cond: + // result: (LGDR (CPSDR y x)) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpS390XSLDconst { + break + } + if v_0.AuxInt != 63 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XSRDconst { + break + } + if v_0_0.AuxInt != 63 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XLGDR { + break + } + x := v_0_0_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XLGDR { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpS390XLPDFR { + break + } + t := v_1_0.Type + y := v_1_0.Args[0] + v.reset(OpS390XLGDR) + v0 := b.NewValue0(v.Pos, OpS390XCPSDR, t) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (OR (LGDR (LPDFR y)) (SLDconst [63] (SRDconst [63] (LGDR x)))) + // cond: + // result: (LGDR (CPSDR y x)) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpS390XLGDR { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XLPDFR { + break + } + t := v_0_0.Type + y := v_0_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XSLDconst { + break + } + if v_1.AuxInt != 63 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpS390XSRDconst { + break + } + if v_1_0.AuxInt != 63 { + break + } + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpS390XLGDR { + break + } + x := v_1_0_0.Args[0] + v.reset(OpS390XLGDR) + v0 := b.NewValue0(v.Pos, OpS390XCPSDR, t) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (OR (SLDconst [63] (SRDconst [63] (LGDR x))) (MOVDconst [c])) + // cond: c & -1<<63 == 0 + // result: (LGDR (CPSDR (FMOVDconst [c]) x)) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpS390XSLDconst { + break + } + if v_0.AuxInt != 63 { + break + } + v_0_0 := v_0.Args[0] + 
if v_0_0.Op != OpS390XSRDconst { + break + } + if v_0_0.AuxInt != 63 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpS390XLGDR { + break + } + x := v_0_0_0.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDconst { + break + } + c := v_1.AuxInt + if !(c&-1<<63 == 0) { + break + } + v.reset(OpS390XLGDR) + v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type) + v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type) + v1.AuxInt = c + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (OR (MOVDconst [c]) (SLDconst [63] (SRDconst [63] (LGDR x)))) + // cond: c & -1<<63 == 0 + // result: (LGDR (CPSDR (FMOVDconst [c]) x)) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpS390XMOVDconst { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpS390XSLDconst { + break + } + if v_1.AuxInt != 63 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpS390XSRDconst { + break + } + if v_1_0.AuxInt != 63 { + break + } + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpS390XLGDR { + break + } + x := v_1_0_0.Args[0] + if !(c&-1<<63 == 0) { + break + } + v.reset(OpS390XLGDR) + v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type) + v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type) + v1.AuxInt = c + v0.AddArg(v1) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueS390X_OpS390XOR_10(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (OR (MOVDconst [c]) (MOVDconst [d])) // cond: // result: (MOVDconst [c|d]) @@ -19768,13 +22236,6 @@ func rewriteValueS390X_OpS390XOR_0(v *Value) bool { v.AddArg(mem) return true } - return false -} -func rewriteValueS390X_OpS390XOR_10(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ // match: (OR x g:(MOVDload [off] {sym} ptr mem)) // cond: ptr.Op != OpSB && is20Bit(off) && canMergeLoad(v, g, x) && clobber(g) // result: (ORload [off] {sym} x ptr mem) @@ -19950,6 +22411,13 @@ func rewriteValueS390X_OpS390XOR_10(v 
*Value) bool { v0.AddArg(mem) return true } + return false +} +func rewriteValueS390X_OpS390XOR_20(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (OR sh:(SLDconst [16] x0:(MOVHZload [i0] {s} p mem)) x1:(MOVHZload [i1] {s} p mem)) // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem) @@ -20289,13 +22757,6 @@ func rewriteValueS390X_OpS390XOR_10(v *Value) bool { v0.AddArg(y) return true } - return false -} -func rewriteValueS390X_OpS390XOR_20(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem))) s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem))) // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZload [i0] {s} p mem)) y) @@ -20552,6 +23013,13 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { v0.AddArg(y) return true } + return false +} +func rewriteValueS390X_OpS390XOR_30(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVHZload [i1] {s} p mem))) s0:(SLDconst [j0] x0:(MOVHZload [i0] {s} p mem))) // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZload [i0] {s} p mem)) y) @@ -20886,13 +23354,6 @@ func rewriteValueS390X_OpS390XOR_20(v *Value) bool { v0.AddArg(mem) return true } - return false -} -func 
rewriteValueS390X_OpS390XOR_30(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ // match: (OR sh:(SLDconst [8] x0:(MOVBZloadidx [i0] {s} idx p mem)) x1:(MOVBZloadidx [i1] {s} p idx mem)) // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZloadidx [i0] {s} p idx mem) @@ -21109,6 +23570,13 @@ func rewriteValueS390X_OpS390XOR_30(v *Value) bool { v0.AddArg(mem) return true } + return false +} +func rewriteValueS390X_OpS390XOR_40(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (OR x1:(MOVHZloadidx [i1] {s} idx p mem) sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} p idx mem))) // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) @@ -21433,13 +23901,6 @@ func rewriteValueS390X_OpS390XOR_30(v *Value) bool { v0.AddArg(mem) return true } - return false -} -func rewriteValueS390X_OpS390XOR_40(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ // match: (OR sh:(SLDconst [16] x0:(MOVHZloadidx [i0] {s} idx p mem)) x1:(MOVHZloadidx [i1] {s} idx p mem)) // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZloadidx [i0] {s} p idx mem) @@ -21656,6 +24117,13 @@ func rewriteValueS390X_OpS390XOR_40(v *Value) bool { v0.AddArg(mem) return true } + return false +} +func rewriteValueS390X_OpS390XOR_50(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (OR x1:(MOVWZloadidx [i1] {s} idx p mem) sh:(SLDconst [32] x0:(MOVWZloadidx [i0] {s} idx p mem))) // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && 
x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVDloadidx [i0] {s} p idx mem) @@ -21995,13 +24463,6 @@ func rewriteValueS390X_OpS390XOR_40(v *Value) bool { v0.AddArg(y) return true } - return false -} -func rewriteValueS390X_OpS390XOR_50(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)) y)) // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) @@ -22278,6 +24739,13 @@ func rewriteValueS390X_OpS390XOR_50(v *Value) bool { v0.AddArg(y) return true } + return false +} +func rewriteValueS390X_OpS390XOR_60(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem)))) // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) @@ -22692,13 +25160,6 @@ func rewriteValueS390X_OpS390XOR_50(v *Value) bool { v0.AddArg(y) return true } - return false -} -func rewriteValueS390X_OpS390XOR_60(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem))) // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 
&& x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) @@ -22975,6 +25436,13 @@ func rewriteValueS390X_OpS390XOR_60(v *Value) bool { v0.AddArg(y) return true } + return false +} +func rewriteValueS390X_OpS390XOR_70(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVHZloadidx [i0] {s} p idx mem)) y) @@ -23389,13 +25857,6 @@ func rewriteValueS390X_OpS390XOR_60(v *Value) bool { v0.AddArg(y) return true } - return false -} -func rewriteValueS390X_OpS390XOR_70(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ // match: (OR s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem)) or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} p idx mem)))) // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) @@ -23672,6 +26133,13 @@ func rewriteValueS390X_OpS390XOR_70(v *Value) bool { v0.AddArg(y) return true } + return false +} +func rewriteValueS390X_OpS390XOR_80(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (OR or:(OR s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem)) y) 
s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} p idx mem))) // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) @@ -24086,13 +26554,6 @@ func rewriteValueS390X_OpS390XOR_70(v *Value) bool { v0.AddArg(y) return true } - return false -} -func rewriteValueS390X_OpS390XOR_80(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ // match: (OR or:(OR y s1:(SLDconst [j1] x1:(MOVHZloadidx [i1] {s} idx p mem))) s0:(SLDconst [j0] x0:(MOVHZloadidx [i0] {s} idx p mem))) // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j1] (MOVWZloadidx [i0] {s} p idx mem)) y) @@ -24323,6 +26784,13 @@ func rewriteValueS390X_OpS390XOR_80(v *Value) bool { v0.AddArg(v1) return true } + return false +} +func rewriteValueS390X_OpS390XOR_90(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (OR sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))) r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRload [i0] {s} p mem)) @@ -24694,13 +27162,6 @@ func rewriteValueS390X_OpS390XOR_80(v *Value) bool { v0.AddArg(y) return true } - return false -} -func rewriteValueS390X_OpS390XOR_90(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ // match: (OR or:(OR y 
s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem))) s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem))) // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y) @@ -24989,6 +27450,13 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool { v0.AddArg(y) return true } + return false +} +func rewriteValueS390X_OpS390XOR_100(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (OR or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)))) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem)))) // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y) @@ -25343,13 +27811,6 @@ func rewriteValueS390X_OpS390XOR_90(v *Value) bool { v0.AddArg(v1) return true } - return false -} -func rewriteValueS390X_OpS390XOR_100(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ // match: (OR sh:(SLDconst [8] x1:(MOVBZloadidx [i1] {s} idx p mem)) x0:(MOVBZloadidx [i0] {s} p idx mem)) // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem)) @@ -25582,6 +28043,13 @@ func rewriteValueS390X_OpS390XOR_100(v *Value) bool { v0.AddArg(v1) return true } + return false +} +func rewriteValueS390X_OpS390XOR_110(v *Value) bool { + 
b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (OR r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem)) sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) @@ -25966,13 +28434,6 @@ func rewriteValueS390X_OpS390XOR_100(v *Value) bool { v0.AddArg(v1) return true } - return false -} -func rewriteValueS390X_OpS390XOR_110(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ // match: (OR sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem)) @@ -26223,6 +28684,13 @@ func rewriteValueS390X_OpS390XOR_110(v *Value) bool { v0.AddArg(mem) return true } + return false +} +func rewriteValueS390X_OpS390XOR_120(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (OR r0:(MOVWZreg x0:(MOVWBRloadidx [i0] {s} idx p mem)) sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRloadidx [i1] {s} idx p mem)))) // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(sh) // result: @mergePoint(b,x0,x1) (MOVDBRloadidx [i0] {s} p idx mem) @@ -26604,13 +29072,6 @@ func rewriteValueS390X_OpS390XOR_110(v *Value) bool { v0.AddArg(y) return true } - return false -} -func rewriteValueS390X_OpS390XOR_120(v *Value) bool { - b := v.Block - _ = 
b - typ := &b.Func.Config.Types - _ = typ // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(OR s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)) y)) // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) @@ -26895,6 +29356,13 @@ func rewriteValueS390X_OpS390XOR_120(v *Value) bool { v0.AddArg(y) return true } + return false +} +func rewriteValueS390X_OpS390XOR_130(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (OR s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem)) or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} p idx mem)))) // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) @@ -27321,13 +29789,6 @@ func rewriteValueS390X_OpS390XOR_120(v *Value) bool { v0.AddArg(y) return true } - return false -} -func rewriteValueS390X_OpS390XOR_130(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ // match: (OR or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} p idx mem))) // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) @@ 
-27612,6 +30073,13 @@ func rewriteValueS390X_OpS390XOR_130(v *Value) bool { v0.AddArg(y) return true } + return false +} +func rewriteValueS390X_OpS390XOR_140(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (OR or:(OR y s0:(SLDconst [j0] x0:(MOVBZloadidx [i0] {s} idx p mem))) s1:(SLDconst [j1] x1:(MOVBZloadidx [i1] {s} idx p mem))) // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVHZreg (MOVHBRloadidx [i0] {s} p idx mem))) y) @@ -28078,13 +30546,6 @@ func rewriteValueS390X_OpS390XOR_130(v *Value) bool { v0.AddArg(y) return true } - return false -} -func rewriteValueS390X_OpS390XOR_140(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem))) or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} p idx mem))))) // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) @@ -28401,6 +30862,13 @@ func rewriteValueS390X_OpS390XOR_140(v *Value) bool { v0.AddArg(y) return true } + return false +} +func rewriteValueS390X_OpS390XOR_150(v *Value) bool { + b := v.Block + _ = b + typ := &b.Func.Config.Types + _ = typ // match: (OR or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem))) y) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} p idx mem)))) // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && 
x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) @@ -28875,13 +31343,6 @@ func rewriteValueS390X_OpS390XOR_140(v *Value) bool { v0.AddArg(y) return true } - return false -} -func rewriteValueS390X_OpS390XOR_150(v *Value) bool { - b := v.Block - _ = b - typ := &b.Func.Config.Types - _ = typ // match: (OR or:(OR y s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRloadidx [i0] {s} idx p mem)))) s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRloadidx [i1] {s} idx p mem)))) // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0) && clobber(x1) && clobber(r0) && clobber(r1) && clobber(s0) && clobber(s1) && clobber(or) // result: @mergePoint(b,x0,x1) (OR (SLDconst [j0] (MOVWZreg (MOVWBRloadidx [i0] {s} p idx mem))) y) @@ -28966,7 +31427,7 @@ func rewriteValueS390X_OpS390XOR_150(v *Value) bool { func rewriteValueS390X_OpS390XORW_0(v *Value) bool { // match: (ORW x (MOVDconst [c])) // cond: - // result: (ORWconst [c] x) + // result: (ORWconst [int64(int32(c))] x) for { _ = v.Args[1] x := v.Args[0] @@ -28976,13 +31437,13 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpS390XORWconst) - v.AuxInt = c + v.AuxInt = int64(int32(c)) v.AddArg(x) return true } // match: (ORW (MOVDconst [c]) x) // cond: - // result: (ORWconst [c] x) + // result: (ORWconst [int64(int32(c))] x) for { _ = v.Args[1] v_0 := v.Args[0] @@ -28992,7 +31453,7 @@ func rewriteValueS390X_OpS390XORW_0(v *Value) bool { c := v_0.AuxInt x := v.Args[1] v.reset(OpS390XORWconst) - v.AuxInt = c + v.AuxInt = int64(int32(c)) v.AddArg(x) return true } @@ -34376,6 
+36837,62 @@ func rewriteValueS390X_OpS390XORWconst_0(v *Value) bool { } return false } +func rewriteValueS390X_OpS390XORWload_0(v *Value) bool { + // match: (ORWload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // result: (ORWload [off1+off2] {sym} x ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + off2 := v_1.AuxInt + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + break + } + v.reset(OpS390XORWload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) + // result: (ORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := v.AuxInt + s1 := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := v_1.AuxInt + s2 := v_1.Aux + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XORWload) + v.AuxInt = o1 + o2 + v.Aux = mergeSym(s1, s2) + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} func rewriteValueS390X_OpS390XORconst_0(v *Value) bool { // match: (ORconst [0] x) // cond: @@ -34417,6 +36934,97 @@ func rewriteValueS390X_OpS390XORconst_0(v *Value) bool { } return false } +func rewriteValueS390X_OpS390XORload_0(v *Value) bool { + b := v.Block + _ = b + // match: (ORload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (OR x (LGDR y)) + for { + t := v.Type + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr1 := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFMOVDstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = 
v_2.Args[2] + ptr2 := v_2.Args[0] + y := v_2.Args[1] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XOR) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpS390XLGDR, t) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (ORload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // result: (ORload [off1+off2] {sym} x ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + off2 := v_1.AuxInt + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + break + } + v.reset(OpS390XORload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) + // result: (ORload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := v.AuxInt + s1 := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := v_1.AuxInt + s2 := v_1.Aux + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XORload) + v.AuxInt = o1 + o2 + v.Aux = mergeSym(s1, s2) + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} func rewriteValueS390X_OpS390XSLD_0(v *Value) bool { // match: (SLD x (MOVDconst [c])) // cond: @@ -34641,6 +37249,38 @@ func rewriteValueS390X_OpS390XSRD_0(v *Value) bool { } return false } +func rewriteValueS390X_OpS390XSRDconst_0(v *Value) bool { + b := v.Block + _ = b + // match: (SRDconst [1] (SLDconst [1] (LGDR x))) + // cond: + // result: (LGDR (LPDFR x)) + for { + if v.AuxInt != 1 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpS390XSLDconst { + break + } + if v_0.AuxInt != 1 { + break + } + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpS390XLGDR { + break + } + t := v_0_0.Type + x := 
v_0_0.Args[0] + v.reset(OpS390XLGDR) + v.Type = t + v0 := b.NewValue0(v.Pos, OpS390XLPDFR, x.Type) + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} func rewriteValueS390X_OpS390XSRW_0(v *Value) bool { // match: (SRW x (MOVDconst [c])) // cond: @@ -34964,7 +37604,7 @@ func rewriteValueS390X_OpS390XSUBW_0(v *Value) bool { _ = b // match: (SUBW x (MOVDconst [c])) // cond: - // result: (SUBWconst x [c]) + // result: (SUBWconst x [int64(int32(c))]) for { _ = v.Args[1] x := v.Args[0] @@ -34974,13 +37614,13 @@ func rewriteValueS390X_OpS390XSUBW_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpS390XSUBWconst) - v.AuxInt = c + v.AuxInt = int64(int32(c)) v.AddArg(x) return true } // match: (SUBW (MOVDconst [c]) x) // cond: - // result: (NEGW (SUBWconst x [c])) + // result: (NEGW (SUBWconst x [int64(int32(c))])) for { _ = v.Args[1] v_0 := v.Args[0] @@ -34991,7 +37631,7 @@ func rewriteValueS390X_OpS390XSUBW_0(v *Value) bool { x := v.Args[1] v.reset(OpS390XNEGW) v0 := b.NewValue0(v.Pos, OpS390XSUBWconst, v.Type) - v0.AuxInt = c + v0.AuxInt = int64(int32(c)) v0.AddArg(x) v.AddArg(v0) return true @@ -35094,6 +37734,62 @@ func rewriteValueS390X_OpS390XSUBWconst_0(v *Value) bool { return true } } +func rewriteValueS390X_OpS390XSUBWload_0(v *Value) bool { + // match: (SUBWload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // result: (SUBWload [off1+off2] {sym} x ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + off2 := v_1.AuxInt + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + break + } + v.reset(OpS390XSUBWload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) + // result: (SUBWload [o1+o2] 
{mergeSym(s1, s2)} x ptr mem) + for { + o1 := v.AuxInt + s1 := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := v_1.AuxInt + s2 := v_1.Aux + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XSUBWload) + v.AuxInt = o1 + o2 + v.Aux = mergeSym(s1, s2) + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} func rewriteValueS390X_OpS390XSUBconst_0(v *Value) bool { // match: (SUBconst [0] x) // cond: @@ -35157,6 +37853,97 @@ func rewriteValueS390X_OpS390XSUBconst_0(v *Value) bool { } return false } +func rewriteValueS390X_OpS390XSUBload_0(v *Value) bool { + b := v.Block + _ = b + // match: (SUBload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (SUB x (LGDR y)) + for { + t := v.Type + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr1 := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFMOVDstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + ptr2 := v_2.Args[0] + y := v_2.Args[1] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XSUB) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpS390XLGDR, t) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (SUBload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // result: (SUBload [off1+off2] {sym} x ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + off2 := v_1.AuxInt + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + break + } + v.reset(OpS390XSUBload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != 
OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) + // result: (SUBload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := v.AuxInt + s1 := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := v_1.AuxInt + s2 := v_1.Aux + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XSUBload) + v.AuxInt = o1 + o2 + v.Aux = mergeSym(s1, s2) + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} func rewriteValueS390X_OpS390XXOR_0(v *Value) bool { // match: (XOR x (MOVDconst [c])) // cond: isU32Bit(c) @@ -35421,7 +38208,7 @@ func rewriteValueS390X_OpS390XXOR_10(v *Value) bool { func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { // match: (XORW x (MOVDconst [c])) // cond: - // result: (XORWconst [c] x) + // result: (XORWconst [int64(int32(c))] x) for { _ = v.Args[1] x := v.Args[0] @@ -35431,13 +38218,13 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { } c := v_1.AuxInt v.reset(OpS390XXORWconst) - v.AuxInt = c + v.AuxInt = int64(int32(c)) v.AddArg(x) return true } // match: (XORW (MOVDconst [c]) x) // cond: - // result: (XORWconst [c] x) + // result: (XORWconst [int64(int32(c))] x) for { _ = v.Args[1] v_0 := v.Args[0] @@ -35447,7 +38234,7 @@ func rewriteValueS390X_OpS390XXORW_0(v *Value) bool { c := v_0.AuxInt x := v.Args[1] v.reset(OpS390XXORWconst) - v.AuxInt = c + v.AuxInt = int64(int32(c)) v.AddArg(x) return true } @@ -35778,6 +38565,62 @@ func rewriteValueS390X_OpS390XXORWconst_0(v *Value) bool { } return false } +func rewriteValueS390X_OpS390XXORWload_0(v *Value) bool { + // match: (XORWload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // result: (XORWload [off1+off2] {sym} x ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + off2 := v_1.AuxInt + ptr := 
v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + break + } + v.reset(OpS390XXORWload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) + // result: (XORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := v.AuxInt + s1 := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := v_1.AuxInt + s2 := v_1.Aux + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XXORWload) + v.AuxInt = o1 + o2 + v.Aux = mergeSym(s1, s2) + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} func rewriteValueS390X_OpS390XXORconst_0(v *Value) bool { // match: (XORconst [0] x) // cond: @@ -35808,6 +38651,97 @@ func rewriteValueS390X_OpS390XXORconst_0(v *Value) bool { } return false } +func rewriteValueS390X_OpS390XXORload_0(v *Value) bool { + b := v.Block + _ = b + // match: (XORload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) + // cond: isSamePtr(ptr1, ptr2) + // result: (XOR x (LGDR y)) + for { + t := v.Type + off := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + ptr1 := v.Args[1] + v_2 := v.Args[2] + if v_2.Op != OpS390XFMOVDstore { + break + } + if v_2.AuxInt != off { + break + } + if v_2.Aux != sym { + break + } + _ = v_2.Args[2] + ptr2 := v_2.Args[0] + y := v_2.Args[1] + if !(isSamePtr(ptr1, ptr2)) { + break + } + v.reset(OpS390XXOR) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpS390XLGDR, t) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (XORload [off1] {sym} x (ADDconst [off2] ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(off1+off2) + // result: (XORload [off1+off2] {sym} x ptr mem) + for { + off1 := v.AuxInt + sym := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := 
v.Args[1] + if v_1.Op != OpS390XADDconst { + break + } + off2 := v_1.AuxInt + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(off1+off2)) { + break + } + v.reset(OpS390XXORload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) + // cond: ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2) + // result: (XORload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + for { + o1 := v.AuxInt + s1 := v.Aux + _ = v.Args[2] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpS390XMOVDaddr { + break + } + o2 := v_1.AuxInt + s2 := v_1.Aux + ptr := v_1.Args[0] + mem := v.Args[2] + if !(ptr.Op != OpSB && is20Bit(o1+o2) && canMergeSym(s1, s2)) { + break + } + v.reset(OpS390XXORload) + v.AuxInt = o1 + o2 + v.Aux = mergeSym(s1, s2) + v.AddArg(x) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} func rewriteValueS390X_OpSelect0_0(v *Value) bool { b := v.Block _ = b @@ -36200,6 +39134,18 @@ func rewriteValueS390X_OpSubPtr_0(v *Value) bool { return true } } +func rewriteValueS390X_OpTrunc_0(v *Value) bool { + // match: (Trunc x) + // cond: + // result: (FIDBR [5] x) + for { + x := v.Args[0] + v.reset(OpS390XFIDBR) + v.AuxInt = 5 + v.AddArg(x) + return true + } +} func rewriteValueS390X_OpTrunc16to8_0(v *Value) bool { // match: (Trunc16to8 x) // cond: @@ -36620,6 +39566,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v.Args[0] b.Kind = BlockS390XEQ b.SetControl(cmp) + b.Aux = nil return true } // match: (EQ (FlagEQ) yes no) @@ -36632,6 +39579,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (EQ (FlagLT) yes no) @@ -36644,6 +39592,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -36657,6 +39606,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) 
+ b.Aux = nil b.swapSuccessors() return true } @@ -36672,6 +39622,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v.Args[0] b.Kind = BlockS390XLE b.SetControl(cmp) + b.Aux = nil return true } // match: (GE (FlagEQ) yes no) @@ -36684,6 +39635,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (GE (FlagLT) yes no) @@ -36696,6 +39648,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -36709,6 +39662,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } case BlockS390XGT: @@ -36723,6 +39677,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v.Args[0] b.Kind = BlockS390XLT b.SetControl(cmp) + b.Aux = nil return true } // match: (GT (FlagEQ) yes no) @@ -36735,6 +39690,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -36748,6 +39704,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -36761,6 +39718,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } case BlockIf: @@ -36790,6 +39748,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v.Args[2] b.Kind = BlockS390XLT b.SetControl(cmp) + b.Aux = nil return true } // match: (If (MOVDLE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) @@ -36818,6 +39777,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v.Args[2] b.Kind = BlockS390XLE b.SetControl(cmp) + b.Aux = nil return true } // match: (If (MOVDGT (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) @@ -36846,6 +39806,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v.Args[2] b.Kind = BlockS390XGT b.SetControl(cmp) + b.Aux = nil return true } // match: (If (MOVDGE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) @@ -36874,6 +39835,7 @@ func 
rewriteBlockS390X(b *Block) bool { cmp := v.Args[2] b.Kind = BlockS390XGE b.SetControl(cmp) + b.Aux = nil return true } // match: (If (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) @@ -36902,6 +39864,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v.Args[2] b.Kind = BlockS390XEQ b.SetControl(cmp) + b.Aux = nil return true } // match: (If (MOVDNE (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) @@ -36930,6 +39893,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v.Args[2] b.Kind = BlockS390XNE b.SetControl(cmp) + b.Aux = nil return true } // match: (If (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) @@ -36958,6 +39922,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v.Args[2] b.Kind = BlockS390XGTF b.SetControl(cmp) + b.Aux = nil return true } // match: (If (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) @@ -36986,6 +39951,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v.Args[2] b.Kind = BlockS390XGEF b.SetControl(cmp) + b.Aux = nil return true } // match: (If cond yes no) @@ -37002,6 +39968,7 @@ func rewriteBlockS390X(b *Block) bool { v1.AddArg(cond) v0.AddArg(v1) b.SetControl(v0) + b.Aux = nil return true } case BlockS390XLE: @@ -37016,6 +39983,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v.Args[0] b.Kind = BlockS390XGE b.SetControl(cmp) + b.Aux = nil return true } // match: (LE (FlagEQ) yes no) @@ -37028,6 +39996,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LE (FlagLT) yes no) @@ -37040,6 +40009,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LE (FlagGT) yes no) @@ -37052,6 +40022,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -37067,6 +40038,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v.Args[0] b.Kind = BlockS390XGT b.SetControl(cmp) + b.Aux = nil return true } // match: 
(LT (FlagEQ) yes no) @@ -37079,6 +40051,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -37092,6 +40065,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (LT (FlagGT) yes no) @@ -37104,6 +40078,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -37141,6 +40116,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v_0.Args[2] b.Kind = BlockS390XLT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (CMPWconst [0] (MOVDLE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) @@ -37176,6 +40152,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v_0.Args[2] b.Kind = BlockS390XLE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (CMPWconst [0] (MOVDGT (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) @@ -37211,6 +40188,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v_0.Args[2] b.Kind = BlockS390XGT b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (CMPWconst [0] (MOVDGE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) @@ -37246,6 +40224,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v_0.Args[2] b.Kind = BlockS390XGE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (CMPWconst [0] (MOVDEQ (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) @@ -37281,6 +40260,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v_0.Args[2] b.Kind = BlockS390XEQ b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (CMPWconst [0] (MOVDNE (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) @@ -37316,6 +40296,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v_0.Args[2] b.Kind = BlockS390XNE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (CMPWconst [0] (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) @@ -37351,6 +40332,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v_0.Args[2] b.Kind = 
BlockS390XGTF b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (CMPWconst [0] (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp)) yes no) @@ -37386,6 +40368,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v_0.Args[2] b.Kind = BlockS390XGEF b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (InvertFlags cmp) yes no) @@ -37399,6 +40382,7 @@ func rewriteBlockS390X(b *Block) bool { cmp := v.Args[0] b.Kind = BlockS390XNE b.SetControl(cmp) + b.Aux = nil return true } // match: (NE (FlagEQ) yes no) @@ -37411,6 +40395,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } @@ -37424,6 +40409,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (NE (FlagGT) yes no) @@ -37436,6 +40422,7 @@ func rewriteBlockS390X(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } } diff --git a/src/cmd/compile/internal/ssa/rewrite_test.go b/src/cmd/compile/internal/ssa/rewrite_test.go index c21c64bb7b5..8a097b04f60 100644 --- a/src/cmd/compile/internal/ssa/rewrite_test.go +++ b/src/cmd/compile/internal/ssa/rewrite_test.go @@ -103,3 +103,25 @@ func TestLog2(t *testing.T) { } } } + +// We generate memmove for copy(x[1:], x[:]), however we may change it to OpMove, +// because size is known. Check that OpMove is alias-safe, or we did call memmove. +func TestMove(t *testing.T) { + x := [...]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40} + copy(x[1:], x[:]) + for i := 1; i < len(x); i++ { + if int(x[i]) != i { + t.Errorf("Memmove got converted to OpMove in alias-unsafe way. 
Got %d insted of %d in position %d", int(x[i]), i, i+1) + } + } +} + +func TestMoveSmall(t *testing.T) { + x := [...]byte{1, 2, 3, 4, 5, 6, 7} + copy(x[1:], x[:]) + for i := 1; i < len(x); i++ { + if int(x[i]) != i { + t.Errorf("Memmove got converted to OpMove in alias-unsafe way. Got %d instead of %d in position %d", int(x[i]), i, i+1) + } + } +} diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index c67e4f90eb1..def2cca9fcd 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -16,17 +16,17 @@ var _ = types.TypeMem // in case not otherwise used func rewriteValuegeneric(v *Value) bool { switch v.Op { case OpAdd16: - return rewriteValuegeneric_OpAdd16_0(v) || rewriteValuegeneric_OpAdd16_10(v) || rewriteValuegeneric_OpAdd16_20(v) + return rewriteValuegeneric_OpAdd16_0(v) || rewriteValuegeneric_OpAdd16_10(v) || rewriteValuegeneric_OpAdd16_20(v) || rewriteValuegeneric_OpAdd16_30(v) case OpAdd32: - return rewriteValuegeneric_OpAdd32_0(v) || rewriteValuegeneric_OpAdd32_10(v) || rewriteValuegeneric_OpAdd32_20(v) + return rewriteValuegeneric_OpAdd32_0(v) || rewriteValuegeneric_OpAdd32_10(v) || rewriteValuegeneric_OpAdd32_20(v) || rewriteValuegeneric_OpAdd32_30(v) case OpAdd32F: return rewriteValuegeneric_OpAdd32F_0(v) case OpAdd64: - return rewriteValuegeneric_OpAdd64_0(v) || rewriteValuegeneric_OpAdd64_10(v) || rewriteValuegeneric_OpAdd64_20(v) + return rewriteValuegeneric_OpAdd64_0(v) || rewriteValuegeneric_OpAdd64_10(v) || rewriteValuegeneric_OpAdd64_20(v) || rewriteValuegeneric_OpAdd64_30(v) case OpAdd64F: return rewriteValuegeneric_OpAdd64F_0(v) case OpAdd8: - return rewriteValuegeneric_OpAdd8_0(v) || rewriteValuegeneric_OpAdd8_10(v) || rewriteValuegeneric_OpAdd8_20(v) + return rewriteValuegeneric_OpAdd8_0(v) || rewriteValuegeneric_OpAdd8_10(v) || rewriteValuegeneric_OpAdd8_20(v) || rewriteValuegeneric_OpAdd8_30(v) case OpAddPtr: return 
rewriteValuegeneric_OpAddPtr_0(v) case OpAnd16: @@ -57,10 +57,26 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpConstString_0(v) case OpConvert: return rewriteValuegeneric_OpConvert_0(v) + case OpCvt32Fto32: + return rewriteValuegeneric_OpCvt32Fto32_0(v) + case OpCvt32Fto64: + return rewriteValuegeneric_OpCvt32Fto64_0(v) case OpCvt32Fto64F: return rewriteValuegeneric_OpCvt32Fto64F_0(v) + case OpCvt32to32F: + return rewriteValuegeneric_OpCvt32to32F_0(v) + case OpCvt32to64F: + return rewriteValuegeneric_OpCvt32to64F_0(v) + case OpCvt64Fto32: + return rewriteValuegeneric_OpCvt64Fto32_0(v) case OpCvt64Fto32F: return rewriteValuegeneric_OpCvt64Fto32F_0(v) + case OpCvt64Fto64: + return rewriteValuegeneric_OpCvt64Fto64_0(v) + case OpCvt64to32F: + return rewriteValuegeneric_OpCvt64to32F_0(v) + case OpCvt64to64F: + return rewriteValuegeneric_OpCvt64to64F_0(v) case OpDiv16: return rewriteValuegeneric_OpDiv16_0(v) case OpDiv16u: @@ -85,8 +101,12 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpEq16_0(v) case OpEq32: return rewriteValuegeneric_OpEq32_0(v) + case OpEq32F: + return rewriteValuegeneric_OpEq32F_0(v) case OpEq64: return rewriteValuegeneric_OpEq64_0(v) + case OpEq64F: + return rewriteValuegeneric_OpEq64F_0(v) case OpEq8: return rewriteValuegeneric_OpEq8_0(v) case OpEqB: @@ -103,10 +123,14 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpGeq16U_0(v) case OpGeq32: return rewriteValuegeneric_OpGeq32_0(v) + case OpGeq32F: + return rewriteValuegeneric_OpGeq32F_0(v) case OpGeq32U: return rewriteValuegeneric_OpGeq32U_0(v) case OpGeq64: return rewriteValuegeneric_OpGeq64_0(v) + case OpGeq64F: + return rewriteValuegeneric_OpGeq64F_0(v) case OpGeq64U: return rewriteValuegeneric_OpGeq64U_0(v) case OpGeq8: @@ -119,10 +143,14 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpGreater16U_0(v) case OpGreater32: return rewriteValuegeneric_OpGreater32_0(v) + case OpGreater32F: + 
return rewriteValuegeneric_OpGreater32F_0(v) case OpGreater32U: return rewriteValuegeneric_OpGreater32U_0(v) case OpGreater64: return rewriteValuegeneric_OpGreater64_0(v) + case OpGreater64F: + return rewriteValuegeneric_OpGreater64F_0(v) case OpGreater64U: return rewriteValuegeneric_OpGreater64U_0(v) case OpGreater8: @@ -134,7 +162,7 @@ func rewriteValuegeneric(v *Value) bool { case OpInterCall: return rewriteValuegeneric_OpInterCall_0(v) case OpIsInBounds: - return rewriteValuegeneric_OpIsInBounds_0(v) || rewriteValuegeneric_OpIsInBounds_10(v) || rewriteValuegeneric_OpIsInBounds_20(v) + return rewriteValuegeneric_OpIsInBounds_0(v) || rewriteValuegeneric_OpIsInBounds_10(v) || rewriteValuegeneric_OpIsInBounds_20(v) || rewriteValuegeneric_OpIsInBounds_30(v) case OpIsNonNil: return rewriteValuegeneric_OpIsNonNil_0(v) case OpIsSliceInBounds: @@ -145,10 +173,14 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpLeq16U_0(v) case OpLeq32: return rewriteValuegeneric_OpLeq32_0(v) + case OpLeq32F: + return rewriteValuegeneric_OpLeq32F_0(v) case OpLeq32U: return rewriteValuegeneric_OpLeq32U_0(v) case OpLeq64: return rewriteValuegeneric_OpLeq64_0(v) + case OpLeq64F: + return rewriteValuegeneric_OpLeq64F_0(v) case OpLeq64U: return rewriteValuegeneric_OpLeq64U_0(v) case OpLeq8: @@ -161,10 +193,14 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpLess16U_0(v) case OpLess32: return rewriteValuegeneric_OpLess32_0(v) + case OpLess32F: + return rewriteValuegeneric_OpLess32F_0(v) case OpLess32U: return rewriteValuegeneric_OpLess32U_0(v) case OpLess64: return rewriteValuegeneric_OpLess64_0(v) + case OpLess64F: + return rewriteValuegeneric_OpLess64F_0(v) case OpLess64U: return rewriteValuegeneric_OpLess64U_0(v) case OpLess8: @@ -172,7 +208,7 @@ func rewriteValuegeneric(v *Value) bool { case OpLess8U: return rewriteValuegeneric_OpLess8U_0(v) case OpLoad: - return rewriteValuegeneric_OpLoad_0(v) + return rewriteValuegeneric_OpLoad_0(v) || 
rewriteValuegeneric_OpLoad_10(v) case OpLsh16x16: return rewriteValuegeneric_OpLsh16x16_0(v) case OpLsh16x32: @@ -249,8 +285,12 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpNeq16_0(v) case OpNeq32: return rewriteValuegeneric_OpNeq32_0(v) + case OpNeq32F: + return rewriteValuegeneric_OpNeq32F_0(v) case OpNeq64: return rewriteValuegeneric_OpNeq64_0(v) + case OpNeq64F: + return rewriteValuegeneric_OpNeq64F_0(v) case OpNeq8: return rewriteValuegeneric_OpNeq8_0(v) case OpNeqB: @@ -369,6 +409,8 @@ func rewriteValuegeneric(v *Value) bool { return rewriteValuegeneric_OpSlicemask_0(v) case OpSqrt: return rewriteValuegeneric_OpSqrt_0(v) + case OpStaticCall: + return rewriteValuegeneric_OpStaticCall_0(v) case OpStore: return rewriteValuegeneric_OpStore_0(v) || rewriteValuegeneric_OpStore_10(v) case OpStringLen: @@ -467,6 +509,251 @@ func rewriteValuegeneric_OpAdd16_0(v *Value) bool { v.AuxInt = int64(int16(c + d)) return true } + // match: (Add16 (Mul16 x y) (Mul16 x z)) + // cond: + // result: (Mul16 x (Add16 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul16 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul16 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + z := v_1.Args[1] + v.reset(OpMul16) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd16, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add16 (Mul16 y x) (Mul16 x z)) + // cond: + // result: (Mul16 x (Add16 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul16 { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul16 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + z := v_1.Args[1] + v.reset(OpMul16) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd16, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add16 (Mul16 x y) 
(Mul16 z x)) + // cond: + // result: (Mul16 x (Add16 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul16 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul16 { + break + } + _ = v_1.Args[1] + z := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul16) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd16, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add16 (Mul16 y x) (Mul16 z x)) + // cond: + // result: (Mul16 x (Add16 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul16 { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul16 { + break + } + _ = v_1.Args[1] + z := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul16) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd16, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add16 (Mul16 x z) (Mul16 x y)) + // cond: + // result: (Mul16 x (Add16 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul16 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul16 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpMul16) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd16, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add16 (Mul16 z x) (Mul16 x y)) + // cond: + // result: (Mul16 x (Add16 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul16 { + break + } + _ = v_0.Args[1] + z := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul16 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpMul16) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd16, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: 
(Add16 (Mul16 x z) (Mul16 y x)) + // cond: + // result: (Mul16 x (Add16 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul16 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul16 { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul16) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd16, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add16 (Mul16 z x) (Mul16 y x)) + // cond: + // result: (Mul16 x (Add16 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul16 { + break + } + _ = v_0.Args[1] + z := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul16 { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul16) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd16, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpAdd16_10(v *Value) bool { + b := v.Block + _ = b // match: (Add16 (Const16 [0]) x) // cond: // result: x @@ -657,11 +944,6 @@ func rewriteValuegeneric_OpAdd16_0(v *Value) bool { v.AddArg(v0) return true } - return false -} -func rewriteValuegeneric_OpAdd16_10(v *Value) bool { - b := v.Block - _ = b // match: (Add16 (Sub16 i:(Const16 ) z) x) // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Add16 i (Sub16 x z)) @@ -718,6 +1000,11 @@ func rewriteValuegeneric_OpAdd16_10(v *Value) bool { v.AddArg(v0) return true } + return false +} +func rewriteValuegeneric_OpAdd16_20(v *Value) bool { + b := v.Block + _ = b // match: (Add16 x (Sub16 i:(Const16 ) z)) // cond: (z.Op != OpConst16 && x.Op != OpConst16) // result: (Add16 i (Sub16 x z)) @@ -950,11 +1237,6 @@ func rewriteValuegeneric_OpAdd16_10(v *Value) bool { v.AddArg(x) return true } - return false -} -func rewriteValuegeneric_OpAdd16_20(v *Value) bool { - b := v.Block - _ = b // match: 
(Add16 (Add16 (Const16 [d]) x) (Const16 [c])) // cond: // result: (Add16 (Const16 [int64(int16(c+d))]) x) @@ -1019,6 +1301,11 @@ func rewriteValuegeneric_OpAdd16_20(v *Value) bool { v.AddArg(x) return true } + return false +} +func rewriteValuegeneric_OpAdd16_30(v *Value) bool { + b := v.Block + _ = b // match: (Add16 (Const16 [c]) (Sub16 (Const16 [d]) x)) // cond: // result: (Sub16 (Const16 [int64(int16(c+d))]) x) @@ -1190,6 +1477,251 @@ func rewriteValuegeneric_OpAdd32_0(v *Value) bool { v.AuxInt = int64(int32(c + d)) return true } + // match: (Add32 (Mul32 x y) (Mul32 x z)) + // cond: + // result: (Mul32 x (Add32 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul32 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul32 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + z := v_1.Args[1] + v.reset(OpMul32) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd32, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add32 (Mul32 y x) (Mul32 x z)) + // cond: + // result: (Mul32 x (Add32 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul32 { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul32 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + z := v_1.Args[1] + v.reset(OpMul32) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd32, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add32 (Mul32 x y) (Mul32 z x)) + // cond: + // result: (Mul32 x (Add32 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul32 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul32 { + break + } + _ = v_1.Args[1] + z := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul32) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd32, t) + 
v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add32 (Mul32 y x) (Mul32 z x)) + // cond: + // result: (Mul32 x (Add32 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul32 { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul32 { + break + } + _ = v_1.Args[1] + z := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul32) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd32, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add32 (Mul32 x z) (Mul32 x y)) + // cond: + // result: (Mul32 x (Add32 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul32 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul32 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpMul32) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd32, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add32 (Mul32 z x) (Mul32 x y)) + // cond: + // result: (Mul32 x (Add32 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul32 { + break + } + _ = v_0.Args[1] + z := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul32 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpMul32) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd32, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add32 (Mul32 x z) (Mul32 y x)) + // cond: + // result: (Mul32 x (Add32 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul32 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul32 { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul32) + v.AddArg(x) + v0 := 
b.NewValue0(v.Pos, OpAdd32, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add32 (Mul32 z x) (Mul32 y x)) + // cond: + // result: (Mul32 x (Add32 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul32 { + break + } + _ = v_0.Args[1] + z := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul32 { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul32) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd32, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpAdd32_10(v *Value) bool { + b := v.Block + _ = b // match: (Add32 (Const32 [0]) x) // cond: // result: x @@ -1380,11 +1912,6 @@ func rewriteValuegeneric_OpAdd32_0(v *Value) bool { v.AddArg(v0) return true } - return false -} -func rewriteValuegeneric_OpAdd32_10(v *Value) bool { - b := v.Block - _ = b // match: (Add32 (Sub32 i:(Const32 ) z) x) // cond: (z.Op != OpConst32 && x.Op != OpConst32) // result: (Add32 i (Sub32 x z)) @@ -1441,6 +1968,11 @@ func rewriteValuegeneric_OpAdd32_10(v *Value) bool { v.AddArg(v0) return true } + return false +} +func rewriteValuegeneric_OpAdd32_20(v *Value) bool { + b := v.Block + _ = b // match: (Add32 x (Sub32 i:(Const32 ) z)) // cond: (z.Op != OpConst32 && x.Op != OpConst32) // result: (Add32 i (Sub32 x z)) @@ -1673,11 +2205,6 @@ func rewriteValuegeneric_OpAdd32_10(v *Value) bool { v.AddArg(x) return true } - return false -} -func rewriteValuegeneric_OpAdd32_20(v *Value) bool { - b := v.Block - _ = b // match: (Add32 (Add32 (Const32 [d]) x) (Const32 [c])) // cond: // result: (Add32 (Const32 [int64(int32(c+d))]) x) @@ -1742,6 +2269,11 @@ func rewriteValuegeneric_OpAdd32_20(v *Value) bool { v.AddArg(x) return true } + return false +} +func rewriteValuegeneric_OpAdd32_30(v *Value) bool { + b := v.Block + _ = b // match: (Add32 (Const32 [c]) (Sub32 (Const32 [d]) x)) // cond: // result: (Sub32 (Const32 
[int64(int32(c+d))]) x) @@ -1990,6 +2522,251 @@ func rewriteValuegeneric_OpAdd64_0(v *Value) bool { v.AuxInt = c + d return true } + // match: (Add64 (Mul64 x y) (Mul64 x z)) + // cond: + // result: (Mul64 x (Add64 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul64 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + z := v_1.Args[1] + v.reset(OpMul64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add64 (Mul64 y x) (Mul64 x z)) + // cond: + // result: (Mul64 x (Add64 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul64 { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul64 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + z := v_1.Args[1] + v.reset(OpMul64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add64 (Mul64 x y) (Mul64 z x)) + // cond: + // result: (Mul64 x (Add64 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul64 { + break + } + _ = v_1.Args[1] + z := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add64 (Mul64 y x) (Mul64 z x)) + // cond: + // result: (Mul64 x (Add64 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul64 { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul64 { + break + } + _ = v_1.Args[1] + z := v_1.Args[0] + if x != v_1.Args[1] { 
+ break + } + v.reset(OpMul64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add64 (Mul64 x z) (Mul64 x y)) + // cond: + // result: (Mul64 x (Add64 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul64 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpMul64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add64 (Mul64 z x) (Mul64 x y)) + // cond: + // result: (Mul64 x (Add64 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul64 { + break + } + _ = v_0.Args[1] + z := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul64 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpMul64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add64 (Mul64 x z) (Mul64 y x)) + // cond: + // result: (Mul64 x (Add64 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul64 { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add64 (Mul64 z x) (Mul64 y x)) + // cond: + // result: (Mul64 x (Add64 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul64 { + break + } + _ = v_0.Args[1] + z := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul64 { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + if x 
!= v_1.Args[1] { + break + } + v.reset(OpMul64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd64, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpAdd64_10(v *Value) bool { + b := v.Block + _ = b // match: (Add64 (Const64 [0]) x) // cond: // result: x @@ -2180,11 +2957,6 @@ func rewriteValuegeneric_OpAdd64_0(v *Value) bool { v.AddArg(v0) return true } - return false -} -func rewriteValuegeneric_OpAdd64_10(v *Value) bool { - b := v.Block - _ = b // match: (Add64 (Sub64 i:(Const64 ) z) x) // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (Add64 i (Sub64 x z)) @@ -2241,6 +3013,11 @@ func rewriteValuegeneric_OpAdd64_10(v *Value) bool { v.AddArg(v0) return true } + return false +} +func rewriteValuegeneric_OpAdd64_20(v *Value) bool { + b := v.Block + _ = b // match: (Add64 x (Sub64 i:(Const64 ) z)) // cond: (z.Op != OpConst64 && x.Op != OpConst64) // result: (Add64 i (Sub64 x z)) @@ -2473,11 +3250,6 @@ func rewriteValuegeneric_OpAdd64_10(v *Value) bool { v.AddArg(x) return true } - return false -} -func rewriteValuegeneric_OpAdd64_20(v *Value) bool { - b := v.Block - _ = b // match: (Add64 (Add64 (Const64 [d]) x) (Const64 [c])) // cond: // result: (Add64 (Const64 [c+d]) x) @@ -2542,6 +3314,11 @@ func rewriteValuegeneric_OpAdd64_20(v *Value) bool { v.AddArg(x) return true } + return false +} +func rewriteValuegeneric_OpAdd64_30(v *Value) bool { + b := v.Block + _ = b // match: (Add64 (Const64 [c]) (Sub64 (Const64 [d]) x)) // cond: // result: (Sub64 (Const64 [c+d]) x) @@ -2790,6 +3567,251 @@ func rewriteValuegeneric_OpAdd8_0(v *Value) bool { v.AuxInt = int64(int8(c + d)) return true } + // match: (Add8 (Mul8 x y) (Mul8 x z)) + // cond: + // result: (Mul8 x (Add8 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul8 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul8 { + break + } + _ = v_1.Args[1] + if x != 
v_1.Args[0] { + break + } + z := v_1.Args[1] + v.reset(OpMul8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd8, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add8 (Mul8 y x) (Mul8 x z)) + // cond: + // result: (Mul8 x (Add8 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul8 { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul8 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + z := v_1.Args[1] + v.reset(OpMul8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd8, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add8 (Mul8 x y) (Mul8 z x)) + // cond: + // result: (Mul8 x (Add8 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul8 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul8 { + break + } + _ = v_1.Args[1] + z := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd8, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add8 (Mul8 y x) (Mul8 z x)) + // cond: + // result: (Mul8 x (Add8 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul8 { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul8 { + break + } + _ = v_1.Args[1] + z := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd8, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add8 (Mul8 x z) (Mul8 x y)) + // cond: + // result: (Mul8 x (Add8 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul8 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul8 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + 
break + } + y := v_1.Args[1] + v.reset(OpMul8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd8, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add8 (Mul8 z x) (Mul8 x y)) + // cond: + // result: (Mul8 x (Add8 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul8 { + break + } + _ = v_0.Args[1] + z := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul8 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + y := v_1.Args[1] + v.reset(OpMul8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd8, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add8 (Mul8 x z) (Mul8 y x)) + // cond: + // result: (Mul8 x (Add8 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul8 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + z := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul8 { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd8, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Add8 (Mul8 z x) (Mul8 y x)) + // cond: + // result: (Mul8 x (Add8 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul8 { + break + } + _ = v_0.Args[1] + z := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul8 { + break + } + _ = v_1.Args[1] + y := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpAdd8, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + return false +} +func rewriteValuegeneric_OpAdd8_10(v *Value) bool { + b := v.Block + _ = b // match: (Add8 (Const8 [0]) x) // cond: // result: x @@ -2980,11 +4002,6 @@ func rewriteValuegeneric_OpAdd8_0(v *Value) bool { v.AddArg(v0) return true } - return false -} -func rewriteValuegeneric_OpAdd8_10(v *Value) bool { - b := v.Block - _ = b // match: 
(Add8 (Sub8 i:(Const8 ) z) x) // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Add8 i (Sub8 x z)) @@ -3041,6 +4058,11 @@ func rewriteValuegeneric_OpAdd8_10(v *Value) bool { v.AddArg(v0) return true } + return false +} +func rewriteValuegeneric_OpAdd8_20(v *Value) bool { + b := v.Block + _ = b // match: (Add8 x (Sub8 i:(Const8 ) z)) // cond: (z.Op != OpConst8 && x.Op != OpConst8) // result: (Add8 i (Sub8 x z)) @@ -3273,11 +4295,6 @@ func rewriteValuegeneric_OpAdd8_10(v *Value) bool { v.AddArg(x) return true } - return false -} -func rewriteValuegeneric_OpAdd8_20(v *Value) bool { - b := v.Block - _ = b // match: (Add8 (Add8 (Const8 [d]) x) (Const8 [c])) // cond: // result: (Add8 (Const8 [int64(int8(c+d))]) x) @@ -3342,6 +4359,11 @@ func rewriteValuegeneric_OpAdd8_20(v *Value) bool { v.AddArg(x) return true } + return false +} +func rewriteValuegeneric_OpAdd8_30(v *Value) bool { + b := v.Block + _ = b // match: (Add8 (Const8 [c]) (Sub8 (Const8 [d]) x)) // cond: // result: (Sub8 (Const8 [int64(int8(c+d))]) x) @@ -5684,25 +6706,6 @@ func rewriteValuegeneric_OpArraySelect_0(v *Value) bool { v.AddArg(x) return true } - // match: (ArraySelect [0] (Load ptr mem)) - // cond: - // result: (Load ptr mem) - for { - if v.AuxInt != 0 { - break - } - v_0 := v.Args[0] - if v_0.Op != OpLoad { - break - } - _ = v_0.Args[1] - ptr := v_0.Args[0] - mem := v_0.Args[1] - v.reset(OpLoad) - v.AddArg(ptr) - v.AddArg(mem) - return true - } // match: (ArraySelect [0] x:(IData _)) // cond: // result: x @@ -6059,6 +7062,38 @@ func rewriteValuegeneric_OpConvert_0(v *Value) bool { } return false } +func rewriteValuegeneric_OpCvt32Fto32_0(v *Value) bool { + // match: (Cvt32Fto32 (Const32F [c])) + // cond: + // result: (Const32 [int64(int32(i2f(c)))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpConst32F { + break + } + c := v_0.AuxInt + v.reset(OpConst32) + v.AuxInt = int64(int32(i2f(c))) + return true + } + return false +} +func rewriteValuegeneric_OpCvt32Fto64_0(v *Value) bool { + // 
match: (Cvt32Fto64 (Const32F [c])) + // cond: + // result: (Const64 [int64(i2f(c))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpConst32F { + break + } + c := v_0.AuxInt + v.reset(OpConst64) + v.AuxInt = int64(i2f(c)) + return true + } + return false +} func rewriteValuegeneric_OpCvt32Fto64F_0(v *Value) bool { // match: (Cvt32Fto64F (Const32F [c])) // cond: @@ -6075,6 +7110,54 @@ func rewriteValuegeneric_OpCvt32Fto64F_0(v *Value) bool { } return false } +func rewriteValuegeneric_OpCvt32to32F_0(v *Value) bool { + // match: (Cvt32to32F (Const32 [c])) + // cond: + // result: (Const32F [f2i(float64(float32(int32(c))))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpConst32 { + break + } + c := v_0.AuxInt + v.reset(OpConst32F) + v.AuxInt = f2i(float64(float32(int32(c)))) + return true + } + return false +} +func rewriteValuegeneric_OpCvt32to64F_0(v *Value) bool { + // match: (Cvt32to64F (Const32 [c])) + // cond: + // result: (Const64F [f2i(float64(int32(c)))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpConst32 { + break + } + c := v_0.AuxInt + v.reset(OpConst64F) + v.AuxInt = f2i(float64(int32(c))) + return true + } + return false +} +func rewriteValuegeneric_OpCvt64Fto32_0(v *Value) bool { + // match: (Cvt64Fto32 (Const64F [c])) + // cond: + // result: (Const32 [int64(int32(i2f(c)))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpConst64F { + break + } + c := v_0.AuxInt + v.reset(OpConst32) + v.AuxInt = int64(int32(i2f(c))) + return true + } + return false +} func rewriteValuegeneric_OpCvt64Fto32F_0(v *Value) bool { // match: (Cvt64Fto32F (Const64F [c])) // cond: @@ -6091,6 +7174,54 @@ func rewriteValuegeneric_OpCvt64Fto32F_0(v *Value) bool { } return false } +func rewriteValuegeneric_OpCvt64Fto64_0(v *Value) bool { + // match: (Cvt64Fto64 (Const64F [c])) + // cond: + // result: (Const64 [int64(i2f(c))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpConst64F { + break + } + c := v_0.AuxInt + v.reset(OpConst64) + v.AuxInt = int64(i2f(c)) + return true + } + return false +} 
+func rewriteValuegeneric_OpCvt64to32F_0(v *Value) bool { + // match: (Cvt64to32F (Const64 [c])) + // cond: + // result: (Const32F [f2i(float64(float32(c)))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpConst64 { + break + } + c := v_0.AuxInt + v.reset(OpConst32F) + v.AuxInt = f2i(float64(float32(c))) + return true + } + return false +} +func rewriteValuegeneric_OpCvt64to64F_0(v *Value) bool { + // match: (Cvt64to64F (Const64 [c])) + // cond: + // result: (Const64F [f2i(float64(c))]) + for { + v_0 := v.Args[0] + if v_0.Op != OpConst64 { + break + } + c := v_0.AuxInt + v.reset(OpConst64F) + v.AuxInt = f2i(float64(c)) + return true + } + return false +} func rewriteValuegeneric_OpDiv16_0(v *Value) bool { b := v.Block _ = b @@ -6118,6 +7249,27 @@ func rewriteValuegeneric_OpDiv16_0(v *Value) bool { v.AuxInt = int64(int16(c) / int16(d)) return true } + // match: (Div16 n (Const16 [c])) + // cond: isNonNegative(n) && isPowerOfTwo(c&0xffff) + // result: (Rsh16Ux64 n (Const64 [log2(c&0xffff)])) + for { + _ = v.Args[1] + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst16 { + break + } + c := v_1.AuxInt + if !(isNonNegative(n) && isPowerOfTwo(c&0xffff)) { + break + } + v.reset(OpRsh16Ux64) + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = log2(c & 0xffff) + v.AddArg(v0) + return true + } // match: (Div16 n (Const16 [c])) // cond: c < 0 && c != -1<<15 // result: (Neg16 (Div16 n (Const16 [-c]))) @@ -6461,6 +7613,27 @@ func rewriteValuegeneric_OpDiv32_0(v *Value) bool { v.AuxInt = int64(int32(c) / int32(d)) return true } + // match: (Div32 n (Const32 [c])) + // cond: isNonNegative(n) && isPowerOfTwo(c&0xffffffff) + // result: (Rsh32Ux64 n (Const64 [log2(c&0xffffffff)])) + for { + _ = v.Args[1] + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst32 { + break + } + c := v_1.AuxInt + if !(isNonNegative(n) && isPowerOfTwo(c&0xffffffff)) { + break + } + v.reset(OpRsh32Ux64) + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + 
v0.AuxInt = log2(c & 0xffffffff) + v.AddArg(v0) + return true + } // match: (Div32 n (Const32 [c])) // cond: c < 0 && c != -1<<31 // result: (Neg32 (Div32 n (Const32 [-c]))) @@ -6982,6 +8155,47 @@ func rewriteValuegeneric_OpDiv64_0(v *Value) bool { v.AuxInt = c / d return true } + // match: (Div64 n (Const64 [c])) + // cond: isNonNegative(n) && isPowerOfTwo(c) + // result: (Rsh64Ux64 n (Const64 [log2(c)])) + for { + _ = v.Args[1] + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(isNonNegative(n) && isPowerOfTwo(c)) { + break + } + v.reset(OpRsh64Ux64) + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = log2(c) + v.AddArg(v0) + return true + } + // match: (Div64 n (Const64 [-1<<63])) + // cond: isNonNegative(n) + // result: (Const64 [0]) + for { + _ = v.Args[1] + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + if v_1.AuxInt != -1<<63 { + break + } + if !(isNonNegative(n)) { + break + } + v.reset(OpConst64) + v.AuxInt = 0 + return true + } // match: (Div64 n (Const64 [c])) // cond: c < 0 && c != -1<<63 // result: (Neg64 (Div64 n (Const64 [-c]))) @@ -7378,6 +8592,27 @@ func rewriteValuegeneric_OpDiv8_0(v *Value) bool { v.AuxInt = int64(int8(c) / int8(d)) return true } + // match: (Div8 n (Const8 [c])) + // cond: isNonNegative(n) && isPowerOfTwo(c&0xff) + // result: (Rsh8Ux64 n (Const64 [log2(c&0xff)])) + for { + _ = v.Args[1] + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst8 { + break + } + c := v_1.AuxInt + if !(isNonNegative(n) && isPowerOfTwo(c&0xff)) { + break + } + v.reset(OpRsh8Ux64) + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64) + v0.AuxInt = log2(c & 0xff) + v.AddArg(v0) + return true + } // match: (Div8 n (Const8 [c])) // cond: c < 0 && c != -1<<7 // result: (Neg8 (Div8 n (Const8 [-c]))) @@ -7953,6 +9188,47 @@ func rewriteValuegeneric_OpEq32_0(v *Value) bool { } return false } +func rewriteValuegeneric_OpEq32F_0(v *Value) bool { + 
// match: (Eq32F (Const32F [c]) (Const32F [d])) + // cond: + // result: (ConstBool [b2i(i2f(c) == i2f(d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst32F { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32F { + break + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(i2f(c) == i2f(d)) + return true + } + // match: (Eq32F (Const32F [d]) (Const32F [c])) + // cond: + // result: (ConstBool [b2i(i2f(c) == i2f(d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst32F { + break + } + d := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32F { + break + } + c := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(i2f(c) == i2f(d)) + return true + } + return false +} func rewriteValuegeneric_OpEq64_0(v *Value) bool { b := v.Block _ = b @@ -8137,6 +9413,47 @@ func rewriteValuegeneric_OpEq64_0(v *Value) bool { } return false } +func rewriteValuegeneric_OpEq64F_0(v *Value) bool { + // match: (Eq64F (Const64F [c]) (Const64F [d])) + // cond: + // result: (ConstBool [b2i(i2f(c) == i2f(d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst64F { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64F { + break + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(i2f(c) == i2f(d)) + return true + } + // match: (Eq64F (Const64F [d]) (Const64F [c])) + // cond: + // result: (ConstBool [b2i(i2f(c) == i2f(d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst64F { + break + } + d := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64F { + break + } + c := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(i2f(c) == i2f(d)) + return true + } + return false +} func rewriteValuegeneric_OpEq8_0(v *Value) bool { b := v.Block _ = b @@ -8640,6 +9957,28 @@ func rewriteValuegeneric_OpGeq32_0(v *Value) bool { } return false } +func rewriteValuegeneric_OpGeq32F_0(v *Value) bool { + // match: (Geq32F (Const32F [c]) (Const32F [d])) + // cond: + // result: 
(ConstBool [b2i(i2f(c) >= i2f(d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst32F { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32F { + break + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(i2f(c) >= i2f(d)) + return true + } + return false +} func rewriteValuegeneric_OpGeq32U_0(v *Value) bool { // match: (Geq32U (Const32 [c]) (Const32 [d])) // cond: @@ -8684,6 +10023,28 @@ func rewriteValuegeneric_OpGeq64_0(v *Value) bool { } return false } +func rewriteValuegeneric_OpGeq64F_0(v *Value) bool { + // match: (Geq64F (Const64F [c]) (Const64F [d])) + // cond: + // result: (ConstBool [b2i(i2f(c) >= i2f(d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst64F { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64F { + break + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(i2f(c) >= i2f(d)) + return true + } + return false +} func rewriteValuegeneric_OpGeq64U_0(v *Value) bool { // match: (Geq64U (Const64 [c]) (Const64 [d])) // cond: @@ -8816,6 +10177,28 @@ func rewriteValuegeneric_OpGreater32_0(v *Value) bool { } return false } +func rewriteValuegeneric_OpGreater32F_0(v *Value) bool { + // match: (Greater32F (Const32F [c]) (Const32F [d])) + // cond: + // result: (ConstBool [b2i(i2f(c) > i2f(d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst32F { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32F { + break + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(i2f(c) > i2f(d)) + return true + } + return false +} func rewriteValuegeneric_OpGreater32U_0(v *Value) bool { // match: (Greater32U (Const32 [c]) (Const32 [d])) // cond: @@ -8860,6 +10243,28 @@ func rewriteValuegeneric_OpGreater64_0(v *Value) bool { } return false } +func rewriteValuegeneric_OpGreater64F_0(v *Value) bool { + // match: (Greater64F (Const64F [c]) (Const64F [d])) + // cond: + // result: (ConstBool [b2i(i2f(c) > i2f(d))]) + for { + _ = 
v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst64F { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64F { + break + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(i2f(c) > i2f(d)) + return true + } + return false +} func rewriteValuegeneric_OpGreater64U_0(v *Value) bool { // match: (Greater64U (Const64 [c]) (Const64 [d])) // cond: @@ -9774,6 +11179,303 @@ func rewriteValuegeneric_OpIsInBounds_20(v *Value) bool { v.AuxInt = 1 return true } + // match: (IsInBounds (ZeroExt8to64 (Rsh8Ux64 _ (Const64 [c]))) (Const64 [d])) + // cond: 0 < c && c < 8 && 1< p1 (Store {t2} p2 (Const64 [x]) _)) + // cond: isSamePtr(p1,p2) && t2.(*types.Type).Size() == 8 && is64BitFloat(t1) + // result: (Const64F [x]) + for { + t1 := v.Type + _ = v.Args[1] + p1 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpStore { + break + } + t2 := v_1.Aux + _ = v_1.Args[2] + p2 := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64 { + break + } + x := v_1_1.AuxInt + if !(isSamePtr(p1, p2) && t2.(*types.Type).Size() == 8 && is64BitFloat(t1)) { + break + } + v.reset(OpConst64F) + v.AuxInt = x + return true + } + // match: (Load p1 (Store {t2} p2 (Const32 [x]) _)) + // cond: isSamePtr(p1,p2) && t2.(*types.Type).Size() == 4 && is32BitFloat(t1) + // result: (Const32F [f2i(float64(math.Float32frombits(uint32(x))))]) + for { + t1 := v.Type + _ = v.Args[1] + p1 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpStore { + break + } + t2 := v_1.Aux + _ = v_1.Args[2] + p2 := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32 { + break + } + x := v_1_1.AuxInt + if !(isSamePtr(p1, p2) && t2.(*types.Type).Size() == 4 && is32BitFloat(t1)) { + break + } + v.reset(OpConst32F) + v.AuxInt = f2i(float64(math.Float32frombits(uint32(x)))) + return true + } + // match: (Load p1 (Store {t2} p2 (Const64F [x]) _)) + // cond: isSamePtr(p1,p2) && t2.(*types.Type).Size() == 8 && is64BitInt(t1) + // result: (Const64 [x]) + for { + t1 := v.Type + _ = v.Args[1] + p1 := 
v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpStore { + break + } + t2 := v_1.Aux + _ = v_1.Args[2] + p2 := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst64F { + break + } + x := v_1_1.AuxInt + if !(isSamePtr(p1, p2) && t2.(*types.Type).Size() == 8 && is64BitInt(t1)) { + break + } + v.reset(OpConst64) + v.AuxInt = x + return true + } + // match: (Load p1 (Store {t2} p2 (Const32F [x]) _)) + // cond: isSamePtr(p1,p2) && t2.(*types.Type).Size() == 4 && is32BitInt(t1) + // result: (Const32 [int64(int32(math.Float32bits(float32(i2f(x)))))]) + for { + t1 := v.Type + _ = v.Args[1] + p1 := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpStore { + break + } + t2 := v_1.Aux + _ = v_1.Args[2] + p2 := v_1.Args[0] + v_1_1 := v_1.Args[1] + if v_1_1.Op != OpConst32F { + break + } + x := v_1_1.AuxInt + if !(isSamePtr(p1, p2) && t2.(*types.Type).Size() == 4 && is32BitInt(t1)) { + break + } + v.reset(OpConst32) + v.AuxInt = int64(int32(math.Float32bits(float32(i2f(x))))) + return true + } // match: (Load _ _) // cond: t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) // result: (StructMake0) @@ -10524,6 +12418,13 @@ func rewriteValuegeneric_OpLoad_0(v *Value) bool { v.AddArg(v6) return true } + return false +} +func rewriteValuegeneric_OpLoad_10(v *Value) bool { + b := v.Block + _ = b + fe := b.Func.fe + _ = fe // match: (Load _ _) // cond: t.IsArray() && t.NumElem() == 0 // result: (ArrayMake0) @@ -11662,6 +13563,28 @@ func rewriteValuegeneric_OpMod16_0(v *Value) bool { return true } // match: (Mod16 n (Const16 [c])) + // cond: isNonNegative(n) && isPowerOfTwo(c&0xffff) + // result: (And16 n (Const16 [(c&0xffff)-1])) + for { + t := v.Type + _ = v.Args[1] + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst16 { + break + } + c := v_1.AuxInt + if !(isNonNegative(n) && isPowerOfTwo(c&0xffff)) { + break + } + v.reset(OpAnd16) + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = (c & 0xffff) - 1 + v.AddArg(v0) + return true + } + // match: (Mod16 n 
(Const16 [c])) // cond: c < 0 && c != -1<<15 // result: (Mod16 n (Const16 [-c])) for { @@ -11821,6 +13744,28 @@ func rewriteValuegeneric_OpMod32_0(v *Value) bool { return true } // match: (Mod32 n (Const32 [c])) + // cond: isNonNegative(n) && isPowerOfTwo(c&0xffffffff) + // result: (And32 n (Const32 [(c&0xffffffff)-1])) + for { + t := v.Type + _ = v.Args[1] + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst32 { + break + } + c := v_1.AuxInt + if !(isNonNegative(n) && isPowerOfTwo(c&0xffffffff)) { + break + } + v.reset(OpAnd32) + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = (c & 0xffffffff) - 1 + v.AddArg(v0) + return true + } + // match: (Mod32 n (Const32 [c])) // cond: c < 0 && c != -1<<31 // result: (Mod32 n (Const32 [-c])) for { @@ -11980,6 +13925,49 @@ func rewriteValuegeneric_OpMod64_0(v *Value) bool { return true } // match: (Mod64 n (Const64 [c])) + // cond: isNonNegative(n) && isPowerOfTwo(c) + // result: (And64 n (Const64 [c-1])) + for { + t := v.Type + _ = v.Args[1] + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(isNonNegative(n) && isPowerOfTwo(c)) { + break + } + v.reset(OpAnd64) + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = c - 1 + v.AddArg(v0) + return true + } + // match: (Mod64 n (Const64 [-1<<63])) + // cond: isNonNegative(n) + // result: n + for { + _ = v.Args[1] + n := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + if v_1.AuxInt != -1<<63 { + break + } + if !(isNonNegative(n)) { + break + } + v.reset(OpCopy) + v.Type = n.Type + v.AddArg(n) + return true + } + // match: (Mod64 n (Const64 [c])) // cond: c < 0 && c != -1<<63 // result: (Mod64 n (Const64 [-c])) for { @@ -12160,6 +14148,28 @@ func rewriteValuegeneric_OpMod8_0(v *Value) bool { return true } // match: (Mod8 n (Const8 [c])) + // cond: isNonNegative(n) && isPowerOfTwo(c&0xff) + // result: (And8 n (Const8 [(c&0xff)-1])) + for { + t := v.Type + _ = v.Args[1] + n := 
v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst8 { + break + } + c := v_1.AuxInt + if !(isNonNegative(n) && isPowerOfTwo(c&0xff)) { + break + } + v.reset(OpAnd8) + v.AddArg(n) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = (c & 0xff) - 1 + v.AddArg(v0) + return true + } + // match: (Mod8 n (Const8 [c])) // cond: c < 0 && c != -1<<7 // result: (Mod8 n (Const8 [-c])) for { @@ -14937,6 +16947,47 @@ func rewriteValuegeneric_OpNeq32_0(v *Value) bool { } return false } +func rewriteValuegeneric_OpNeq32F_0(v *Value) bool { + // match: (Neq32F (Const32F [c]) (Const32F [d])) + // cond: + // result: (ConstBool [b2i(i2f(c) != i2f(d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst32F { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32F { + break + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(i2f(c) != i2f(d)) + return true + } + // match: (Neq32F (Const32F [d]) (Const32F [c])) + // cond: + // result: (ConstBool [b2i(i2f(c) != i2f(d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst32F { + break + } + d := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst32F { + break + } + c := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(i2f(c) != i2f(d)) + return true + } + return false +} func rewriteValuegeneric_OpNeq64_0(v *Value) bool { b := v.Block _ = b @@ -15121,6 +17172,47 @@ func rewriteValuegeneric_OpNeq64_0(v *Value) bool { } return false } +func rewriteValuegeneric_OpNeq64F_0(v *Value) bool { + // match: (Neq64F (Const64F [c]) (Const64F [d])) + // cond: + // result: (ConstBool [b2i(i2f(c) != i2f(d))]) + for { + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpConst64F { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64F { + break + } + d := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(i2f(c) != i2f(d)) + return true + } + // match: (Neq64F (Const64F [d]) (Const64F [c])) + // cond: + // result: (ConstBool [b2i(i2f(c) != i2f(d))]) + for { + _ = v.Args[1] 
+ v_0 := v.Args[0] + if v_0.Op != OpConst64F { + break + } + d := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpConst64F { + break + } + c := v_1.AuxInt + v.reset(OpConstBool) + v.AuxInt = b2i(i2f(c) != i2f(d)) + return true + } + return false +} func rewriteValuegeneric_OpNeq8_0(v *Value) bool { b := v.Block _ = b @@ -15556,8 +17648,8 @@ func rewriteValuegeneric_OpNilCheck_0(v *Value) bool { v.AddArg(mem) return true } - // match: (NilCheck (Load (OffPtr [c] (SP)) mem) mem) - // cond: mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check") + // match: (NilCheck (Load (OffPtr [c] (SP)) (StaticCall {sym} _)) _) + // cond: isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check") // result: (Invalid) for { _ = v.Args[1] @@ -15575,18 +17667,19 @@ func rewriteValuegeneric_OpNilCheck_0(v *Value) bool { if v_0_0_0.Op != OpSP { break } - mem := v_0.Args[1] - if mem != v.Args[1] { + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpStaticCall { break } - if !(mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")) { + sym := v_0_1.Aux + if !(isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")) { break } v.reset(OpInvalid) return true } - // match: (NilCheck (OffPtr (Load (OffPtr [c] (SP)) mem)) mem) - // cond: mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check") + // match: (NilCheck (OffPtr (Load (OffPtr [c] (SP)) (StaticCall {sym} _))) _) + // 
cond: isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check") // result: (Invalid) for { _ = v.Args[1] @@ -15608,11 +17701,12 @@ func rewriteValuegeneric_OpNilCheck_0(v *Value) bool { if v_0_0_0_0.Op != OpSP { break } - mem := v_0_0.Args[1] - if mem != v.Args[1] { + v_0_0_1 := v_0_0.Args[1] + if v_0_0_1.Op != OpStaticCall { break } - if !(mem.Op == OpStaticCall && isSameSym(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")) { + sym := v_0_0_1.Aux + if !(isSameSym(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil() && v.Pos.Line() > 1, v, "removed nil check")) { break } v.reset(OpInvalid) @@ -21640,6 +23734,93 @@ func rewriteValuegeneric_OpSqrt_0(v *Value) bool { } return false } +func rewriteValuegeneric_OpStaticCall_0(v *Value) bool { + b := v.Block + _ = b + config := b.Func.Config + _ = config + // match: (StaticCall {sym} s1:(Store _ (Const64 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))) + // cond: isSameSym(sym,"runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmoveSize(sz,config) + // result: (Move {t.(*types.Type).Elem()} [sz] dst src mem) + for { + sym := v.Aux + s1 := v.Args[0] + if s1.Op != OpStore { + break + } + _ = s1.Args[2] + s1_1 := s1.Args[1] + if s1_1.Op != OpConst64 { + break + } + sz := s1_1.AuxInt + s2 := s1.Args[2] + if s2.Op != OpStore { + break + } + _ = s2.Args[2] + src := s2.Args[1] + s3 := s2.Args[2] + if s3.Op != OpStore { + break + } + t := s3.Aux + _ = s3.Args[2] + dst := s3.Args[1] + mem := s3.Args[2] + if !(isSameSym(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmoveSize(sz, config)) { + break + } + v.reset(OpMove) + v.AuxInt = sz + v.Aux = t.(*types.Type).Elem() + v.AddArg(dst) + 
v.AddArg(src) + v.AddArg(mem) + return true + } + // match: (StaticCall {sym} s1:(Store _ (Const32 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))) + // cond: isSameSym(sym,"runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmoveSize(sz,config) + // result: (Move {t.(*types.Type).Elem()} [sz] dst src mem) + for { + sym := v.Aux + s1 := v.Args[0] + if s1.Op != OpStore { + break + } + _ = s1.Args[2] + s1_1 := s1.Args[1] + if s1_1.Op != OpConst32 { + break + } + sz := s1_1.AuxInt + s2 := s1.Args[2] + if s2.Op != OpStore { + break + } + _ = s2.Args[2] + src := s2.Args[1] + s3 := s2.Args[2] + if s3.Op != OpStore { + break + } + t := s3.Aux + _ = s3.Args[2] + dst := s3.Args[1] + mem := s3.Args[2] + if !(isSameSym(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmoveSize(sz, config)) { + break + } + v.reset(OpMove) + v.AuxInt = sz + v.Aux = t.(*types.Type).Elem() + v.AddArg(dst) + v.AddArg(src) + v.AddArg(mem) + return true + } + return false +} func rewriteValuegeneric_OpStore_0(v *Value) bool { b := v.Block _ = b @@ -22517,6 +24698,126 @@ func rewriteValuegeneric_OpSub16_0(v *Value) bool { v.AddArg(x) return true } + // match: (Sub16 (Mul16 x y) (Mul16 x z)) + // cond: + // result: (Mul16 x (Sub16 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul16 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul16 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + z := v_1.Args[1] + v.reset(OpMul16) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub16, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Sub16 (Mul16 y x) (Mul16 x z)) + // cond: + // result: (Mul16 x (Sub16 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul16 { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul16 { + break + 
} + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + z := v_1.Args[1] + v.reset(OpMul16) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub16, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Sub16 (Mul16 x y) (Mul16 z x)) + // cond: + // result: (Mul16 x (Sub16 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul16 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul16 { + break + } + _ = v_1.Args[1] + z := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul16) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub16, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Sub16 (Mul16 y x) (Mul16 z x)) + // cond: + // result: (Mul16 x (Sub16 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul16 { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul16 { + break + } + _ = v_1.Args[1] + z := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul16) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub16, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } // match: (Sub16 x x) // cond: // result: (Const16 [0]) @@ -22590,6 +24891,11 @@ func rewriteValuegeneric_OpSub16_0(v *Value) bool { v.AddArg(x) return true } + return false +} +func rewriteValuegeneric_OpSub16_10(v *Value) bool { + b := v.Block + _ = b // match: (Sub16 (Add16 y x) y) // cond: // result: x @@ -22698,11 +25004,6 @@ func rewriteValuegeneric_OpSub16_0(v *Value) bool { v.AddArg(x) return true } - return false -} -func rewriteValuegeneric_OpSub16_10(v *Value) bool { - b := v.Block - _ = b // match: (Sub16 (Const16 [c]) (Sub16 (Const16 [d]) x)) // cond: // result: (Add16 (Const16 [int64(int16(c-d))]) x) @@ -22781,6 +25082,126 @@ func rewriteValuegeneric_OpSub32_0(v *Value) bool { v.AddArg(x) return true } + // match: (Sub32 (Mul32 x y) (Mul32 x z)) + // cond: 
+ // result: (Mul32 x (Sub32 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul32 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul32 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + z := v_1.Args[1] + v.reset(OpMul32) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub32, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Sub32 (Mul32 y x) (Mul32 x z)) + // cond: + // result: (Mul32 x (Sub32 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul32 { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul32 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + z := v_1.Args[1] + v.reset(OpMul32) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub32, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Sub32 (Mul32 x y) (Mul32 z x)) + // cond: + // result: (Mul32 x (Sub32 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul32 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul32 { + break + } + _ = v_1.Args[1] + z := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul32) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub32, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Sub32 (Mul32 y x) (Mul32 z x)) + // cond: + // result: (Mul32 x (Sub32 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul32 { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul32 { + break + } + _ = v_1.Args[1] + z := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul32) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub32, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } // match: (Sub32 x x) // cond: // 
result: (Const32 [0]) @@ -22854,6 +25275,11 @@ func rewriteValuegeneric_OpSub32_0(v *Value) bool { v.AddArg(x) return true } + return false +} +func rewriteValuegeneric_OpSub32_10(v *Value) bool { + b := v.Block + _ = b // match: (Sub32 (Add32 y x) y) // cond: // result: x @@ -22962,11 +25388,6 @@ func rewriteValuegeneric_OpSub32_0(v *Value) bool { v.AddArg(x) return true } - return false -} -func rewriteValuegeneric_OpSub32_10(v *Value) bool { - b := v.Block - _ = b // match: (Sub32 (Const32 [c]) (Sub32 (Const32 [d]) x)) // cond: // result: (Add32 (Const32 [int64(int32(c-d))]) x) @@ -23085,6 +25506,126 @@ func rewriteValuegeneric_OpSub64_0(v *Value) bool { v.AddArg(x) return true } + // match: (Sub64 (Mul64 x y) (Mul64 x z)) + // cond: + // result: (Mul64 x (Sub64 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul64 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + z := v_1.Args[1] + v.reset(OpMul64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub64, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Sub64 (Mul64 y x) (Mul64 x z)) + // cond: + // result: (Mul64 x (Sub64 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul64 { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul64 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + z := v_1.Args[1] + v.reset(OpMul64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub64, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Sub64 (Mul64 x y) (Mul64 z x)) + // cond: + // result: (Mul64 x (Sub64 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul64 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul64 { + break + } + _ 
= v_1.Args[1] + z := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub64, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Sub64 (Mul64 y x) (Mul64 z x)) + // cond: + // result: (Mul64 x (Sub64 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul64 { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul64 { + break + } + _ = v_1.Args[1] + z := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul64) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub64, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } // match: (Sub64 x x) // cond: // result: (Const64 [0]) @@ -23158,6 +25699,11 @@ func rewriteValuegeneric_OpSub64_0(v *Value) bool { v.AddArg(x) return true } + return false +} +func rewriteValuegeneric_OpSub64_10(v *Value) bool { + b := v.Block + _ = b // match: (Sub64 (Add64 y x) y) // cond: // result: x @@ -23266,11 +25812,6 @@ func rewriteValuegeneric_OpSub64_0(v *Value) bool { v.AddArg(x) return true } - return false -} -func rewriteValuegeneric_OpSub64_10(v *Value) bool { - b := v.Block - _ = b // match: (Sub64 (Const64 [c]) (Sub64 (Const64 [d]) x)) // cond: // result: (Add64 (Const64 [c-d]) x) @@ -23389,6 +25930,126 @@ func rewriteValuegeneric_OpSub8_0(v *Value) bool { v.AddArg(x) return true } + // match: (Sub8 (Mul8 x y) (Mul8 x z)) + // cond: + // result: (Mul8 x (Sub8 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul8 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul8 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + z := v_1.Args[1] + v.reset(OpMul8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub8, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Sub8 (Mul8 y x) (Mul8 x z)) + // cond: + // result: (Mul8 x (Sub8 y z)) 
+ for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul8 { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul8 { + break + } + _ = v_1.Args[1] + if x != v_1.Args[0] { + break + } + z := v_1.Args[1] + v.reset(OpMul8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub8, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Sub8 (Mul8 x y) (Mul8 z x)) + // cond: + // result: (Mul8 x (Sub8 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul8 { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + y := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul8 { + break + } + _ = v_1.Args[1] + z := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub8, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (Sub8 (Mul8 y x) (Mul8 z x)) + // cond: + // result: (Mul8 x (Sub8 y z)) + for { + t := v.Type + _ = v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpMul8 { + break + } + _ = v_0.Args[1] + y := v_0.Args[0] + x := v_0.Args[1] + v_1 := v.Args[1] + if v_1.Op != OpMul8 { + break + } + _ = v_1.Args[1] + z := v_1.Args[0] + if x != v_1.Args[1] { + break + } + v.reset(OpMul8) + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpSub8, t) + v0.AddArg(y) + v0.AddArg(z) + v.AddArg(v0) + return true + } // match: (Sub8 x x) // cond: // result: (Const8 [0]) @@ -23462,6 +26123,11 @@ func rewriteValuegeneric_OpSub8_0(v *Value) bool { v.AddArg(x) return true } + return false +} +func rewriteValuegeneric_OpSub8_10(v *Value) bool { + b := v.Block + _ = b // match: (Sub8 (Add8 y x) y) // cond: // result: x @@ -23570,11 +26236,6 @@ func rewriteValuegeneric_OpSub8_0(v *Value) bool { v.AddArg(x) return true } - return false -} -func rewriteValuegeneric_OpSub8_10(v *Value) bool { - b := v.Block - _ = b // match: (Sub8 (Const8 [c]) (Sub8 (Const8 [d]) x)) // cond: // result: (Add8 (Const8 
[int64(int8(c-d))]) x) @@ -26219,6 +28880,7 @@ func rewriteBlockgeneric(b *Block) bool { cond := v.Args[0] b.Kind = BlockIf b.SetControl(cond) + b.Aux = nil b.swapSuccessors() return true } @@ -26236,6 +28898,7 @@ func rewriteBlockgeneric(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil return true } // match: (If (ConstBool [c]) yes no) @@ -26252,6 +28915,7 @@ func rewriteBlockgeneric(b *Block) bool { } b.Kind = BlockFirst b.SetControl(nil) + b.Aux = nil b.swapSuccessors() return true } diff --git a/src/cmd/compile/internal/ssa/sizeof_test.go b/src/cmd/compile/internal/ssa/sizeof_test.go index 9fab7b664f7..f8bbed91b4d 100644 --- a/src/cmd/compile/internal/ssa/sizeof_test.go +++ b/src/cmd/compile/internal/ssa/sizeof_test.go @@ -24,6 +24,7 @@ func TestSizeof(t *testing.T) { }{ {Value{}, 68, 112}, {Block{}, 152, 288}, + {LocalSlot{}, 32, 48}, {valState{}, 28, 40}, } diff --git a/src/cmd/compile/internal/ssa/softfloat.go b/src/cmd/compile/internal/ssa/softfloat.go new file mode 100644 index 00000000000..39829b046c5 --- /dev/null +++ b/src/cmd/compile/internal/ssa/softfloat.go @@ -0,0 +1,66 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssa + +import "math" + +func softfloat(f *Func) { + if !f.Config.SoftFloat { + return + } + newInt64 := false + + for _, b := range f.Blocks { + for _, v := range b.Values { + if v.Type.IsFloat() { + switch v.Op { + case OpPhi, OpLoad, OpArg: + if v.Type.Size() == 4 { + v.Type = f.Config.Types.UInt32 + } else { + v.Type = f.Config.Types.UInt64 + } + case OpConst32F: + v.Op = OpConst32 + v.Type = f.Config.Types.UInt32 + v.AuxInt = int64(int32(math.Float32bits(i2f32(v.AuxInt)))) + case OpConst64F: + v.Op = OpConst64 + v.Type = f.Config.Types.UInt64 + case OpNeg32F: + arg0 := v.Args[0] + v.reset(OpXor32) + v.Type = f.Config.Types.UInt32 + v.AddArg(arg0) + mask := v.Block.NewValue0(v.Pos, OpConst32, v.Type) + mask.AuxInt = -0x80000000 + v.AddArg(mask) + case OpNeg64F: + arg0 := v.Args[0] + v.reset(OpXor64) + v.Type = f.Config.Types.UInt64 + v.AddArg(arg0) + mask := v.Block.NewValue0(v.Pos, OpConst64, v.Type) + mask.AuxInt = -0x8000000000000000 + v.AddArg(mask) + case OpRound32F: + v.Op = OpCopy + v.Type = f.Config.Types.UInt32 + case OpRound64F: + v.Op = OpCopy + v.Type = f.Config.Types.UInt64 + } + newInt64 = newInt64 || v.Type.Size() == 8 + } + } + } + + if newInt64 && f.Config.RegSize == 4 { + // On 32bit arch, decompose Uint64 introduced in the switch above. 
+ decomposeBuiltIn(f) + applyRewrite(f, rewriteBlockdec64, rewriteValuedec64) + } + +} diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go index 3b44986eeed..ca7f95dee1e 100644 --- a/src/cmd/compile/internal/ssa/stackalloc.go +++ b/src/cmd/compile/internal/ssa/stackalloc.go @@ -151,9 +151,9 @@ func (s *stackAllocState) stackalloc() { if v.Op != OpArg { continue } - loc := LocalSlot{v.Aux.(GCNode), v.Type, v.AuxInt} + loc := LocalSlot{N: v.Aux.(GCNode), Type: v.Type, Off: v.AuxInt} if f.pass.debug > stackDebug { - fmt.Printf("stackalloc %s to %s\n", v, loc.Name()) + fmt.Printf("stackalloc %s to %s\n", v, loc) } f.setHome(v, loc) } @@ -216,7 +216,7 @@ func (s *stackAllocState) stackalloc() { } } if f.pass.debug > stackDebug { - fmt.Printf("stackalloc %s to %s\n", v, name.Name()) + fmt.Printf("stackalloc %s to %s\n", v, name) } s.nNamedSlot++ f.setHome(v, name) @@ -253,7 +253,7 @@ func (s *stackAllocState) stackalloc() { // Use the stack variable at that index for v. 
loc := locs[i] if f.pass.debug > stackDebug { - fmt.Printf("stackalloc %s to %s\n", v, loc.Name()) + fmt.Printf("stackalloc %s to %s\n", v, loc) } f.setHome(v, loc) slots[v.ID] = i diff --git a/src/cmd/compile/internal/ssa/testdata/hist.dbg-dlv.nexts b/src/cmd/compile/internal/ssa/testdata/hist.dbg-dlv.nexts new file mode 100644 index 00000000000..ec79b77de27 --- /dev/null +++ b/src/cmd/compile/internal/ssa/testdata/hist.dbg-dlv.nexts @@ -0,0 +1,99 @@ + ./testdata/hist.go +55: func test() { +57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}} +58: tinycall() // this forces l etc to stack +59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O) +60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O) +61: sink = dx + dy //gdb-opt=(dx,dy) +63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main' +64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main' +65: if len(os.Args) > 1 { +73: scanner := bufio.NewScanner(reader) +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = 
ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +84: t := 0 +85: n := 0 +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +88: continue +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +90: t += i * a +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +90: t += i * a +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +88: continue +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +90: t += i * a +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +86: for i, a := range hist { +87: if a == 0 { 
//gdb-opt=(a,n,t) +90: t += i * a +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +88: continue +86: for i, a := range hist { +98: } diff --git a/src/cmd/compile/internal/ssa/testdata/hist.dbg-gdb.nexts b/src/cmd/compile/internal/ssa/testdata/hist.dbg-gdb.nexts new file mode 100644 index 00000000000..fe000147bde --- /dev/null +++ b/src/cmd/compile/internal/ssa/testdata/hist.dbg-gdb.nexts @@ -0,0 +1,123 @@ + src/cmd/compile/internal/ssa/testdata/hist.go +55: func test() { +57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}} +58: tinycall() // this forces l etc to stack +59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O) +l.begin.x = 1 +l.end.y = 4 +60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O) +61: sink = dx + dy //gdb-opt=(dx,dy) +63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main' +64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main' +hist = []int = {0, 0, 0, 0, 0, 0, 0} +65: if len(os.Args) > 1 { +73: scanner := bufio.NewScanner(reader) +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 1 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 1 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 1 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for 
scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 2 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 2 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 2 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 4 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 4 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 5 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +84: t := 0 +85: n := 0 +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +88: continue +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +90: t += i * a +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +n = 3 +i = 1 +t = 3 +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +90: t += i * a +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +n = 6 +i = 2 +t = 9 +86: for i, a := range hist { +87: if a == 0 { 
//gdb-opt=(a,n,t) +88: continue +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +90: t += i * a +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +n = 8 +i = 4 +t = 17 +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +90: t += i * a +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +n = 9 +i = 5 +t = 22 +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +88: continue +86: for i, a := range hist { +98: } diff --git a/src/cmd/compile/internal/ssa/testdata/hist.go b/src/cmd/compile/internal/ssa/testdata/hist.go new file mode 100644 index 00000000000..8a0cc272808 --- /dev/null +++ b/src/cmd/compile/internal/ssa/testdata/hist.go @@ -0,0 +1,98 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This is the input program for an end-to-end test of the DWARF produced +// by the compiler. It is compiled with various flags, then the resulting +// binary is "debugged" under the control of a harness. Because the compile+debug +// step is time-consuming, the tests for different bugs are all accumulated here +// so that their cost is only the time to "n" through the additional code. 
+ +package main + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +type point struct { + x, y int +} + +type line struct { + begin, end point +} + +var zero int +var sink int + +//go:noinline +func tinycall() { +} + +func ensure(n int, sl []int) []int { + for len(sl) <= n { + sl = append(sl, 0) + } + return sl +} + +var cannedInput string = `1 +1 +1 +2 +2 +2 +4 +4 +5 +` + +func test() { + // For #19868 + l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}} + tinycall() // this forces l etc to stack + dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O) + dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O) + sink = dx + dy //gdb-opt=(dx,dy) + // For #21098 + hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main' + var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main' + if len(os.Args) > 1 { + var err error + reader, err = os.Open(os.Args[1]) + if err != nil { + fmt.Fprintf(os.Stderr, "There was an error opening %s: %v\n", os.Args[1], err) + return + } + } + scanner := bufio.NewScanner(reader) + for scanner.Scan() { //gdb-opt=(scanner/A) + s := scanner.Text() + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) + fmt.Fprintf(os.Stderr, "There was an error: %v\n", err) + return + } + hist = ensure(int(i), hist) + hist[int(i)]++ + } + t := 0 + n := 0 + for i, a := range hist { + if a == 0 { //gdb-opt=(a,n,t) + continue + } + t += i * a + n += a + fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) + } +} + +func main() { + test() +} diff --git a/src/cmd/compile/internal/ssa/testdata/hist.opt-dlv.nexts b/src/cmd/compile/internal/ssa/testdata/hist.opt-dlv.nexts new file mode 100644 index 00000000000..b98e3c6e65e --- /dev/null +++ b/src/cmd/compile/internal/ssa/testdata/hist.opt-dlv.nexts @@ -0,0 
+1,105 @@ + ./testdata/hist.go +55: func test() { +57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}} +58: tinycall() // this forces l etc to stack +57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}} +59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O) +60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O) +61: sink = dx + dy //gdb-opt=(dx,dy) +63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main' +64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main' +65: if len(os.Args) > 1 { +74: for scanner.Scan() { //gdb-opt=(scanner/A) +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +76: i, err := 
strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +88: continue +87: if a == 0 { //gdb-opt=(a,n,t) +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +90: t += i * a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +90: t += i * a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +86: for i, a := range hist { +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +87: if a == 0 { //gdb-opt=(a,n,t) +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +90: t += i * a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +90: t += i * a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +86: for i, a := range hist { +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +87: if a == 0 { //gdb-opt=(a,n,t) +88: continue +87: if a == 0 { //gdb-opt=(a,n,t) +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, 
n, i*a, t) //gdb-dbg=(n,i,t) +90: t += i * a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +90: t += i * a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +86: for i, a := range hist { +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +87: if a == 0 { //gdb-opt=(a,n,t) +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +90: t += i * a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +90: t += i * a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +86: for i, a := range hist { +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +87: if a == 0 { //gdb-opt=(a,n,t) +88: continue +98: } diff --git a/src/cmd/compile/internal/ssa/testdata/hist.opt-gdb.nexts b/src/cmd/compile/internal/ssa/testdata/hist.opt-gdb.nexts new file mode 100644 index 00000000000..e4dc2808694 --- /dev/null +++ b/src/cmd/compile/internal/ssa/testdata/hist.opt-gdb.nexts @@ -0,0 +1,182 @@ + src/cmd/compile/internal/ssa/testdata/hist.go +55: func test() { +57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}} +58: tinycall() // this forces l etc to stack +57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}} +59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O) +l = {begin = {x = 1, y = 2}, end = {x = 3, y = 4}} +dx = +dy = +60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O) +dx = 2 +dy = +61: sink = dx + dy //gdb-opt=(dx,dy) +dx = 2 +dy = 2 +63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main' +dx = +dy = +64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code 
is in 'test' instead of 'main' +65: if len(os.Args) > 1 { +73: scanner := bufio.NewScanner(reader) +74: for scanner.Scan() { //gdb-opt=(scanner/A) +scanner = (struct bufio.Scanner *) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = []int = {0, 0, 0, 0, 0, 0, 0} +i = 1 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +scanner = (struct bufio.Scanner *) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = []int = {0, 1, 0, 0, 0, 0, 0} +i = 1 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +scanner = (struct bufio.Scanner *) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = []int = {0, 2, 0, 0, 0, 0, 0} +i = 1 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +scanner = (struct bufio.Scanner *) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = []int = {0, 3, 0, 0, 0, 0, 0} +i = 2 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +scanner = (struct bufio.Scanner *) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = []int = {0, 3, 1, 0, 0, 0, 0} +i = 2 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +scanner = (struct bufio.Scanner *) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) 
//gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = []int = {0, 3, 2, 0, 0, 0, 0} +i = 2 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +scanner = (struct bufio.Scanner *) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = []int = {0, 3, 3, 0, 0, 0, 0} +i = 4 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +scanner = (struct bufio.Scanner *) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = []int = {0, 3, 3, 0, 1, 0, 0} +i = 4 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +scanner = (struct bufio.Scanner *) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = []int = {0, 3, 3, 0, 2, 0, 0} +i = 5 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +scanner = (struct bufio.Scanner *) +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +a = 0 +n = 0 +t = 0 +88: continue +87: if a == 0 { //gdb-opt=(a,n,t) +a = 3 +n = 0 +t = 0 +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +90: t += i * a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +90: t += i * a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +86: for i, a := range hist { +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +87: if a == 0 { //gdb-opt=(a,n,t) +a = 3 +n = 3 +t = 3 +92: 
fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +90: t += i * a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +90: t += i * a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +86: for i, a := range hist { +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +87: if a == 0 { //gdb-opt=(a,n,t) +a = 0 +n = 6 +t = 9 +88: continue +87: if a == 0 { //gdb-opt=(a,n,t) +a = 2 +n = 6 +t = 9 +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +90: t += i * a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +90: t += i * a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +86: for i, a := range hist { +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +87: if a == 0 { //gdb-opt=(a,n,t) +a = 1 +n = 8 +t = 17 +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +90: t += i * a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +90: t += i * a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +86: for i, a := range hist { +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +87: if a == 0 { //gdb-opt=(a,n,t) +a = 0 +n = 9 +t = 22 +88: continue +98: } diff --git a/src/cmd/compile/internal/ssa/testdata/i22558.dbg-22558-dlv.nexts b/src/cmd/compile/internal/ssa/testdata/i22558.dbg-22558-dlv.nexts new file mode 100644 index 00000000000..3c33fe0bfd2 --- /dev/null +++ 
b/src/cmd/compile/internal/ssa/testdata/i22558.dbg-22558-dlv.nexts @@ -0,0 +1,11 @@ + ./testdata/i22558.go +19: func test(t *thing, u *thing) { +20: if t.next != nil { +23: fmt.Fprintf(os.Stderr, "%s\n", t.name) +24: u.self = u +25: t.self = t +26: t.next = u +27: for _, p := range t.stuff { +28: if isFoo(t, p) { +29: return +43: } diff --git a/src/cmd/compile/internal/ssa/testdata/i22558.dbg-22558-gdb.nexts b/src/cmd/compile/internal/ssa/testdata/i22558.dbg-22558-gdb.nexts new file mode 100644 index 00000000000..b88a227ec64 --- /dev/null +++ b/src/cmd/compile/internal/ssa/testdata/i22558.dbg-22558-gdb.nexts @@ -0,0 +1,11 @@ + src/cmd/compile/internal/ssa/testdata/i22558.go +19: func test(t *thing, u *thing) { +20: if t.next != nil { +23: fmt.Fprintf(os.Stderr, "%s\n", t.name) +24: u.self = u +25: t.self = t +26: t.next = u +27: for _, p := range t.stuff { +28: if isFoo(t, p) { +29: return +43: } diff --git a/src/cmd/compile/internal/ssa/testdata/i22558.go b/src/cmd/compile/internal/ssa/testdata/i22558.go new file mode 100644 index 00000000000..a62e11e5ebe --- /dev/null +++ b/src/cmd/compile/internal/ssa/testdata/i22558.go @@ -0,0 +1,43 @@ +package main + +import ( + "fmt" + "os" +) + +type big struct { + pile [768]int8 +} + +type thing struct { + name string + next *thing + self *thing + stuff []big +} + +func test(t *thing, u *thing) { + if t.next != nil { + return + } + fmt.Fprintf(os.Stderr, "%s\n", t.name) + u.self = u + t.self = t + t.next = u + for _, p := range t.stuff { + if isFoo(t, p) { + return + } + } +} + +//go:noinline +func isFoo(t *thing, b big) bool { + return true +} + +func main() { + t := &thing{name: "t", self: nil, next: nil, stuff: make([]big, 1)} + u := thing{name: "u", self: t, next: t, stuff: make([]big, 1)} + test(t, &u) +} diff --git a/src/cmd/compile/internal/ssa/testdata/i22600.dbg-race-gdb.nexts b/src/cmd/compile/internal/ssa/testdata/i22600.dbg-race-gdb.nexts new file mode 100644 index 00000000000..bfffec4a5dc --- /dev/null +++ 
b/src/cmd/compile/internal/ssa/testdata/i22600.dbg-race-gdb.nexts @@ -0,0 +1,7 @@ + src/cmd/compile/internal/ssa/testdata/i22600.go +8: func test() { +9: pwd, err := os.Getwd() +10: if err != nil { +14: fmt.Println(pwd) +15: } +19: } diff --git a/src/cmd/compile/internal/ssa/testdata/i22600.go b/src/cmd/compile/internal/ssa/testdata/i22600.go new file mode 100644 index 00000000000..f7a7ade3746 --- /dev/null +++ b/src/cmd/compile/internal/ssa/testdata/i22600.go @@ -0,0 +1,19 @@ +package main + +import ( + "fmt" + "os" +) + +func test() { + pwd, err := os.Getwd() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + fmt.Println(pwd) +} + +func main() { + test() +} diff --git a/src/cmd/compile/internal/ssa/trim.go b/src/cmd/compile/internal/ssa/trim.go index 04b4fd4d541..d97c6baaa1b 100644 --- a/src/cmd/compile/internal/ssa/trim.go +++ b/src/cmd/compile/internal/ssa/trim.go @@ -121,9 +121,8 @@ func trimmableBlock(b *Block) bool { } // mergePhi adjusts the number of `v`s arguments to account for merge -// of `b`, which was `i`th predecessor of the `v`s block. Returns -// `v`. -func mergePhi(v *Value, i int, b *Block) *Value { +// of `b`, which was `i`th predecessor of the `v`s block. +func mergePhi(v *Value, i int, b *Block) { u := v.Args[i] if u.Block == b { if u.Op != OpPhi { @@ -147,5 +146,4 @@ func mergePhi(v *Value, i int, b *Block) *Value { v.AddArg(v.Args[i]) } } - return v } diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go index 7edc71be52a..832ed8d320d 100644 --- a/src/cmd/compile/internal/ssa/value.go +++ b/src/cmd/compile/internal/ssa/value.go @@ -6,10 +6,11 @@ package ssa import ( "cmd/compile/internal/types" - "cmd/internal/obj" "cmd/internal/src" "fmt" "math" + "sort" + "strings" ) // A Value represents a value in the SSA representation of the program. @@ -98,7 +99,7 @@ func (v *Value) AuxValAndOff() ValAndOff { return ValAndOff(v.AuxInt) } -// long form print. v# = opcode [aux] args [: reg] +// long form print. 
v# = opcode [aux] args [: reg] (names) func (v *Value) LongString() string { s := fmt.Sprintf("v%d = %s", v.ID, v.Op) s += " <" + v.Type.String() + ">" @@ -108,7 +109,20 @@ func (v *Value) LongString() string { } r := v.Block.Func.RegAlloc if int(v.ID) < len(r) && r[v.ID] != nil { - s += " : " + r[v.ID].Name() + s += " : " + r[v.ID].String() + } + var names []string + for name, values := range v.Block.Func.NamedValues { + for _, value := range values { + if value == v { + names = append(names, name.String()) + break // drop duplicates. + } + } + } + if len(names) != 0 { + sort.Strings(names) // Otherwise a source of variation in debugging output. + s += " (" + strings.Join(names, ", ") + ")" } return s } @@ -228,7 +242,13 @@ func (v *Value) copyInto(b *Block) *Value { // The copied value receives no source code position to avoid confusing changes // in debugger information (the intended user is the register allocator). func (v *Value) copyIntoNoXPos(b *Block) *Value { - c := b.NewValue0(src.NoXPos, v.Op, v.Type) // Lose the position, this causes line number churn otherwise. + return v.copyIntoWithXPos(b, src.NoXPos) +} + +// copyIntoWithXPos makes a new value identical to v and adds it to the end of b. +// The supplied position is used as the position of the new value. +func (v *Value) copyIntoWithXPos(b *Block, pos src.XPos) *Value { + c := b.NewValue0(pos, v.Op, v.Type) c.Aux = v.Aux c.AuxInt = v.AuxInt c.AddArgs(v.Args...) @@ -251,38 +271,6 @@ func (v *Value) isGenericIntConst() bool { return v != nil && (v.Op == OpConst64 || v.Op == OpConst32 || v.Op == OpConst16 || v.Op == OpConst8) } -// ExternSymbol is an aux value that encodes a variable's -// constant offset from the static base pointer. -type ExternSymbol struct { - Sym *obj.LSym - // Note: the offset for an external symbol is not - // calculated until link time. -} - -// ArgSymbol is an aux value that encodes an argument or result -// variable's constant offset from FP (FP = SP + framesize). 
-type ArgSymbol struct { - Node GCNode // A *gc.Node referring to the argument/result variable. -} - -// AutoSymbol is an aux value that encodes a local variable's -// constant offset from SP. -type AutoSymbol struct { - Node GCNode // A *gc.Node referring to a local (auto) variable. -} - -func (s *ExternSymbol) String() string { - return s.Sym.String() -} - -func (s *ArgSymbol) String() string { - return s.Node.String() -} - -func (s *AutoSymbol) String() string { - return s.Node.String() -} - // Reg returns the register assigned to v, in cmd/internal/obj/$ARCH numbering. func (v *Value) Reg() int16 { reg := v.Block.Func.RegAlloc[v.ID] @@ -334,3 +322,14 @@ func (v *Value) MemoryArg() *Value { } return nil } + +// LackingPos indicates whether v is a value that is unlikely to have a correct +// position assigned to it. Ignoring such values leads to more user-friendly positions +// assigned to nearby values and the blocks containing them. +func (v *Value) LackingPos() bool { + // The exact definition of LackingPos is somewhat heuristically defined and may change + // in the future, for example if some of these operations are generated more carefully + // with respect to their source position. 
+ return v.Op == OpVarDef || v.Op == OpVarKill || v.Op == OpVarLive || v.Op == OpPhi || + (v.Op == OpFwdRef || v.Op == OpCopy) && v.Type == types.TypeMem +} diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go index cf22724a869..b711d8d2bf2 100644 --- a/src/cmd/compile/internal/ssa/writebarrier.go +++ b/src/cmd/compile/internal/ssa/writebarrier.go @@ -17,7 +17,7 @@ func needwb(v *Value) bool { if !ok { v.Fatalf("store aux is not a type: %s", v.LongString()) } - if !t.HasPointer() { + if !t.HasHeapPointer() { return false } if IsStackAddr(v.Args[0]) { @@ -44,7 +44,7 @@ func writebarrier(f *Func) { } var sb, sp, wbaddr, const0 *Value - var writebarrierptr, typedmemmove, typedmemclr *obj.LSym + var writebarrierptr, typedmemmove, typedmemclr, gcWriteBarrier *obj.LSym var stores, after []*Value var sset *sparseSet var storeNumber []int32 @@ -52,7 +52,7 @@ func writebarrier(f *Func) { for _, b := range f.Blocks { // range loop is safe since the blocks we added contain no stores to expand // first, identify all the stores that need to insert a write barrier. // mark them with WB ops temporarily. record presence of WB ops. 
- hasStore := false + nWBops := 0 // count of temporarily created WB ops remaining to be rewritten in the current block for _, v := range b.Values { switch v.Op { case OpStore, OpMove, OpZero: @@ -65,11 +65,11 @@ func writebarrier(f *Func) { case OpZero: v.Op = OpZeroWB } - hasStore = true + nWBops++ } } } - if !hasStore { + if nWBops == 0 { continue } @@ -94,9 +94,12 @@ func writebarrier(f *Func) { if sp == nil { sp = f.Entry.NewValue0(initpos, OpSP, f.Config.Types.Uintptr) } - wbsym := &ExternSymbol{Sym: f.fe.Syslook("writeBarrier")} + wbsym := f.fe.Syslook("writeBarrier") wbaddr = f.Entry.NewValue1A(initpos, OpAddr, f.Config.Types.UInt32Ptr, wbsym, sb) writebarrierptr = f.fe.Syslook("writebarrierptr") + if !f.fe.Debug_eagerwb() { + gcWriteBarrier = f.fe.Syslook("gcWriteBarrier") + } typedmemmove = f.fe.Syslook("typedmemmove") typedmemclr = f.fe.Syslook("typedmemclr") const0 = f.ConstInt32(initpos, f.Config.Types.UInt32, 0) @@ -170,6 +173,15 @@ func writebarrier(f *Func) { b.Succs = b.Succs[:0] b.AddEdgeTo(bThen) b.AddEdgeTo(bElse) + // TODO: For OpStoreWB and the buffered write barrier, + // we could move the write out of the write barrier, + // which would lead to fewer branches. We could do + // something similar to OpZeroWB, since the runtime + // could provide just the barrier half and then we + // could unconditionally do an OpZero (which could + // also generate better zeroing code). OpMoveWB is + // trickier and would require changing how + // cgoCheckMemmove works. 
bThen.AddEdgeTo(bEnd) bElse.AddEdgeTo(bEnd) @@ -182,19 +194,22 @@ func writebarrier(f *Func) { pos := w.Pos var fn *obj.LSym - var typ *ExternSymbol + var typ *obj.LSym var val *Value switch w.Op { case OpStoreWB: fn = writebarrierptr val = w.Args[1] + nWBops-- case OpMoveWB: fn = typedmemmove val = w.Args[1] - typ = &ExternSymbol{Sym: w.Aux.(*types.Type).Symbol()} + typ = w.Aux.(*types.Type).Symbol() + nWBops-- case OpZeroWB: fn = typedmemclr - typ = &ExternSymbol{Sym: w.Aux.(*types.Type).Symbol()} + typ = w.Aux.(*types.Type).Symbol() + nWBops-- case OpVarDef, OpVarLive, OpVarKill: } @@ -202,7 +217,11 @@ func writebarrier(f *Func) { switch w.Op { case OpStoreWB, OpMoveWB, OpZeroWB: volatile := w.Op == OpMoveWB && isVolatile(val) - memThen = wbcall(pos, bThen, fn, typ, ptr, val, memThen, sp, sb, volatile) + if w.Op == OpStoreWB && !f.fe.Debug_eagerwb() { + memThen = bThen.NewValue3A(pos, OpWB, types.TypeMem, gcWriteBarrier, ptr, val, memThen) + } else { + memThen = wbcall(pos, bThen, fn, typ, ptr, val, memThen, sp, sb, volatile) + } case OpVarDef, OpVarLive, OpVarKill: memThen = bThen.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, memThen) } @@ -223,12 +242,7 @@ func writebarrier(f *Func) { if fn != nil { // Note that we set up a writebarrier function call. - if !f.WBPos.IsKnown() { - f.WBPos = pos - } - if f.fe.Debug_wb() { - f.Warnl(pos, "write barrier") - } + f.fe.SetWBPos(pos) } } @@ -261,20 +275,15 @@ func writebarrier(f *Func) { } // if we have more stores in this block, do this block again - // check from end to beginning, to avoid quadratic behavior; issue 13554 - // TODO: track the final value to avoid any looping here at all - for i := len(b.Values) - 1; i >= 0; i-- { - switch b.Values[i].Op { - case OpStoreWB, OpMoveWB, OpZeroWB: - goto again - } + if nWBops > 0 { + goto again } } } // wbcall emits write barrier runtime call in b, returns memory. // if valIsVolatile, it moves val into temp space before making the call. 
-func wbcall(pos src.XPos, b *Block, fn *obj.LSym, typ *ExternSymbol, ptr, val, mem, sp, sb *Value, valIsVolatile bool) *Value { +func wbcall(pos src.XPos, b *Block, fn, typ *obj.LSym, ptr, val, mem, sp, sb *Value, valIsVolatile bool) *Value { config := b.Func.Config var tmp GCNode @@ -284,9 +293,8 @@ func wbcall(pos src.XPos, b *Block, fn *obj.LSym, typ *ExternSymbol, ptr, val, m // value we're trying to move. t := val.Type.ElemType() tmp = b.Func.fe.Auto(val.Pos, t) - aux := &AutoSymbol{Node: tmp} mem = b.NewValue1A(pos, OpVarDef, types.TypeMem, tmp, mem) - tmpaddr := b.NewValue1A(pos, OpAddr, t.PtrTo(), aux, sp) + tmpaddr := b.NewValue1A(pos, OpAddr, t.PtrTo(), tmp, sp) siz := t.Size() mem = b.NewValue3I(pos, OpMove, types.TypeMem, siz, tmpaddr, val, mem) mem.Aux = t diff --git a/src/cmd/compile/internal/syntax/branches.go b/src/cmd/compile/internal/syntax/branches.go index 5fecdd6551a..2fd97a4a678 100644 --- a/src/cmd/compile/internal/syntax/branches.go +++ b/src/cmd/compile/internal/syntax/branches.go @@ -131,7 +131,7 @@ type targets struct { // blockBranches processes a block's body starting at start and returns the // list of unresolved (forward) gotos. parent is the immediately enclosing // block (or nil), ctxt provides information about the enclosing statements, -// and lstmt is the labeled statement asociated with this block, or nil. +// and lstmt is the labeled statement associated with this block, or nil. 
func (ls *labelScope) blockBranches(parent *block, ctxt targets, lstmt *LabeledStmt, start src.Pos, body []Stmt) []*BranchStmt { b := &block{parent: parent, start: start, lstmt: lstmt} diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go index b9129b0d9cd..ff3e769864e 100644 --- a/src/cmd/compile/internal/syntax/parser.go +++ b/src/cmd/compile/internal/syntax/parser.go @@ -23,6 +23,7 @@ type parser struct { scanner first error // first error encountered + errcnt int // number of errors encountered pragma Pragma // pragma flags fnest int // function nesting level (for error handling) @@ -57,6 +58,7 @@ func (p *parser) init(base *src.PosBase, r io.Reader, errh ErrorHandler, pragh P ) p.first = nil + p.errcnt = 0 p.pragma = 0 p.fnest = 0 @@ -78,11 +80,12 @@ func (p *parser) updateBase(line, col uint, text string) { p.error_at(p.pos_at(line, col+uint(i+1)), "invalid line number: "+nstr) return } - absFile := text[:i] + filename := text[:i] + absFilename := filename if p.fileh != nil { - absFile = p.fileh(absFile) + absFilename = p.fileh(filename) } - p.base = src.NewLinePragmaBase(src.MakePos(p.base.Pos().Base(), line, col), absFile, uint(n)) + p.base = src.NewLinePragmaBase(src.MakePos(p.base.Pos().Base(), line, col), filename, absFilename, uint(n)) } func (p *parser) got(tok token) bool { @@ -95,7 +98,7 @@ func (p *parser) got(tok token) bool { func (p *parser) want(tok token) { if !p.got(tok) { - p.syntax_error("expecting " + tok.String()) + p.syntax_error("expecting " + tokstring(tok)) p.advance() } } @@ -114,6 +117,7 @@ func (p *parser) error_at(pos src.Pos, msg string) { if p.first == nil { p.first = err } + p.errcnt++ if p.errh == nil { panic(p.first) } @@ -123,7 +127,7 @@ func (p *parser) error_at(pos src.Pos, msg string) { // syntax_error_at reports a syntax error at the given position. 
func (p *parser) syntax_error_at(pos src.Pos, msg string) { if trace { - defer p.trace("syntax_error (" + msg + ")")() + p.print("syntax error: " + msg) } if p.tok == _EOF && p.first != nil { @@ -165,6 +169,18 @@ func (p *parser) syntax_error_at(pos src.Pos, msg string) { p.error_at(pos, "syntax error: unexpected "+tok+msg) } +// tokstring returns the English word for selected punctuation tokens +// for more readable error messages. +func tokstring(tok token) string { + switch tok { + case _Comma: + return "comma" + case _Semi: + return "semicolon or newline" + } + return tok.String() +} + // Convenience methods using the current token position. func (p *parser) pos() src.Pos { return p.pos_at(p.line, p.col) } func (p *parser) error(msg string) { p.error_at(p.pos(), msg) } @@ -179,7 +195,6 @@ const stopset uint64 = 1<<_Break | 1<<_Defer | 1<<_Fallthrough | 1<<_For | - 1<<_Func | 1<<_Go | 1<<_Goto | 1<<_If | @@ -192,40 +207,42 @@ const stopset uint64 = 1<<_Break | // Advance consumes tokens until it finds a token of the stopset or followlist. // The stopset is only considered if we are inside a function (p.fnest > 0). // The followlist is the list of valid tokens that can follow a production; -// if it is empty, exactly one token is consumed to ensure progress. +// if it is empty, exactly one (non-EOF) token is consumed to ensure progress. 
func (p *parser) advance(followlist ...token) { - if len(followlist) == 0 { - p.next() - return + if trace { + p.print(fmt.Sprintf("advance %s", followlist)) } // compute follow set // (not speed critical, advance is only called in error situations) - var followset uint64 = 1 << _EOF // never skip over EOF - for _, tok := range followlist { - followset |= 1 << tok + var followset uint64 = 1 << _EOF // don't skip over EOF + if len(followlist) > 0 { + if p.fnest > 0 { + followset |= stopset + } + for _, tok := range followlist { + followset |= 1 << tok + } } - for !(contains(followset, p.tok) || p.fnest > 0 && contains(stopset, p.tok)) { + for !contains(followset, p.tok) { + if trace { + p.print("skip " + p.tok.String()) + } p.next() + if len(followlist) == 0 { + break + } } -} -func tokstring(tok token) string { - switch tok { - case _EOF: - return "EOF" - case _Comma: - return "comma" - case _Semi: - return "semicolon" + if trace { + p.print("next " + p.tok.String()) } - return tok.String() } // usage: defer p.trace(msg)() func (p *parser) trace(msg string) func() { - fmt.Printf("%5d: %s%s (\n", p.line, p.indent, msg) + p.print(msg + " (") const tab = ". " p.indent = append(p.indent, tab...) return func() { @@ -233,10 +250,14 @@ func (p *parser) trace(msg string) func() { if x := recover(); x != nil { panic(x) // skip print_trace } - fmt.Printf("%5d: %s)\n", p.line, p.indent) + p.print(")") } } +func (p *parser) print(msg string) { + fmt.Printf("%5d: %s%s\n", p.line, p.indent, msg) +} + // ---------------------------------------------------------------------------- // Package files // @@ -331,17 +352,47 @@ func isEmptyFuncDecl(dcl Decl) bool { // ---------------------------------------------------------------------------- // Declarations -// appendGroup(f) = f | "(" { f ";" } ")" . 
-func (p *parser) appendGroup(list []Decl, f func(*Group) Decl) []Decl { - if p.got(_Lparen) { - g := new(Group) - for p.tok != _EOF && p.tok != _Rparen { - list = append(list, f(g)) - if !p.osemi(_Rparen) { - break +// list parses a possibly empty, sep-separated list, optionally +// followed by sep and enclosed by ( and ) or { and }. open is +// one of _Lparen, or _Lbrace, sep is one of _Comma or _Semi, +// and close is expected to be the (closing) opposite of open. +// For each list element, f is called. After f returns true, no +// more list elements are accepted. list returns the position +// of the closing token. +// +// list = "(" { f sep } ")" | +// "{" { f sep } "}" . // sep is optional before ")" or "}" +// +func (p *parser) list(open, sep, close token, f func() bool) src.Pos { + p.want(open) + + var done bool + for p.tok != _EOF && p.tok != close && !done { + done = f() + // sep is optional before close + if !p.got(sep) && p.tok != close { + p.syntax_error(fmt.Sprintf("expecting %s or %s", tokstring(sep), tokstring(close))) + p.advance(_Rparen, _Rbrack, _Rbrace) + if p.tok != close { + // position could be better but we had an error so we don't care + return p.pos() } } - p.want(_Rparen) + } + + pos := p.pos() + p.want(close) + return pos +} + +// appendGroup(f) = f | "(" { f ";" } ")" . 
// ";" is optional before ")" +func (p *parser) appendGroup(list []Decl, f func(*Group) Decl) []Decl { + if p.tok == _Lparen { + g := new(Group) + p.list(_Lparen, _Semi, _Rparen, func() bool { + list = append(list, f(g)) + return false + }) } else { list = append(list, f(nil)) } @@ -484,39 +535,32 @@ func (p *parser) funcDeclOrNil() *FuncDecl { return nil } - // TODO(gri) check for regular functions only - // if name.Sym.Name == "init" { - // name = renameinit() - // if params != nil || result != nil { - // p.error("func init must have no arguments and no return values") - // } - // } - - // if localpkg.Name == "main" && name.Name == "main" { - // if params != nil || result != nil { - // p.error("func main must have no arguments and no return values") - // } - // } - f.Name = p.name() f.Type = p.funcType() if p.tok == _Lbrace { - f.Body = p.blockStmt("") - if p.mode&CheckBranches != 0 { - checkBranches(f.Body, p.errh) - } + f.Body = p.funcBody() } - f.Pragma = p.pragma - // TODO(gri) deal with function properties - // if noescape && body != nil { - // p.error("can only use //go:noescape with external func implementations") - // } - return f } +func (p *parser) funcBody() *BlockStmt { + p.fnest++ + errcnt := p.errcnt + body := p.blockStmt("") + p.fnest-- + + // Don't check branches if there were syntax errors in the function + // as it may lead to spurious errors (e.g., see test/switch2.go) or + // possibly crashes due to incomplete syntax trees. 
+ if p.mode&CheckBranches != 0 && errcnt == p.errcnt { + checkBranches(body, p.errh) + } + + return body +} + // ---------------------------------------------------------------------------- // Expressions @@ -732,10 +776,7 @@ func (p *parser) operand(keep_parens bool) Expr { f := new(FuncLit) f.pos = pos f.Type = t - f.Body = p.blockStmt("") - if p.mode&CheckBranches != 0 { - checkBranches(f.Body, p.errh) - } + f.Body = p.funcBody() p.xnest-- return f @@ -865,7 +906,11 @@ loop: p.xnest-- case _Lparen: - x = p.call(x) + t := new(CallExpr) + t.pos = pos + t.Fun = x + t.ArgList, t.HasDots = p.argList() + x = t case _Lbrace: // operand may have returned a parenthesized complit @@ -925,10 +970,8 @@ func (p *parser) complitexpr() *CompositeLit { x := new(CompositeLit) x.pos = p.pos() - p.want(_Lbrace) p.xnest++ - - for p.tok != _EOF && p.tok != _Rbrace { + x.Rbrace = p.list(_Lbrace, _Comma, _Rbrace, func() bool { // value e := p.bare_complitexpr() if p.tok == _Colon { @@ -942,14 +985,9 @@ func (p *parser) complitexpr() *CompositeLit { x.NKeys++ } x.ElemList = append(x.ElemList, e) - if !p.ocomma(_Rbrace) { - break - } - } - - x.Rbrace = p.pos() + return false + }) p.xnest-- - p.want(_Rbrace) return x } @@ -1135,14 +1173,10 @@ func (p *parser) structType() *StructType { typ.pos = p.pos() p.want(_Struct) - p.want(_Lbrace) - for p.tok != _EOF && p.tok != _Rbrace { + p.list(_Lbrace, _Semi, _Rbrace, func() bool { p.fieldDecl(typ) - if !p.osemi(_Rbrace) { - break - } - } - p.want(_Rbrace) + return false + }) return typ } @@ -1157,36 +1191,16 @@ func (p *parser) interfaceType() *InterfaceType { typ.pos = p.pos() p.want(_Interface) - p.want(_Lbrace) - for p.tok != _EOF && p.tok != _Rbrace { + p.list(_Lbrace, _Semi, _Rbrace, func() bool { if m := p.methodDecl(); m != nil { typ.MethodList = append(typ.MethodList, m) } - if !p.osemi(_Rbrace) { - break - } - } - p.want(_Rbrace) + return false + }) return typ } -// FunctionBody = Block . 
-func (p *parser) funcBody() []Stmt { - if trace { - defer p.trace("funcBody")() - } - - p.fnest++ - body := p.stmtList() - p.fnest-- - - if body == nil { - body = []Stmt{new(EmptyStmt)} - } - return body -} - // Result = Parameters | Type . func (p *parser) funcResult() []*Field { if trace { @@ -1435,10 +1449,9 @@ func (p *parser) paramList() (list []*Field) { } pos := p.pos() - p.want(_Lparen) var named int // number of parameters that have an explicit name and type - for p.tok != _EOF && p.tok != _Rparen { + p.list(_Lparen, _Comma, _Rparen, func() bool { if par := p.paramDeclOrNil(); par != nil { if debug && par.Name == nil && par.Type == nil { panic("parameter without name or type") @@ -1448,10 +1461,8 @@ func (p *parser) paramList() (list []*Field) { } list = append(list, par) } - if !p.ocomma(_Rparen) { - break - } - } + return false + }) // distribute parameter types if named == 0 { @@ -1490,7 +1501,6 @@ func (p *parser) paramList() (list []*Field) { } } - p.want(_Rparen) return } @@ -1671,6 +1681,7 @@ func (p *parser) labeledStmtOrNil(label *Name) Stmt { return nil // avoids follow-on errors (see e.g., fixedbugs/bug274.go) } +// context must be a non-empty string unless we know that p.tok == _Lbrace. func (p *parser) blockStmt(context string) *BlockStmt { if trace { defer p.trace("blockStmt")() @@ -1679,10 +1690,14 @@ func (p *parser) blockStmt(context string) *BlockStmt { s := new(BlockStmt) s.pos = p.pos() + // people coming from C may forget that braces are mandatory in Go if !p.got(_Lbrace) { p.syntax_error("expecting { after " + context) p.advance(_Name, _Rbrace) - // TODO(gri) may be better to return here than to continue (#19663) + s.Rbrace = p.pos() // in case we found "}" + if p.got(_Rbrace) { + return s + } } s.List = p.stmtList() @@ -1720,9 +1735,6 @@ func (p *parser) forStmt() Stmt { return s } -// TODO(gri) This function is now so heavily influenced by the keyword that -// it may not make sense anymore to combine all three cases. 
It -// may be simpler to just split it up for each statement kind. func (p *parser) header(keyword token) (init SimpleStmt, cond Expr, post SimpleStmt) { p.want(keyword) @@ -1755,10 +1767,10 @@ func (p *parser) header(keyword token) (init SimpleStmt, cond Expr, post SimpleS pos src.Pos lit string // valid if pos.IsKnown() } - if p.tok == _Semi { + if p.tok != _Lbrace { semi.pos = p.pos() semi.lit = p.lit - p.next() + p.want(_Semi) if keyword == _For { if p.tok != _Semi { if p.tok == _Lbrace { @@ -2056,46 +2068,31 @@ func (p *parser) stmtList() (l []Stmt) { break } l = append(l, s) - // customized version of osemi: - // ';' is optional before a closing ')' or '}' - if p.tok == _Rparen || p.tok == _Rbrace { - continue - } - if !p.got(_Semi) { + // ";" is optional before "}" + if !p.got(_Semi) && p.tok != _Rbrace { p.syntax_error("at end of statement") - p.advance(_Semi, _Rbrace) + p.advance(_Semi, _Rbrace, _Case, _Default) + p.got(_Semi) // avoid spurious empty statement } } return } // Arguments = "(" [ ( ExpressionList | Type [ "," ExpressionList ] ) [ "..." ] [ "," ] ] ")" . -func (p *parser) call(fun Expr) *CallExpr { +func (p *parser) argList() (list []Expr, hasDots bool) { if trace { - defer p.trace("call")() + defer p.trace("argList")() } - // call or conversion - // convtype '(' expr ocomma ')' - c := new(CallExpr) - c.pos = p.pos() - c.Fun = fun - - p.want(_Lparen) p.xnest++ - - for p.tok != _EOF && p.tok != _Rparen { - c.ArgList = append(c.ArgList, p.expr()) - c.HasDots = p.got(_DotDotDot) - if !p.ocomma(_Rparen) || c.HasDots { - break - } - } - + p.list(_Lparen, _Comma, _Rparen, func() bool { + list = append(list, p.expr()) + hasDots = p.got(_DotDotDot) + return hasDots + }) p.xnest-- - p.want(_Rparen) - return c + return } // ---------------------------------------------------------------------------- @@ -2182,40 +2179,6 @@ func (p *parser) exprList() Expr { return x } -// osemi parses an optional semicolon. 
-func (p *parser) osemi(follow token) bool { - switch p.tok { - case _Semi: - p.next() - return true - - case _Rparen, _Rbrace: - // semicolon is optional before ) or } - return true - } - - p.syntax_error("expecting semicolon, newline, or " + tokstring(follow)) - p.advance(follow) - return false -} - -// ocomma parses an optional comma. -func (p *parser) ocomma(follow token) bool { - switch p.tok { - case _Comma: - p.next() - return true - - case _Rparen, _Rbrace: - // comma is optional before ) or } - return true - } - - p.syntax_error("expecting comma or " + tokstring(follow)) - p.advance(follow) - return false -} - // unparen removes all parentheses around an expression. func unparen(x Expr) Expr { for { diff --git a/src/cmd/compile/internal/syntax/parser_test.go b/src/cmd/compile/internal/syntax/parser_test.go index 0478088ec89..309f1333f4e 100644 --- a/src/cmd/compile/internal/syntax/parser_test.go +++ b/src/cmd/compile/internal/syntax/parser_test.go @@ -221,7 +221,7 @@ func TestLineDirectives(t *testing.T) { if msg := perr.Msg; msg != test.msg { t.Errorf("%s: got msg = %q; want %q", test.src, msg, test.msg) } - if filename := perr.Pos.RelFilename(); filename != test.filename { + if filename := perr.Pos.AbsFilename(); filename != test.filename { t.Errorf("%s: got filename = %q; want %q", test.src, filename, test.filename) } if line := perr.Pos.RelLine(); line != test.line+linebase { diff --git a/src/cmd/compile/internal/syntax/tokens.go b/src/cmd/compile/internal/syntax/tokens.go index bd0118a141e..e49a027029b 100644 --- a/src/cmd/compile/internal/syntax/tokens.go +++ b/src/cmd/compile/internal/syntax/tokens.go @@ -25,7 +25,7 @@ const ( _Arrow _Star - // delimitors + // delimiters _Lparen _Lbrack _Lbrace @@ -97,7 +97,7 @@ var tokstrings = [...]string{ _Arrow: "<-", _Star: "*", - // delimitors + // delimiters _Lparen: "(", _Lbrack: "[", _Lbrace: "{", diff --git a/src/cmd/compile/internal/types/etype_string.go b/src/cmd/compile/internal/types/etype_string.go 
new file mode 100644 index 00000000000..acb10119f4d --- /dev/null +++ b/src/cmd/compile/internal/types/etype_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type EType -trimprefix T"; DO NOT EDIT. + +package types + +import "fmt" + +const _EType_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTR32PTR64FUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRIDEALNILBLANKFUNCARGSCHANARGSDDDFIELDSSATUPLENTYPE" + +var _EType_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 101, 106, 110, 115, 120, 126, 130, 133, 138, 142, 145, 151, 160, 165, 168, 173, 181, 189, 197, 200, 205, 210} + +func (i EType) String() string { + if i >= EType(len(_EType_index)-1) { + return fmt.Sprintf("EType(%d)", i) + } + return _EType_name[_EType_index[i]:_EType_index[i+1]] +} diff --git a/src/cmd/compile/internal/types/scope.go b/src/cmd/compile/internal/types/scope.go index 072b8089b0c..aef3b3bbe0a 100644 --- a/src/cmd/compile/internal/types/scope.go +++ b/src/cmd/compile/internal/types/scope.go @@ -4,65 +4,74 @@ package types +import "cmd/internal/src" + // Declaration stack & operations var blockgen int32 = 1 // max block number var Block int32 // current block number +// A dsym stores a symbol's shadowed declaration so that it can be +// restored once the block scope ends. +type dsym struct { + sym *Sym // sym == nil indicates stack mark + def *Node + block int32 + lastlineno src.XPos // last declaration for diagnostic +} + // dclstack maintains a stack of shadowed symbol declarations so that // Popdcl can restore their declarations when a block scope ends. -// -// The Syms on this stack are not "real" Syms as they don't actually -// represent object names. Sym is just a convenient type for saving shadowed -// Sym definitions, and only a subset of its fields are actually used. 
-var dclstack []*Sym - -func dcopy(a, b *Sym) { - a.Pkg = b.Pkg - a.Name = b.Name - a.Def = b.Def - a.Block = b.Block - a.Lastlineno = b.Lastlineno -} - -func push() *Sym { - d := new(Sym) - dclstack = append(dclstack, d) - return d -} +var dclstack []dsym // Pushdcl pushes the current declaration for symbol s (if any) so that // it can be shadowed by a new declaration within a nested block scope. func Pushdcl(s *Sym) { - dcopy(push(), s) + dclstack = append(dclstack, dsym{ + sym: s, + def: s.Def, + block: s.Block, + lastlineno: s.Lastlineno, + }) } // Popdcl pops the innermost block scope and restores all symbol declarations // to their previous state. func Popdcl() { for i := len(dclstack); i > 0; i-- { - d := dclstack[i-1] - if d.Name == "" { + d := &dclstack[i-1] + s := d.sym + if s == nil { // pop stack mark - Block = d.Block + Block = d.block dclstack = dclstack[:i-1] return } - dcopy(d.Pkg.Lookup(d.Name), d) + + s.Def = d.def + s.Block = d.block + s.Lastlineno = d.lastlineno + + // Clear dead pointer fields. + d.sym = nil + d.def = nil } Fatalf("popdcl: no stack mark") } // Markdcl records the start of a new block scope for declarations. 
func Markdcl() { - push().Block = Block // stack mark (Name == "") + dclstack = append(dclstack, dsym{ + sym: nil, // stack mark + block: Block, + }) blockgen++ Block = blockgen } func IsDclstackValid() bool { for _, d := range dclstack { - if d.Name == "" { + if d.sym == nil { return false } } diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go index f79b07b16c7..1b9d01dab56 100644 --- a/src/cmd/compile/internal/types/sym.go +++ b/src/cmd/compile/internal/types/sym.go @@ -21,9 +21,10 @@ type Sym struct { Importdef *Pkg // where imported definition was found Linkname string // link name + Pkg *Pkg + Name string // object name + // saved and restored by dcopy - Pkg *Pkg - Name string // object name Def *Node // definition: ONAME OTYPE OPACK or OLITERAL Block int32 // blocknumber to catch redeclaration Lastlineno src.XPos // last declaration for diagnostic diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 5c44e625856..e62d324cdee 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -15,11 +15,13 @@ import ( // TODO(gri) try to eliminate soon type Node struct{ _ int } +//go:generate stringer -type EType -trimprefix T + // EType describes a kind of type. 
type EType uint8 const ( - Txxx = iota + Txxx EType = iota TINT8 TUINT8 @@ -162,22 +164,19 @@ type Type struct { } const ( - typeLocal = 1 << iota // created in this file - typeNotInHeap // type cannot be heap allocated + typeNotInHeap = 1 << iota // type cannot be heap allocated typeBroke // broken type definition typeNoalg // suppress hash and eq algorithm generation typeDeferwidth typeRecur ) -func (t *Type) Local() bool { return t.flags&typeLocal != 0 } func (t *Type) NotInHeap() bool { return t.flags&typeNotInHeap != 0 } func (t *Type) Broke() bool { return t.flags&typeBroke != 0 } func (t *Type) Noalg() bool { return t.flags&typeNoalg != 0 } func (t *Type) Deferwidth() bool { return t.flags&typeDeferwidth != 0 } func (t *Type) Recur() bool { return t.flags&typeRecur != 0 } -func (t *Type) SetLocal(b bool) { t.flags.set(typeLocal, b) } func (t *Type) SetNotInHeap(b bool) { t.flags.set(typeNotInHeap, b) } func (t *Type) SetBroke(b bool) { t.flags.set(typeBroke, b) } func (t *Type) SetNoalg(b bool) { t.flags.set(typeNoalg, b) } @@ -585,28 +584,28 @@ func SubstAny(t *Type, types *[]*Type) *Type { case TPTR32, TPTR64: elem := SubstAny(t.Elem(), types) if elem != t.Elem() { - t = t.Copy() + t = t.copy() t.Extra = Ptr{Elem: elem} } case TARRAY: elem := SubstAny(t.Elem(), types) if elem != t.Elem() { - t = t.Copy() + t = t.copy() t.Extra.(*Array).Elem = elem } case TSLICE: elem := SubstAny(t.Elem(), types) if elem != t.Elem() { - t = t.Copy() + t = t.copy() t.Extra = Slice{Elem: elem} } case TCHAN: elem := SubstAny(t.Elem(), types) if elem != t.Elem() { - t = t.Copy() + t = t.copy() t.Extra.(*Chan).Elem = elem } @@ -614,7 +613,7 @@ func SubstAny(t *Type, types *[]*Type) *Type { key := SubstAny(t.Key(), types) val := SubstAny(t.Val(), types) if key != t.Key() || val != t.Val() { - t = t.Copy() + t = t.copy() t.Extra.(*Map).Key = key t.Extra.(*Map).Val = val } @@ -624,7 +623,7 @@ func SubstAny(t *Type, types *[]*Type) *Type { params := SubstAny(t.Params(), types) 
results := SubstAny(t.Results(), types) if recvs != t.Recvs() || params != t.Params() || results != t.Results() { - t = t.Copy() + t = t.copy() t.FuncType().Receiver = recvs t.FuncType().Results = results t.FuncType().Params = params @@ -645,7 +644,7 @@ func SubstAny(t *Type, types *[]*Type) *Type { nfs[i].Type = nft } if nfs != nil { - t = t.Copy() + t = t.copy() t.SetFields(nfs) } } @@ -653,8 +652,8 @@ func SubstAny(t *Type, types *[]*Type) *Type { return t } -// Copy returns a shallow copy of the Type. -func (t *Type) Copy() *Type { +// copy returns a shallow copy of the Type. +func (t *Type) copy() *Type { if t == nil { return nil } @@ -707,6 +706,10 @@ func (t *Type) Recvs() *Type { return t.FuncType().Receiver } func (t *Type) Params() *Type { return t.FuncType().Params } func (t *Type) Results() *Type { return t.FuncType().Results } +func (t *Type) NumRecvs() int { return t.FuncType().Receiver.NumFields() } +func (t *Type) NumParams() int { return t.FuncType().Params.NumFields() } +func (t *Type) NumResults() int { return t.FuncType().Results.NumFields() } + // Recv returns the receiver of function type t, if any. func (t *Type) Recv() *Field { s := t.Recvs() @@ -1317,6 +1320,23 @@ func (t *Type) SetNumElem(n int64) { at.Bound = n } +func (t *Type) NumComponents() int64 { + switch t.Etype { + case TSTRUCT: + if t.IsFuncArgStruct() { + Fatalf("NumComponents func arg struct") + } + var n int64 + for _, f := range t.FieldSlice() { + n += f.Type.NumComponents() + } + return n + case TARRAY: + return t.NumElem() * t.Elem().NumComponents() + } + return 1 +} + // ChanDir returns the direction of a channel type t. // The direction will be one of Crecv, Csend, or Cboth. func (t *Type) ChanDir() ChanDir { @@ -1346,7 +1366,14 @@ func (t *Type) IsUntyped() bool { return false } +// TODO(austin): We probably only need HasHeapPointer. See +// golang.org/cl/73412 for discussion. 
+ func Haspointers(t *Type) bool { + return Haspointers1(t, false) +} + +func Haspointers1(t *Type, ignoreNotInHeap bool) bool { switch t.Etype { case TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TUINTPTR, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL: @@ -1356,28 +1383,28 @@ func Haspointers(t *Type) bool { if t.NumElem() == 0 { // empty array has no pointers return false } - return Haspointers(t.Elem()) + return Haspointers1(t.Elem(), ignoreNotInHeap) case TSTRUCT: for _, t1 := range t.Fields().Slice() { - if Haspointers(t1.Type) { + if Haspointers1(t1.Type, ignoreNotInHeap) { return true } } return false + + case TPTR32, TPTR64, TSLICE: + return !(ignoreNotInHeap && t.Elem().NotInHeap()) } return true } -// HasPointer returns whether t contains heap pointer. -// This is used for write barrier insertion, so we ignore +// HasHeapPointer returns whether t contains a heap pointer. +// This is used for write barrier insertion, so it ignores // pointers to go:notinheap types. 
-func (t *Type) HasPointer() bool { - if t.IsPtr() && t.Elem().NotInHeap() { - return false - } - return Haspointers(t) +func (t *Type) HasHeapPointer() bool { + return Haspointers1(t, true) } func (t *Type) Symbol() *obj.LSym { @@ -1408,9 +1435,9 @@ func FakeRecvType() *Type { } var ( - TypeInvalid *Type = newSSA("invalid") - TypeMem *Type = newSSA("mem") - TypeFlags *Type = newSSA("flags") - TypeVoid *Type = newSSA("void") - TypeInt128 *Type = newSSA("int128") + TypeInvalid = newSSA("invalid") + TypeMem = newSSA("mem") + TypeFlags = newSSA("flags") + TypeVoid = newSSA("void") + TypeInt128 = newSSA("int128") ) diff --git a/src/cmd/compile/internal/types/utils.go b/src/cmd/compile/internal/types/utils.go index 796cd449de4..0eac402f8e0 100644 --- a/src/cmd/compile/internal/types/utils.go +++ b/src/cmd/compile/internal/types/utils.go @@ -76,51 +76,3 @@ func (f *bitset8) set(mask uint8, b bool) { *(*uint8)(f) &^= mask } } - -var etnames = []string{ - Txxx: "Txxx", - TINT: "INT", - TUINT: "UINT", - TINT8: "INT8", - TUINT8: "UINT8", - TINT16: "INT16", - TUINT16: "UINT16", - TINT32: "INT32", - TUINT32: "UINT32", - TINT64: "INT64", - TUINT64: "UINT64", - TUINTPTR: "UINTPTR", - TFLOAT32: "FLOAT32", - TFLOAT64: "FLOAT64", - TCOMPLEX64: "COMPLEX64", - TCOMPLEX128: "COMPLEX128", - TBOOL: "BOOL", - TPTR32: "PTR32", - TPTR64: "PTR64", - TFUNC: "FUNC", - TARRAY: "ARRAY", - TSLICE: "SLICE", - TSTRUCT: "STRUCT", - TCHAN: "CHAN", - TMAP: "MAP", - TINTER: "INTER", - TFORW: "FORW", - TSTRING: "STRING", - TUNSAFEPTR: "TUNSAFEPTR", - TANY: "ANY", - TIDEAL: "TIDEAL", - TNIL: "TNIL", - TBLANK: "TBLANK", - TFUNCARGS: "TFUNCARGS", - TCHANARGS: "TCHANARGS", - TDDDFIELD: "TDDDFIELD", - TSSA: "TSSA", - TTUPLE: "TTUPLE", -} - -func (et EType) String() string { - if int(et) < len(etnames) && etnames[et] != "" { - return etnames[et] - } - return fmt.Sprintf("E-%d", et) -} diff --git a/src/cmd/compile/internal/x86/387.go b/src/cmd/compile/internal/x86/387.go index cdac000648d..7a3622405ce 100644 
--- a/src/cmd/compile/internal/x86/387.go +++ b/src/cmd/compile/internal/x86/387.go @@ -46,6 +46,9 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { case ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1: p.From.Scale = 1 p.From.Index = v.Args[1].Reg() + if p.From.Index == x86.REG_SP { + p.From.Reg, p.From.Index = p.From.Index, p.From.Reg + } case ssa.Op386MOVSSloadidx4: p.From.Scale = 4 p.From.Index = v.Args[1].Reg() @@ -95,6 +98,9 @@ func ssaGenValue387(s *gc.SSAGenState, v *ssa.Value) { case ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1: p.To.Scale = 1 p.To.Index = v.Args[1].Reg() + if p.To.Index == x86.REG_SP { + p.To.Reg, p.To.Index = p.To.Index, p.To.Reg + } case ssa.Op386MOVSSstoreidx4: p.To.Scale = 4 p.To.Index = v.Args[1].Reg() diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go index 54a76bda2f8..69217f29159 100644 --- a/src/cmd/compile/internal/x86/ssa.go +++ b/src/cmd/compile/internal/x86/ssa.go @@ -426,16 +426,23 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Reg = v.Args[0].Reg() case ssa.Op386MOVLconst: x := v.Reg() + + // If flags aren't live (indicated by v.Aux == nil), + // then we can rewrite MOV $0, AX into XOR AX, AX. + if v.AuxInt == 0 && v.Aux == nil { + p := s.Prog(x86.AXORL) + p.From.Type = obj.TYPE_REG + p.From.Reg = x + p.To.Type = obj.TYPE_REG + p.To.Reg = x + break + } + p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG p.To.Reg = x - // If flags are live at this instruction, suppress the - // MOV $0,AX -> XOR AX,AX optimization. - if v.Aux != nil { - p.Mark |= x86.PRESERVEFLAGS - } case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst: x := v.Reg() p := s.Prog(v.Op.Asm()) @@ -604,7 +611,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { p.To.Sym = gc.Duffcopy p.To.Offset = v.AuxInt - case ssa.OpCopy, ssa.Op386MOVLconvert: // TODO: use MOVLreg for reg->reg copies instead of OpCopy? 
+ case ssa.Op386MOVLconvert: + if v.Args[0].Reg() != v.Reg() { + v.Fatalf("MOVLconvert should be a no-op") + } + case ssa.OpCopy: // TODO: use MOVLreg for reg->reg copies instead of OpCopy? if v.Type.IsMemory() { return } @@ -662,6 +673,24 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { q.To.Type = obj.TYPE_REG q.To.Reg = r } + + case ssa.Op386LoweredGetCallerPC: + p := s.Prog(x86.AMOVL) + p.From.Type = obj.TYPE_MEM + p.From.Offset = -4 // PC is stored 4 bytes below first parameter. + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.Op386LoweredGetCallerSP: + // caller's SP is the address of the first arg + p := s.Prog(x86.AMOVL) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on 386, just to be consistent with other architectures + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLinter: s.Call(v) case ssa.Op386NEGL, @@ -724,7 +753,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { case ssa.Op386LoweredNilCheck: // Issue a load which will fault if the input is nil. // TODO: We currently use the 2-byte instruction TESTB AX, (reg). - // Should we use the 3-byte TESTB $0, (reg) instead? It is larger + // Should we use the 3-byte TESTB $0, (reg) instead? It is larger // but it doesn't have false dependency on AX. // Or maybe allocate an output register and use MOVL (reg),reg2 ? // That trades clobbering flags for clobbering a register. 
diff --git a/src/cmd/cover/cover.go b/src/cmd/cover/cover.go index ee140702d3d..500027ee0d5 100644 --- a/src/cmd/cover/cover.go +++ b/src/cmd/cover/cover.go @@ -10,16 +10,16 @@ import ( "fmt" "go/ast" "go/parser" - "go/printer" "go/token" "io" "io/ioutil" "log" "os" - "path/filepath" "sort" "strconv" - "strings" + + "cmd/internal/edit" + "cmd/internal/objabi" ) const usageMessage = "" + @@ -59,7 +59,7 @@ var ( var profile string // The profile to read; the value of -html or -func -var counterStmt func(*File, ast.Expr) ast.Stmt +var counterStmt func(*File, string) string const ( atomicPackagePath = "sync/atomic" @@ -67,6 +67,7 @@ const ( ) func main() { + objabi.AddVersionFlag() flag.Usage = usage flag.Parse() @@ -151,12 +152,48 @@ type Block struct { // File is a wrapper for the state of a file used in the parser. // The basic parse tree walker is a method of this type. type File struct { - fset *token.FileSet - name string // Name of file. - astFile *ast.File - blocks []Block - atomicPkg string // Package name for "sync/atomic" in this file. - directives map[*ast.Comment]bool // Map of compiler directives to whether it's processed in ast.Visitor or not. + fset *token.FileSet + name string // Name of file. + astFile *ast.File + blocks []Block + content []byte + edit *edit.Buffer +} + +// findText finds text in the original source, starting at pos. +// It correctly skips over comments and assumes it need not +// handle quoted strings. +// It returns a byte offset within f.src. 
+func (f *File) findText(pos token.Pos, text string) int { + b := []byte(text) + start := f.offset(pos) + i := start + s := f.content + for i < len(s) { + if bytes.HasPrefix(s[i:], b) { + return i + } + if i+2 <= len(s) && s[i] == '/' && s[i+1] == '/' { + for i < len(s) && s[i] != '\n' { + i++ + } + continue + } + if i+2 <= len(s) && s[i] == '/' && s[i+1] == '*' { + for i += 2; ; i++ { + if i+2 > len(s) { + return 0 + } + if s[i] == '*' && s[i+1] == '/' { + i += 2 + break + } + } + continue + } + i++ + } + return -1 } // Visit implements the ast.Visitor interface. @@ -169,18 +206,18 @@ func (f *File) Visit(node ast.Node) ast.Visitor { case *ast.CaseClause: // switch for _, n := range n.List { clause := n.(*ast.CaseClause) - clause.Body = f.addCounters(clause.Colon+1, clause.End(), clause.Body, false) + f.addCounters(clause.Colon+1, clause.Colon+1, clause.End(), clause.Body, false) } return f case *ast.CommClause: // select for _, n := range n.List { clause := n.(*ast.CommClause) - clause.Body = f.addCounters(clause.Colon+1, clause.End(), clause.Body, false) + f.addCounters(clause.Colon+1, clause.Colon+1, clause.End(), clause.Body, false) } return f } } - n.List = f.addCounters(n.Lbrace, n.Rbrace+1, n.List, true) // +1 to step past closing brace. + f.addCounters(n.Lbrace, n.Lbrace+1, n.Rbrace+1, n.List, true) // +1 to step past closing brace. 
case *ast.IfStmt: if n.Init != nil { ast.Walk(f, n.Init) @@ -201,6 +238,13 @@ func (f *File) Visit(node ast.Node) ast.Visitor { // if y { // } // } + f.edit.Insert(f.offset(n.Body.End()), "else{") + elseOffset := f.findText(n.Body.End(), "else") + if elseOffset < 0 { + panic("lost else") + } + f.edit.Delete(elseOffset, elseOffset+4) + f.edit.Insert(f.offset(n.Else.End()), "}") switch stmt := n.Else.(type) { case *ast.IfStmt: block := &ast.BlockStmt{ @@ -241,21 +285,6 @@ func (f *File) Visit(node ast.Node) ast.Visitor { ast.Walk(f, n.Assign) return nil } - case *ast.CommentGroup: - var list []*ast.Comment - // Drop all but the //go: comments, some of which are semantically important. - // We drop all others because they can appear in places that cause our counters - // to appear in syntactically incorrect places. //go: appears at the beginning of - // the line and is syntactically safe. - for _, c := range n.List { - if f.isDirective(c) { - list = append(list, c) - - // Mark compiler directive as handled. - f.directives[c] = true - } - } - n.List = list } return f } @@ -269,91 +298,8 @@ func unquote(s string) string { return t } -// addImport adds an import for the specified path, if one does not already exist, and returns -// the local package name. -func (f *File) addImport(path string) string { - // Does the package already import it? - for _, s := range f.astFile.Imports { - if unquote(s.Path.Value) == path { - if s.Name != nil { - return s.Name.Name - } - return filepath.Base(path) - } - } - newImport := &ast.ImportSpec{ - Name: ast.NewIdent(atomicPackageName), - Path: &ast.BasicLit{ - Kind: token.STRING, - Value: fmt.Sprintf("%q", path), - }, - } - impDecl := &ast.GenDecl{ - Tok: token.IMPORT, - Specs: []ast.Spec{ - newImport, - }, - } - // Make the new import the first Decl in the file. 
- astFile := f.astFile - astFile.Decls = append(astFile.Decls, nil) - copy(astFile.Decls[1:], astFile.Decls[0:]) - astFile.Decls[0] = impDecl - astFile.Imports = append(astFile.Imports, newImport) - - // Now refer to the package, just in case it ends up unused. - // That is, append to the end of the file the declaration - // var _ = _cover_atomic_.AddUint32 - reference := &ast.GenDecl{ - Tok: token.VAR, - Specs: []ast.Spec{ - &ast.ValueSpec{ - Names: []*ast.Ident{ - ast.NewIdent("_"), - }, - Values: []ast.Expr{ - &ast.SelectorExpr{ - X: ast.NewIdent(atomicPackageName), - Sel: ast.NewIdent("AddUint32"), - }, - }, - }, - }, - } - astFile.Decls = append(astFile.Decls, reference) - return atomicPackageName -} - var slashslash = []byte("//") -// initialComments returns the prefix of content containing only -// whitespace and line comments. Any +build directives must appear -// within this region. This approach is more reliable than using -// go/printer to print a modified AST containing comments. -// -func initialComments(content []byte) []byte { - // Derived from go/build.Context.shouldBuild. - end := 0 - p := content - for len(p) > 0 { - line := p - if i := bytes.IndexByte(line, '\n'); i >= 0 { - line, p = line[:i], p[i+1:] - } else { - p = p[len(p):] - } - line = bytes.TrimSpace(line) - if len(line) == 0 { // Blank line. - end = len(content) - len(p) - continue - } - if !bytes.HasPrefix(line, slashslash) { // Not comment line. - break - } - } - return content[:end] -} - func annotate(name string) { fset := token.NewFileSet() content, err := ioutil.ReadFile(name) @@ -366,26 +312,25 @@ func annotate(name string) { } file := &File{ - fset: fset, - name: name, - astFile: parsedFile, - directives: map[*ast.Comment]bool{}, + fset: fset, + name: name, + content: content, + edit: edit.NewBuffer(content), + astFile: parsedFile, } if *mode == "atomic" { - file.atomicPkg = file.addImport(atomicPackagePath) + // Add import of sync/atomic immediately after package clause. 
+ // We do this even if there is an existing import, because the + // existing import may be shadowed at any given place we want + // to refer to it, and our name (_cover_atomic_) is less likely to + // be shadowed. + file.edit.Insert(file.offset(file.astFile.Name.End()), + fmt.Sprintf("; import %s %q", atomicPackageName, atomicPackagePath)) } - for _, cg := range parsedFile.Comments { - for _, c := range cg.List { - if file.isDirective(c) { - file.directives[c] = false - } - } - } - // Remove comments. Or else they interfere with new AST. - parsedFile.Comments = nil - ast.Walk(file, file.astFile) + newContent := file.edit.Bytes() + fd := os.Stdout if *output != "" { var err error @@ -394,92 +339,33 @@ func annotate(name string) { log.Fatalf("cover: %s", err) } } - fd.Write(initialComments(content)) // Retain '// +build' directives. - // Retain compiler directives that are not processed in ast.Visitor. - // Some compiler directives like "go:linkname" and "go:cgo_" - // can be not attached to anything in the tree and hence will not be printed by printer. - // So, we have to explicitly print them here. - for cd, handled := range file.directives { - if !handled { - fmt.Fprintln(fd, cd.Text) - } - } + fmt.Fprintf(fd, "//line %s:1\n", name) + fd.Write(newContent) - file.print(fd) // After printing the source tree, add some declarations for the counters etc. // We could do this by adding to the tree, but it's easier just to print the text. file.addVariables(fd) } -func (f *File) print(w io.Writer) { - printer.Fprint(w, f.fset, f.astFile) -} - -// isDirective reports whether a comment is a compiler directive. -func (f *File) isDirective(c *ast.Comment) bool { - return strings.HasPrefix(c.Text, "//go:") && f.fset.Position(c.Slash).Column == 1 -} - -// intLiteral returns an ast.BasicLit representing the integer value. 
-func (f *File) intLiteral(i int) *ast.BasicLit { - node := &ast.BasicLit{ - Kind: token.INT, - Value: fmt.Sprint(i), - } - return node -} - -// index returns an ast.BasicLit representing the number of counters present. -func (f *File) index() *ast.BasicLit { - return f.intLiteral(len(f.blocks)) -} - // setCounterStmt returns the expression: __count[23] = 1. -func setCounterStmt(f *File, counter ast.Expr) ast.Stmt { - return &ast.AssignStmt{ - Lhs: []ast.Expr{counter}, - Tok: token.ASSIGN, - Rhs: []ast.Expr{f.intLiteral(1)}, - } +func setCounterStmt(f *File, counter string) string { + return fmt.Sprintf("%s = 1", counter) } // incCounterStmt returns the expression: __count[23]++. -func incCounterStmt(f *File, counter ast.Expr) ast.Stmt { - return &ast.IncDecStmt{ - X: counter, - Tok: token.INC, - } +func incCounterStmt(f *File, counter string) string { + return fmt.Sprintf("%s++", counter) } // atomicCounterStmt returns the expression: atomic.AddUint32(&__count[23], 1) -func atomicCounterStmt(f *File, counter ast.Expr) ast.Stmt { - return &ast.ExprStmt{ - X: &ast.CallExpr{ - Fun: &ast.SelectorExpr{ - X: ast.NewIdent(f.atomicPkg), - Sel: ast.NewIdent("AddUint32"), - }, - Args: []ast.Expr{&ast.UnaryExpr{ - Op: token.AND, - X: counter, - }, - f.intLiteral(1), - }, - }, - } +func atomicCounterStmt(f *File, counter string) string { + return fmt.Sprintf("%s.AddUint32(&%s, 1)", atomicPackageName, counter) } // newCounter creates a new counter expression of the appropriate form. 
-func (f *File) newCounter(start, end token.Pos, numStmt int) ast.Stmt { - counter := &ast.IndexExpr{ - X: &ast.SelectorExpr{ - X: ast.NewIdent(*varVar), - Sel: ast.NewIdent("Count"), - }, - Index: f.index(), - } - stmt := counterStmt(f, counter) +func (f *File) newCounter(start, end token.Pos, numStmt int) string { + stmt := counterStmt(f, fmt.Sprintf("%s.Count[%d]", *varVar, len(f.blocks))) f.blocks = append(f.blocks, Block{start, end, numStmt}) return stmt } @@ -496,15 +382,15 @@ func (f *File) newCounter(start, end token.Pos, numStmt int) ast.Stmt { // counters will be added before S1 and before S3. The block containing S2 // will be visited in a separate call. // TODO: Nested simple blocks get unnecessary (but correct) counters -func (f *File) addCounters(pos, blockEnd token.Pos, list []ast.Stmt, extendToClosingBrace bool) []ast.Stmt { +func (f *File) addCounters(pos, insertPos, blockEnd token.Pos, list []ast.Stmt, extendToClosingBrace bool) { // Special case: make sure we add a counter to an empty block. Can't do this below // or we will add a counter to an empty statement list after, say, a return statement. if len(list) == 0 { - return []ast.Stmt{f.newCounter(pos, blockEnd, 0)} + f.edit.Insert(f.offset(insertPos), f.newCounter(insertPos, blockEnd, 0)+";") + return } // We have a block (statement list), but it may have several basic blocks due to the // appearance of statements that affect the flow of control. - var newList []ast.Stmt for { // Find first statement that affects flow of control (break, continue, if, etc.). // It will be the last statement of this basic block. @@ -547,16 +433,15 @@ func (f *File) addCounters(pos, blockEnd token.Pos, list []ast.Stmt, extendToClo end = blockEnd } if pos != end { // Can have no source to cover if e.g. blocks abut. - newList = append(newList, f.newCounter(pos, end, last)) + f.edit.Insert(f.offset(insertPos), f.newCounter(pos, end, last)+";") } - newList = append(newList, list[0:last]...) 
list = list[last:] if len(list) == 0 { break } pos = list[0].Pos() + insertPos = pos } - return newList } // hasFuncLiteral reports the existence and position of the first func literal @@ -791,4 +676,10 @@ func (f *File) addVariables(w io.Writer) { // Close the struct initialization. fmt.Fprintf(w, "}\n") + + // Emit a reference to the atomic package to avoid + // import and not used error when there's no code in a file. + if *mode == "atomic" { + fmt.Fprintf(w, "var _ = %s.LoadUint32\n", atomicPackageName) + } } diff --git a/src/cmd/cover/cover_test.go b/src/cmd/cover/cover_test.go index 1584a73b591..79ddf4f4652 100644 --- a/src/cmd/cover/cover_test.go +++ b/src/cmd/cover/cover_test.go @@ -6,13 +6,18 @@ package main_test import ( "bytes" + "flag" "fmt" + "go/ast" + "go/parser" + "go/token" "internal/testenv" "io/ioutil" "os" "os/exec" "path/filepath" "regexp" + "strings" "testing" ) @@ -33,7 +38,7 @@ var ( coverProfile = filepath.Join(testdata, "profile.cov") ) -var debug = false // Keeps the rewritten files around if set. +var debug = flag.Bool("debug", false, "keep rewritten files for debugging") // Run this shell script, but do it in Go so it can be run by "go test". // @@ -59,7 +64,7 @@ func TestCover(t *testing.T) { } // defer removal of test_line.go - if !debug { + if !*debug { defer os.Remove(coverInput) } @@ -75,7 +80,7 @@ func TestCover(t *testing.T) { run(cmd, t) // defer removal of ./testdata/test_cover.go - if !debug { + if !*debug { defer os.Remove(coverOutput) } @@ -89,20 +94,138 @@ func TestCover(t *testing.T) { } // compiler directive must appear right next to function declaration. if got, err := regexp.MatchString(".*\n//go:nosplit\nfunc someFunction().*", string(file)); err != nil || !got { - t.Errorf("misplaced compiler directive: got=(%v, %v); want=(true; nil)", got, err) + t.Error("misplaced compiler directive") } // "go:linkname" compiler directive should be present. 
if got, err := regexp.MatchString(`.*go\:linkname some\_name some\_name.*`, string(file)); err != nil || !got { - t.Errorf("'go:linkname' compiler directive not found: got=(%v, %v); want=(true; nil)", got, err) + t.Error("'go:linkname' compiler directive not found") } - // No other comments should be present in generated code. - c := ".*// This comment shouldn't appear in generated go code.*" - if got, err := regexp.MatchString(c, string(file)); err != nil || got { - t.Errorf("non compiler directive comment %q found. got=(%v, %v); want=(false; nil)", c, got, err) + // Other comments should be preserved too. + c := ".*// This comment didn't appear in generated go code.*" + if got, err := regexp.MatchString(c, string(file)); err != nil || !got { + t.Errorf("non compiler directive comment %q not found", c) } } +// TestDirectives checks that compiler directives are preserved and positioned +// correctly. Directives that occur before top-level declarations should remain +// above those declarations, even if they are not part of the block of +// documentation comments. +func TestDirectives(t *testing.T) { + // Read the source file and find all the directives. We'll keep + // track of whether each one has been seen in the output. + testDirectives := filepath.Join(testdata, "directives.go") + source, err := ioutil.ReadFile(testDirectives) + if err != nil { + t.Fatal(err) + } + sourceDirectives := findDirectives(source) + + // go tool cover -mode=atomic ./testdata/directives.go + cmd := exec.Command(testenv.GoToolPath(t), "tool", "cover", "-mode=atomic", testDirectives) + cmd.Stderr = os.Stderr + output, err := cmd.Output() + if err != nil { + t.Fatal(err) + } + + // Check that all directives are present in the output. 
+ outputDirectives := findDirectives(output) + foundDirective := make(map[string]bool) + for _, p := range sourceDirectives { + foundDirective[p.name] = false + } + for _, p := range outputDirectives { + if found, ok := foundDirective[p.name]; !ok { + t.Errorf("unexpected directive in output: %s", p.text) + } else if found { + t.Errorf("directive found multiple times in output: %s", p.text) + } + foundDirective[p.name] = true + } + for name, found := range foundDirective { + if !found { + t.Errorf("missing directive: %s", name) + } + } + + // Check that directives that start with the name of top-level declarations + // come before the beginning of the named declaration and after the end + // of the previous declaration. + fset := token.NewFileSet() + astFile, err := parser.ParseFile(fset, testDirectives, output, 0) + if err != nil { + t.Fatal(err) + } + + prevEnd := 0 + for _, decl := range astFile.Decls { + var name string + switch d := decl.(type) { + case *ast.FuncDecl: + name = d.Name.Name + case *ast.GenDecl: + if len(d.Specs) == 0 { + // An empty group declaration. We still want to check that + // directives can be associated with it, so we make up a name + // to match directives in the test data. 
+ name = "_empty" + } else if spec, ok := d.Specs[0].(*ast.TypeSpec); ok { + name = spec.Name.Name + } + } + pos := fset.Position(decl.Pos()).Offset + end := fset.Position(decl.End()).Offset + if name == "" { + prevEnd = end + continue + } + for _, p := range outputDirectives { + if !strings.HasPrefix(p.name, name) { + continue + } + if p.offset < prevEnd || pos < p.offset { + t.Errorf("directive %s does not appear before definition %s", p.text, name) + } + } + prevEnd = end + } +} + +type directiveInfo struct { + text string // full text of the comment, not including newline + name string // text after //go: + offset int // byte offset of first slash in comment +} + +func findDirectives(source []byte) []directiveInfo { + var directives []directiveInfo + directivePrefix := []byte("\n//go:") + offset := 0 + for { + i := bytes.Index(source[offset:], directivePrefix) + if i < 0 { + break + } + i++ // skip newline + p := source[offset+i:] + j := bytes.IndexByte(p, '\n') + if j < 0 { + // reached EOF + j = len(p) + } + directive := directiveInfo{ + text: string(p[:j]), + name: string(p[len(directivePrefix)-1 : j]), + offset: offset + i, + } + directives = append(directives, directive) + offset += i + j + } + return directives +} + // Makes sure that `cover -func=profile.cov` reports accurate coverage. // Issue #20515. func TestCoverFunc(t *testing.T) { diff --git a/src/cmd/cover/doc.go b/src/cmd/cover/doc.go index 636d7e08d9a..e2c849419ab 100644 --- a/src/cmd/cover/doc.go +++ b/src/cmd/cover/doc.go @@ -14,6 +14,10 @@ than binary-rewriting coverage tools, but also a little less capable. For instance, it does not probe inside && and || expressions, and can be mildly confused by single statements with multiple function literals. +When computing coverage of a package that uses cgo, the cover tool +must be applied to the output of cgo preprocessing, not the input, +because cover deletes comments that are significant to cgo. 
+ For usage information, please see: go help testflag go tool cover -help diff --git a/src/cmd/cover/func.go b/src/cmd/cover/func.go index 05c7c12c994..1673fbf3150 100644 --- a/src/cmd/cover/func.go +++ b/src/cmd/cover/func.go @@ -113,6 +113,10 @@ type FuncVisitor struct { func (v *FuncVisitor) Visit(node ast.Node) ast.Visitor { switch n := node.(type) { case *ast.FuncDecl: + if n.Body == nil { + // Do not count declarations of assembly functions. + break + } start := v.fset.Position(n.Pos()) end := v.fset.Position(n.End()) fe := &FuncExtent{ diff --git a/src/cmd/cover/testdata/directives.go b/src/cmd/cover/testdata/directives.go new file mode 100644 index 00000000000..dfb7b8ec33f --- /dev/null +++ b/src/cmd/cover/testdata/directives.go @@ -0,0 +1,40 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is processed by the cover command, then a test verifies that +// all compiler directives are preserved and positioned appropriately. + +//go:a + +//go:b +package main + +//go:c1 + +//go:c2 +//doc +func c() { +} + +//go:d1 + +//doc +//go:d2 +type d int + +//go:e1 + +//doc +//go:e2 +type ( + e int + f int +) + +//go:_empty1 +//doc +//go:_empty2 +type () + +//go:f diff --git a/src/cmd/cover/testdata/test.go b/src/cmd/cover/testdata/test.go index 5effa2d7e90..0b03ef91ab5 100644 --- a/src/cmd/cover/testdata/test.go +++ b/src/cmd/cover/testdata/test.go @@ -282,7 +282,7 @@ loop: } } -// This comment shouldn't appear in generated go code. +// This comment didn't appear in generated go code. func haha() { // Needed for cover to add counter increment here. 
_ = 42 diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go index 76e42a4b6ab..e80d466d352 100644 --- a/src/cmd/dist/build.go +++ b/src/cmd/dist/build.go @@ -9,43 +9,46 @@ import ( "encoding/json" "flag" "fmt" + "log" "os" "os/exec" "path/filepath" "sort" "strings" "sync" + "time" ) // Initialization for any invocation. // The usual variables. var ( - goarch string - gobin string - gohostarch string - gohostos string - goos string - goarm string - go386 string - goroot string - goroot_final string - goextlinkenabled string - gogcflags string // For running built compiler - workdir string - tooldir string - oldgoos string - oldgoarch string - slash string - exe string - defaultcc string - defaultcflags string - defaultldflags string - defaultcxxtarget string - defaultcctarget string - defaultpkgconfigtarget string - rebuildall bool - defaultclang bool + goarch string + gobin string + gohostarch string + gohostos string + goos string + goarm string + go386 string + gomips string + goroot string + goroot_final string + goextlinkenabled string + gogcflags string // For running built compiler + goldflags string + workdir string + tooldir string + oldgoos string + oldgoarch string + exe string + defaultcc map[string]string + defaultcxx map[string]string + defaultcflags string + defaultldflags string + defaultpkgconfig string + + rebuildall bool + defaultclang bool vflag int // verbosity ) @@ -93,23 +96,21 @@ func find(p string, l []string) int { // xinit handles initialization of the various global state, like goroot and goarch. 
func xinit() { - goroot = os.Getenv("GOROOT") - if slash == "/" && len(goroot) > 1 || slash == `\` && len(goroot) > 3 { - // if not "/" or "c:\", then strip trailing path separator - goroot = strings.TrimSuffix(goroot, slash) - } - if goroot == "" { - fatal("$GOROOT must be set") - } - - goroot_final = os.Getenv("GOROOT_FINAL") - if goroot_final == "" { - goroot_final = goroot - } - - b := os.Getenv("GOBIN") + b := os.Getenv("GOROOT") if b == "" { - b = goroot + slash + "bin" + fatalf("$GOROOT must be set") + } + goroot = filepath.Clean(b) + + b = os.Getenv("GOROOT_FINAL") + if b == "" { + b = goroot + } + goroot_final = b + + b = os.Getenv("GOBIN") + if b == "" { + b = pathf("%s/bin", goroot) } gobin = b @@ -119,7 +120,7 @@ func xinit() { } goos = b if find(goos, okgoos) < 0 { - fatal("unknown $GOOS %s", goos) + fatalf("unknown $GOOS %s", goos) } b = os.Getenv("GOARM") @@ -138,9 +139,14 @@ func xinit() { } go386 = b - p := pathf("%s/src/all.bash", goroot) - if !isfile(p) { - fatal("$GOROOT is not set correctly or not exported\n"+ + b = os.Getenv("GOMIPS") + if b == "" { + b = "hardfloat" + } + gomips = b + + if p := pathf("%s/src/all.bash", goroot); !isfile(p) { + fatalf("$GOROOT is not set correctly or not exported\n"+ "\tGOROOT=%s\n"+ "\t%s does not exist", goroot, p) } @@ -149,9 +155,8 @@ func xinit() { if b != "" { gohostarch = b } - if find(gohostarch, okgoarch) < 0 { - fatal("unknown $GOHOSTARCH %s", gohostarch) + fatalf("unknown $GOHOSTARCH %s", gohostarch) } b = os.Getenv("GOARCH") @@ -160,62 +165,34 @@ func xinit() { } goarch = b if find(goarch, okgoarch) < 0 { - fatal("unknown $GOARCH %s", goarch) + fatalf("unknown $GOARCH %s", goarch) } b = os.Getenv("GO_EXTLINK_ENABLED") if b != "" { if b != "0" && b != "1" { - fatal("unknown $GO_EXTLINK_ENABLED %s", b) + fatalf("unknown $GO_EXTLINK_ENABLED %s", b) } goextlinkenabled = b } gogcflags = os.Getenv("BOOT_GO_GCFLAGS") - b = os.Getenv("CC") - if b == "" { - // Use clang on OS X, because gcc is deprecated 
there. - // Xcode for OS X 10.9 Mavericks will ship a fake "gcc" binary that - // actually runs clang. We prepare different command - // lines for the two binaries, so it matters what we call it. - // See golang.org/issue/5822. - if defaultclang { - b = "clang" - } else { - b = "gcc" - } + cc, cxx := "gcc", "g++" + if defaultclang { + cc, cxx = "clang", "clang++" } - defaultcc = b + defaultcc = compilerEnv("CC", cc) + defaultcxx = compilerEnv("CXX", cxx) defaultcflags = os.Getenv("CFLAGS") - defaultldflags = os.Getenv("LDFLAGS") - b = os.Getenv("CC_FOR_TARGET") - if b == "" { - b = defaultcc - } - defaultcctarget = b - - b = os.Getenv("CXX_FOR_TARGET") - if b == "" { - b = os.Getenv("CXX") - if b == "" { - if defaultclang { - b = "clang++" - } else { - b = "g++" - } - } - } - defaultcxxtarget = b - b = os.Getenv("PKG_CONFIG") if b == "" { b = "pkg-config" } - defaultpkgconfigtarget = b + defaultpkgconfig = b // For tools being invoked but also for os.ExpandEnv. os.Setenv("GO386", go386) @@ -224,9 +201,18 @@ func xinit() { os.Setenv("GOHOSTARCH", gohostarch) os.Setenv("GOHOSTOS", gohostos) os.Setenv("GOOS", goos) + os.Setenv("GOMIPS", gomips) os.Setenv("GOROOT", goroot) os.Setenv("GOROOT_FINAL", goroot_final) + // Use a build cache separate from the default user one. + // Also one that will be wiped out during startup, so that + // make.bash really does start from a clean slate. + // But if the user has specified no caching, don't cache. + if os.Getenv("GOCACHE") != "off" { + os.Setenv("GOCACHE", pathf("%s/pkg/obj/go-build", goroot)) + } + // Make the environment more predictable. os.Setenv("LANG", "C") os.Setenv("LANGUAGE", "en_US.UTF8") @@ -237,6 +223,55 @@ func xinit() { tooldir = pathf("%s/pkg/tool/%s_%s", goroot, gohostos, gohostarch) } +// compilerEnv returns a map from "goos/goarch" to the +// compiler setting to use for that platform. +// The entry for key "" covers any goos/goarch not explicitly set in the map. 
+// For example, compilerEnv("CC", "gcc") returns the C compiler settings +// read from $CC, defaulting to gcc. +// +// The result is a map because additional environment variables +// can be set to change the compiler based on goos/goarch settings. +// The following applies to all envNames but CC is assumed to simplify +// the presentation. +// +// If no environment variables are set, we use def for all goos/goarch. +// $CC, if set, applies to all goos/goarch but is overridden by the following. +// $CC_FOR_TARGET, if set, applies to all goos/goarch except gohostos/gohostarch, +// but is overridden by the following. +// If gohostos=goos and gohostarch=goarch, then $CC_FOR_TARGET applies even for gohostos/gohostarch. +// $CC_FOR_goos_goarch, if set, applies only to goos/goarch. +func compilerEnv(envName, def string) map[string]string { + m := map[string]string{"": def} + + if env := os.Getenv(envName); env != "" { + m[""] = env + } + if env := os.Getenv(envName + "_FOR_TARGET"); env != "" { + if gohostos != goos || gohostarch != goarch { + m[gohostos+"/"+gohostarch] = m[""] + } + m[""] = env + } + + for _, goos := range okgoos { + for _, goarch := range okgoarch { + if env := os.Getenv(envName + "_FOR_" + goos + "_" + goarch); env != "" { + m[goos+"/"+goarch] = env + } + } + } + + return m +} + +// compilerEnvLookup returns the compiler settings for goos/goarch in map m. +func compilerEnvLookup(m map[string]string, goos, goarch string) string { + if cc := m[goos+"/"+goarch]; cc != "" { + return cc + } + return m[""] +} + // rmworkdir deletes the work directory. 
func rmworkdir() { if vflag > 1 { @@ -251,27 +286,26 @@ func chomp(s string) string { } func branchtag(branch string) (tag string, precise bool) { - b := run(goroot, CheckExit, "git", "log", "--decorate=full", "--format=format:%d", "master.."+branch) + log := run(goroot, CheckExit, "git", "log", "--decorate=full", "--format=format:%d", "master.."+branch) tag = branch - for _, line := range splitlines(b) { + for row, line := range strings.Split(log, "\n") { // Each line is either blank, or looks like // (tag: refs/tags/go1.4rc2, refs/remotes/origin/release-branch.go1.4, refs/heads/release-branch.go1.4) // We need to find an element starting with refs/tags/. - i := strings.Index(line, " refs/tags/") + const s = " refs/tags/" + i := strings.Index(line, s) if i < 0 { continue } - i += len(" refs/tags/") - // The tag name ends at a comma or paren (prefer the first). - j := strings.Index(line[i:], ",") - if j < 0 { - j = strings.Index(line[i:], ")") - } + // Trim off known prefix. + line = line[i+len(s):] + // The tag name ends at a comma or paren. + j := strings.IndexAny(line, ",)") if j < 0 { continue // malformed line; ignore it } - tag = line[i : i+j] - if i == 0 { + tag = line[:j] + if row == 0 { precise = true // tag denotes HEAD } break @@ -292,6 +326,26 @@ func findgoversion() string { // its content if available, which is empty at this point. // Only use the VERSION file if it is non-empty. if b != "" { + // Some builders cross-compile the toolchain on linux-amd64 + // and then copy the toolchain to the target builder (say, linux-arm) + // for use there. But on non-release (devel) branches, the compiler + // used on linux-amd64 will be an amd64 binary, and the compiler + // shipped to linux-arm will be an arm binary, so they will have different + // content IDs (they are binaries for different architectures) and so the + // packages compiled by the running-on-amd64 compiler will appear + // stale relative to the running-on-arm compiler. 
Avoid this by setting + // the version string to something that doesn't begin with devel. + // Then the version string will be used in place of the content ID, + // and the packages will look up-to-date. + // TODO(rsc): Really the builders could be writing out a better VERSION file instead, + // but it is easier to change cmd/dist than to try to make changes to + // the builder while Brad is away. + if strings.HasPrefix(b, "devel") { + if hostType := os.Getenv("META_BUILDLET_HOST_TYPE"); strings.Contains(hostType, "-cross") { + fmt.Fprintf(os.Stderr, "warning: changing VERSION from %q to %q\n", b, "builder "+hostType) + b = "builder " + hostType + } + } return b } } @@ -306,7 +360,7 @@ func findgoversion() string { // Show a nicer error message if this isn't a Git repo. if !isGitRepo() { - fatal("FAILED: not a Git repo; must put a VERSION file in $GOROOT") + fatalf("FAILED: not a Git repo; must put a VERSION file in $GOROOT") } // Otherwise, use Git. @@ -343,8 +397,7 @@ func isGitRepo() bool { if !filepath.IsAbs(gitDir) { gitDir = filepath.Join(goroot, gitDir) } - fi, err := os.Stat(gitDir) - return err == nil && fi.IsDir() + return isdir(gitDir) } /* @@ -411,14 +464,10 @@ func setup() { } // Create object directory. - // We keep it in pkg/ so that all the generated binaries - // are in one tree. If pkg/obj/libgc.a exists, it is a dreg from - // before we used subdirectories of obj. Delete all of obj - // to clean up. - if p := pathf("%s/pkg/obj/libgc.a", goroot); isfile(p) { - xremoveall(pathf("%s/pkg/obj", goroot)) - } - p = pathf("%s/pkg/obj/%s_%s", goroot, gohostos, gohostarch) + // We used to use it for C objects. + // Now we use it for the build cache, to separate dist's cache + // from any other cache the user might have. + p = pathf("%s/pkg/obj/go-build", goroot) if rebuildall { xremoveall(p) } @@ -441,7 +490,7 @@ func setup() { // If $GOBIN is set and has a Go compiler, it must be cleaned. 
for _, char := range "56789" { - if isfile(pathf("%s%s%c%s", gobin, slash, char, "g")) { + if isfile(pathf("%s/%c%s", gobin, char, "g")) { for _, old := range oldtool { xremove(pathf("%s/%s", gobin, old)) } @@ -454,7 +503,7 @@ func setup() { if strings.HasPrefix(goversion, "release.") || (strings.HasPrefix(goversion, "go") && !strings.Contains(goversion, "beta")) { for _, dir := range unreleased { if p := pathf("%s/%s", goroot, dir); isdir(p) { - fatal("%s should not exist in release build", p) + fatalf("%s should not exist in release build", p) } } } @@ -467,6 +516,8 @@ func setup() { // deptab lists changes to the default dependencies for a given prefix. // deps ending in /* read the whole directory; deps beginning with - // exclude files with that prefix. +// Note that this table applies only to the build of cmd/go, +// after the main compiler bootstrap. var deptab = []struct { prefix string // prefix of target dep []string // dependency tweaks for targets with that prefix @@ -510,15 +561,35 @@ var gentab = []struct { // installed maps from a dir name (as given to install) to a chan // closed when the dir's package is installed. var installed = make(map[string]chan struct{}) +var installedMu sync.Mutex -// install installs the library, package, or binary associated with dir, -// which is relative to $GOROOT/src. func install(dir string) { - if ch, ok := installed[dir]; ok { - defer close(ch) + <-startInstall(dir) +} + +func startInstall(dir string) chan struct{} { + installedMu.Lock() + ch := installed[dir] + if ch == nil { + ch = make(chan struct{}) + installed[dir] = ch + go runInstall(dir, ch) } - for _, dep := range builddeps[dir] { - <-installed[dep] + installedMu.Unlock() + return ch +} + +// runInstall installs the library, package, or binary associated with dir, +// which is relative to $GOROOT/src. 
+func runInstall(dir string, ch chan struct{}) { + if dir == "net" || dir == "os/user" || dir == "crypto/x509" { + fatalf("go_bootstrap cannot depend on cgo package %s", dir) + } + + defer close(ch) + + if dir == "unsafe" { + return } if vflag > 0 { @@ -595,7 +666,7 @@ func install(dir string) { // Convert to absolute paths. for i, p := range files { - if !isabs(p) { + if !filepath.IsAbs(p) { files[i] = pathf("%s/%s", path, p) } } @@ -637,7 +708,7 @@ func install(dir string) { } // For package runtime, copy some files into the work space. - if dir == "runtime" || strings.HasPrefix(dir, "runtime/internal/") { + if dir == "runtime" { xmkdirall(pathf("%s/pkg/include", goroot)) // For use by assembly and C files. copyfile(pathf("%s/pkg/include/textflag.h", goroot), @@ -672,11 +743,23 @@ func install(dir string) { } // Did not rebuild p. if find(p, missing) >= 0 { - fatal("missing file %s", p) + fatalf("missing file %s", p) } built: } + // Make sure dependencies are installed. + var deps []string + for _, p := range gofiles { + deps = append(deps, readimports(p)...) + } + for _, dir1 := range deps { + startInstall(dir1) + } + for _, dir1 := range deps { + install(dir1) + } + if goos != gohostos || goarch != gohostarch { // We've generated the right files; the go command can do the build. if vflag > 1 { @@ -691,7 +774,7 @@ func install(dir string) { // For package runtime, this writes go_asm.h, which // the assembly files will need. pkg := dir - if strings.HasPrefix(dir, "cmd/") { + if strings.HasPrefix(dir, "cmd/") && strings.Count(dir, "/") == 1 { pkg = "main" } b := pathf("%s/_go_.a", workdir) @@ -729,6 +812,11 @@ func install(dir string) { "-D", "GOOS_GOARCH_" + goos + "_" + goarch, } + if goarch == "mips" || goarch == "mipsle" { + // Define GOMIPS_value from gomips. 
+ compile = append(compile, "-D", "GOMIPS_"+gomips) + } + doclean := true b := pathf("%s/%s", workdir, filepath.Base(p)) @@ -815,7 +903,7 @@ func shouldbuild(file, dir string) bool { } // Check file contents for // +build lines. - for _, p := range splitlines(readfile(file)) { + for _, p := range strings.Split(readfile(file), "\n") { p = strings.TrimSpace(p) if p == "" { continue @@ -837,7 +925,7 @@ func shouldbuild(file, dir string) bool { if !strings.Contains(p, "+build") { continue } - fields := splitfields(p[2:]) + fields := strings.Fields(p[2:]) if len(fields) < 1 || fields[0] != "+build" { continue } @@ -883,28 +971,21 @@ func dopack(dst, src string, extra []string) { writefile(bdst.String(), dst, 0) } -// builddeps records the build dependencies for the 'go bootstrap' command. -// It is a map[string][]string and generated by mkdeps.bash into deps.go. - -// buildlist is the list of directories being built, sorted by name. -var buildlist = makeBuildlist() - -func makeBuildlist() []string { - var all []string - for dir := range builddeps { - all = append(all, dir) - } - sort.Strings(all) - return all -} - var runtimegen = []string{ "zaexperiment.h", "zversion.go", } +// cleanlist is a list of packages with generated files and commands. +var cleanlist = []string{ + "runtime/internal/sys", + "cmd/cgo", + "cmd/go/internal/cfg", + "go/build", +} + func clean() { - for _, name := range buildlist { + for _, name := range cleanlist { path := pathf("%s/src/%s", goroot, name) // Remove generated files. 
for _, elem := range xreaddir(path) { @@ -946,24 +1027,6 @@ func clean() { * command implementations */ -func usage() { - xprintf("usage: go tool dist [command]\n" + - "Commands are:\n" + - "\n" + - "banner print installation banner\n" + - "bootstrap rebuild everything\n" + - "clean deletes all built files\n" + - "env [-p] print environment (-p: include $PATH)\n" + - "install [dir] install individual directory\n" + - "list [-json] list all supported platforms\n" + - "test [-h] run Go test(s)\n" + - "version print Go version\n" + - "\n" + - "All commands take -v flags to emit extra information.\n", - ) - xexit(2) -} - // The env command prints the default environment. func cmdenv() { path := flag.Bool("p", false, "emit updated PATH") @@ -979,8 +1042,6 @@ func cmdenv() { format = "set %s=%s\r\n" } - xprintf(format, "CC", defaultcc) - xprintf(format, "CC_FOR_TARGET", defaultcctarget) xprintf(format, "GOROOT", goroot) xprintf(format, "GOBIN", gobin) xprintf(format, "GOARCH", goarch) @@ -994,6 +1055,9 @@ func cmdenv() { if goarch == "386" { xprintf(format, "GO386", go386) } + if goarch == "mips" || goarch == "mipsle" { + xprintf(format, "GOMIPS", gomips) + } if *path { sep := ":" @@ -1004,14 +1068,79 @@ func cmdenv() { } } +var ( + timeLogEnabled = os.Getenv("GOBUILDTIMELOGFILE") != "" + timeLogMu sync.Mutex + timeLogFile *os.File + timeLogStart time.Time +) + +func timelog(op, name string) { + if !timeLogEnabled { + return + } + timeLogMu.Lock() + defer timeLogMu.Unlock() + if timeLogFile == nil { + f, err := os.OpenFile(os.Getenv("GOBUILDTIMELOGFILE"), os.O_RDWR|os.O_APPEND, 0666) + if err != nil { + log.Fatal(err) + } + buf := make([]byte, 100) + n, _ := f.Read(buf) + s := string(buf[:n]) + if i := strings.Index(s, "\n"); i >= 0 { + s = s[:i] + } + i := strings.Index(s, " start") + if i < 0 { + log.Fatalf("time log %s does not begin with start line", os.Getenv("GOBULDTIMELOGFILE")) + } + t, err := time.Parse(time.UnixDate, s[:i]) + if err != nil { + 
log.Fatalf("cannot parse time log line %q: %v", s, err) + } + timeLogStart = t + timeLogFile = f + } + t := time.Now() + fmt.Fprintf(timeLogFile, "%s %+.1fs %s %s\n", t.Format(time.UnixDate), t.Sub(timeLogStart).Seconds(), op, name) +} + +var toolchain = []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/link"} + // The bootstrap command runs a build from scratch, // stopping at having installed the go_bootstrap command. +// +// WARNING: This command runs after cmd/dist is built with Go 1.4. +// It rebuilds and installs cmd/dist with the new toolchain, so other +// commands (like "go tool dist test" in run.bash) can rely on bug fixes +// made since Go 1.4, but this function cannot. In particular, the uses +// of os/exec in this function cannot assume that +// cmd.Env = append(os.Environ(), "X=Y") +// sets $X to Y in the command's environment. That guarantee was +// added after Go 1.4, and in fact in Go 1.4 it was typically the opposite: +// if $X was already present in os.Environ(), most systems preferred +// that setting, not the new one. func cmdbootstrap() { + timelog("start", "dist bootstrap") + defer timelog("end", "dist bootstrap") + + var noBanner bool + var debug bool flag.BoolVar(&rebuildall, "a", rebuildall, "rebuild all") + flag.BoolVar(&debug, "d", debug, "enable debugging of bootstrap process") + flag.BoolVar(&noBanner, "no-banner", noBanner, "do not print banner") + xflagparse(0) + if debug { + // cmd/buildid is used in debug mode. + toolchain = append(toolchain, "cmd/buildid") + } + if isdir(pathf("%s/src/pkg", goroot)) { - fatal("\n\n"+ + fatalf("\n\n"+ "The Go package sources have moved to $GOROOT/src.\n"+ "*** %s still exists. ***\n"+ "It probably contains stale files that may confuse the build.\n"+ @@ -1026,9 +1155,13 @@ func cmdbootstrap() { setup() + timelog("build", "toolchain1") checkCC() bootstrapBuildTools() + // Remember old content of $GOROOT/bin for comparison below. 
+ oldBinFiles, _ := filepath.Glob(pathf("%s/bin/*", goroot)) + // For the main bootstrap, building for host os/arch. oldgoos = goos oldgoarch = goarch @@ -1039,48 +1172,175 @@ func cmdbootstrap() { os.Setenv("GOARCH", goarch) os.Setenv("GOOS", goos) - // TODO(rsc): Enable when appropriate. - // This step is only needed if we believe that the Go compiler built from Go 1.4 - // will produce different object files than the Go compiler built from itself. - // In the absence of bugs, that should not happen. - // And if there are bugs, they're more likely in the current development tree - // than in a standard release like Go 1.4, so don't do this rebuild by default. - if false { - xprintf("##### Building Go toolchain using itself.\n") - for _, dir := range buildlist { - installed[dir] = make(chan struct{}) - } - var wg sync.WaitGroup - for _, dir := range builddeps["cmd/go"] { - wg.Add(1) - dir := dir - go func() { - defer wg.Done() - install(dir) - }() - } - wg.Wait() + timelog("build", "go_bootstrap") + xprintf("Building Go bootstrap cmd/go (go_bootstrap) using Go toolchain1.\n") + install("runtime") // dependency not visible in sources; also sets up textflag.h + install("cmd/go") + if vflag > 0 { xprintf("\n") } - xprintf("##### Building go_bootstrap for host, %s/%s.\n", gohostos, gohostarch) - for _, dir := range buildlist { - installed[dir] = make(chan struct{}) + gogcflags = os.Getenv("GO_GCFLAGS") // we were using $BOOT_GO_GCFLAGS until now + goldflags = os.Getenv("GO_LDFLAGS") + goBootstrap := pathf("%s/go_bootstrap", tooldir) + cmdGo := pathf("%s/go", gobin) + if debug { + run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full") + copyfile(pathf("%s/compile1", tooldir), pathf("%s/compile", tooldir), writeExec) } - for _, dir := range buildlist { - go install(dir) + + // To recap, so far we have built the new toolchain + // (cmd/asm, cmd/cgo, cmd/compile, cmd/link) + // using Go 1.4's toolchain and go command. 
+ // Then we built the new go command (as go_bootstrap) + // using the new toolchain and our own build logic (above). + // + // toolchain1 = mk(new toolchain, go1.4 toolchain, go1.4 cmd/go) + // go_bootstrap = mk(new cmd/go, toolchain1, cmd/dist) + // + // The toolchain1 we built earlier is built from the new sources, + // but because it was built using cmd/go it has no build IDs. + // The eventually installed toolchain needs build IDs, so we need + // to do another round: + // + // toolchain2 = mk(new toolchain, toolchain1, go_bootstrap) + // + timelog("build", "toolchain2") + if vflag > 0 { + xprintf("\n") + } + xprintf("Building Go toolchain2 using go_bootstrap and Go toolchain1.\n") + os.Setenv("CC", compilerEnvLookup(defaultcc, goos, goarch)) + goInstall(goBootstrap, append([]string{"-i"}, toolchain...)...) + if debug { + run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full") + run("", ShowOutput|CheckExit, pathf("%s/buildid", tooldir), pathf("%s/pkg/%s_%s/runtime/internal/sys.a", goroot, goos, goarch)) + copyfile(pathf("%s/compile2", tooldir), pathf("%s/compile", tooldir), writeExec) } - <-installed["cmd/go"] - goos = oldgoos - goarch = oldgoarch - os.Setenv("GOARCH", goarch) - os.Setenv("GOOS", goos) + // Toolchain2 should be semantically equivalent to toolchain1, + // but it was built using the new compilers instead of the Go 1.4 compilers, + // so it should at the least run faster. Also, toolchain1 had no build IDs + // in the binaries, while toolchain2 does. In non-release builds, the + // toolchain's build IDs feed into constructing the build IDs of built targets, + // so in non-release builds, everything now looks out-of-date due to + // toolchain2 having build IDs - that is, due to the go command seeing + // that there are new compilers. 
In release builds, the toolchain's reported + // version is used in place of the build ID, and the go command does not + // see that change from toolchain1 to toolchain2, so in release builds, + // nothing looks out of date. + // To keep the behavior the same in both non-release and release builds, + // we force-install everything here. + // + // toolchain3 = mk(new toolchain, toolchain2, go_bootstrap) + // + timelog("build", "toolchain3") + if vflag > 0 { + xprintf("\n") + } + xprintf("Building Go toolchain3 using go_bootstrap and Go toolchain2.\n") + goInstall(goBootstrap, append([]string{"-a", "-i"}, toolchain...)...) + if debug { + run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full") + run("", ShowOutput|CheckExit, pathf("%s/buildid", tooldir), pathf("%s/pkg/%s_%s/runtime/internal/sys.a", goroot, goos, goarch)) + copyfile(pathf("%s/compile3", tooldir), pathf("%s/compile", tooldir), writeExec) + } + checkNotStale(goBootstrap, append(toolchain, "runtime/internal/sys")...) - // Build runtime for actual goos/goarch too. - if goos != gohostos || goarch != gohostarch { - installed["runtime"] = make(chan struct{}) - install("runtime") + if goos == oldgoos && goarch == oldgoarch { + // Common case - not setting up for cross-compilation. + timelog("build", "toolchain") + if vflag > 0 { + xprintf("\n") + } + xprintf("Building packages and commands for %s/%s.\n", goos, goarch) + } else { + // GOOS/GOARCH does not match GOHOSTOS/GOHOSTARCH. + // Finish GOHOSTOS/GOHOSTARCH installation and then + // run GOOS/GOARCH installation. 
+ timelog("build", "host toolchain") + if vflag > 0 { + xprintf("\n") + } + xprintf("Building packages and commands for host, %s/%s.\n", goos, goarch) + goInstall(goBootstrap, "std", "cmd") + checkNotStale(goBootstrap, "std", "cmd") + checkNotStale(cmdGo, "std", "cmd") + + timelog("build", "target toolchain") + if vflag > 0 { + xprintf("\n") + } + goos = oldgoos + goarch = oldgoarch + os.Setenv("GOOS", goos) + os.Setenv("GOARCH", goarch) + os.Setenv("CC", compilerEnvLookup(defaultcc, goos, goarch)) + xprintf("Building packages and commands for target, %s/%s.\n", goos, goarch) + } + goInstall(goBootstrap, "std", "cmd") + checkNotStale(goBootstrap, "std", "cmd") + checkNotStale(cmdGo, "std", "cmd") + if debug { + run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full") + run("", ShowOutput|CheckExit, pathf("%s/buildid", tooldir), pathf("%s/pkg/%s_%s/runtime/internal/sys.a", goroot, goos, goarch)) + checkNotStale(goBootstrap, append(toolchain, "runtime/internal/sys")...) + copyfile(pathf("%s/compile4", tooldir), pathf("%s/compile", tooldir), writeExec) + } + + // Check that there are no new files in $GOROOT/bin other than + // go and gofmt and $GOOS_$GOARCH (target bin when cross-compiling). + binFiles, _ := filepath.Glob(pathf("%s/bin/*", goroot)) + ok := map[string]bool{} + for _, f := range oldBinFiles { + ok[f] = true + } + for _, f := range binFiles { + elem := strings.TrimSuffix(filepath.Base(f), ".exe") + if !ok[f] && elem != "go" && elem != "gofmt" && elem != goos+"_"+goarch { + fatalf("unexpected new file in $GOROOT/bin: %s", elem) + } + } + + // Remove go_bootstrap now that we're done. + xremove(pathf("%s/go_bootstrap", tooldir)) + + // Print trailing banner unless instructed otherwise. 
+ if !noBanner { + banner() + } +} + +func goInstall(goBinary string, args ...string) { + installCmd := []string{goBinary, "install", "-gcflags=all=" + gogcflags, "-ldflags=all=" + goldflags} + if vflag > 0 { + installCmd = append(installCmd, "-v") + } + + // Force only one process at a time on vx32 emulation. + if gohostos == "plan9" && os.Getenv("sysname") == "vx32" { + installCmd = append(installCmd, "-p=1") + } + + run(goroot, ShowOutput|CheckExit, append(installCmd, args...)...) +} + +func checkNotStale(goBinary string, targets ...string) { + out := run(goroot, CheckExit, + append([]string{ + goBinary, + "list", "-gcflags=all=" + gogcflags, "-ldflags=all=" + goldflags, + "-f={{if .Stale}}\tSTALE {{.ImportPath}}: {{.StaleReason}}{{end}}", + }, targets...)...) + if strings.Contains(out, "\tSTALE ") { + os.Setenv("GODEBUG", "gocachehash=1") + for _, target := range []string{"runtime/internal/sys", "cmd/dist", "cmd/link"} { + if strings.Contains(out, "STALE "+target) { + run(goroot, ShowOutput|CheckExit, goBinary, "list", "-f={{.ImportPath}} {{.Stale}}", target) + break + } + } + fatalf("unexpected stale targets reported by %s list -gcflags=\"%s\" -ldflags=\"%s\" for %v:\n%s", goBinary, gogcflags, goldflags, targets, out) } } @@ -1146,12 +1406,12 @@ func checkCC() { if !needCC() { return } - if output, err := exec.Command(defaultcc, "--help").CombinedOutput(); err != nil { + if output, err := exec.Command(defaultcc[""], "--help").CombinedOutput(); err != nil { outputHdr := "" if len(output) > 0 { outputHdr = "\nCommand output:\n\n" } - fatal("cannot invoke C compiler %q: %v\n\n"+ + fatalf("cannot invoke C compiler %q: %v\n\n"+ "Go needs a system C compiler for use with cgo.\n"+ "To set a C compiler, set CC=the-compiler.\n"+ "To disable cgo, set CGO_ENABLED=0.\n%s%s", defaultcc, err, outputHdr, output) @@ -1167,7 +1427,7 @@ func defaulttarg() string { src := pathf("%s/src/", goroot) real_src := xrealwd(src) if !strings.HasPrefix(pwd, real_src) { - fatal("current 
directory %s is not under %s", pwd, real_src) + fatalf("current directory %s is not under %s", pwd, real_src) } pwd = pwd[len(real_src):] // guard against xrealwd returning the directory without the trailing / @@ -1198,8 +1458,13 @@ func cmdclean() { // Banner prints the 'now you've installed Go' banner. func cmdbanner() { xflagparse(0) + banner() +} - xprintf("\n") +func banner() { + if vflag > 0 { + xprintf("\n") + } xprintf("---\n") xprintf("Installed Go for %s/%s in %s\n", goos, goarch, goroot) xprintf("Installed commands in %s\n", gobin) @@ -1271,9 +1536,9 @@ func cmdlist() { } out, err := json.MarshalIndent(results, "", "\t") if err != nil { - fatal("json marshal error: %v", err) + fatalf("json marshal error: %v", err) } if _, err := os.Stdout.Write(out); err != nil { - fatal("write failed: %v", err) + fatalf("write failed: %v", err) } } diff --git a/src/cmd/dist/buildgo.go b/src/cmd/dist/buildgo.go index 1de2c4eccd8..caafc13da88 100644 --- a/src/cmd/dist/buildgo.go +++ b/src/cmd/dist/buildgo.go @@ -8,7 +8,9 @@ import ( "bytes" "fmt" "os" + "path/filepath" "sort" + "strings" ) /* @@ -22,35 +24,53 @@ import ( // const defaultCXX = // const defaultPkgConfig = // -// It is invoked to write cmd/go/zdefaultcc.go +// It is invoked to write cmd/go/internal/cfg/zdefaultcc.go // but we also write cmd/cgo/zdefaultcc.go func mkzdefaultcc(dir, file string) { - outGo := fmt.Sprintf( - "// auto generated by go tool dist\n"+ - "\n"+ - "package cfg\n"+ - "\n"+ - "const DefaultCC = `%s`\n"+ - "const DefaultCXX = `%s`\n"+ - "const DefaultPkgConfig = `%s`\n", - defaultcctarget, defaultcxxtarget, defaultpkgconfigtarget) + if strings.Contains(file, filepath.FromSlash("go/internal/cfg")) { + var buf bytes.Buffer + fmt.Fprintf(&buf, "// Code generated by go tool dist; DO NOT EDIT.\n") + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "package cfg\n") + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "const DefaultPkgConfig = `%s`\n", defaultpkgconfig) + buf.WriteString(defaultCCFunc("DefaultCC", 
defaultcc)) + buf.WriteString(defaultCCFunc("DefaultCXX", defaultcxx)) + writefile(buf.String(), file, writeSkipSame) + return + } - writefile(outGo, file, writeSkipSame) + var buf bytes.Buffer + fmt.Fprintf(&buf, "// Code generated by go tool dist; DO NOT EDIT.\n") + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "package main\n") + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "const defaultPkgConfig = `%s`\n", defaultpkgconfig) + buf.WriteString(defaultCCFunc("defaultCC", defaultcc)) + buf.WriteString(defaultCCFunc("defaultCXX", defaultcxx)) + writefile(buf.String(), file, writeSkipSame) +} - // Convert file name to replace: turn go/internal/cfg into cgo. - outCgo := fmt.Sprintf( - "// auto generated by go tool dist\n"+ - "\n"+ - "package main\n"+ - "\n"+ - "const defaultCC = `%s`\n"+ - "const defaultCXX = `%s`\n"+ - "const defaultPkgConfig = `%s`\n", - defaultcctarget, defaultcxxtarget, defaultpkgconfigtarget) +func defaultCCFunc(name string, defaultcc map[string]string) string { + var buf bytes.Buffer - i := len(file) - len("go/internal/cfg/zdefaultcc.go") - file = file[:i] + "cgo/zdefaultcc.go" - writefile(outCgo, file, writeSkipSame) + fmt.Fprintf(&buf, "func %s(goos, goarch string) string {\n", name) + fmt.Fprintf(&buf, "\tswitch goos+`/`+goarch {\n") + var keys []string + for k := range defaultcc { + if k != "" { + keys = append(keys, k) + } + } + sort.Strings(keys) + for _, k := range keys { + fmt.Fprintf(&buf, "\tcase %q:\n\t\treturn %q\n", k, defaultcc[k]) + } + fmt.Fprintf(&buf, "\t}\n") + fmt.Fprintf(&buf, "\treturn %q\n", defaultcc[""]) + fmt.Fprintf(&buf, "}\n") + + return buf.String() } // mkzcgo writes zosarch.go for cmd/go. 
@@ -63,13 +83,14 @@ func mkzosarch(dir, file string) { sort.Strings(list) var buf bytes.Buffer - buf.WriteString("// auto generated by go tool dist\n\n") - buf.WriteString("package cfg\n\n") + fmt.Fprintf(&buf, "// Code generated by go tool dist; DO NOT EDIT.\n\n") + fmt.Fprintf(&buf, "package cfg\n\n") fmt.Fprintf(&buf, "var OSArchSupportsCgo = map[string]bool{\n") for _, plat := range list { fmt.Fprintf(&buf, "\t%q: %v,\n", plat, cgoEnabled[plat]) } fmt.Fprintf(&buf, "}\n") + writefile(buf.String(), file, writeSkipSame) } @@ -90,18 +111,17 @@ func mkzcgo(dir, file string) { sort.Strings(list) var buf bytes.Buffer - - fmt.Fprintf(&buf, - "// auto generated by go tool dist\n"+ - "\n"+ - "package build\n"+ - "\n"+ - "const defaultCGO_ENABLED = %q\n\n"+ - "var cgoEnabled = map[string]bool{\n", os.Getenv("CGO_ENABLED")) + fmt.Fprintf(&buf, "// Code generated by go tool dist; DO NOT EDIT.\n") + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "package build\n") + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "const defaultCGO_ENABLED = %q\n", os.Getenv("CGO_ENABLED")) + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "var cgoEnabled = map[string]bool{\n") for _, plat := range list { fmt.Fprintf(&buf, "\t%q: true,\n", plat) } - fmt.Fprintf(&buf, "}") + fmt.Fprintf(&buf, "}\n") writefile(buf.String(), file, writeSkipSame) } diff --git a/src/cmd/dist/buildruntime.go b/src/cmd/dist/buildruntime.go index 1467c59c6c9..2f10fd0237d 100644 --- a/src/cmd/dist/buildruntime.go +++ b/src/cmd/dist/buildruntime.go @@ -5,6 +5,7 @@ package main import ( + "bytes" "fmt" "os" "strings" @@ -17,23 +18,25 @@ import ( // mkzversion writes zversion.go: // // package sys -// const DefaultGoroot = +// var DefaultGoroot = +// // const TheVersion = // const Goexperiment = // const StackGuardMultiplier = // func mkzversion(dir, file string) { - out := fmt.Sprintf( - "// auto generated by go tool dist\n"+ - "\n"+ - "package sys\n"+ - "\n"+ - "const DefaultGoroot = `%s`\n"+ - "const TheVersion = `%s`\n"+ - "const 
Goexperiment = `%s`\n"+ - "const StackGuardMultiplier = %d\n\n", goroot_final, findgoversion(), os.Getenv("GOEXPERIMENT"), stackGuardMultiplier()) + var buf bytes.Buffer + fmt.Fprintf(&buf, "// Code generated by go tool dist; DO NOT EDIT.\n") + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "package sys\n") + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "var DefaultGoroot = `%s`\n", goroot_final) + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "const TheVersion = `%s`\n", findgoversion()) + fmt.Fprintf(&buf, "const Goexperiment = `%s`\n", os.Getenv("GOEXPERIMENT")) + fmt.Fprintf(&buf, "const StackGuardMultiplier = %d\n", stackGuardMultiplier()) - writefile(out, file, writeSkipSame) + writefile(buf.String(), file, writeSkipSame) } // mkzbootstrap writes cmd/internal/objabi/zbootstrap.go: @@ -43,6 +46,7 @@ func mkzversion(dir, file string) { // const defaultGOROOT = // const defaultGO386 = // const defaultGOARM = +// const defaultGOMIPS = // const defaultGOOS = runtime.GOOS // const defaultGOARCH = runtime.GOARCH // const defaultGO_EXTLINK_ENABLED = @@ -60,25 +64,25 @@ func mkzversion(dir, file string) { // This is more useful than having it default to generating objects for the // original target (in this example, a Mac). 
func mkzbootstrap(file string) { - out := fmt.Sprintf( - "// auto generated by go tool dist\n"+ - "\n"+ - "package objabi\n"+ - "\n"+ - "import \"runtime\"\n"+ - "\n"+ - "const defaultGOROOT = `%s`\n"+ - "const defaultGO386 = `%s`\n"+ - "const defaultGOARM = `%s`\n"+ - "const defaultGOOS = runtime.GOOS\n"+ - "const defaultGOARCH = runtime.GOARCH\n"+ - "const defaultGO_EXTLINK_ENABLED = `%s`\n"+ - "const version = `%s`\n"+ - "const stackGuardMultiplier = %d\n"+ - "const goexperiment = `%s`\n", - goroot_final, go386, goarm, goextlinkenabled, findgoversion(), stackGuardMultiplier(), os.Getenv("GOEXPERIMENT")) + var buf bytes.Buffer + fmt.Fprintf(&buf, "// Code generated by go tool dist; DO NOT EDIT.\n") + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "package objabi\n") + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "import \"runtime\"\n") + fmt.Fprintln(&buf) + fmt.Fprintf(&buf, "const defaultGOROOT = `%s`\n", goroot_final) + fmt.Fprintf(&buf, "const defaultGO386 = `%s`\n", go386) + fmt.Fprintf(&buf, "const defaultGOARM = `%s`\n", goarm) + fmt.Fprintf(&buf, "const defaultGOMIPS = `%s`\n", gomips) + fmt.Fprintf(&buf, "const defaultGOOS = runtime.GOOS\n") + fmt.Fprintf(&buf, "const defaultGOARCH = runtime.GOARCH\n") + fmt.Fprintf(&buf, "const defaultGO_EXTLINK_ENABLED = `%s`\n", goextlinkenabled) + fmt.Fprintf(&buf, "const version = `%s`\n", findgoversion()) + fmt.Fprintf(&buf, "const stackGuardMultiplier = %d\n", stackGuardMultiplier()) + fmt.Fprintf(&buf, "const goexperiment = `%s`\n", os.Getenv("GOEXPERIMENT")) - writefile(out, file, writeSkipSame) + writefile(buf.String(), file, writeSkipSame) } // stackGuardMultiplier returns a multiplier to apply to the default diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go index 8a3db32ad34..24d2e1e7d62 100644 --- a/src/cmd/dist/buildtool.go +++ b/src/cmd/dist/buildtool.go @@ -35,6 +35,7 @@ var bootstrapDirs = []string{ "cmd/asm/internal/asm", "cmd/asm/internal/flags", "cmd/asm/internal/lex", + "cmd/cgo", "cmd/compile", 
"cmd/compile/internal/amd64", "cmd/compile/internal/arm", @@ -51,6 +52,7 @@ var bootstrapDirs = []string{ "cmd/internal/bio", "cmd/internal/gcprog", "cmd/internal/dwarf", + "cmd/internal/edit", "cmd/internal/objabi", "cmd/internal/obj", "cmd/internal/obj/arm", @@ -66,14 +68,24 @@ var bootstrapDirs = []string{ "cmd/link/internal/arm", "cmd/link/internal/arm64", "cmd/link/internal/ld", + "cmd/link/internal/loadelf", + "cmd/link/internal/loadmacho", + "cmd/link/internal/loadpe", "cmd/link/internal/mips", "cmd/link/internal/mips64", + "cmd/link/internal/objfile", "cmd/link/internal/ppc64", "cmd/link/internal/s390x", + "cmd/link/internal/sym", "cmd/link/internal/x86", + "container/heap", + "debug/dwarf", + "debug/elf", + "debug/macho", "debug/pe", "math/big", "math/bits", + "sort", } // File prefixes that are ignored by go/build anyway, and cause @@ -95,7 +107,7 @@ func bootstrapBuildTools() { if goroot_bootstrap == "" { goroot_bootstrap = pathf("%s/go1.4", os.Getenv("HOME")) } - xprintf("##### Building Go toolchain using %s.\n", goroot_bootstrap) + xprintf("Building Go toolchain1 using %s.\n", goroot_bootstrap) mkzbootstrap(pathf("%s/src/cmd/internal/objabi/zbootstrap.go", goroot)) @@ -114,6 +126,11 @@ func bootstrapBuildTools() { src := pathf("%s/src/%s", goroot, dir) dst := pathf("%s/%s", base, dir) xmkdirall(dst) + if dir == "cmd/cgo" { + // Write to src because we need the file both for bootstrap + // and for later in the main build. 
+ mkzdefaultcc("", pathf("%s/zdefaultcc.go", src)) + } Dir: for _, name := range xreaddirfiles(src) { for _, pre := range ignorePrefixes { @@ -128,8 +145,7 @@ func bootstrapBuildTools() { } srcFile := pathf("%s/%s", src, name) dstFile := pathf("%s/%s", dst, name) - text := readfile(srcFile) - text = bootstrapRewriteFile(text, srcFile) + text := bootstrapRewriteFile(srcFile) writefile(text, dstFile, 0) } } @@ -163,12 +179,17 @@ func bootstrapBuildTools() { // https://groups.google.com/d/msg/golang-dev/Ss7mCKsvk8w/Gsq7VYI0AwAJ // Use the math_big_pure_go build tag to disable the assembly in math/big // which may contain unsupported instructions. + // Note that if we are using Go 1.10 or later as bootstrap, the -gcflags=-l + // only applies to the final cmd/go binary, but that's OK: if this is Go 1.10 + // or later we don't need to disable inlining to work around bugs in the Go 1.4 compiler. cmd := []string{ pathf("%s/bin/go", goroot_bootstrap), "install", "-gcflags=-l", - "-tags=math_big_pure_go", - "-v", + "-tags=math_big_pure_go compiler_bootstrap", + } + if vflag > 0 { + cmd = append(cmd, "-v") } if tool := os.Getenv("GOBOOTSTRAP_TOOLEXEC"); tool != "" { cmd = append(cmd, "-toolexec="+tool) @@ -187,10 +208,12 @@ func bootstrapBuildTools() { } } - xprintf("\n") + if vflag > 0 { + xprintf("\n") + } } -var ssaRewriteFileSubstring = filepath.ToSlash("src/cmd/compile/internal/ssa/rewrite") +var ssaRewriteFileSubstring = filepath.FromSlash("src/cmd/compile/internal/ssa/rewrite") // isUnneededSSARewriteFile reports whether srcFile is a // src/cmd/compile/internal/ssa/rewriteARCHNAME.go file for an @@ -221,24 +244,26 @@ func isUnneededSSARewriteFile(srcFile string) (archCaps string, unneeded bool) { return archCaps, true } -func bootstrapRewriteFile(text, srcFile string) string { +func bootstrapRewriteFile(srcFile string) string { // During bootstrap, generate dummy rewrite files for // irrelevant architectures. 
We only need to build a bootstrap // binary that works for the current runtime.GOARCH. // This saves 6+ seconds of bootstrap. if archCaps, ok := isUnneededSSARewriteFile(srcFile); ok { - return fmt.Sprintf(`package ssa + return fmt.Sprintf(`// Code generated by go tool dist; DO NOT EDIT. + +package ssa func rewriteValue%s(v *Value) bool { panic("unused during bootstrap") } func rewriteBlock%s(b *Block) bool { panic("unused during bootstrap") } `, archCaps, archCaps) } - return bootstrapFixImports(text, srcFile) + return bootstrapFixImports(srcFile) } -func bootstrapFixImports(text, srcFile string) string { - lines := strings.SplitAfter(text, "\n") +func bootstrapFixImports(srcFile string) string { + lines := strings.SplitAfter(readfile(srcFile), "\n") inBlock := false for i, line := range lines { if strings.HasPrefix(line, "import (") { @@ -262,7 +287,7 @@ func bootstrapFixImports(text, srcFile string) string { } } - lines[0] = "// Do not edit. Bootstrap copy of " + srcFile + "\n\n//line " + srcFile + ":1\n" + lines[0] + lines[0] = "// Code generated by go tool dist; DO NOT EDIT.\n// This is a bootstrap copy of " + srcFile + "\n\n//line " + srcFile + ":1\n" + lines[0] return strings.Join(lines, "") } diff --git a/src/cmd/dist/deps.go b/src/cmd/dist/deps.go deleted file mode 100644 index 6e2169d6b6e..00000000000 --- a/src/cmd/dist/deps.go +++ /dev/null @@ -1,94 +0,0 @@ -// generated by mkdeps.bash - -package main - -var builddeps = map[string][]string{ - "bufio": {"bytes", "errors", "internal/cpu", "internal/race", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "unicode", "unicode/utf8"}, - "bytes": {"errors", "internal/cpu", "internal/race", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "unicode", "unicode/utf8"}, - "cmd/go/internal/base": {"bufio", "bytes", "cmd/go/internal/cfg", "cmd/go/internal/str", "cmd/internal/objabi", "context", "errors", "flag", "fmt", "go/ast", 
"go/build", "go/doc", "go/parser", "go/scanner", "go/token", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/bug": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/buildid", "cmd/go/internal/cfg", "cmd/go/internal/envcmd", "cmd/go/internal/load", "cmd/go/internal/str", "cmd/go/internal/web", "cmd/go/internal/work", "cmd/internal/objabi", "compress/flate", "compress/zlib", "container/heap", "context", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "debug/macho", "encoding", "encoding/base64", "encoding/binary", "encoding/json", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "hash/adler32", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "math/bits", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/buildid": {"bufio", "bytes", "cmd/go/internal/cfg", "cmd/internal/objabi", "compress/flate", "compress/zlib", "debug/dwarf", "debug/elf", "debug/macho", "encoding/binary", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "hash/adler32", "internal/cpu", 
"internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "math/bits", "net/url", "os", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/cfg": {"bufio", "bytes", "cmd/internal/objabi", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "net/url", "os", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/clean": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/buildid", "cmd/go/internal/cfg", "cmd/go/internal/load", "cmd/go/internal/str", "cmd/go/internal/work", "cmd/internal/objabi", "compress/flate", "compress/zlib", "container/heap", "context", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "debug/macho", "encoding/binary", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "hash/adler32", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "math/bits", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", 
"strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/cmdflag": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/cfg", "cmd/go/internal/str", "cmd/internal/objabi", "context", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/doc": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/cfg", "cmd/go/internal/str", "cmd/internal/objabi", "context", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/envcmd": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/buildid", "cmd/go/internal/cfg", "cmd/go/internal/load", "cmd/go/internal/str", "cmd/go/internal/work", "cmd/internal/objabi", "compress/flate", "compress/zlib", "container/heap", "context", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "debug/macho", 
"encoding", "encoding/base64", "encoding/binary", "encoding/json", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "hash/adler32", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "math/bits", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/fix": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/buildid", "cmd/go/internal/cfg", "cmd/go/internal/load", "cmd/go/internal/str", "cmd/internal/objabi", "compress/flate", "compress/zlib", "context", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "debug/macho", "encoding/binary", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "hash/adler32", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "math/bits", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/fmtcmd": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/buildid", "cmd/go/internal/cfg", "cmd/go/internal/load", "cmd/go/internal/str", "cmd/internal/objabi", "compress/flate", "compress/zlib", "context", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "debug/macho", "encoding/binary", 
"errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "hash/adler32", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "math/bits", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/generate": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/buildid", "cmd/go/internal/cfg", "cmd/go/internal/load", "cmd/go/internal/str", "cmd/go/internal/work", "cmd/internal/objabi", "compress/flate", "compress/zlib", "container/heap", "context", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "debug/macho", "encoding/binary", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "hash/adler32", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "math/bits", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/get": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/buildid", "cmd/go/internal/cfg", "cmd/go/internal/load", "cmd/go/internal/str", "cmd/go/internal/web", "cmd/go/internal/work", "cmd/internal/objabi", "compress/flate", "compress/zlib", "container/heap", "context", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", 
"debug/macho", "encoding", "encoding/base64", "encoding/binary", "encoding/json", "encoding/xml", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "hash/adler32", "internal/cpu", "internal/poll", "internal/race", "internal/singleflight", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "math/bits", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/help": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/cfg", "cmd/go/internal/str", "cmd/internal/objabi", "context", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/list": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/buildid", "cmd/go/internal/cfg", "cmd/go/internal/load", "cmd/go/internal/str", "cmd/go/internal/work", "cmd/internal/objabi", "compress/flate", "compress/zlib", "container/heap", "context", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "debug/macho", "encoding", "encoding/base64", "encoding/binary", "encoding/json", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", 
"go/scanner", "go/token", "hash", "hash/adler32", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "math/bits", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/load": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/buildid", "cmd/go/internal/cfg", "cmd/go/internal/str", "cmd/internal/objabi", "compress/flate", "compress/zlib", "context", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "debug/macho", "encoding/binary", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "hash/adler32", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "math/bits", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/run": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/buildid", "cmd/go/internal/cfg", "cmd/go/internal/load", "cmd/go/internal/str", "cmd/go/internal/work", "cmd/internal/objabi", "compress/flate", "compress/zlib", "container/heap", "context", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "debug/macho", "encoding/binary", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "hash/adler32", 
"internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "math/bits", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/str": {"bytes", "errors", "fmt", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/test": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/buildid", "cmd/go/internal/cfg", "cmd/go/internal/cmdflag", "cmd/go/internal/load", "cmd/go/internal/str", "cmd/go/internal/work", "cmd/internal/objabi", "compress/flate", "compress/zlib", "container/heap", "context", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "debug/macho", "encoding/binary", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "hash/adler32", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "math/bits", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/tool": {"bufio", "bytes", 
"cmd/go/internal/base", "cmd/go/internal/cfg", "cmd/go/internal/str", "cmd/internal/objabi", "context", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/version": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/cfg", "cmd/go/internal/str", "cmd/internal/objabi", "context", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/vet": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/buildid", "cmd/go/internal/cfg", "cmd/go/internal/cmdflag", "cmd/go/internal/load", "cmd/go/internal/str", "cmd/go/internal/work", "cmd/internal/objabi", "compress/flate", "compress/zlib", "container/heap", "context", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "debug/macho", "encoding/binary", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "hash/adler32", "internal/cpu", 
"internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "math/bits", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/go/internal/web": {"errors", "internal/race", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic"}, - "cmd/go/internal/work": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/buildid", "cmd/go/internal/cfg", "cmd/go/internal/load", "cmd/go/internal/str", "cmd/internal/objabi", "compress/flate", "compress/zlib", "container/heap", "context", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "debug/macho", "encoding/binary", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "hash/adler32", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "math/bits", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "cmd/internal/objabi": {"errors", "flag", "fmt", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "log", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", 
"syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "compress/flate": {"bufio", "bytes", "errors", "fmt", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "math/bits", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "compress/zlib": {"bufio", "bytes", "compress/flate", "errors", "fmt", "hash", "hash/adler32", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "math/bits", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "container/heap": {"errors", "internal/cpu", "internal/race", "math", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "sync", "sync/atomic", "unicode", "unicode/utf8"}, - "context": {"errors", "fmt", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "crypto": {"errors", "hash", "internal/cpu", "internal/race", "io", "math", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "unicode/utf8"}, - "crypto/sha1": {"crypto", "errors", "hash", "internal/cpu", "internal/race", "io", "math", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "unicode/utf8"}, - "debug/dwarf": {"encoding/binary", "errors", "fmt", "internal/cpu", 
"internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "os", "path", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "debug/elf": {"bufio", "bytes", "compress/flate", "compress/zlib", "debug/dwarf", "encoding/binary", "errors", "fmt", "hash", "hash/adler32", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "math/bits", "os", "path", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "debug/macho": {"bytes", "debug/dwarf", "encoding/binary", "errors", "fmt", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "os", "path", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "encoding": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"}, - "encoding/base64": {"errors", "internal/cpu", "internal/race", "io", "math", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "unicode/utf8"}, - "encoding/binary": {"errors", "internal/cpu", "internal/race", "io", "math", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "unicode", "unicode/utf8"}, - "encoding/json": {"bytes", "encoding", "encoding/base64", "errors", "fmt", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", 
"internal/syscall/windows/sysdll", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "encoding/xml": {"bufio", "bytes", "encoding", "errors", "fmt", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "errors": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"}, - "flag": {"errors", "fmt", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "fmt": {"errors", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "go/ast": {"bytes", "errors", "fmt", "go/scanner", "go/token", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "go/build": {"bufio", "bytes", "errors", "fmt", "go/ast", "go/doc", 
"go/parser", "go/scanner", "go/token", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "net/url", "os", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "go/doc": {"bytes", "errors", "fmt", "go/ast", "go/scanner", "go/token", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "math", "net/url", "os", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "go/parser": {"bytes", "errors", "fmt", "go/ast", "go/scanner", "go/token", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "go/scanner": {"bytes", "errors", "fmt", "go/token", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "go/token": {"errors", 
"fmt", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "hash": {"errors", "internal/race", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic"}, - "hash/adler32": {"errors", "hash", "internal/race", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic"}, - "internal/cpu": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"}, - "internal/poll": {"errors", "internal/race", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"}, - "internal/race": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"}, - "internal/singleflight": {"internal/race", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic"}, - "internal/syscall/windows": {"errors", "internal/race", "internal/syscall/windows/sysdll", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "syscall", "unicode/utf16"}, - "internal/syscall/windows/registry": {"errors", "internal/race", "internal/syscall/windows/sysdll", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "syscall", "unicode/utf16"}, - "internal/syscall/windows/sysdll": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"}, - "io": {"errors", "internal/race", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic"}, - "io/ioutil": {"bytes", "errors", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", 
"internal/syscall/windows/sysdll", "io", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "log": {"errors", "fmt", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "math": {"internal/cpu", "runtime", "runtime/internal/atomic", "runtime/internal/sys"}, - "math/bits": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"}, - "net/url": {"bytes", "errors", "fmt", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "os": {"errors", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"}, - "os/exec": {"bytes", "context", "errors", "fmt", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "os/signal": {"errors", "internal/poll", "internal/race", 
"internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "os", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "syscall", "time", "unicode/utf16", "unicode/utf8"}, - "path": {"errors", "internal/cpu", "internal/race", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"}, - "path/filepath": {"errors", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "reflect": {"errors", "internal/cpu", "internal/race", "math", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "unicode", "unicode/utf8"}, - "regexp": {"bytes", "errors", "internal/cpu", "internal/race", "io", "math", "reflect", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"}, - "regexp/syntax": {"bytes", "errors", "internal/cpu", "internal/race", "io", "math", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "unicode", "unicode/utf8"}, - "runtime": {"runtime/internal/atomic", "runtime/internal/sys"}, - "runtime/internal/atomic": {"runtime/internal/sys"}, - "runtime/internal/sys": {}, - "sort": {"errors", "internal/cpu", "internal/race", "math", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "sync", "sync/atomic", "unicode", "unicode/utf8"}, - "strconv": {"errors", "internal/cpu", "math", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "unicode/utf8"}, - "strings": 
{"errors", "internal/cpu", "internal/race", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "unicode", "unicode/utf8"}, - "sync": {"internal/race", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync/atomic"}, - "sync/atomic": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"}, - "syscall": {"errors", "internal/race", "internal/syscall/windows/sysdll", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "unicode/utf16"}, - "text/template": {"bytes", "errors", "fmt", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "math", "net/url", "os", "path/filepath", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "text/template/parse": {"bytes", "errors", "fmt", "internal/cpu", "internal/poll", "internal/race", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "math", "os", "reflect", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "strconv", "strings", "sync", "sync/atomic", "syscall", "time", "unicode", "unicode/utf16", "unicode/utf8"}, - "time": {"errors", "internal/race", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sync", "sync/atomic", "syscall", "unicode/utf16"}, - "unicode": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"}, - "unicode/utf16": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"}, - "unicode/utf8": {"runtime", "runtime/internal/atomic", "runtime/internal/sys"}, - "cmd/go": {"bufio", "bytes", "cmd/go/internal/base", "cmd/go/internal/bug", "cmd/go/internal/buildid", 
"cmd/go/internal/cfg", "cmd/go/internal/clean", "cmd/go/internal/cmdflag", "cmd/go/internal/doc", "cmd/go/internal/envcmd", "cmd/go/internal/fix", "cmd/go/internal/fmtcmd", "cmd/go/internal/generate", "cmd/go/internal/get", "cmd/go/internal/help", "cmd/go/internal/list", "cmd/go/internal/load", "cmd/go/internal/run", "cmd/go/internal/str", "cmd/go/internal/test", "cmd/go/internal/tool", "cmd/go/internal/version", "cmd/go/internal/vet", "cmd/go/internal/web", "cmd/go/internal/work", "cmd/internal/objabi", "compress/flate", "compress/zlib", "container/heap", "context", "crypto", "crypto/sha1", "debug/dwarf", "debug/elf", "debug/macho", "encoding", "encoding/base64", "encoding/binary", "encoding/json", "encoding/xml", "errors", "flag", "fmt", "go/ast", "go/build", "go/doc", "go/parser", "go/scanner", "go/token", "hash", "hash/adler32", "internal/cpu", "internal/poll", "internal/race", "internal/singleflight", "internal/syscall/windows", "internal/syscall/windows/registry", "internal/syscall/windows/sysdll", "io", "io/ioutil", "log", "math", "math/bits", "net/url", "os", "os/exec", "os/signal", "path", "path/filepath", "reflect", "regexp", "regexp/syntax", "runtime", "runtime/internal/atomic", "runtime/internal/sys", "sort", "strconv", "strings", "sync", "sync/atomic", "syscall", "text/template", "text/template/parse", "time", "unicode", "unicode/utf16", "unicode/utf8"}, -} diff --git a/src/cmd/dist/doc.go b/src/cmd/dist/doc.go new file mode 100644 index 00000000000..a4e6aa5cbfd --- /dev/null +++ b/src/cmd/dist/doc.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Dist helps bootstrap, build, and test the Go distribution. 
+// +// Usage: +// go tool dist [command] +// +// The commands are: +// banner print installation banner +// bootstrap rebuild everything +// clean deletes all built files +// env [-p] print environment (-p: include $PATH) +// install [dir] install individual directory +// list [-json] list all supported platforms +// test [-h] run Go test(s) +// version print Go version +package main diff --git a/src/cmd/dist/imports.go b/src/cmd/dist/imports.go new file mode 100644 index 00000000000..bf64d6668aa --- /dev/null +++ b/src/cmd/dist/imports.go @@ -0,0 +1,245 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is forked from go/build/read.go. +// (cmd/dist must not import go/build because we do not want it to be +// sensitive to the specific version of go/build present in $GOROOT_BOOTSTRAP.) + +package main + +import ( + "bufio" + "errors" + "io" + "strconv" + "strings" + "unicode/utf8" +) + +type importReader struct { + b *bufio.Reader + buf []byte + peek byte + err error + eof bool + nerr int +} + +func isIdent(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c >= utf8.RuneSelf +} + +var ( + errSyntax = errors.New("syntax error") + errNUL = errors.New("unexpected NUL in input") +) + +// syntaxError records a syntax error, but only if an I/O error has not already been recorded. +func (r *importReader) syntaxError() { + if r.err == nil { + r.err = errSyntax + } +} + +// readByte reads the next byte from the input, saves it in buf, and returns it. +// If an error occurs, readByte records the error in r.err and returns 0. 
+func (r *importReader) readByte() byte { + c, err := r.b.ReadByte() + if err == nil { + r.buf = append(r.buf, c) + if c == 0 { + err = errNUL + } + } + if err != nil { + if err == io.EOF { + r.eof = true + } else if r.err == nil { + r.err = err + } + c = 0 + } + return c +} + +// peekByte returns the next byte from the input reader but does not advance beyond it. +// If skipSpace is set, peekByte skips leading spaces and comments. +func (r *importReader) peekByte(skipSpace bool) byte { + if r.err != nil { + if r.nerr++; r.nerr > 10000 { + panic("go/build: import reader looping") + } + return 0 + } + + // Use r.peek as first input byte. + // Don't just return r.peek here: it might have been left by peekByte(false) + // and this might be peekByte(true). + c := r.peek + if c == 0 { + c = r.readByte() + } + for r.err == nil && !r.eof { + if skipSpace { + // For the purposes of this reader, semicolons are never necessary to + // understand the input and are treated as spaces. + switch c { + case ' ', '\f', '\t', '\r', '\n', ';': + c = r.readByte() + continue + + case '/': + c = r.readByte() + if c == '/' { + for c != '\n' && r.err == nil && !r.eof { + c = r.readByte() + } + } else if c == '*' { + var c1 byte + for (c != '*' || c1 != '/') && r.err == nil { + if r.eof { + r.syntaxError() + } + c, c1 = c1, r.readByte() + } + } else { + r.syntaxError() + } + c = r.readByte() + continue + } + } + break + } + r.peek = c + return r.peek +} + +// nextByte is like peekByte but advances beyond the returned byte. +func (r *importReader) nextByte(skipSpace bool) byte { + c := r.peekByte(skipSpace) + r.peek = 0 + return c +} + +// readKeyword reads the given keyword from the input. +// If the keyword is not present, readKeyword records a syntax error. 
+func (r *importReader) readKeyword(kw string) { + r.peekByte(true) + for i := 0; i < len(kw); i++ { + if r.nextByte(false) != kw[i] { + r.syntaxError() + return + } + } + if isIdent(r.peekByte(false)) { + r.syntaxError() + } +} + +// readIdent reads an identifier from the input. +// If an identifier is not present, readIdent records a syntax error. +func (r *importReader) readIdent() { + c := r.peekByte(true) + if !isIdent(c) { + r.syntaxError() + return + } + for isIdent(r.peekByte(false)) { + r.peek = 0 + } +} + +// readString reads a quoted string literal from the input. +// If an identifier is not present, readString records a syntax error. +func (r *importReader) readString(save *[]string) { + switch r.nextByte(true) { + case '`': + start := len(r.buf) - 1 + for r.err == nil { + if r.nextByte(false) == '`' { + if save != nil { + *save = append(*save, string(r.buf[start:])) + } + break + } + if r.eof { + r.syntaxError() + } + } + case '"': + start := len(r.buf) - 1 + for r.err == nil { + c := r.nextByte(false) + if c == '"' { + if save != nil { + *save = append(*save, string(r.buf[start:])) + } + break + } + if r.eof || c == '\n' { + r.syntaxError() + } + if c == '\\' { + r.nextByte(false) + } + } + default: + r.syntaxError() + } +} + +// readImport reads an import clause - optional identifier followed by quoted string - +// from the input. +func (r *importReader) readImport(imports *[]string) { + c := r.peekByte(true) + if c == '.' { + r.peek = 0 + } else if isIdent(c) { + r.readIdent() + } + r.readString(imports) +} + +// readComments is like ioutil.ReadAll, except that it only reads the leading +// block of comments in the file. +func readComments(f io.Reader) ([]byte, error) { + r := &importReader{b: bufio.NewReader(f)} + r.peekByte(true) + if r.err == nil && !r.eof { + // Didn't reach EOF, so must have found a non-space byte. Remove it. 
+ r.buf = r.buf[:len(r.buf)-1] + } + return r.buf, r.err +} + +// readimports returns the imports found in the named file. +func readimports(file string) []string { + var imports []string + r := &importReader{b: bufio.NewReader(strings.NewReader(readfile(file)))} + r.readKeyword("package") + r.readIdent() + for r.peekByte(true) == 'i' { + r.readKeyword("import") + if r.peekByte(true) == '(' { + r.nextByte(false) + for r.peekByte(true) != ')' && r.err == nil { + r.readImport(&imports) + } + r.nextByte(false) + } else { + r.readImport(&imports) + } + } + + for i := range imports { + unquoted, err := strconv.Unquote(imports[i]) + if err != nil { + fatalf("reading imports from %s: %v", file, err) + } + imports[i] = unquoted + } + + return imports +} diff --git a/src/cmd/dist/main.go b/src/cmd/dist/main.go index b0471bdc87b..a72a2607f9f 100644 --- a/src/cmd/dist/main.go +++ b/src/cmd/dist/main.go @@ -8,22 +8,158 @@ import ( "flag" "fmt" "os" + "runtime" "strconv" + "strings" ) -// cmdtab records the available commands. -var cmdtab = []struct { - name string - f func() -}{ - {"banner", cmdbanner}, - {"bootstrap", cmdbootstrap}, - {"clean", cmdclean}, - {"env", cmdenv}, - {"install", cmdinstall}, - {"list", cmdlist}, - {"test", cmdtest}, - {"version", cmdversion}, +func usage() { + xprintf(`usage: go tool dist [command] +Commands are: + +banner print installation banner +bootstrap rebuild everything +clean deletes all built files +env [-p] print environment (-p: include $PATH) +install [dir] install individual directory +list [-json] list all supported platforms +test [-h] run Go test(s) +version print Go version + +All commands take -v flags to emit extra information. +`) + xexit(2) +} + +// commands records the available commands. 
+var commands = map[string]func(){ + "banner": cmdbanner, + "bootstrap": cmdbootstrap, + "clean": cmdclean, + "env": cmdenv, + "install": cmdinstall, + "list": cmdlist, + "test": cmdtest, + "version": cmdversion, +} + +// main takes care of OS-specific startup and dispatches to xmain. +func main() { + os.Setenv("TERM", "dumb") // disable escape codes in clang errors + + // provide -check-armv6k first, before checking for $GOROOT so that + // it is possible to run this check without having $GOROOT available. + if len(os.Args) > 1 && os.Args[1] == "-check-armv6k" { + useARMv6K() // might fail with SIGILL + println("ARMv6K supported.") + os.Exit(0) + } + + gohostos = runtime.GOOS + switch gohostos { + case "darwin": + // Even on 64-bit platform, darwin uname -m prints i386. + // We don't support any of the OS X versions that run on 32-bit-only hardware anymore. + gohostarch = "amd64" + case "freebsd": + // Since FreeBSD 10 gcc is no longer part of the base system. + defaultclang = true + case "solaris": + // Even on 64-bit platform, solaris uname -m prints i86pc. + out := run("", CheckExit, "isainfo", "-n") + if strings.Contains(out, "amd64") { + gohostarch = "amd64" + } + if strings.Contains(out, "i386") { + gohostarch = "386" + } + case "plan9": + gohostarch = os.Getenv("objtype") + if gohostarch == "" { + fatalf("$objtype is unset") + } + case "windows": + exe = ".exe" + } + + sysinit() + + if gohostarch == "" { + // Default Unix system. 
+ out := run("", CheckExit, "uname", "-m") + switch { + case strings.Contains(out, "x86_64"), strings.Contains(out, "amd64"): + gohostarch = "amd64" + case strings.Contains(out, "86"): + gohostarch = "386" + case strings.Contains(out, "arm"): + gohostarch = "arm" + case strings.Contains(out, "aarch64"): + gohostarch = "arm64" + case strings.Contains(out, "ppc64le"): + gohostarch = "ppc64le" + case strings.Contains(out, "ppc64"): + gohostarch = "ppc64" + case strings.Contains(out, "mips64"): + gohostarch = "mips64" + if elfIsLittleEndian(os.Args[0]) { + gohostarch = "mips64le" + } + case strings.Contains(out, "mips"): + gohostarch = "mips" + if elfIsLittleEndian(os.Args[0]) { + gohostarch = "mipsle" + } + case strings.Contains(out, "s390x"): + gohostarch = "s390x" + case gohostos == "darwin": + if strings.Contains(run("", CheckExit, "uname", "-v"), "RELEASE_ARM_") { + gohostarch = "arm" + } + default: + fatalf("unknown architecture: %s", out) + } + } + + if gohostarch == "arm" || gohostarch == "mips64" || gohostarch == "mips64le" { + maxbg = min(maxbg, runtime.NumCPU()) + } + bginit() + + // The OS X 10.6 linker does not support external linking mode. + // See golang.org/issue/5130. + // + // OS X 10.6 does not work with clang either, but OS X 10.9 requires it. + // It seems to work with OS X 10.8, so we default to clang for 10.8 and later. + // See golang.org/issue/5822. + // + // Roughly, OS X 10.N shows up as uname release (N+4), + // so OS X 10.6 is uname version 10 and OS X 10.8 is uname version 12. 
+ if gohostos == "darwin" { + rel := run("", CheckExit, "uname", "-r") + if i := strings.Index(rel, "."); i >= 0 { + rel = rel[:i] + } + osx, _ := strconv.Atoi(rel) + if osx <= 6+4 { + goextlinkenabled = "0" + } + if osx >= 8+4 { + defaultclang = true + } + } + + if len(os.Args) > 1 && os.Args[1] == "-check-goarm" { + useVFPv1() // might fail with SIGILL + println("VFPv1 OK.") + useVFPv3() // might fail with SIGILL + println("VFPv3 OK.") + os.Exit(0) + } + + xinit() + xmain() + xexit(0) } // The OS-specific main calls into the portable code here. @@ -33,55 +169,15 @@ func xmain() { } cmd := os.Args[1] os.Args = os.Args[1:] // for flag parsing during cmd - for _, ct := range cmdtab { - if ct.name == cmd { - flag.Usage = func() { - fmt.Fprintf(os.Stderr, "usage: go tool dist %s [options]\n", cmd) - flag.PrintDefaults() - os.Exit(2) - } - ct.f() - return - } + flag.Usage = func() { + fmt.Fprintf(os.Stderr, "usage: go tool dist %s [options]\n", cmd) + flag.PrintDefaults() + os.Exit(2) } - - xprintf("unknown command %s\n", cmd) - usage() -} - -func xflagparse(maxargs int) { - flag.Var((*count)(&vflag), "v", "verbosity") - flag.Parse() - if maxargs >= 0 && flag.NArg() > maxargs { - flag.Usage() + if f, ok := commands[cmd]; ok { + f() + } else { + xprintf("unknown command %s\n", cmd) + usage() } } - -// count is a flag.Value that is like a flag.Bool and a flag.Int. -// If used as -name, it increments the count, but -name=x sets the count. -// Used for verbose flag -v. 
-type count int - -func (c *count) String() string { - return fmt.Sprint(int(*c)) -} - -func (c *count) Set(s string) error { - switch s { - case "true": - *c++ - case "false": - *c = 0 - default: - n, err := strconv.Atoi(s) - if err != nil { - return fmt.Errorf("invalid count %q", s) - } - *c = count(n) - } - return nil -} - -func (c *count) IsBoolFlag() bool { - return true -} diff --git a/src/cmd/dist/mkdeps.bash b/src/cmd/dist/mkdeps.bash deleted file mode 100755 index 71d3c371e48..00000000000 --- a/src/cmd/dist/mkdeps.bash +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash -# Copyright 2015 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -set -e - -# We need to test enough GOOS/GOARCH combinations to pick up all the -# package dependencies. -gooslist="windows linux darwin solaris" -goarchlist="386 amd64 arm arm64 ppc64" - -echo NOTE: errors about loading internal/syscall/windows are ok - -deps_of() { - for goos in $gooslist - do - for goarch in $goarchlist - do - GOOS=$goos GOARCH=$goarch go list -tags cmd_go_bootstrap -f '{{range .Deps}}{{$.ImportPath}} {{.}} -{{end}}' $* - done - done | sort -u | grep . 
| grep -v ' unsafe$' -} - -all="$(deps_of cmd/go | awk '{print $2}') cmd/go" -deps_of $all >tmp.all.deps - -( - echo '// generated by mkdeps.bash' - echo - echo 'package main' - echo - echo 'var builddeps = map[string][]string{' - for pkg in $all - do - echo -n "\"$pkg\": {" - for dep in $(awk -v pkg=$pkg '$1==pkg {print $2}' tmp.all.deps) - do - echo -n "\"$dep\"," - done - echo '},' - done - echo '}' -) |gofmt >deps.go - -rm -f tmp.all.deps diff --git a/src/cmd/dist/sys_windows.go b/src/cmd/dist/sys_windows.go index 05cb3e29792..6d1f82e0936 100644 --- a/src/cmd/dist/sys_windows.go +++ b/src/cmd/dist/sys_windows.go @@ -44,6 +44,6 @@ func sysinit() { case PROCESSOR_ARCHITECTURE_INTEL: gohostarch = "386" default: - fatal("unknown processor architecture") + fatalf("unknown processor architecture") } } diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go index 73432d31ea3..f35fbd4cb5f 100644 --- a/src/cmd/dist/test.go +++ b/src/cmd/dist/test.go @@ -14,6 +14,7 @@ import ( "os" "os/exec" "path/filepath" + "reflect" "regexp" "runtime" "strconv" @@ -23,6 +24,8 @@ import ( ) func cmdtest() { + gogcflags = os.Getenv("GO_GCFLAGS") + var t tester var noRebuild bool flag.BoolVar(&t.listMode, "list", false, "list available tests") @@ -57,11 +60,6 @@ type tester struct { banner string // prefix, or "" for none lastHeading string // last dir heading printed - goroot string - goarch string - gohostarch string - goos string - gohostos string cgoEnabled bool partial bool haveTime bool // the 'time' binary is available @@ -89,20 +87,17 @@ type distTest struct { fn func(*distTest) error } -func mustEnv(k string) string { - v := os.Getenv(k) - if v == "" { - log.Fatalf("Unset environment variable %v", k) - } - return v -} - func (t *tester) run() { - t.goroot = mustEnv("GOROOT") - t.goos = mustEnv("GOOS") - t.gohostos = mustEnv("GOHOSTOS") - t.goarch = mustEnv("GOARCH") - t.gohostarch = mustEnv("GOHOSTARCH") + timelog("start", "dist test") + + var exeSuffix string + if goos == 
"windows" { + exeSuffix = ".exe" + } + if _, err := os.Stat(filepath.Join(gobin, "go"+exeSuffix)); err == nil { + os.Setenv("PATH", fmt.Sprintf("%s%c%s", gobin, os.PathListSeparator, os.Getenv("PATH"))) + } + slurp, err := exec.Command("go", "env", "CGO_ENABLED").Output() if err != nil { log.Fatalf("Error running go env CGO_ENABLED: %v", err) @@ -111,6 +106,7 @@ func (t *tester) run() { if flag.NArg() > 0 && t.runRxStr != "" { log.Fatalf("the -run regular expression flag is mutually exclusive with test name arguments") } + t.runNames = flag.Args() if t.hasBash() { @@ -121,39 +117,31 @@ func (t *tester) run() { if t.rebuild { t.out("Building packages and commands.") - cmd := exec.Command("go", "install", "-a", "-v", "std", "cmd") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - log.Fatalf("building packages and commands: %v", err) - } + // Force rebuild the whole toolchain. + goInstall("go", append([]string{"-a", "-i"}, toolchain...)...) } - if t.iOS() { - // Install the Mach exception handler used to intercept - // EXC_BAD_ACCESS and convert it into a Go panic. This is - // necessary for a Go program running under lldb (the way - // we run tests). It is disabled by default because iOS - // apps are not allowed to access the exc_server symbol. - cmd := exec.Command("go", "install", "-a", "-tags", "lldb", "runtime/cgo") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - log.Fatalf("building mach exception handler: %v", err) - } - - defer func() { - cmd := exec.Command("go", "install", "-a", "runtime/cgo") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - log.Fatalf("reverting mach exception handler: %v", err) - } - }() + // Complete rebuild bootstrap, even with -no-rebuild. + // If everything is up-to-date, this is a no-op. 
+ // If everything is not up-to-date, the first checkNotStale + // during the test process will kill the tests, so we might + // as well install the world. + // Now that for example "go install cmd/compile" does not + // also install runtime (you need "go install -i cmd/compile" + // for that), it's easy for previous workflows like + // "rebuild the compiler and then run run.bash" + // to break if we don't automatically refresh things here. + // Rebuilding is a shortened bootstrap. + // See cmdbootstrap for a description of the overall process. + if !t.listMode { + goInstall("go", append([]string{"-i"}, toolchain...)...) + goInstall("go", append([]string{"-i"}, toolchain...)...) + goInstall("go", "std", "cmd") + checkNotStale("go", "std", "cmd") } t.timeoutScale = 1 - switch t.goarch { + switch goarch { case "arm": t.timeoutScale = 2 case "mips", "mipsle", "mips64", "mips64le": @@ -212,6 +200,7 @@ func (t *tester) run() { } } t.runPending(nil) + timelog("end", "dist test") if t.failed { fmt.Println("\nFAILED") os.Exit(1) @@ -237,6 +226,15 @@ func (t *tester) shouldRunTest(name string) bool { return false } +// goTest returns the beginning of the go test command line. +// Callers should use goTest and then pass flags overriding these +// defaults as later arguments in the command line. 
+func (t *tester) goTest() []string { + return []string{ + "go", "test", "-short", "-count=1", t.tags(), t.runFlag(""), + } +} + func (t *tester) tags() string { if t.iOS() { return "-tags=lldb" @@ -275,13 +273,15 @@ func (t *tester) registerStdTest(pkg string) { return nil } t.runPending(dt) + timelog("start", dt.name) + defer timelog("end", dt.name) ranGoTest = true args := []string{ "test", "-short", t.tags(), t.timeout(180), - "-gcflags=" + os.Getenv("GO_GCFLAGS"), + "-gcflags=all=" + gogcflags, } if t.race { args = append(args, "-race") @@ -311,6 +311,8 @@ func (t *tester) registerRaceBenchTest(pkg string) { return nil } t.runPending(dt) + timelog("start", dt.name) + defer timelog("end", dt.name) ranGoBench = true args := []string{ "test", @@ -343,7 +345,7 @@ func (t *tester) registerTests() { osarch := k t.tests = append(t.tests, distTest{ name: "vet/" + osarch, - heading: "go vet std cmd", + heading: "cmd/vet/all", fn: func(dt *distTest) error { t.addCmd(dt, "src/cmd/vet/all", "go", "run", "main.go", "-p="+osarch) return nil @@ -353,27 +355,6 @@ func (t *tester) registerTests() { return } - // This test needs its stdout/stderr to be terminals, so we don't run it from cmd/go's tests. - // See issue 18153. - if t.goos == "linux" { - t.tests = append(t.tests, distTest{ - name: "cmd_go_test_terminal", - heading: "cmd/go terminal test", - fn: func(dt *distTest) error { - t.runPending(dt) - if !stdOutErrAreTerminals() { - fmt.Println("skipping terminal test; stdout/stderr not terminals") - return nil - } - cmd := exec.Command("go", "test") - cmd.Dir = filepath.Join(os.Getenv("GOROOT"), "src/cmd/go/testdata/testterminal18153") - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - return cmd.Run() - }, - }) - } - // Fast path to avoid the ~1 second of `go list std cmd` when // the caller lists specific tests to run. (as the continuous // build coordinator does). 
@@ -391,7 +372,7 @@ func (t *tester) registerTests() { const format = "{{if (or .TestGoFiles .XTestGoFiles)}}{{.ImportPath}}{{end}}" cmd := exec.Command("go", "list", "-f", format) if t.race { - cmd.Args = append(cmd.Args, "-tags", "race") + cmd.Args = append(cmd.Args, "-tags=race") } cmd.Args = append(cmd.Args, "std") if !t.race { @@ -423,9 +404,9 @@ func (t *tester) registerTests() { testName := "runtime:cpu124" t.tests = append(t.tests, distTest{ name: testName, - heading: "GOMAXPROCS=2 runtime -cpu=1,2,4", + heading: "GOMAXPROCS=2 runtime -cpu=1,2,4 -quick", fn: func(dt *distTest) error { - cmd := t.addCmd(dt, "src", "go", "test", "-short", t.timeout(300), t.tags(), "runtime", "-cpu=1,2,4") + cmd := t.addCmd(dt, "src", t.goTest(), t.timeout(300), "runtime", "-cpu=1,2,4", "-quick") // We set GOMAXPROCS=2 in addition to -cpu=1,2,4 in order to test runtime bootstrap code, // creation of first goroutines and first garbage collections in the parallel setting. cmd.Env = append(os.Environ(), "GOMAXPROCS=2") @@ -434,20 +415,45 @@ func (t *tester) registerTests() { }) } + // This test needs its stdout/stderr to be terminals, so we don't run it from cmd/go's tests. + // See issue 18153. + if goos == "linux" { + t.tests = append(t.tests, distTest{ + name: "cmd_go_test_terminal", + heading: "cmd/go terminal test", + fn: func(dt *distTest) error { + t.runPending(dt) + timelog("start", dt.name) + defer timelog("end", dt.name) + if !stdOutErrAreTerminals() { + fmt.Println("skipping terminal test; stdout/stderr not terminals") + return nil + } + cmd := exec.Command("go", "test") + cmd.Dir = filepath.Join(os.Getenv("GOROOT"), "src/cmd/go/testdata/testterminal18153") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() + }, + }) + } + // On the builders only, test that a moved GOROOT still works. // Fails on iOS because CC_FOR_TARGET refers to clangwrap.sh // in the unmoved GOROOT. // Fails on Android with an exec format error. 
// Fails on plan9 with "cannot find GOROOT" (issue #21016). - if os.Getenv("GO_BUILDER_NAME") != "" && t.goos != "android" && !t.iOS() && t.goos != "plan9" { + if os.Getenv("GO_BUILDER_NAME") != "" && goos != "android" && !t.iOS() && goos != "plan9" { t.tests = append(t.tests, distTest{ name: "moved_goroot", heading: "moved GOROOT", fn: func(dt *distTest) error { t.runPending(dt) - moved := t.goroot + "-moved" - if err := os.Rename(t.goroot, moved); err != nil { - if t.goos == "windows" { + timelog("start", dt.name) + defer timelog("end", dt.name) + moved := goroot + "-moved" + if err := os.Rename(goroot, moved); err != nil { + if goos == "windows" { // Fails on Windows (with "Access is denied") if a process // or binary is in this directory. For instance, using all.bat // when run from c:\workdir\go\src fails here @@ -462,18 +468,20 @@ func (t *tester) registerTests() { } // Run `go test fmt` in the moved GOROOT. + // Disable GOCACHE because it points back at the old GOROOT. cmd := exec.Command(filepath.Join(moved, "bin", "go"), "test", "fmt") cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr // Don't set GOROOT in the environment. for _, e := range os.Environ() { - if !strings.HasPrefix(e, "GOROOT=") { + if !strings.HasPrefix(e, "GOROOT=") && !strings.HasPrefix(e, "GOCACHE=") { cmd.Env = append(cmd.Env, e) } } + cmd.Env = append(cmd.Env, "GOCACHE=off") err := cmd.Run() - if rerr := os.Rename(moved, t.goroot); rerr != nil { + if rerr := os.Rename(moved, goroot); rerr != nil { log.Fatalf("failed to restore GOROOT: %v", rerr) } return err @@ -491,7 +499,7 @@ func (t *tester) registerTests() { } // ARM libgcc may be Thumb, which internal linking does not support. 
- if t.goarch == "arm" { + if goarch == "arm" { break } @@ -504,14 +512,14 @@ func (t *tester) registerTests() { name: "nolibgcc:" + pkg, heading: "Testing without libgcc.", fn: func(dt *distTest) error { - t.addCmd(dt, "src", "go", "test", "-short", "-ldflags=-linkmode=internal -libgcc=none", t.tags(), pkg, t.runFlag(run)) + t.addCmd(dt, "src", t.goTest(), "-ldflags=-linkmode=internal -libgcc=none", pkg, t.runFlag(run)) return nil }, }) } // Test internal linking of PIE binaries where it is supported. - if t.goos == "linux" && t.goarch == "amd64" && !isAlpineLinux() { + if goos == "linux" && goarch == "amd64" && !isAlpineLinux() { // Issue 18243: We don't have a way to set the default // dynamic linker used in internal linking mode. So // this test is skipped on Alpine. @@ -519,7 +527,7 @@ func (t *tester) registerTests() { name: "pie_internal", heading: "internal linking of -buildmode=pie", fn: func(dt *distTest) error { - t.addCmd(dt, "src", "go", "test", "reflect", "-short", "-buildmode=pie", "-ldflags=-linkmode=internal", t.timeout(60), t.tags(), t.runFlag("")) + t.addCmd(dt, "src", t.goTest(), "reflect", "-buildmode=pie", "-ldflags=-linkmode=internal", t.timeout(60)) return nil }, }) @@ -530,11 +538,19 @@ func (t *tester) registerTests() { name: "sync_cpu", heading: "sync -cpu=10", fn: func(dt *distTest) error { - t.addCmd(dt, "src", "go", "test", "sync", "-short", t.timeout(120), t.tags(), "-cpu=10", t.runFlag("")) + t.addCmd(dt, "src", t.goTest(), "sync", t.timeout(120), "-cpu=10", t.runFlag("")) return nil }, }) + if t.raceDetectorSupported() { + t.tests = append(t.tests, distTest{ + name: "race", + heading: "Testing race detector", + fn: t.raceTest, + }) + } + if t.cgoEnabled && !t.iOS() { // Disabled on iOS. 
golang.org/issue/15919 t.tests = append(t.tests, distTest{ @@ -567,6 +583,26 @@ func (t *tester) registerTests() { }, }) } + if swig, _ := exec.LookPath("swig"); swig != "" && goos != "android" { + t.tests = append(t.tests, distTest{ + name: "swig_stdio", + heading: "../misc/swig/stdio", + fn: func(dt *distTest) error { + t.addCmd(dt, "misc/swig/stdio", t.goTest()) + return nil + }, + }) + if cxx, _ := exec.LookPath(compilerEnvLookup(defaultcxx, goos, goarch)); cxx != "" { + t.tests = append(t.tests, distTest{ + name: "swig_callback", + heading: "../misc/swig/callback", + fn: func(dt *distTest) error { + t.addCmd(dt, "misc/swig/callback", t.goTest()) + return nil + }, + }) + } + } } if t.cgoEnabled { t.tests = append(t.tests, distTest{ @@ -576,18 +612,15 @@ func (t *tester) registerTests() { }) } - if t.raceDetectorSupported() { - t.tests = append(t.tests, distTest{ - name: "race", - heading: "Testing race detector", - fn: t.raceTest, - }) - } - - if t.hasBash() && t.cgoEnabled && t.goos != "android" && t.goos != "darwin" { + if t.hasBash() && t.cgoEnabled && goos != "android" && goos != "darwin" { t.registerTest("testgodefs", "../misc/cgo/testgodefs", "./test.bash") } - if t.cgoEnabled { + + // Don't run these tests with $GO_GCFLAGS because most of them + // assume that they can run "go install" with no -gcflags and not + // recompile the entire standard library. If make.bash ran with + // special -gcflags, that's not true. 
+ if t.cgoEnabled && gogcflags == "" { if t.cgoTestSOSupported() { t.tests = append(t.tests, distTest{ name: "testso", @@ -608,40 +641,40 @@ func (t *tester) registerTests() { t.registerHostTest("testcarchive", "../misc/cgo/testcarchive", "misc/cgo/testcarchive", "carchive_test.go") } if t.supportedBuildmode("c-shared") { - t.registerTest("testcshared", "../misc/cgo/testcshared", "./test.bash") + t.registerHostTest("testcshared", "../misc/cgo/testcshared", "misc/cgo/testcshared", "cshared_test.go") } if t.supportedBuildmode("shared") { - t.registerTest("testshared", "../misc/cgo/testshared", "go", "test") + t.registerTest("testshared", "../misc/cgo/testshared", t.goTest()) } if t.supportedBuildmode("plugin") { t.registerTest("testplugin", "../misc/cgo/testplugin", "./test.bash") } - if t.gohostos == "linux" && t.goarch == "amd64" { + if gohostos == "linux" && goarch == "amd64" { t.registerTest("testasan", "../misc/cgo/testasan", "go", "run", "main.go") } - if t.goos == "linux" && t.goarch == "amd64" { - t.registerTest("testsanitizers", "../misc/cgo/testsanitizers", "./test.bash") + if goos == "linux" && goarch == "amd64" { + t.registerHostTest("testsanitizers/msan", "../misc/cgo/testsanitizers", "misc/cgo/testsanitizers", ".") } - if t.hasBash() && t.goos != "android" && !t.iOS() && t.gohostos != "windows" { - t.registerTest("cgo_errors", "../misc/cgo/errors", "./test.bash") + if t.hasBash() && goos != "android" && !t.iOS() && gohostos != "windows" { + t.registerHostTest("cgo_errors", "../misc/cgo/errors", "misc/cgo/errors", ".") } - if t.gohostos == "linux" && t.extLink() { + if gohostos == "linux" && t.extLink() { t.registerTest("testsigfwd", "../misc/cgo/testsigfwd", "go", "run", "main.go") } } // Doc tests only run on builders. // They find problems approximately never. 
- if t.hasBash() && t.goos != "nacl" && t.goos != "android" && !t.iOS() && os.Getenv("GO_BUILDER_NAME") != "" { + if t.hasBash() && goos != "nacl" && goos != "android" && !t.iOS() && os.Getenv("GO_BUILDER_NAME") != "" { t.registerTest("doc_progs", "../doc/progs", "time", "go", "run", "run.go") t.registerTest("wiki", "../doc/articles/wiki", "./test.bash") t.registerTest("codewalk", "../doc/codewalk", "time", "./run") } - if t.goos != "android" && !t.iOS() { - t.registerTest("bench_go1", "../test/bench/go1", "go", "test", t.timeout(600), t.runFlag("")) + if goos != "android" && !t.iOS() { + t.registerTest("bench_go1", "../test/bench/go1", t.goTest(), t.timeout(600)) } - if t.goos != "android" && !t.iOS() { + if goos != "android" && !t.iOS() { // Only start multiple test dir shards on builders, // where they get distributed to multiple machines. // See issue 20141. @@ -658,16 +691,16 @@ func (t *tester) registerTests() { }) } } - if t.goos != "nacl" && t.goos != "android" && !t.iOS() { + if goos != "nacl" && goos != "android" && !t.iOS() { t.tests = append(t.tests, distTest{ name: "api", heading: "API check", fn: func(dt *distTest) error { if t.compileOnly { - t.addCmd(dt, "src", "go", "build", filepath.Join(t.goroot, "src/cmd/api/run.go")) + t.addCmd(dt, "src", "go", "build", filepath.Join(goroot, "src/cmd/api/run.go")) return nil } - t.addCmd(dt, "src", "go", "run", filepath.Join(t.goroot, "src/cmd/api/run.go")) + t.addCmd(dt, "src", "go", "run", filepath.Join(goroot, "src/cmd/api/run.go")) return nil }, }) @@ -685,7 +718,8 @@ func (t *tester) isRegisteredTestName(testName string) bool { return false } -func (t *tester) registerTest1(seq bool, name, dirBanner, bin string, args ...string) { +func (t *tester) registerTest1(seq bool, name, dirBanner string, cmdline ...interface{}) { + bin, args := flattenCmdline(cmdline) if bin == "time" && !t.haveTime { bin, args = args[0], args[1:] } @@ -698,20 +732,22 @@ func (t *tester) registerTest1(seq bool, name, dirBanner, bin 
string, args ...st fn: func(dt *distTest) error { if seq { t.runPending(dt) - return t.dirCmd(filepath.Join(t.goroot, "src", dirBanner), bin, args...).Run() + timelog("start", name) + defer timelog("end", name) + return t.dirCmd(filepath.Join(goroot, "src", dirBanner), bin, args).Run() } - t.addCmd(dt, filepath.Join(t.goroot, "src", dirBanner), bin, args...) + t.addCmd(dt, filepath.Join(goroot, "src", dirBanner), bin, args) return nil }, }) } -func (t *tester) registerTest(name, dirBanner, bin string, args ...string) { - t.registerTest1(false, name, dirBanner, bin, args...) +func (t *tester) registerTest(name, dirBanner string, cmdline ...interface{}) { + t.registerTest1(false, name, dirBanner, cmdline...) } -func (t *tester) registerSeqTest(name, dirBanner, bin string, args ...string) { - t.registerTest1(true, name, dirBanner, bin, args...) +func (t *tester) registerSeqTest(name, dirBanner string, cmdline ...interface{}) { + t.registerTest1(true, name, dirBanner, cmdline...) } func (t *tester) bgDirCmd(dir, bin string, args ...string) *exec.Cmd { @@ -719,12 +755,13 @@ func (t *tester) bgDirCmd(dir, bin string, args ...string) *exec.Cmd { if filepath.IsAbs(dir) { cmd.Dir = dir } else { - cmd.Dir = filepath.Join(t.goroot, dir) + cmd.Dir = filepath.Join(goroot, dir) } return cmd } -func (t *tester) dirCmd(dir, bin string, args ...string) *exec.Cmd { +func (t *tester) dirCmd(dir string, cmdline ...interface{}) *exec.Cmd { + bin, args := flattenCmdline(cmdline) cmd := t.bgDirCmd(dir, bin, args...) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr @@ -734,7 +771,52 @@ func (t *tester) dirCmd(dir, bin string, args ...string) *exec.Cmd { return cmd } -func (t *tester) addCmd(dt *distTest, dir, bin string, args ...string) *exec.Cmd { +// flattenCmdline flattens a mixture of string and []string as single list +// and then interprets it as a command line: first element is binary, then args. 
+func flattenCmdline(cmdline []interface{}) (bin string, args []string) { + var list []string + for _, x := range cmdline { + switch x := x.(type) { + case string: + list = append(list, x) + case []string: + list = append(list, x...) + default: + panic("invalid addCmd argument type: " + reflect.TypeOf(x).String()) + } + } + + // The go command is too picky about duplicated flags. + // Drop all but the last of the allowed duplicated flags. + drop := make([]bool, len(list)) + have := map[string]int{} + for i := 1; i < len(list); i++ { + j := strings.Index(list[i], "=") + if j < 0 { + continue + } + flag := list[i][:j] + switch flag { + case "-run", "-tags": + if have[flag] != 0 { + drop[have[flag]] = true + } + have[flag] = i + } + } + out := list[:0] + for i, x := range list { + if !drop[i] { + out = append(out, x) + } + } + list = out + + return list[0], list[1:] +} + +func (t *tester) addCmd(dt *distTest, dir string, cmdline ...interface{}) *exec.Cmd { + bin, args := flattenCmdline(cmdline) w := &work{ dt: dt, cmd: t.bgDirCmd(dir, bin, args...), @@ -744,7 +826,7 @@ func (t *tester) addCmd(dt *distTest, dir, bin string, args ...string) *exec.Cmd } func (t *tester) iOS() bool { - return t.goos == "darwin" && (t.goarch == "arm" || t.goarch == "arm64") + return goos == "darwin" && (goarch == "arm" || goarch == "arm64") } func (t *tester) out(v string) { @@ -755,11 +837,11 @@ func (t *tester) out(v string) { } func (t *tester) extLink() bool { - pair := t.gohostos + "-" + t.goarch + pair := gohostos + "-" + goarch switch pair { case "android-arm", "darwin-arm", "darwin-arm64", - "dragonfly-386", "dragonfly-amd64", + "dragonfly-amd64", "freebsd-386", "freebsd-amd64", "freebsd-arm", "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-mips64", "linux-mips64le", "linux-mips", "linux-mipsle", "linux-s390x", "netbsd-386", "netbsd-amd64", @@ -780,25 +862,25 @@ func (t *tester) extLink() bool { } func (t *tester) internalLink() bool { - if 
t.gohostos == "dragonfly" { + if gohostos == "dragonfly" { // linkmode=internal fails on dragonfly since errno is a TLS relocation. return false } - if t.gohostarch == "ppc64le" { + if gohostarch == "ppc64le" { // linkmode=internal fails on ppc64le because cmd/link doesn't // handle the TOC correctly (issue 15409). return false } - if t.goos == "android" { + if goos == "android" { return false } - if t.goos == "darwin" && (t.goarch == "arm" || t.goarch == "arm64") { + if goos == "darwin" && (goarch == "arm" || goarch == "arm64") { return false } // Internally linking cgo is incomplete on some architectures. // https://golang.org/issue/10373 // https://golang.org/issue/14449 - if t.goarch == "arm64" || t.goarch == "mips64" || t.goarch == "mips64le" || t.goarch == "mips" || t.goarch == "mipsle" { + if goarch == "arm64" || goarch == "mips64" || goarch == "mips64le" || goarch == "mips" || goarch == "mipsle" { return false } if isAlpineLinux() { @@ -809,7 +891,7 @@ func (t *tester) internalLink() bool { } func (t *tester) supportedBuildmode(mode string) bool { - pair := t.goos + "-" + t.goarch + pair := goos + "-" + goarch switch mode { case "c-archive": if !t.extLink() { @@ -817,16 +899,17 @@ func (t *tester) supportedBuildmode(mode string) bool { } switch pair { case "darwin-386", "darwin-amd64", "darwin-arm", "darwin-arm64", - "linux-amd64", "linux-386", "linux-ppc64le", + "linux-amd64", "linux-386", "linux-ppc64le", "linux-s390x", "windows-amd64", "windows-386": return true } return false case "c-shared": switch pair { - case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", + case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-s390x", "darwin-amd64", "darwin-386", - "android-arm", "android-arm64", "android-386": + "android-arm", "android-arm64", "android-386", + "windows-amd64", "windows-386": return true } return false @@ -837,21 +920,25 @@ func (t *tester) supportedBuildmode(mode string) bool { } return false case "plugin": 
- if os.Getenv("GO_BUILDER_NAME") == "linux-amd64-noopt" { - // Skip the plugin tests on noopt. They're - // causing build failures potentially - // obscuring other issues. This is hopefully a - // temporary workaround. See golang.org/issue/17937. - return false - } - // linux-arm64 is missing because it causes the external linker // to crash, see https://golang.org/issue/17138 switch pair { - case "linux-386", "linux-amd64", "linux-arm", "linux-s390x": + case "linux-386", "linux-amd64", "linux-arm", "linux-s390x", "linux-ppc64le": + return true + case "darwin-amd64": return true } return false + case "pie": + switch pair { + case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-s390x", + "android-amd64", "android-arm", "android-arm64", "android-386": + return true + case "darwin-amd64": + return true + } + return false + default: log.Fatalf("internal error: unknown buildmode %s", mode) return false @@ -864,15 +951,17 @@ func (t *tester) registerHostTest(name, heading, dir, pkg string) { heading: heading, fn: func(dt *distTest) error { t.runPending(dt) + timelog("start", name) + defer timelog("end", name) return t.runHostTest(dir, pkg) }, }) } func (t *tester) runHostTest(dir, pkg string) error { - defer os.Remove(filepath.Join(t.goroot, dir, "test.test")) - cmd := t.dirCmd(dir, "go", "test", t.tags(), "-c", "-o", "test.test", pkg) - cmd.Env = append(os.Environ(), "GOARCH="+t.gohostarch, "GOOS="+t.gohostos) + defer os.Remove(filepath.Join(goroot, dir, "test.test")) + cmd := t.dirCmd(dir, t.goTest(), "-c", "-o", "test.test", pkg) + cmd.Env = append(os.Environ(), "GOARCH="+gohostarch, "GOOS="+gohostos) if err := cmd.Run(); err != nil { return err } @@ -880,17 +969,13 @@ func (t *tester) runHostTest(dir, pkg string) error { } func (t *tester) cgoTest(dt *distTest) error { - env := append(os.Environ(), "GOTRACEBACK=2") - - cmd := t.addCmd(dt, "misc/cgo/test", "go", "test", t.tags(), "-ldflags", "-linkmode=auto", t.runFlag("")) - cmd.Env = 
env + t.addCmd(dt, "misc/cgo/test", t.goTest(), "-ldflags", "-linkmode=auto") if t.internalLink() { - cmd := t.addCmd(dt, "misc/cgo/test", "go", "test", "-ldflags", "-linkmode=internal", t.runFlag("")) - cmd.Env = env + t.addCmd(dt, "misc/cgo/test", t.goTest(), "-tags=internal", "-ldflags", "-linkmode=internal") } - pair := t.gohostos + "-" + t.goarch + pair := gohostos + "-" + goarch switch pair { case "darwin-386", "darwin-amd64", "openbsd-386", "openbsd-amd64", @@ -899,24 +984,17 @@ func (t *tester) cgoTest(dt *distTest) error { if !t.extLink() { break } - cmd := t.addCmd(dt, "misc/cgo/test", "go", "test", "-ldflags", "-linkmode=external") - cmd.Env = env - cmd = t.addCmd(dt, "misc/cgo/test", "go", "test", "-ldflags", "-linkmode=external -s") - cmd.Env = env + t.addCmd(dt, "misc/cgo/test", t.goTest(), "-ldflags", "-linkmode=external") + t.addCmd(dt, "misc/cgo/test", t.goTest(), "-ldflags", "-linkmode=external -s") case "android-arm", - "dragonfly-386", "dragonfly-amd64", + "dragonfly-amd64", "freebsd-386", "freebsd-amd64", "freebsd-arm", "linux-386", "linux-amd64", "linux-arm", "linux-ppc64le", "linux-s390x", "netbsd-386", "netbsd-amd64": - cmd := t.addCmd(dt, "misc/cgo/test", "go", "test", "-ldflags", "-linkmode=external") - cmd.Env = env - - cmd = t.addCmd(dt, "misc/cgo/testtls", "go", "test", "-ldflags", "-linkmode=auto") - cmd.Env = env - - cmd = t.addCmd(dt, "misc/cgo/testtls", "go", "test", "-ldflags", "-linkmode=external") - cmd.Env = env + t.addCmd(dt, "misc/cgo/test", t.goTest(), "-ldflags", "-linkmode=external") + t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-ldflags", "-linkmode=auto") + t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-ldflags", "-linkmode=external") switch pair { case "netbsd-386", "netbsd-amd64": @@ -928,49 +1006,26 @@ func (t *tester) cgoTest(dt *distTest) error { // static linking on FreeBSD/ARM with clang. (cgo depends on // -fPIC fundamentally.) 
default: - cc := mustEnv("CC") cmd := t.dirCmd("misc/cgo/test", - cc, "-xc", "-o", "/dev/null", "-static", "-") - cmd.Env = env + compilerEnvLookup(defaultcc, goos, goarch), "-xc", "-o", "/dev/null", "-static", "-") cmd.Stdin = strings.NewReader("int main() {}") if err := cmd.Run(); err != nil { fmt.Println("No support for static linking found (lacks libc.a?), skip cgo static linking test.") } else { - if t.goos != "android" { - cmd = t.addCmd(dt, "misc/cgo/testtls", "go", "test", "-ldflags", `-linkmode=external -extldflags "-static -pthread"`) - cmd.Env = env + if goos != "android" { + t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-ldflags", `-linkmode=external -extldflags "-static -pthread"`) } - - cmd = t.addCmd(dt, "misc/cgo/nocgo", "go", "test") - cmd.Env = env - - cmd = t.addCmd(dt, "misc/cgo/nocgo", "go", "test", "-ldflags", `-linkmode=external`) - cmd.Env = env - - if t.goos != "android" { - cmd = t.addCmd(dt, "misc/cgo/nocgo", "go", "test", "-ldflags", `-linkmode=external -extldflags "-static -pthread"`) - cmd.Env = env + t.addCmd(dt, "misc/cgo/nocgo", t.goTest()) + t.addCmd(dt, "misc/cgo/nocgo", t.goTest(), "-ldflags", `-linkmode=external`) + if goos != "android" { + t.addCmd(dt, "misc/cgo/nocgo", t.goTest(), "-ldflags", `-linkmode=external -extldflags "-static -pthread"`) } } - if pair != "freebsd-amd64" { // clang -pie fails to link misc/cgo/test - cmd := t.dirCmd("misc/cgo/test", - cc, "-xc", "-o", "/dev/null", "-pie", "-") - cmd.Env = env - cmd.Stdin = strings.NewReader("int main() {}") - if err := cmd.Run(); err != nil { - fmt.Println("No support for -pie found, skip cgo PIE test.") - } else { - cmd = t.addCmd(dt, "misc/cgo/test", "go", "test", "-ldflags", `-linkmode=external -extldflags "-pie"`) - cmd.Env = env - - cmd = t.addCmd(dt, "misc/cgo/testtls", "go", "test", "-ldflags", `-linkmode=external -extldflags "-pie"`) - cmd.Env = env - - cmd = t.addCmd(dt, "misc/cgo/nocgo", "go", "test", "-ldflags", `-linkmode=external -extldflags "-pie"`) - 
cmd.Env = env - - } + if t.supportedBuildmode("pie") { + t.addCmd(dt, "misc/cgo/test", t.goTest(), "-buildmode=pie") + t.addCmd(dt, "misc/cgo/testtls", t.goTest(), "-buildmode=pie") + t.addCmd(dt, "misc/cgo/nocgo", t.goTest(), "-buildmode=pie") } } } @@ -985,6 +1040,7 @@ func (t *tester) cgoTest(dt *distTest) error { // running in parallel with earlier tests, or if it has some other reason // for needing the earlier tests to be done. func (t *tester) runPending(nextTest *distTest) { + checkNotStale("go", "std", "cmd") worklist := t.worklist t.worklist = nil for _, w := range worklist { @@ -992,10 +1048,13 @@ func (t *tester) runPending(nextTest *distTest) { w.end = make(chan bool) go func(w *work) { if !<-w.start { + timelog("skip", w.dt.name) w.out = []byte(fmt.Sprintf("skipped due to earlier error\n")) } else { + timelog("start", w.dt.name) w.out, w.err = w.cmd.CombinedOutput() } + timelog("end", w.dt.name) w.end <- true }(w) } @@ -1034,6 +1093,7 @@ func (t *tester) runPending(nextTest *distTest) { log.Printf("Failed: %v", w.err) t.failed = true } + checkNotStale("go", "std", "cmd") } if t.failed && !t.keepGoing { log.Fatal("FAILED") @@ -1051,15 +1111,15 @@ func (t *tester) runPending(nextTest *distTest) { } func (t *tester) cgoTestSOSupported() bool { - if t.goos == "android" || t.iOS() { + if goos == "android" || t.iOS() { // No exec facility on Android or iOS. return false } - if t.goarch == "ppc64" { + if goarch == "ppc64" { // External linking not implemented on ppc64 (issue #8912). return false } - if t.goarch == "mips64le" || t.goarch == "mips64" { + if goarch == "mips64le" || goarch == "mips64" { // External linking not implemented on mips64. 
return false } @@ -1069,7 +1129,10 @@ func (t *tester) cgoTestSOSupported() bool { func (t *tester) cgoTestSO(dt *distTest, testpath string) error { t.runPending(dt) - dir := filepath.Join(t.goroot, testpath) + timelog("start", dt.name) + defer timelog("end", dt.name) + + dir := filepath.Join(goroot, testpath) // build shared object output, err := exec.Command("go", "env", "CC").Output() @@ -1088,7 +1151,7 @@ func (t *tester) cgoTestSO(dt *distTest, testpath string) error { ext := "so" args := append(gogccflags, "-shared") - switch t.goos { + switch goos { case "darwin": ext = "dylib" args = append(args, "-undefined", "suppress", "-flat_namespace") @@ -1099,7 +1162,7 @@ func (t *tester) cgoTestSO(dt *distTest, testpath string) error { sofname := "libcgosotest." + ext args = append(args, "-o", sofname, "cgoso_c.c") - if err := t.dirCmd(dir, cc, args...).Run(); err != nil { + if err := t.dirCmd(dir, cc, args).Run(); err != nil { return err } defer os.Remove(filepath.Join(dir, sofname)) @@ -1110,16 +1173,16 @@ func (t *tester) cgoTestSO(dt *distTest, testpath string) error { defer os.Remove(filepath.Join(dir, "main.exe")) cmd := t.dirCmd(dir, "./main.exe") - if t.goos != "windows" { + if goos != "windows" { s := "LD_LIBRARY_PATH" - if t.goos == "darwin" { + if goos == "darwin" { s = "DYLD_LIBRARY_PATH" } cmd.Env = append(os.Environ(), s+"=.") // On FreeBSD 64-bit architectures, the 32-bit linker looks for // different environment variables. 
- if t.goos == "freebsd" && t.gohostarch == "386" { + if goos == "freebsd" && gohostarch == "386" { cmd.Env = append(cmd.Env, "LD_32_LIBRARY_PATH=.") } } @@ -1127,7 +1190,7 @@ func (t *tester) cgoTestSO(dt *distTest, testpath string) error { } func (t *tester) hasBash() bool { - switch t.gohostos { + switch gohostos { case "windows", "plan9": return false } @@ -1135,11 +1198,11 @@ func (t *tester) hasBash() bool { } func (t *tester) raceDetectorSupported() bool { - switch t.gohostos { + switch gohostos { case "linux", "darwin", "freebsd", "windows": // The race detector doesn't work on Alpine Linux: // golang.org/issue/14481 - return t.cgoEnabled && t.goarch == "amd64" && t.gohostos == t.goos && !isAlpineLinux() + return t.cgoEnabled && goarch == "amd64" && gohostos == goos && !isAlpineLinux() } return false } @@ -1160,21 +1223,21 @@ func (t *tester) runFlag(rx string) string { } func (t *tester) raceTest(dt *distTest) error { - t.addCmd(dt, "src", "go", "test", "-race", "-i", "runtime/race", "flag", "os", "os/exec") - t.addCmd(dt, "src", "go", "test", "-race", t.runFlag("Output"), "runtime/race") - t.addCmd(dt, "src", "go", "test", "-race", "-short", t.runFlag("TestParse|TestEcho|TestStdinCloseRace|TestClosedPipeRace"), "flag", "os", "os/exec") + t.addCmd(dt, "src", t.goTest(), "-race", "-i", "runtime/race", "flag", "os", "os/exec") + t.addCmd(dt, "src", t.goTest(), "-race", t.runFlag("Output"), "runtime/race") + t.addCmd(dt, "src", t.goTest(), "-race", t.runFlag("TestParse|TestEcho|TestStdinCloseRace|TestClosedPipeRace"), "flag", "os", "os/exec") // We don't want the following line, because it // slows down all.bash (by 10 seconds on my laptop). // The race builder should catch any error here, but doesn't. // TODO(iant): Figure out how to catch this. 
- // t.addCmd(dt, "src", "go", "test", "-race", "-run=TestParallelTest", "cmd/go") + // t.addCmd(dt, "src", t.goTest(), "-race", "-run=TestParallelTest", "cmd/go") if t.cgoEnabled { - cmd := t.addCmd(dt, "misc/cgo/test", "go", "test", "-race", "-short", t.runFlag("")) + cmd := t.addCmd(dt, "misc/cgo/test", t.goTest(), "-race") cmd.Env = append(os.Environ(), "GOTRACEBACK=2") } if t.extLink() { // Test with external linking; see issue 9133. - t.addCmd(dt, "src", "go", "test", "-race", "-short", "-ldflags=-linkmode=external", t.runFlag("TestParse|TestEcho|TestStdinCloseRace"), "flag", "os/exec") + t.addCmd(dt, "src", t.goTest(), "-race", "-ldflags=-linkmode=external", t.runFlag("TestParse|TestEcho|TestStdinCloseRace"), "flag", "os/exec") } return nil } @@ -1189,7 +1252,7 @@ func (t *tester) testDirTest(dt *distTest, shard, shards int) error { runtest.Do(func() { const exe = "runtest.exe" // named exe for Windows, but harmless elsewhere cmd := t.dirCmd("test", "go", "build", "-o", exe, "run.go") - cmd.Env = append(os.Environ(), "GOOS="+t.gohostos, "GOARCH="+t.gohostarch) + cmd.Env = append(os.Environ(), "GOOS="+gohostos, "GOARCH="+gohostarch) runtest.exe = filepath.Join(cmd.Dir, exe) if err := cmd.Run(); err != nil { runtest.err = err @@ -1229,7 +1292,7 @@ var funcBenchmark = []byte("\nfunc Benchmark") // second or two per package, and this function returns false for // about 100 packages. 
func (t *tester) packageHasBenchmarks(pkg string) bool { - pkgDir := filepath.Join(t.goroot, "src", pkg) + pkgDir := filepath.Join(goroot, "src", pkg) d, err := os.Open(pkgDir) if err != nil { return true // conservatively diff --git a/src/cmd/dist/util.go b/src/cmd/dist/util.go index 511978f2f57..7e27bbb0644 100644 --- a/src/cmd/dist/util.go +++ b/src/cmd/dist/util.go @@ -6,13 +6,13 @@ package main import ( "bytes" + "flag" "fmt" "io" "io/ioutil" "os" "os/exec" "path/filepath" - "runtime" "sort" "strconv" "strings" @@ -51,18 +51,6 @@ func uniq(list []string) []string { return keep } -// splitlines returns a slice with the result of splitting -// the input p after each \n. -func splitlines(p string) []string { - return strings.SplitAfter(p, "\n") -} - -// splitfields replaces the vector v with the result of splitting -// the input p into non-empty fields containing no spaces. -func splitfields(p string) []string { - return strings.Fields(p) -} - const ( CheckExit = 1 << iota ShowOutput @@ -74,7 +62,7 @@ var outputLock sync.Mutex // run runs the command line cmd in dir. // If mode has ShowOutput set and Background unset, run passes cmd's output to // stdout/stderr directly. Otherwise, run returns cmd's output as a string. -// If mode has CheckExit set and the command fails, run calls fatal. +// If mode has CheckExit set and the command fails, run calls fatalf. // If mode has Background set, this command is being run as a // Background job. Only bgrun should use the Background mode, // not other callers. 
@@ -109,11 +97,11 @@ func run(dir string, mode int, cmd ...string) string { } outputLock.Unlock() if mode&Background != 0 { - // Prevent fatal from waiting on our own goroutine's + // Prevent fatalf from waiting on our own goroutine's // bghelper to exit: bghelpers.Done() } - fatal("FAILED: %v: %v", strings.Join(cmd, " "), err) + fatalf("FAILED: %v: %v", strings.Join(cmd, " "), err) } if mode&ShowOutput != 0 { outputLock.Lock() @@ -191,7 +179,7 @@ func bgwait(wg *sync.WaitGroup) { func xgetwd() string { wd, err := os.Getwd() if err != nil { - fatal("%s", err) + fatalf("%s", err) } return wd } @@ -201,11 +189,11 @@ func xgetwd() string { func xrealwd(path string) string { old := xgetwd() if err := os.Chdir(path); err != nil { - fatal("chdir %s: %v", path, err) + fatalf("chdir %s: %v", path, err) } real := xgetwd() if err := os.Chdir(old); err != nil { - fatal("chdir %s: %v", old, err) + fatalf("chdir %s: %v", old, err) } return real } @@ -231,16 +219,11 @@ func mtime(p string) time.Time { return fi.ModTime() } -// isabs reports whether p is an absolute path. -func isabs(p string) bool { - return filepath.IsAbs(p) -} - // readfile returns the content of the named file. func readfile(file string) string { data, err := ioutil.ReadFile(file) if err != nil { - fatal("%v", err) + fatalf("%v", err) } return string(data) } @@ -250,12 +233,12 @@ const ( writeSkipSame ) -// writefile writes b to the named file, creating it if needed. +// writefile writes text to the named file, creating it if needed. // if exec is non-zero, marks the file as executable. // If the file already exists and has the expected content, // it is not rewritten, to avoid changing the time stamp. 
-func writefile(b, file string, flag int) { - new := []byte(b) +func writefile(text, file string, flag int) { + new := []byte(text) if flag&writeSkipSame != 0 { old, err := ioutil.ReadFile(file) if err == nil && bytes.Equal(old, new) { @@ -268,7 +251,7 @@ func writefile(b, file string, flag int) { } err := ioutil.WriteFile(file, new, mode) if err != nil { - fatal("%v", err) + fatalf("%v", err) } } @@ -276,7 +259,7 @@ func writefile(b, file string, flag int) { func xmkdir(p string) { err := os.Mkdir(p, 0777) if err != nil { - fatal("%v", err) + fatalf("%v", err) } } @@ -284,7 +267,7 @@ func xmkdir(p string) { func xmkdirall(p string) { err := os.MkdirAll(p, 0777) if err != nil { - fatal("%v", err) + fatalf("%v", err) } } @@ -309,12 +292,12 @@ func xremoveall(p string) { func xreaddir(dir string) []string { f, err := os.Open(dir) if err != nil { - fatal("%v", err) + fatalf("%v", err) } defer f.Close() names, err := f.Readdirnames(-1) if err != nil { - fatal("reading %s: %v", dir, err) + fatalf("reading %s: %v", dir, err) } return names } @@ -324,12 +307,12 @@ func xreaddir(dir string) []string { func xreaddirfiles(dir string) []string { f, err := os.Open(dir) if err != nil { - fatal("%v", err) + fatalf("%v", err) } defer f.Close() infos, err := f.Readdir(-1) if err != nil { - fatal("reading %s: %v", dir, err) + fatalf("reading %s: %v", dir, err) } var names []string for _, fi := range infos { @@ -343,15 +326,15 @@ func xreaddirfiles(dir string) []string { // xworkdir creates a new temporary directory to hold object files // and returns the name of that directory. func xworkdir() string { - name, err := ioutil.TempDir("", "go-tool-dist-") + name, err := ioutil.TempDir(os.Getenv("GOTMPDIR"), "go-tool-dist-") if err != nil { - fatal("%v", err) + fatalf("%v", err) } return name } -// fatal prints an error message to standard error and exits. -func fatal(format string, args ...interface{}) { +// fatalf prints an error message to standard error and exits. 
+func fatalf(format string, args ...interface{}) { fmt.Fprintf(os.Stderr, "go tool dist: %s\n", fmt.Sprintf(format, args...)) dieOnce.Do(func() { close(dying) }) @@ -389,127 +372,6 @@ func errprintf(format string, args ...interface{}) { fmt.Fprintf(os.Stderr, format, args...) } -// main takes care of OS-specific startup and dispatches to xmain. -func main() { - os.Setenv("TERM", "dumb") // disable escape codes in clang errors - - // provide -check-armv6k first, before checking for $GOROOT so that - // it is possible to run this check without having $GOROOT available. - if len(os.Args) > 1 && os.Args[1] == "-check-armv6k" { - useARMv6K() // might fail with SIGILL - println("ARMv6K supported.") - os.Exit(0) - } - - slash = string(filepath.Separator) - - gohostos = runtime.GOOS - switch gohostos { - case "darwin": - // Even on 64-bit platform, darwin uname -m prints i386. - // We don't support any of the OS X versions that run on 32-bit-only hardware anymore. - gohostarch = "amd64" - case "freebsd": - // Since FreeBSD 10 gcc is no longer part of the base system. - defaultclang = true - case "solaris": - // Even on 64-bit platform, solaris uname -m prints i86pc. - out := run("", CheckExit, "isainfo", "-n") - if strings.Contains(out, "amd64") { - gohostarch = "amd64" - } - if strings.Contains(out, "i386") { - gohostarch = "386" - } - case "plan9": - gohostarch = os.Getenv("objtype") - if gohostarch == "" { - fatal("$objtype is unset") - } - case "windows": - exe = ".exe" - } - - sysinit() - - if gohostarch == "" { - // Default Unix system. 
- out := run("", CheckExit, "uname", "-m") - switch { - case strings.Contains(out, "x86_64"), strings.Contains(out, "amd64"): - gohostarch = "amd64" - case strings.Contains(out, "86"): - gohostarch = "386" - case strings.Contains(out, "arm"): - gohostarch = "arm" - case strings.Contains(out, "aarch64"): - gohostarch = "arm64" - case strings.Contains(out, "ppc64le"): - gohostarch = "ppc64le" - case strings.Contains(out, "ppc64"): - gohostarch = "ppc64" - case strings.Contains(out, "mips64"): - gohostarch = "mips64" - if elfIsLittleEndian(os.Args[0]) { - gohostarch = "mips64le" - } - case strings.Contains(out, "mips"): - gohostarch = "mips" - if elfIsLittleEndian(os.Args[0]) { - gohostarch = "mipsle" - } - case strings.Contains(out, "s390x"): - gohostarch = "s390x" - case gohostos == "darwin": - if strings.Contains(run("", CheckExit, "uname", "-v"), "RELEASE_ARM_") { - gohostarch = "arm" - } - default: - fatal("unknown architecture: %s", out) - } - } - - if gohostarch == "arm" || gohostarch == "mips64" || gohostarch == "mips64le" { - maxbg = min(maxbg, runtime.NumCPU()) - } - bginit() - - // The OS X 10.6 linker does not support external linking mode. - // See golang.org/issue/5130. - // - // OS X 10.6 does not work with clang either, but OS X 10.9 requires it. - // It seems to work with OS X 10.8, so we default to clang for 10.8 and later. - // See golang.org/issue/5822. - // - // Roughly, OS X 10.N shows up as uname release (N+4), - // so OS X 10.6 is uname version 10 and OS X 10.8 is uname version 12. 
- if gohostos == "darwin" { - rel := run("", CheckExit, "uname", "-r") - if i := strings.Index(rel, "."); i >= 0 { - rel = rel[:i] - } - osx, _ := strconv.Atoi(rel) - if osx <= 6+4 { - goextlinkenabled = "0" - } - if osx >= 8+4 { - defaultclang = true - } - } - - if len(os.Args) > 1 && os.Args[1] == "-check-goarm" { - useVFPv1() // might fail with SIGILL - println("VFPv1 OK.") - useVFPv3() // might fail with SIGILL - println("VFPv3 OK.") - os.Exit(0) - } - - xinit() - xmain() - xexit(0) -} - // xsamefile reports whether f1 and f2 are the same file (or dir) func xsamefile(f1, f2 string) bool { fi1, err1 := os.Stat(f1) @@ -570,17 +432,17 @@ func elfIsLittleEndian(fn string) bool { // debug/elf package. file, err := os.Open(fn) if err != nil { - fatal("failed to open file to determine endianness: %v", err) + fatalf("failed to open file to determine endianness: %v", err) } defer file.Close() var hdr [16]byte if _, err := io.ReadFull(file, hdr[:]); err != nil { - fatal("failed to read ELF header to determine endianness: %v", err) + fatalf("failed to read ELF header to determine endianness: %v", err) } // hdr[5] is EI_DATA byte, 1 is ELFDATA2LSB and 2 is ELFDATA2MSB switch hdr[5] { default: - fatal("unknown ELF endianness of %s: EI_DATA = %d", fn, hdr[5]) + fatalf("unknown ELF endianness of %s: EI_DATA = %d", fn, hdr[5]) case 1: return true case 2: @@ -588,3 +450,40 @@ func elfIsLittleEndian(fn string) bool { } panic("unreachable") } + +// count is a flag.Value that is like a flag.Bool and a flag.Int. +// If used as -name, it increments the count, but -name=x sets the count. +// Used for verbose flag -v. 
+type count int + +func (c *count) String() string { + return fmt.Sprint(int(*c)) +} + +func (c *count) Set(s string) error { + switch s { + case "true": + *c++ + case "false": + *c = 0 + default: + n, err := strconv.Atoi(s) + if err != nil { + return fmt.Errorf("invalid count %q", s) + } + *c = count(n) + } + return nil +} + +func (c *count) IsBoolFlag() bool { + return true +} + +func xflagparse(maxargs int) { + flag.Var((*count)(&vflag), "v", "verbosity") + flag.Parse() + if maxargs >= 0 && flag.NArg() > maxargs { + flag.Usage() + } +} diff --git a/src/cmd/doc/doc_test.go b/src/cmd/doc/doc_test.go index 89282529983..ee7c430cbd9 100644 --- a/src/cmd/doc/doc_test.go +++ b/src/cmd/doc/doc_test.go @@ -445,6 +445,19 @@ var tests = []test{ `CaseMatch`, }, }, + + // No dups with -u. Issue 21797. + { + "case matching on, no dups", + []string{"-u", p, `duplicate`}, + []string{ + `Duplicate`, + `duplicate`, + }, + []string{ + "\\)\n+const", // This will appear if the const decl appears twice. + }, + }, } func TestDoc(t *testing.T) { @@ -541,6 +554,54 @@ func TestMultiplePackages(t *testing.T) { } } +// Test the code to look up packages when given two args. First test case is +// go doc binary BigEndian +// This needs to find encoding/binary.BigEndian, which means +// finding the package encoding/binary given only "binary". +// Second case is +// go doc rand Float64 +// which again needs to find math/rand and not give up after crypto/rand, +// which has no such function. +func TestTwoArgLookup(t *testing.T) { + if testing.Short() { + t.Skip("scanning file system takes too long") + } + maybeSkip(t) + var b bytes.Buffer // We don't care about the output. 
+ { + var flagSet flag.FlagSet + err := do(&b, &flagSet, []string{"binary", "BigEndian"}) + if err != nil { + t.Errorf("unexpected error %q from binary BigEndian", err) + } + } + { + var flagSet flag.FlagSet + err := do(&b, &flagSet, []string{"rand", "Float64"}) + if err != nil { + t.Errorf("unexpected error %q from rand Float64", err) + } + } + { + var flagSet flag.FlagSet + err := do(&b, &flagSet, []string{"bytes", "Foo"}) + if err == nil { + t.Errorf("expected error from bytes Foo") + } else if !strings.Contains(err.Error(), "no symbol Foo") { + t.Errorf("unexpected error %q from bytes Foo", err) + } + } + { + var flagSet flag.FlagSet + err := do(&b, &flagSet, []string{"nosuchpackage", "Foo"}) + if err == nil { + // actually present in the user's filesystem + } else if !strings.Contains(err.Error(), "no such package") { + t.Errorf("unexpected error %q from nosuchpackage Foo", err) + } + } +} + type trimTest struct { path string prefix string diff --git a/src/cmd/doc/main.go b/src/cmd/doc/main.go index 76c7dba2d95..809a719a58b 100644 --- a/src/cmd/doc/main.go +++ b/src/cmd/doc/main.go @@ -93,6 +93,9 @@ func do(writer io.Writer, flagSet *flag.FlagSet, args []string) (err error) { if i > 0 && !more { // Ignore the "more" bit on the first iteration. return failMessage(paths, symbol, method) } + if buildPackage == nil { + return fmt.Errorf("no such package: %s", userPath) + } symbol, method = parseSymbol(sym) pkg := parsePackage(writer, buildPackage, userPath) paths = append(paths, pkg.prettyPath()) @@ -176,12 +179,12 @@ func parseArgs(args []string) (pkg *build.Package, path, symbol string, more boo case 1: // Done below. case 2: - // Package must be importable. - pkg, err := build.Import(args[0], "", build.ImportComment) - if err != nil { - log.Fatalf("%s", err) + // Package must be findable and importable. 
+ packagePath, ok := findPackage(args[0]) + if !ok { + return nil, args[0], args[1], false } - return pkg, args[0], args[1], false + return importDir(packagePath), args[0], args[1], true } // Usual case: one argument. arg := args[0] @@ -230,7 +233,6 @@ func parseArgs(args []string) (pkg *build.Package, path, symbol string, more boo } // See if we have the basename or tail of a package, as in json for encoding/json // or ivy/value for robpike.io/ivy/value. - // Launch findPackage as a goroutine so it can return multiple paths if required. path, ok := findPackage(arg[0:period]) if ok { return importDir(path), arg[0:period], symbol, true diff --git a/src/cmd/doc/pkg.go b/src/cmd/doc/pkg.go index 5a14d6e7cf5..99a00c56326 100644 --- a/src/cmd/doc/pkg.go +++ b/src/cmd/doc/pkg.go @@ -594,6 +594,11 @@ func (pkg *Package) symbolDoc(symbol string) bool { // Constants and variables behave the same. values := pkg.findValues(symbol, pkg.doc.Consts) values = append(values, pkg.findValues(symbol, pkg.doc.Vars)...) + // A declaration like + // const ( c = 1; C = 2 ) + // could be printed twice if the -u flag is set, as it matches twice. + // So we remember which declarations we've printed to avoid duplication. + printed := make(map[*ast.GenDecl]bool) for _, value := range values { // Print each spec only if there is at least one exported symbol in it. // (See issue 11008.) @@ -628,7 +633,7 @@ func (pkg *Package) symbolDoc(symbol string) bool { } } } - if len(specs) == 0 { + if len(specs) == 0 || printed[value.Decl] { continue } value.Decl.Specs = specs @@ -636,6 +641,7 @@ func (pkg *Package) symbolDoc(symbol string) bool { pkg.packageClause(true) } pkg.emit(value.Doc, value.Decl) + printed[value.Decl] = true found = true } // Types. 
diff --git a/src/cmd/doc/testdata/pkg.go b/src/cmd/doc/testdata/pkg.go index 99755b11c56..d0995bbf7d2 100644 --- a/src/cmd/doc/testdata/pkg.go +++ b/src/cmd/doc/testdata/pkg.go @@ -193,3 +193,8 @@ var LongLine = newLongLine( type T2 int type T1 = T2 + +const ( + Duplicate = iota + duplicate +) diff --git a/src/cmd/fix/cftype.go b/src/cmd/fix/cftype.go new file mode 100644 index 00000000000..da1627fbfb4 --- /dev/null +++ b/src/cmd/fix/cftype.go @@ -0,0 +1,93 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "go/ast" + "go/token" + "reflect" + "strings" +) + +func init() { + register(cftypeFix) +} + +var cftypeFix = fix{ + name: "cftype", + date: "2017-09-27", + f: cftypefix, + desc: `Fixes initializers of C.CF*Ptr types`, + disabled: false, +} + +// Old state: +// type CFTypeRef unsafe.Pointer +// New state: +// type CFTypeRef uintptr +// and similar for other CF*Ref types. +// This fix finds nils initializing these types and replaces the nils with 0s. +func cftypefix(f *ast.File) bool { + if !imports(f, "C") { + return false + } + typeof, _ := typecheck(&TypeConfig{}, f) + + // step 1: Find all the nils with the offending types. + // Compute their replacement. + badNils := map[interface{}]ast.Expr{} + walk(f, func(n interface{}) { + if i, ok := n.(*ast.Ident); ok && i.Name == "nil" && badPointerType(typeof[n]) { + badNils[n] = &ast.BasicLit{ValuePos: i.NamePos, Kind: token.INT, Value: "0"} + } + }) + if len(badNils) == 0 { + return false + } + + // step 2: find all uses of the bad nils, replace them with 0. + // There's no easy way to map from an ast.Expr to all the places that use them, so + // we use reflect to find all such references. 
+ exprType := reflect.TypeOf((*ast.Expr)(nil)).Elem() + exprSliceType := reflect.TypeOf(([]ast.Expr)(nil)) + walk(f, func(n interface{}) { + if n == nil { + return + } + v := reflect.ValueOf(n) + if v.Type().Kind() != reflect.Ptr { + return + } + if v.IsNil() { + return + } + v = v.Elem() + if v.Type().Kind() != reflect.Struct { + return + } + for i := 0; i < v.NumField(); i++ { + f := v.Field(i) + if f.Type() == exprType { + if r := badNils[f.Interface()]; r != nil { + f.Set(reflect.ValueOf(r)) + } + } + if f.Type() == exprSliceType { + for j := 0; j < f.Len(); j++ { + e := f.Index(j) + if r := badNils[e.Interface()]; r != nil { + e.Set(reflect.ValueOf(r)) + } + } + } + } + }) + + return true +} + +func badPointerType(s string) bool { + return strings.HasPrefix(s, "C.CF") && strings.HasSuffix(s, "Ref") +} diff --git a/src/cmd/fix/cftype_test.go b/src/cmd/fix/cftype_test.go new file mode 100644 index 00000000000..adaed2114f1 --- /dev/null +++ b/src/cmd/fix/cftype_test.go @@ -0,0 +1,185 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +func init() { + addTestCases(cftypeTests, cftypefix) +} + +var cftypeTests = []testCase{ + { + Name: "cftype.localVariable", + In: `package main + +import "C" + +func f() { + var x C.CFTypeRef = nil + x = nil + x, x = nil, nil +} +`, + Out: `package main + +import "C" + +func f() { + var x C.CFTypeRef = 0 + x = 0 + x, x = 0, 0 +} +`, + }, + { + Name: "cftype.globalVariable", + In: `package main + +import "C" + +var x C.CFTypeRef = nil + +func f() { + x = nil +} +`, + Out: `package main + +import "C" + +var x C.CFTypeRef = 0 + +func f() { + x = 0 +} +`, + }, + { + Name: "cftype.EqualArgument", + In: `package main + +import "C" + +var x C.CFTypeRef +var y = x == nil +var z = x != nil +`, + Out: `package main + +import "C" + +var x C.CFTypeRef +var y = x == 0 +var z = x != 0 +`, + }, + { + Name: "cftype.StructField", + In: `package main + +import "C" + +type T struct { + x C.CFTypeRef +} + +var t = T{x: nil} +`, + Out: `package main + +import "C" + +type T struct { + x C.CFTypeRef +} + +var t = T{x: 0} +`, + }, + { + Name: "cftype.FunctionArgument", + In: `package main + +import "C" + +func f(x C.CFTypeRef) { +} + +func g() { + f(nil) +} +`, + Out: `package main + +import "C" + +func f(x C.CFTypeRef) { +} + +func g() { + f(0) +} +`, + }, + { + Name: "cftype.ArrayElement", + In: `package main + +import "C" + +var x = [3]C.CFTypeRef{nil, nil, nil} +`, + Out: `package main + +import "C" + +var x = [3]C.CFTypeRef{0, 0, 0} +`, + }, + { + Name: "cftype.SliceElement", + In: `package main + +import "C" + +var x = []C.CFTypeRef{nil, nil, nil} +`, + Out: `package main + +import "C" + +var x = []C.CFTypeRef{0, 0, 0} +`, + }, + { + Name: "cftype.MapKey", + In: `package main + +import "C" + +var x = map[C.CFTypeRef]int{nil: 0} +`, + Out: `package main + +import "C" + +var x = map[C.CFTypeRef]int{0: 0} +`, + }, + { + Name: "cftype.MapValue", + In: `package main + +import "C" + +var x = map[int]C.CFTypeRef{0: nil} +`, + Out: `package main + +import "C" + +var x = 
map[int]C.CFTypeRef{0: 0} +`, + }, +} diff --git a/src/cmd/fix/context.go b/src/cmd/fix/context.go index 926a06cccf6..1107f4d66c0 100644 --- a/src/cmd/fix/context.go +++ b/src/cmd/fix/context.go @@ -17,7 +17,7 @@ var contextFix = fix{ date: "2016-09-09", f: ctxfix, desc: `Change imports of golang.org/x/net/context to context`, - disabled: true, + disabled: false, } func ctxfix(f *ast.File) bool { diff --git a/src/cmd/fix/typecheck.go b/src/cmd/fix/typecheck.go index 0352c49db0f..58d915869d6 100644 --- a/src/cmd/fix/typecheck.go +++ b/src/cmd/fix/typecheck.go @@ -498,6 +498,50 @@ func typecheck1(cfg *TypeConfig, f interface{}, typeof map[interface{}]string, a // T{...} has type T. typeof[n] = gofmt(n.Type) + // Propagate types down to values used in the composite literal. + t := expand(typeof[n]) + if strings.HasPrefix(t, "[") { // array or slice + // Lazy: assume there are no nested [] in the array length. + if i := strings.Index(t, "]"); i >= 0 { + et := t[i+1:] + for _, e := range n.Elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + e = kv.Value + } + if typeof[e] == "" { + typeof[e] = et + } + } + } + } + if strings.HasPrefix(t, "map[") { // map + // Lazy: assume there are no nested [] in the map key type. + if i := strings.Index(t, "]"); i >= 0 { + kt, vt := t[4:i], t[i+1:] + for _, e := range n.Elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + if typeof[kv.Key] == "" { + typeof[kv.Key] = kt + } + if typeof[kv.Value] == "" { + typeof[kv.Value] = vt + } + } + } + } + } + if typ := cfg.Type[t]; typ != nil && len(typ.Field) > 0 { // struct + for _, e := range n.Elts { + if kv, ok := e.(*ast.KeyValueExpr); ok { + if ft := typ.Field[fmt.Sprintf("%s", kv.Key)]; ft != "" { + if typeof[kv.Value] == "" { + typeof[kv.Value] = ft + } + } + } + } + } + case *ast.ParenExpr: // (x) has type of x. 
typeof[n] = typeof[n.X] @@ -579,6 +623,18 @@ func typecheck1(cfg *TypeConfig, f interface{}, typeof map[interface{}]string, a set(res[i], t[i], false) } } + + case *ast.BinaryExpr: + // Propagate types across binary ops that require two args of the same type. + switch n.Op { + case token.EQL, token.NEQ: // TODO: more cases. This is enough for the cftype fix. + if typeof[n.X] != "" && typeof[n.Y] == "" { + typeof[n.Y] = typeof[n.X] + } + if typeof[n.X] == "" && typeof[n.Y] != "" { + typeof[n.X] = typeof[n.Y] + } + } } } walkBeforeAfter(f, before, after) diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index 7dd7ba90f09..fd5b01c92a1 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -14,12 +14,12 @@ // The commands are: // // build compile packages and dependencies -// clean remove object files +// clean remove object files and cached files // doc show documentation for package or symbol // env print Go environment information // bug start a bug report -// fix run go tool fix on packages -// fmt run gofmt on package sources +// fix update packages to use new APIs +// fmt gofmt (reformat) package sources // generate generate Go files by processing source // get download and install packages and dependencies // install compile and install packages and dependencies @@ -28,7 +28,7 @@ // test test packages // tool run specified go tool // version print Go version -// vet run go tool vet on packages +// vet report likely mistakes in packages // // Use "go help [command]" for more information about a command. // @@ -104,15 +104,15 @@ // -x // print the commands. // -// -asmflags 'flag list' +// -asmflags '[pattern=]arg list' // arguments to pass on each go tool asm invocation. // -buildmode mode // build mode to use. See 'go help buildmode' for more. // -compiler name // name of compiler to use, as in runtime.Compiler (gccgo or gc). 
-// -gccgoflags 'arg list' +// -gccgoflags '[pattern=]arg list' // arguments to pass on each gccgo compiler/linker invocation. -// -gcflags 'arg list' +// -gcflags '[pattern=]arg list' // arguments to pass on each go tool compile invocation. // -installsuffix suffix // a suffix to use in the name of the package installation directory, @@ -121,7 +121,7 @@ // or, if set explicitly, has _race appended to it. Likewise for the -msan // flag. Using a -buildmode option that requires non-default compile flags // has a similar effect. -// -ldflags 'flag list' +// -ldflags '[pattern=]arg list' // arguments to pass on each go tool link invocation. // -linkshared // link against shared libraries previously created with @@ -139,9 +139,21 @@ // For example, instead of running asm, the go command will run // 'cmd args /path/to/asm '. // -// All the flags that take a list of arguments accept a space-separated -// list of strings. To embed spaces in an element in the list, surround -// it with either single or double quotes. +// The -asmflags, -gccgoflags, -gcflags, and -ldflags flags accept a +// space-separated list of arguments to pass to an underlying tool +// during the build. To embed spaces in an element in the list, surround +// it with either single or double quotes. The argument list may be +// preceded by a package pattern and an equal sign, which restricts +// the use of that argument list to the building of packages matching +// that pattern (see 'go help packages' for a description of package +// patterns). Without a pattern, the argument list applies only to the +// packages named on the command line. The flags may be repeated +// with different patterns in order to specify different arguments for +// different sets of packages. If a package matches patterns given in +// multiple flags, the latest match on the command line wins. 
+// For example, 'go build -gcflags=-S fmt' prints the disassembly +// only for package fmt, while 'go build -gcflags=all=-S fmt' +// prints the disassembly for fmt and all its dependencies. // // For more about specifying packages, see 'go help packages'. // For more about where packages and binaries are installed, @@ -158,11 +170,11 @@ // See also: go install, go get, go clean. // // -// Remove object files +// Remove object files and cached files // // Usage: // -// go clean [-i] [-r] [-n] [-x] [build flags] [packages] +// go clean [-i] [-r] [-n] [-x] [-cache] [-testcache] [build flags] [packages] // // Clean removes object files from package source directories. // The go command builds most objects in a temporary directory, @@ -200,6 +212,11 @@ // // The -x flag causes clean to print remove commands as it executes them. // +// The -cache flag causes clean to remove the entire go build cache. +// +// The -testcache flag causes clean to expire all test results in the +// go build cache. +// // For more about build flags, see 'go help build'. // // For more about specifying packages, see 'go help packages'. @@ -328,6 +345,8 @@ // The -json flag prints the environment in JSON format // instead of as a shell script. // +// For more about environment variables, see 'go help environment'. +// // // Start a bug report // @@ -339,7 +358,7 @@ // The report includes useful system information. // // -// Run go tool fix on packages +// Update packages to use new APIs // // Usage: // @@ -355,7 +374,7 @@ // See also: go fmt, go vet. // // -// Run gofmt on package sources +// Gofmt (reformat) package sources // // Usage: // @@ -543,10 +562,11 @@ // // Usage: // -// go install [build flags] [packages] +// go install [-i] [build flags] [packages] // -// Install compiles and installs the packages named by the import paths, -// along with their dependencies. +// Install compiles and installs the packages named by the import paths. 
+// +// The -i flag installs the dependencies of the named packages as well. // // For more about the build flags, see 'go help build'. // For more about specifying packages, see 'go help packages'. @@ -719,10 +739,10 @@ // // 'Go test' recompiles each package along with any files with names matching // the file pattern "*_test.go". -// Files whose names begin with "_" (including "_test.go") or "." are ignored. // These additional files can contain test functions, benchmark functions, and // example functions. See 'go help testfunc' for more. // Each listed package causes the execution of a separate test binary. +// Files whose names begin with "_" (including "_test.go") or "." are ignored. // // Test files that declare a package with the suffix "_test" will be compiled as a // separate package, and then linked and run with the main test binary. @@ -730,11 +750,46 @@ // The go tool will ignore a directory named "testdata", making it available // to hold ancillary data needed by the tests. // -// By default, go test needs no arguments. It compiles and tests the package -// with source in the current directory, including tests, and runs the tests. +// As part of building a test binary, go test runs go vet on the package +// and its test source files to identify significant problems. If go vet +// finds any problems, go test reports those and does not run the test binary. +// Only a high-confidence subset of the default go vet checks are used. +// To disable the running of go vet, use the -vet=off flag. // -// The package is built in a temporary directory so it does not interfere with the -// non-test installation. +// Go test runs in two different modes: local directory mode when invoked with +// no package arguments (for example, 'go test'), and package list mode when +// invoked with package arguments (for example 'go test math', 'go test ./...', +// and even 'go test .'). 
+// +// In local directory mode, go test compiles and tests the package sources +// found in the current directory and then runs the resulting test binary. +// In this mode, caching (discussed below) is disabled. After the package test +// finishes, go test prints a summary line showing the test status ('ok' or 'FAIL'), +// package name, and elapsed time. +// +// In package list mode, go test compiles and tests each of the packages +// listed on the command line. If a package test passes, go test prints only +// the final 'ok' summary line. If a package test fails, go test prints the +// full test output. If invoked with the -bench or -v flag, go test prints +// the full output even for passing package tests, in order to display the +// requested benchmark results or verbose logging. +// +// All test output and summary lines are printed to the go command's standard +// output, even if the test printed them to its own standard error. +// (The go command's standard error is reserved for printing errors building +// the tests.) +// +// In package list mode, go test also caches successful package test results. +// If go test has cached a previous test run using the same test binary and +// the same command line consisting entirely of cacheable test flags +// (defined as -cpu, -list, -parallel, -run, -short, and -v), +// go test will redisplay the previous output instead of running the test +// binary again. In the summary line, go test prints '(cached)' in place of +// the elapsed time. To disable test caching, use any test flag or argument +// other than the cacheable flags. The idiomatic way to disable test caching +// explicitly is to use -count=1. A cached result is treated as executing in +// no time at all, so a successful package test result will be cached and reused +// regardless of -timeout setting. // // In addition to the build flags, the flags handled by 'go test' itself are: // @@ -757,6 +812,10 @@ // Install packages that are dependencies of the test. 
// Do not run the test. // +// -json +// Convert test output to JSON suitable for automated processing. +// See 'go doc test2json' for the encoding details. +// // -o file // Compile the test binary to the named file. // The test still runs (unless -c or -i is specified). @@ -782,7 +841,7 @@ // The -n flag causes tool to print the command that would be // executed but not execute it. // -// For more about each tool command, see 'go tool command -h'. +// For more about each tool command, see 'go doc cmd/'. // // // Print Go version @@ -794,7 +853,7 @@ // Version prints the Go version, as reported by runtime.Version. // // -// Run go tool vet on packages +// Report likely mistakes in packages // // Usage: // @@ -808,7 +867,9 @@ // The -n flag prints commands that would be executed. // The -x flag prints commands as they are executed. // -// For more about build flags, see 'go help build'. +// The build flags supported by go vet are those that control package resolution +// and execution, such as -n, -x, -v, -tags, and -toolexec. +// For more about these flags, see 'go help build'. // // See also: go fmt, go fix. // @@ -917,8 +978,10 @@ // comment, indicating that the package sources are included // for documentation only and must not be used to build the // package binary. This enables distribution of Go packages in -// their compiled form alone. See the go/build package documentation -// for more details. +// their compiled form alone. Even binary-only packages require +// accurate import blocks listing required dependencies, so that +// those dependencies can be supplied when linking the resulting +// command. // // // GOPATH environment variable @@ -1096,6 +1159,12 @@ // See https://golang.org/doc/articles/race_detector.html. // GOROOT // The root of the go tree. +// GOTMPDIR +// The directory where the go command will write +// temporary source files, packages, and binaries. 
+// GOCACHE +// The directory where the go command will store +// cached information for reuse in future builds. // // Environment variables for use with cgo: // @@ -1130,6 +1199,9 @@ // GO386 // For GOARCH=386, the floating point instruction set. // Valid values are 387, sse2. +// GOMIPS +// For GOARCH=mips{,le}, whether to use floating point instructions. +// Valid values are hardfloat (default), softfloat. // // Special-purpose environment variables: // @@ -1457,10 +1529,10 @@ // significantly more expensive. // Sets -cover. // -// -coverpkg pkg1,pkg2,pkg3 -// Apply coverage analysis in each test to the given list of packages. +// -coverpkg pattern1,pattern2,pattern3 +// Apply coverage analysis in each test to packages matching the patterns. // The default is for each test to analyze only the package being tested. -// Packages are specified as import paths. +// See 'go help packages' for a description of package patterns. // Sets -cover. // // -cpu 1,2,4 @@ -1468,6 +1540,9 @@ // benchmarks should be executed. The default is the current value // of GOMAXPROCS. // +// -failfast +// Do not start new tests after the first test failure. +// // -list regexp // List tests, benchmarks, or examples matching the regular expression. // No tests, benchmarks or examples will be run. This will only @@ -1500,12 +1575,20 @@ // // -timeout d // If a test binary runs longer than duration d, panic. +// If d is 0, the timeout is disabled. // The default is 10 minutes (10m). // // -v // Verbose output: log all tests as they are run. Also print all // text from Log and Logf calls even if the test succeeds. // +// -vet list +// Configure the invocation of "go vet" during "go test" +// to use the comma-separated list of vet checks. +// If list is empty, "go test" runs "go vet" with a curated list of +// checks believed to be always worth addressing. +// If list is "off", "go test" does not run "go vet" at all. 
+// // The following flags are also recognized by 'go test' and can be used to // profile the tests during execution: // diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go index a12df2988c7..75d65b7235a 100644 --- a/src/cmd/go/go_test.go +++ b/src/cmd/go/go_test.go @@ -6,6 +6,8 @@ package main_test import ( "bytes" + "debug/elf" + "debug/macho" "fmt" "go/format" "internal/race" @@ -27,6 +29,7 @@ var ( canRun = true // whether we can run go or ./testgo canRace = false // whether we can run the race detector canCgo = false // whether we can use cgo + canMSan = false // whether we can run the memory sanitizer exeSuffix string // ".exe" on Windows @@ -83,25 +86,61 @@ var testCC string // The TestMain function creates a go command for testing purposes and // deletes it after the tests have been run. func TestMain(m *testing.M) { + if os.Getenv("GO_GCFLAGS") != "" { + fmt.Fprintf(os.Stderr, "testing: warning: no tests to run\n") // magic string for cmd/go + fmt.Printf("cmd/go test is not compatible with $GO_GCFLAGS being set\n") + fmt.Printf("SKIP\n") + return + } + if canRun { args := []string{"build", "-tags", "testgo", "-o", "testgo" + exeSuffix} if race.Enabled { args = append(args, "-race") } - out, err := exec.Command("go", args...).CombinedOutput() + gotool, err := testenv.GoTool() + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(2) + } + + goEnv := func(name string) string { + out, err := exec.Command(gotool, "env", name).CombinedOutput() + if err != nil { + fmt.Fprintf(os.Stderr, "go env %s: %v\n%s", name, err, out) + os.Exit(2) + } + return strings.TrimSpace(string(out)) + } + testGOROOT = goEnv("GOROOT") + + // The whole GOROOT/pkg tree was installed using the GOHOSTOS/GOHOSTARCH + // toolchain (installed in GOROOT/pkg/tool/GOHOSTOS_GOHOSTARCH). + // The testgo.exe we are about to create will be built for GOOS/GOARCH, + // which means it will use the GOOS/GOARCH toolchain + // (installed in GOROOT/pkg/tool/GOOS_GOARCH). 
+ // If these are not the same toolchain, then the entire standard library + // will look out of date (the compilers in those two different tool directories + // are built for different architectures and have different buid IDs), + // which will cause many tests to do unnecessary rebuilds and some + // tests to attempt to overwrite the installed standard library. + // Bail out entirely in this case. + hostGOOS := goEnv("GOHOSTOS") + hostGOARCH := goEnv("GOHOSTARCH") + if hostGOOS != runtime.GOOS || hostGOARCH != runtime.GOARCH { + fmt.Fprintf(os.Stderr, "testing: warning: no tests to run\n") // magic string for cmd/go + fmt.Printf("cmd/go test is not compatible with GOOS/GOARCH != GOHOSTOS/GOHOSTARCH (%s/%s != %s/%s)\n", runtime.GOOS, runtime.GOARCH, hostGOOS, hostGOARCH) + fmt.Printf("SKIP\n") + return + } + + out, err := exec.Command(gotool, args...).CombinedOutput() if err != nil { fmt.Fprintf(os.Stderr, "building testgo failed: %v\n%s", err, out) os.Exit(2) } - out, err = exec.Command("go", "env", "GOROOT").CombinedOutput() - if err != nil { - fmt.Fprintf(os.Stderr, "could not find testing GOROOT: %v\n%s", err, out) - os.Exit(2) - } - testGOROOT = strings.TrimSpace(string(out)) - - out, err = exec.Command("go", "env", "CC").CombinedOutput() + out, err = exec.Command(gotool, "env", "CC").CombinedOutput() if err != nil { fmt.Fprintf(os.Stderr, "could not find testing CC: %v\n%s", err, out) os.Exit(2) @@ -118,6 +157,10 @@ func TestMain(m *testing.M) { } } + // As of Sept 2017, MSan is only supported on linux/amd64. 
+ // https://github.com/google/sanitizers/wiki/MemorySanitizer#getting-memorysanitizer + canMSan = canCgo && runtime.GOOS == "linux" && runtime.GOARCH == "amd64" + switch runtime.GOOS { case "linux", "darwin", "freebsd", "windows": // The race detector doesn't work on Alpine Linux: @@ -125,7 +168,6 @@ func TestMain(m *testing.M) { canRace = canCgo && runtime.GOARCH == "amd64" && !isAlpineLinux() } } - // Don't let these environment variables confuse the test. os.Unsetenv("GOBIN") os.Unsetenv("GOPATH") @@ -137,6 +179,9 @@ func TestMain(m *testing.M) { os.Setenv("CCACHE_DIR", filepath.Join(home, ".ccache")) } os.Setenv("HOME", "/test-go-home-does-not-exist") + if os.Getenv("GOCACHE") == "" { + os.Setenv("GOCACHE", "off") // because $HOME is gone + } r := m.Run() @@ -175,6 +220,7 @@ type testgoData struct { // testgo sets up for a test that runs testgo. func testgo(t *testing.T) *testgoData { + t.Helper() testenv.MustHaveGoBuild(t) if skipExternal { @@ -186,6 +232,7 @@ func testgo(t *testing.T) *testgoData { // must gives a fatal error if err is not nil. func (tg *testgoData) must(err error) { + tg.t.Helper() if err != nil { tg.t.Fatal(err) } @@ -193,6 +240,7 @@ func (tg *testgoData) must(err error) { // check gives a test non-fatal error if err is not nil. func (tg *testgoData) check(err error) { + tg.t.Helper() if err != nil { tg.t.Error(err) } @@ -200,6 +248,7 @@ func (tg *testgoData) check(err error) { // parallel runs the test in parallel by calling t.Parallel. func (tg *testgoData) parallel() { + tg.t.Helper() if tg.ran { tg.t.Fatal("internal testsuite error: call to parallel after run") } @@ -220,6 +269,7 @@ func (tg *testgoData) parallel() { // pwd returns the current directory. 
func (tg *testgoData) pwd() string { + tg.t.Helper() wd, err := os.Getwd() if err != nil { tg.t.Fatalf("could not get working directory: %v", err) @@ -231,6 +281,7 @@ func (tg *testgoData) pwd() string { // using this means that the test must not be run in parallel with any // other tests. func (tg *testgoData) cd(dir string) { + tg.t.Helper() if tg.inParallel { tg.t.Fatal("internal testsuite error: changing directory when running in parallel") } @@ -254,6 +305,7 @@ func (tg *testgoData) sleep() { // setenv sets an environment variable to use when running the test go // command. func (tg *testgoData) setenv(name, val string) { + tg.t.Helper() if tg.inParallel && (name == "GOROOT" || name == "GOPATH" || name == "GOBIN") && (strings.HasPrefix(val, "testdata") || strings.HasPrefix(val, "./testdata")) { tg.t.Fatalf("internal testsuite error: call to setenv with testdata (%s=%s) after parallel", name, val) } @@ -284,6 +336,7 @@ func (tg *testgoData) goTool() string { // doRun runs the test go command, recording stdout and stderr and // returning exit status. func (tg *testgoData) doRun(args []string) error { + tg.t.Helper() if !canRun { panic("testgoData.doRun called but canRun false") } @@ -329,6 +382,7 @@ func (tg *testgoData) doRun(args []string) error { // run runs the test go command, and expects it to succeed. func (tg *testgoData) run(args ...string) { + tg.t.Helper() if status := tg.doRun(args); status != nil { tg.t.Logf("go %v failed unexpectedly: %v", args, status) tg.t.FailNow() @@ -337,6 +391,7 @@ func (tg *testgoData) run(args ...string) { // runFail runs the test go command, and expects it to fail. func (tg *testgoData) runFail(args ...string) { + tg.t.Helper() if status := tg.doRun(args); status == nil { tg.t.Fatal("testgo succeeded unexpectedly") } else { @@ -346,6 +401,7 @@ func (tg *testgoData) runFail(args ...string) { // runGit runs a git command, and expects it to succeed. 
func (tg *testgoData) runGit(dir string, args ...string) { + tg.t.Helper() cmd := exec.Command("git", args...) tg.stdout.Reset() tg.stderr.Reset() @@ -370,6 +426,7 @@ func (tg *testgoData) runGit(dir string, args ...string) { // getStdout returns standard output of the testgo run as a string. func (tg *testgoData) getStdout() string { + tg.t.Helper() if !tg.ran { tg.t.Fatal("internal testsuite error: stdout called before run") } @@ -378,6 +435,7 @@ func (tg *testgoData) getStdout() string { // getStderr returns standard error of the testgo run as a string. func (tg *testgoData) getStderr() string { + tg.t.Helper() if !tg.ran { tg.t.Fatal("internal testsuite error: stdout called before run") } @@ -388,6 +446,7 @@ func (tg *testgoData) getStderr() string { // whether it is found. The regular expression is matched against // each line separately, as with the grep command. func (tg *testgoData) doGrepMatch(match string, b *bytes.Buffer) bool { + tg.t.Helper() if !tg.ran { tg.t.Fatal("internal testsuite error: grep called before run") } @@ -405,6 +464,7 @@ func (tg *testgoData) doGrepMatch(match string, b *bytes.Buffer) bool { // searching, "output" or "error". The msg argument is logged on // failure. func (tg *testgoData) doGrep(match string, b *bytes.Buffer, name, msg string) { + tg.t.Helper() if !tg.doGrepMatch(match, b) { tg.t.Log(msg) tg.t.Logf("pattern %v not found in standard %s", match, name) @@ -415,18 +475,21 @@ func (tg *testgoData) doGrep(match string, b *bytes.Buffer, name, msg string) { // grepStdout looks for a regular expression in the test run's // standard output and fails, logging msg, if it is not found. func (tg *testgoData) grepStdout(match, msg string) { + tg.t.Helper() tg.doGrep(match, &tg.stdout, "output", msg) } // grepStderr looks for a regular expression in the test run's // standard error and fails, logging msg, if it is not found. 
func (tg *testgoData) grepStderr(match, msg string) { + tg.t.Helper() tg.doGrep(match, &tg.stderr, "error", msg) } // grepBoth looks for a regular expression in the test run's standard // output or stand error and fails, logging msg, if it is not found. func (tg *testgoData) grepBoth(match, msg string) { + tg.t.Helper() if !tg.doGrepMatch(match, &tg.stdout) && !tg.doGrepMatch(match, &tg.stderr) { tg.t.Log(msg) tg.t.Logf("pattern %v not found in standard output or standard error", match) @@ -437,6 +500,7 @@ func (tg *testgoData) grepBoth(match, msg string) { // doGrepNot looks for a regular expression in a buffer and fails if // it is found. The name and msg arguments are as for doGrep. func (tg *testgoData) doGrepNot(match string, b *bytes.Buffer, name, msg string) { + tg.t.Helper() if tg.doGrepMatch(match, b) { tg.t.Log(msg) tg.t.Logf("pattern %v found unexpectedly in standard %s", match, name) @@ -447,12 +511,14 @@ func (tg *testgoData) doGrepNot(match string, b *bytes.Buffer, name, msg string) // grepStdoutNot looks for a regular expression in the test run's // standard output and fails, logging msg, if it is found. func (tg *testgoData) grepStdoutNot(match, msg string) { + tg.t.Helper() tg.doGrepNot(match, &tg.stdout, "output", msg) } // grepStderrNot looks for a regular expression in the test run's // standard error and fails, logging msg, if it is found. func (tg *testgoData) grepStderrNot(match, msg string) { + tg.t.Helper() tg.doGrepNot(match, &tg.stderr, "error", msg) } @@ -460,6 +526,7 @@ func (tg *testgoData) grepStderrNot(match, msg string) { // standard output or stand error and fails, logging msg, if it is // found. 
func (tg *testgoData) grepBothNot(match, msg string) { + tg.t.Helper() if tg.doGrepMatch(match, &tg.stdout) || tg.doGrepMatch(match, &tg.stderr) { tg.t.Log(msg) tg.t.Fatalf("pattern %v found unexpectedly in standard output or standard error", match) @@ -468,6 +535,7 @@ func (tg *testgoData) grepBothNot(match, msg string) { // doGrepCount counts the number of times a regexp is seen in a buffer. func (tg *testgoData) doGrepCount(match string, b *bytes.Buffer) int { + tg.t.Helper() if !tg.ran { tg.t.Fatal("internal testsuite error: doGrepCount called before run") } @@ -484,6 +552,7 @@ func (tg *testgoData) doGrepCount(match string, b *bytes.Buffer) int { // grepCountBoth returns the number of times a regexp is seen in both // standard output and standard error. func (tg *testgoData) grepCountBoth(match string) int { + tg.t.Helper() return tg.doGrepCount(match, &tg.stdout) + tg.doGrepCount(match, &tg.stderr) } @@ -492,6 +561,7 @@ func (tg *testgoData) grepCountBoth(match string) int { // removed. When the test completes, the file or directory will be // removed if it exists. func (tg *testgoData) creatingTemp(path string) { + tg.t.Helper() if filepath.IsAbs(path) && !strings.HasPrefix(path, tg.tempdir) { tg.t.Fatalf("internal testsuite error: creatingTemp(%q) with absolute path not in temporary directory", path) } @@ -508,6 +578,7 @@ func (tg *testgoData) creatingTemp(path string) { // makeTempdir makes a temporary directory for a run of testgo. If // the temporary directory was already created, this does nothing. func (tg *testgoData) makeTempdir() { + tg.t.Helper() if tg.tempdir == "" { var err error tg.tempdir, err = ioutil.TempDir("", "gotest") @@ -517,6 +588,7 @@ func (tg *testgoData) makeTempdir() { // tempFile adds a temporary file for a run of testgo. 
func (tg *testgoData) tempFile(path, contents string) { + tg.t.Helper() tg.makeTempdir() tg.must(os.MkdirAll(filepath.Join(tg.tempdir, filepath.Dir(path)), 0755)) bytes := []byte(contents) @@ -531,6 +603,7 @@ func (tg *testgoData) tempFile(path, contents string) { // tempDir adds a temporary directory for a run of testgo. func (tg *testgoData) tempDir(path string) { + tg.t.Helper() tg.makeTempdir() if err := os.MkdirAll(filepath.Join(tg.tempdir, path), 0755); err != nil && !os.IsExist(err) { tg.t.Fatal(err) @@ -540,6 +613,7 @@ func (tg *testgoData) tempDir(path string) { // path returns the absolute pathname to file with the temporary // directory. func (tg *testgoData) path(name string) string { + tg.t.Helper() if tg.tempdir == "" { tg.t.Fatalf("internal testsuite error: path(%q) with no tempdir", name) } @@ -551,6 +625,7 @@ func (tg *testgoData) path(name string) string { // mustExist fails if path does not exist. func (tg *testgoData) mustExist(path string) { + tg.t.Helper() if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { tg.t.Fatalf("%s does not exist but should", path) @@ -561,13 +636,28 @@ func (tg *testgoData) mustExist(path string) { // mustNotExist fails if path exists. func (tg *testgoData) mustNotExist(path string) { + tg.t.Helper() if _, err := os.Stat(path); err == nil || !os.IsNotExist(err) { tg.t.Fatalf("%s exists but should not (%v)", path, err) } } +// mustHaveContent succeeds if filePath is a path to a file, +// and that file is readable and not empty. +func (tg *testgoData) mustHaveContent(filePath string) { + tg.mustExist(filePath) + f, err := os.Stat(filePath) + if err != nil { + tg.t.Fatal(err) + } + if f.Size() == 0 { + tg.t.Fatalf("expected %s to have data, but is empty", filePath) + } +} + // wantExecutable fails with msg if path is not executable. 
func (tg *testgoData) wantExecutable(path, msg string) { + tg.t.Helper() if st, err := os.Stat(path); err != nil { if !os.IsNotExist(err) { tg.t.Log(err) @@ -582,6 +672,7 @@ func (tg *testgoData) wantExecutable(path, msg string) { // wantArchive fails if path is not an archive. func (tg *testgoData) wantArchive(path string) { + tg.t.Helper() f, err := os.Open(path) if err != nil { tg.t.Fatal(err) @@ -596,6 +687,7 @@ func (tg *testgoData) wantArchive(path string) { // isStale reports whether pkg is stale, and why func (tg *testgoData) isStale(pkg string) (bool, string) { + tg.t.Helper() tg.run("list", "-f", "{{.Stale}}:{{.StaleReason}}", pkg) v := strings.TrimSpace(tg.getStdout()) f := strings.SplitN(v, ":", 2) @@ -613,6 +705,7 @@ func (tg *testgoData) isStale(pkg string) (bool, string) { // wantStale fails with msg if pkg is not stale. func (tg *testgoData) wantStale(pkg, reason, msg string) { + tg.t.Helper() stale, why := tg.isStale(pkg) if !stale { tg.t.Fatal(msg) @@ -624,6 +717,7 @@ func (tg *testgoData) wantStale(pkg, reason, msg string) { // wantNotStale fails with msg if pkg is stale. func (tg *testgoData) wantNotStale(pkg, reason, msg string) { + tg.t.Helper() stale, why := tg.isStale(pkg) if stale { tg.t.Fatal(msg) @@ -635,6 +729,7 @@ func (tg *testgoData) wantNotStale(pkg, reason, msg string) { // cleanup cleans up a test that runs testgo. func (tg *testgoData) cleanup() { + tg.t.Helper() if tg.wd != "" { if err := os.Chdir(tg.wd); err != nil { // We are unlikely to be able to continue. @@ -653,6 +748,7 @@ func (tg *testgoData) cleanup() { // failSSH puts an ssh executable in the PATH that always fails. // This is to stub out uses of ssh by go get. 
func (tg *testgoData) failSSH() { + tg.t.Helper() wd, err := os.Getwd() if err != nil { tg.t.Fatal(err) @@ -661,6 +757,20 @@ func (tg *testgoData) failSSH() { tg.setenv("PATH", fmt.Sprintf("%v%c%v", fail, filepath.ListSeparator, os.Getenv("PATH"))) } +func TestBuildComplex(t *testing.T) { + // Simple smoke test for build configuration. + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) + tg.run("build", "-x", "-o", os.DevNull, "complex") + + if _, err := exec.LookPath("gccgo"); err == nil { + t.Skip("golang.org/issue/22472") + tg.run("build", "-x", "-o", os.DevNull, "-compiler=gccgo", "complex") + } +} + func TestFileLineInErrorMessages(t *testing.T) { tg := testgo(t) defer tg.cleanup() @@ -759,40 +869,36 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) { } } - tg.setenv("TESTGO_IS_GO_RELEASE", "1") - tg.tempFile("d1/src/p1/p1.go", `package p1`) tg.setenv("GOPATH", tg.path("d1")) tg.run("install", "-a", "p1") - tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly") - tg.sleep() + tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, before any changes") - // Changing mtime and content of runtime/internal/sys/sys.go - // should have no effect: we're in a release, which doesn't rebuild - // for general mtime or content changes. + // Changing mtime of runtime/internal/sys/sys.go + // should have no effect: only the content matters. + // In fact this should be true even outside a release branch. 
sys := runtime.GOROOT() + "/src/runtime/internal/sys/sys.go" + tg.sleep() restore := addNL(sys) - defer restore() - tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after updating runtime/internal/sys/sys.go") restore() - tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after restoring runtime/internal/sys/sys.go") + tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after updating mtime of runtime/internal/sys/sys.go") - // But changing runtime/internal/sys/zversion.go should have an effect: - // that's how we tell when we flip from one release to another. - zversion := runtime.GOROOT() + "/src/runtime/internal/sys/zversion.go" - restore = addNL(zversion) + // But changing content of any file should have an effect. + // Previously zversion.go was the only one that mattered; + // now they all matter, so keep using sys.go. + restore = addNL(sys) defer restore() - tg.wantStale("p1", "build ID mismatch", "./testgo list claims p1 is NOT stale, incorrectly, after changing to new release") + tg.wantStale("p1", "stale dependency: runtime/internal/sys", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go") restore() tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after changing back to old release") - addNL(zversion) - tg.wantStale("p1", "build ID mismatch", "./testgo list claims p1 is NOT stale, incorrectly, after changing again to new release") + addNL(sys) + tg.wantStale("p1", "stale dependency: runtime/internal/sys", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go again") tg.run("install", "p1") tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with new release") // Restore to "old" release. 
restore() - tg.wantStale("p1", "build ID mismatch", "./testgo list claims p1 is NOT stale, incorrectly, after changing to old release after new build") + tg.wantStale("p1", "stale dependency: runtime/internal/sys", "./testgo list claims p1 is NOT stale, incorrectly, after restoring sys.go") tg.run("install", "p1") tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with old release") @@ -878,7 +984,7 @@ func TestGoInstallRebuildsStalePackagesInOtherGOPATH(t *testing.T) { func F() {}`) sep := string(filepath.ListSeparator) tg.setenv("GOPATH", tg.path("d1")+sep+tg.path("d2")) - tg.run("install", "p1") + tg.run("install", "-i", "p1") tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly") tg.wantNotStale("p2", "", "./testgo list claims p2 is stale, incorrectly") tg.sleep() @@ -889,10 +995,10 @@ func TestGoInstallRebuildsStalePackagesInOtherGOPATH(t *testing.T) { } else { tg.must(f.Close()) } - tg.wantStale("p2", "newer source file", "./testgo list claims p2 is NOT stale, incorrectly") - tg.wantStale("p1", "stale dependency", "./testgo list claims p1 is NOT stale, incorrectly") + tg.wantStale("p2", "build ID mismatch", "./testgo list claims p2 is NOT stale, incorrectly") + tg.wantStale("p1", "stale dependency: p2", "./testgo list claims p1 is NOT stale, incorrectly") - tg.run("install", "p1") + tg.run("install", "-i", "p1") tg.wantNotStale("p2", "", "./testgo list claims p2 is stale after reinstall, incorrectly") tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after reinstall, incorrectly") } @@ -988,6 +1094,7 @@ func TestGoInstallDetectsRemovedFilesInPackageMain(t *testing.T) { } func testLocalRun(tg *testgoData, exepath, local, match string) { + tg.t.Helper() out, err := exec.Command(exepath).Output() if err != nil { tg.t.Fatalf("error running %v: %v", exepath, err) @@ -999,6 +1106,7 @@ func testLocalRun(tg *testgoData, exepath, local, match string) { } func testLocalEasy(tg *testgoData, local string) { + 
tg.t.Helper() exepath := "./easy" + exeSuffix tg.creatingTemp(exepath) tg.run("build", "-o", exepath, filepath.Join("testdata", local, "easy.go")) @@ -1006,6 +1114,7 @@ func testLocalEasy(tg *testgoData, local string) { } func testLocalEasySub(tg *testgoData, local string) { + tg.t.Helper() exepath := "./easysub" + exeSuffix tg.creatingTemp(exepath) tg.run("build", "-o", exepath, filepath.Join("testdata", local, "easysub", "main.go")) @@ -1013,6 +1122,7 @@ func testLocalEasySub(tg *testgoData, local string) { } func testLocalHard(tg *testgoData, local string) { + tg.t.Helper() exepath := "./hard" + exeSuffix tg.creatingTemp(exepath) tg.run("build", "-o", exepath, filepath.Join("testdata", local, "hard.go")) @@ -1020,6 +1130,7 @@ func testLocalHard(tg *testgoData, local string) { } func testLocalInstall(tg *testgoData, local string) { + tg.t.Helper() tg.runFail("install", filepath.Join("testdata", local, "easy.go")) } @@ -1050,6 +1161,7 @@ func TestLocalImportsGoInstallShouldFail(t *testing.T) { const badDirName = `#$%:, &()*;<=>?\^{}` func copyBad(tg *testgoData) { + tg.t.Helper() if runtime.GOOS == "windows" { tg.t.Skipf("skipping test because %q is an invalid directory name", badDirName) } @@ -1361,6 +1473,10 @@ func TestRelativeImportsGoTest(t *testing.T) { func TestRelativeImportsGoTestDashI(t *testing.T) { tg := testgo(t) defer tg.cleanup() + + // don't let test -i overwrite runtime + tg.wantNotStale("runtime", "", "must be non-stale before test -i") + tg.run("test", "-i", "./testdata/testimport") } @@ -1407,6 +1523,28 @@ func TestInstallFailsWithNoBuildableFiles(t *testing.T) { tg.grepStderr("build constraints exclude all Go files", "go install cgotest did not report 'build constraints exclude all Go files'") } +// Issue 21895 +func TestMSanAndRaceRequireCgo(t *testing.T) { + if !canMSan && !canRace { + t.Skip("skipping because both msan and the race detector are not supported") + } + + tg := testgo(t) + defer tg.cleanup() + tg.tempFile("triv.go", `package 
main; func main() {}`) + tg.setenv("CGO_ENABLED", "0") + if canRace { + tg.runFail("install", "-race", "triv.go") + tg.grepStderr("-race requires cgo", "did not correctly report that -race requires cgo") + tg.grepStderrNot("-msan", "reported that -msan instead of -race requires cgo") + } + if canMSan { + tg.runFail("install", "-msan", "triv.go") + tg.grepStderr("-msan requires cgo", "did not correctly report that -msan requires cgo") + tg.grepStderrNot("-race", "reported that -race instead of -msan requires cgo") + } +} + func TestRelativeGOBINFail(t *testing.T) { tg := testgo(t) defer tg.cleanup() @@ -1463,19 +1601,16 @@ func TestPackageNotStaleWithTrailingSlash(t *testing.T) { defer tg.cleanup() // Make sure the packages below are not stale. - tg.run("install", "runtime", "os", "io") + tg.wantNotStale("runtime", "", "must be non-stale before test runs") + tg.wantNotStale("os", "", "must be non-stale before test runs") + tg.wantNotStale("io", "", "must be non-stale before test runs") goroot := runtime.GOROOT() tg.setenv("GOROOT", goroot+"/") - want := "" - if isGoRelease { - want = "standard package in Go release distribution" - } - - tg.wantNotStale("runtime", want, "with trailing slash in GOROOT, runtime listed as stale") - tg.wantNotStale("os", want, "with trailing slash in GOROOT, os listed as stale") - tg.wantNotStale("io", want, "with trailing slash in GOROOT, io listed as stale") + tg.wantNotStale("runtime", "", "with trailing slash in GOROOT, runtime listed as stale") + tg.wantNotStale("os", "", "with trailing slash in GOROOT, os listed as stale") + tg.wantNotStale("io", "", "with trailing slash in GOROOT, io listed as stale") } // With $GOBIN set, binaries get installed to $GOBIN. @@ -1617,6 +1752,27 @@ func TestRejectRelativePathsInGOPATHCommandLinePackage(t *testing.T) { tg.grepStderr("GOPATH entry is relative", "expected an error message rejecting relative GOPATH entries") } +// Issue 21928. 
+func TestRejectBlankPathsInGOPATH(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + sep := string(filepath.ListSeparator) + tg.setenv("GOPATH", " "+sep+filepath.Join(tg.pwd(), "testdata")) + tg.runFail("build", "go-cmd-test") + tg.grepStderr("GOPATH entry is relative", "expected an error message rejecting relative GOPATH entries") +} + +// Issue 21928. +func TestIgnoreEmptyPathsInGOPATH(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.creatingTemp("testdata/bin/go-cmd-test" + exeSuffix) + sep := string(filepath.ListSeparator) + tg.setenv("GOPATH", ""+sep+filepath.Join(tg.pwd(), "testdata")) + tg.run("install", "go-cmd-test") + tg.wantExecutable("testdata/bin/go-cmd-test"+exeSuffix, "go install go-cmd-test did not write to testdata/bin/go-cmd-test") +} + // Issue 4104. func TestGoTestWithPackageListedMultipleTimes(t *testing.T) { tg := testgo(t) @@ -1675,6 +1831,20 @@ func TestGoListDedupsPackages(t *testing.T) { } } +func TestGoListDeps(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempDir("src/p1/p2/p3/p4") + tg.setenv("GOPATH", tg.path(".")) + tg.tempFile("src/p1/p.go", "package p1\nimport _ \"p1/p2\"\n") + tg.tempFile("src/p1/p2/p.go", "package p2\nimport _ \"p1/p2/p3\"\n") + tg.tempFile("src/p1/p2/p3/p.go", "package p3\nimport _ \"p1/p2/p3/p4\"\n") + tg.tempFile("src/p1/p2/p3/p4/p.go", "package p4\n") + tg.run("list", "-f", "{{.Deps}}", "p1") + tg.grepStdout("p1/p2/p3/p4", "Deps(p1) does not mention p4") +} + // Issue 4096. Validate the output of unsuccessful go install foo/quxx. 
func TestUnsuccessfulGoInstallShouldMentionMissingPackage(t *testing.T) { tg := testgo(t) @@ -1905,6 +2075,16 @@ func TestGoTestMutexprofileDashOControlsBinaryLocation(t *testing.T) { tg.wantExecutable("myerrors.test"+exeSuffix, "go test -mutexprofile -o myerrors.test did not create myerrors.test") } +func TestGoBuildNonMain(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + // TODO: tg.parallel() + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) + tg.runFail("build", "-buildmode=exe", "-o", "not_main"+exeSuffix, "not_main") + tg.grepStderr("-buildmode=exe requires exactly one main package", "go build with -o and -buildmode=exe should on a non-main package should throw an error") + tg.mustNotExist("not_main" + exeSuffix) +} + func TestGoTestDashCDashOControlsBinaryLocation(t *testing.T) { tg := testgo(t) defer tg.cleanup() @@ -1928,6 +2108,10 @@ func TestGoTestDashIDashOWritesBinary(t *testing.T) { defer tg.cleanup() tg.parallel() tg.makeTempdir() + + // don't let test -i overwrite runtime + tg.wantNotStale("runtime", "", "must be non-stale before test -i") + tg.run("test", "-v", "-i", "-o", tg.path("myerrors.test"+exeSuffix), "errors") tg.grepBothNot("PASS|FAIL", "test should not have run") tg.wantExecutable(tg.path("myerrors.test"+exeSuffix), "go test -o myerrors.test did not create myerrors.test") @@ -2194,10 +2378,10 @@ func TestSourceFileNameOrderPreserved(t *testing.T) { // Check that coverage analysis works at all. // Don't worry about the exact numbers but require not 0.0%. func checkCoverage(tg *testgoData, data string) { + tg.t.Helper() if regexp.MustCompile(`[^0-9]0\.0%`).MatchString(data) { tg.t.Error("some coverage results are 0.0%") } - tg.t.Log(data) } func TestCoverageRuns(t *testing.T) { @@ -2214,6 +2398,7 @@ func TestCoverageRuns(t *testing.T) { } // Check that coverage analysis uses set mode. +// Also check that coverage profiles merge correctly. 
func TestCoverageUsesSetMode(t *testing.T) { if testing.Short() { t.Skip("don't build libraries for coverage in short mode") @@ -2221,7 +2406,7 @@ func TestCoverageUsesSetMode(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.creatingTemp("testdata/cover.out") - tg.run("test", "-short", "-cover", "encoding/binary", "-coverprofile=testdata/cover.out") + tg.run("test", "-short", "-cover", "encoding/binary", "errors", "-coverprofile=testdata/cover.out") data := tg.getStdout() + tg.getStderr() if out, err := ioutil.ReadFile("testdata/cover.out"); err != nil { t.Error(err) @@ -2229,6 +2414,15 @@ func TestCoverageUsesSetMode(t *testing.T) { if !bytes.Contains(out, []byte("mode: set")) { t.Error("missing mode: set") } + if !bytes.Contains(out, []byte("errors.go")) { + t.Error("missing errors.go") + } + if !bytes.Contains(out, []byte("binary.go")) { + t.Error("missing binary.go") + } + if bytes.Count(out, []byte("mode: set")) != 1 { + t.Error("too many mode: set") + } } checkCoverage(tg, data) } @@ -2256,6 +2450,14 @@ func TestCoverageUsesAtomicModeForRace(t *testing.T) { checkCoverage(tg, data) } +func TestCoverageSyncAtomicImport(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) + tg.run("test", "-short", "-cover", "-covermode=atomic", "-coverpkg=coverdep/p1", "coverdep") +} + func TestCoverageImportMainLoop(t *testing.T) { tg := testgo(t) defer tg.cleanup() @@ -2266,6 +2468,73 @@ func TestCoverageImportMainLoop(t *testing.T) { tg.grepStderr("not an importable package", "did not detect import main") } +func TestCoveragePattern(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) + + // If coverpkg=sleepy... expands by package loading + // (as opposed to pattern matching on deps) + // then it will try to load sleepybad, which does not compile, + // and the test command will fail. 
+ tg.run("test", "-coverprofile="+filepath.Join(tg.tempdir, "cover.out"), "-coverpkg=sleepy...", "-run=^$", "sleepy1") +} + +func TestCoverageErrorLine(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) + tg.setenv("GOTMPDIR", tg.tempdir) + + tg.runFail("test", "coverbad") + tg.grepStderr(`coverbad[\\/]p\.go:4`, "did not find coverbad/p.go:4") + if canCgo { + tg.grepStderr(`coverbad[\\/]p1\.go:6`, "did not find coverbad/p1.go:6") + } + tg.grepStderrNot(regexp.QuoteMeta(tg.tempdir), "found temporary directory in error") + stderr := tg.getStderr() + + tg.runFail("test", "-cover", "coverbad") + stderr2 := tg.getStderr() + + // It's OK that stderr2 drops the character position in the error, + // because of the //line directive (see golang.org/issue/22662). + stderr = strings.Replace(stderr, "p.go:4:2:", "p.go:4:", -1) + if stderr != stderr2 { + t.Logf("test -cover changed error messages:\nbefore:\n%s\n\nafter:\n%s", stderr, stderr2) + t.Skip("golang.org/issue/22660") + t.FailNow() + } +} + +func TestTestBuildFailureOutput(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) + + // Doesn't build, -x output should not claim to run test. 
+ tg.runFail("test", "-x", "coverbad") + tg.grepStderrNot(`[\\/]coverbad\.test( |$)`, "claimed to run test") +} + +func TestCoverageFunc(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) + + tg.run("test", "-outputdir="+tg.tempdir, "-coverprofile=cover.out", "coverasm") + tg.run("tool", "cover", "-func="+filepath.Join(tg.tempdir, "cover.out")) + tg.grepStdout(`\tg\t*100.0%`, "did not find g 100% covered") + tg.grepStdoutNot(`\tf\t*[0-9]`, "reported coverage for assembly function f") +} + func TestPluginNonMain(t *testing.T) { wd, err := os.Getwd() if err != nil { @@ -2503,7 +2772,10 @@ func main() { tg.run("run", tg.path("foo.go")) } -// "go test -c -test.bench=XXX errors" should not hang +// "go test -c -test.bench=XXX errors" should not hang. +// "go test -c" should also produce reproducible binaries. +// "go test -c" should also appear to write a new binary every time, +// even if it's really just updating the mtime on an existing up-to-date binary. 
func TestIssue6480(t *testing.T) { tg := testgo(t) defer tg.cleanup() @@ -2511,6 +2783,52 @@ func TestIssue6480(t *testing.T) { tg.makeTempdir() tg.cd(tg.path(".")) tg.run("test", "-c", "-test.bench=XXX", "errors") + tg.run("test", "-c", "-o", "errors2.test", "errors") + + data1, err := ioutil.ReadFile("errors.test" + exeSuffix) + tg.must(err) + data2, err := ioutil.ReadFile("errors2.test") // no exeSuffix because -o above doesn't have it + tg.must(err) + if !bytes.Equal(data1, data2) { + t.Fatalf("go test -c errors produced different binaries when run twice") + } + + start := time.Now() + tg.run("test", "-x", "-c", "-test.bench=XXX", "errors") + tg.grepStderrNot(`[\\/]link|gccgo`, "incorrectly relinked up-to-date test binary") + info, err := os.Stat("errors.test" + exeSuffix) + if err != nil { + t.Fatal(err) + } + start = truncateLike(start, info.ModTime()) + if info.ModTime().Before(start) { + t.Fatalf("mtime of errors.test predates test -c command (%v < %v)", info.ModTime(), start) + } + + start = time.Now() + tg.run("test", "-x", "-c", "-o", "errors2.test", "errors") + tg.grepStderrNot(`[\\/]link|gccgo`, "incorrectly relinked up-to-date test binary") + info, err = os.Stat("errors2.test") + if err != nil { + t.Fatal(err) + } + start = truncateLike(start, info.ModTime()) + if info.ModTime().Before(start) { + t.Fatalf("mtime of errors2.test predates test -c command (%v < %v)", info.ModTime(), start) + } +} + +// truncateLike returns the result of truncating t to the apparent precision of p. 
+func truncateLike(t, p time.Time) time.Time { + nano := p.UnixNano() + d := 1 * time.Nanosecond + for nano%int64(d) == 0 && d < 1*time.Second { + d *= 10 + } + for nano%int64(d) == 0 && d < 2*time.Second { + d *= 2 + } + return t.Truncate(d) } // cmd/cgo: undefined reference when linking a C-library using gccgo @@ -2521,6 +2839,7 @@ func TestIssue7573(t *testing.T) { if _, err := exec.LookPath("gccgo"); err != nil { t.Skip("skipping because no gccgo compiler found") } + t.Skip("golang.org/issue/22472") tg := testgo(t) defer tg.cleanup() @@ -2603,17 +2922,21 @@ func TestBuildDashIInstallsDependencies(t *testing.T) { func F() { foo.F() }`) tg.setenv("GOPATH", tg.path(".")) + // don't let build -i overwrite runtime + tg.wantNotStale("runtime", "", "must be non-stale before build -i") + checkbar := func(desc string) { - tg.sleep() - tg.must(os.Chtimes(tg.path("src/x/y/foo/foo.go"), time.Now(), time.Now())) - tg.sleep() tg.run("build", "-v", "-i", "x/y/bar") tg.grepBoth("x/y/foo", "first build -i "+desc+" did not build x/y/foo") tg.run("build", "-v", "-i", "x/y/bar") tg.grepBothNot("x/y/foo", "second build -i "+desc+" built x/y/foo") } checkbar("pkg") + tg.creatingTemp("bar" + exeSuffix) + tg.sleep() + tg.tempFile("src/x/y/foo/foo.go", `package foo + func F() { F() }`) tg.tempFile("src/x/y/bar/bar.go", `package main import "x/y/foo" func main() { foo.F() }`) @@ -2644,6 +2967,21 @@ func TestGoTestFooTestWorks(t *testing.T) { tg.run("test", "testdata/standalone_test.go") } +// Issue 22388 +func TestGoTestMainWithWrongSignature(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.runFail("test", "testdata/standalone_main_wrong_test.go") + tg.grepStderr(`wrong signature for TestMain, must be: func TestMain\(m \*testing.M\)`, "detected wrong error message") +} + +func TestGoTestMainAsNormalTest(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.run("test", "testdata/standalone_main_normal_test.go") + tg.grepBoth(okPattern, "go test did not say ok") +} + func 
TestGoTestFlagsAfterPackage(t *testing.T) { tg := testgo(t) defer tg.cleanup() @@ -2766,37 +3104,33 @@ func TestGoVetWithExternalTests(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.makeTempdir() - tg.run("install", "cmd/vet") tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) tg.runFail("vet", "vetpkg") - tg.grepBoth("missing argument for Printf", "go vet vetpkg did not find missing argument for Printf") + tg.grepBoth("Printf", "go vet vetpkg did not find missing argument for Printf") } func TestGoVetWithTags(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.makeTempdir() - tg.run("install", "cmd/vet") tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) tg.runFail("vet", "-tags", "tagtest", "vetpkg") - tg.grepBoth(`c\.go.*wrong number of args for format`, "go vet vetpkg did not run scan tagged file") + tg.grepBoth(`c\.go.*Printf`, "go vet vetpkg did not run scan tagged file") } func TestGoVetWithFlagsOn(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.makeTempdir() - tg.run("install", "cmd/vet") tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) tg.runFail("vet", "-printf", "vetpkg") - tg.grepBoth("missing argument for Printf", "go vet -printf vetpkg did not find missing argument for Printf") + tg.grepBoth("Printf", "go vet -printf vetpkg did not find missing argument for Printf") } func TestGoVetWithFlagsOff(t *testing.T) { tg := testgo(t) defer tg.cleanup() tg.makeTempdir() - tg.run("install", "cmd/vet") tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) tg.run("vet", "-printf=false", "vetpkg") } @@ -3142,11 +3476,12 @@ func TestGoInstallPkgdir(t *testing.T) { defer tg.cleanup() tg.makeTempdir() pkg := tg.path(".") - tg.run("install", "-pkgdir", pkg, "errors") - _, err := os.Stat(filepath.Join(pkg, "errors.a")) - tg.must(err) - _, err = os.Stat(filepath.Join(pkg, "runtime.a")) - tg.must(err) + tg.run("install", "-pkgdir", pkg, "sync") + tg.mustExist(filepath.Join(pkg, "sync.a")) + tg.mustNotExist(filepath.Join(pkg, 
"sync/atomic.a")) + tg.run("install", "-i", "-pkgdir", pkg, "sync") + tg.mustExist(filepath.Join(pkg, "sync.a")) + tg.mustExist(filepath.Join(pkg, "sync/atomic.a")) } func TestGoTestRaceInstallCgo(t *testing.T) { @@ -3457,15 +3792,6 @@ func TestGoBuildARM(t *testing.T) { tg.grepStderrNot("unable to find math.a", "did not build math.a correctly") } -func TestIssue13655(t *testing.T) { - tg := testgo(t) - defer tg.cleanup() - for _, pkg := range []string{"runtime", "runtime/internal/atomic"} { - tg.run("list", "-f", "{{.Deps}}", pkg) - tg.grepStdout("runtime/internal/sys", "did not find required dependency of "+pkg+" on runtime/internal/sys") - } -} - // For issue 14337. func TestParallelTest(t *testing.T) { tg := testgo(t) @@ -3563,9 +3889,9 @@ func TestBinaryOnlyPackages(t *testing.T) { package p1 `) - tg.wantStale("p1", "cannot access install target", "p1 is binary-only but has no binary, should be stale") + tg.wantStale("p1", "missing or invalid binary-only package", "p1 is binary-only but has no binary, should be stale") tg.runFail("install", "p1") - tg.grepStderr("missing or invalid package binary", "did not report attempt to compile binary-only package") + tg.grepStderr("missing or invalid binary-only package", "did not report attempt to compile binary-only package") tg.tempFile("src/p1/p1.go", ` package p1 @@ -3588,11 +3914,12 @@ func TestBinaryOnlyPackages(t *testing.T) { tg.tempFile("src/p1/missing.go", `//go:binary-only-package package p1 + import _ "fmt" func G() `) - tg.wantNotStale("p1", "no source code", "should NOT want to rebuild p1 (first)") + tg.wantNotStale("p1", "binary-only package", "should NOT want to rebuild p1 (first)") tg.run("install", "-x", "p1") // no-op, up to date - tg.grepBothNot("/compile", "should not have run compiler") + tg.grepBothNot(`[\\/]compile`, "should not have run compiler") tg.run("install", "p2") // does not rebuild p1 (or else p2 will fail) tg.wantNotStale("p2", "", "should NOT want to rebuild p2") @@ -3602,7 +3929,7 @@ 
func TestBinaryOnlyPackages(t *testing.T) { package p1 func H() `) - tg.wantNotStale("p1", "no source code", "should NOT want to rebuild p1 (second)") + tg.wantNotStale("p1", "binary-only package", "should NOT want to rebuild p1 (second)") tg.wantNotStale("p2", "", "should NOT want to rebuild p2") tg.tempFile("src/p3/p3.go", ` @@ -3622,9 +3949,11 @@ func TestBinaryOnlyPackages(t *testing.T) { tg.grepStdout("hello from p1", "did not see message from p1") tg.tempFile("src/p4/p4.go", `package main`) + // The odd string split below avoids vet complaining about + // a // +build line appearing too late in this source file. tg.tempFile("src/p4/p4not.go", `//go:binary-only-package - // +build asdf + /`+`/ +build asdf package main `) @@ -3634,6 +3963,10 @@ func TestBinaryOnlyPackages(t *testing.T) { // Issue 16050 and 21884. func TestLinkSysoFiles(t *testing.T) { + if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" { + t.Skip("not linux/amd64") + } + tg := testgo(t) defer tg.cleanup() tg.parallel() @@ -3714,6 +4047,7 @@ func TestGoEnv(t *testing.T) { tg := testgo(t) tg.parallel() defer tg.cleanup() + tg.setenv("GOOS", "freebsd") // to avoid invalid pair errors tg.setenv("GOARCH", "arm") tg.run("env", "GOARCH") tg.grepStdout("^arm$", "GOARCH not honored") @@ -3868,6 +4202,24 @@ func TestBenchTimeout(t *testing.T) { tg.run("test", "-bench", ".", "-timeout", "750ms", "testdata/timeoutbench_test.go") } +// Issue 19394 +func TestWriteProfilesOnTimeout(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.tempDir("profiling") + tg.tempFile("profiling/timeouttest_test.go", `package timeouttest_test +import "testing" +import "time" +func TestSleep(t *testing.T) { time.Sleep(time.Second) }`) + tg.cd(tg.path("profiling")) + tg.runFail( + "test", + "-cpuprofile", tg.path("profiling/cpu.pprof"), "-memprofile", tg.path("profiling/mem.pprof"), + "-timeout", "1ms") + tg.mustHaveContent(tg.path("profiling/cpu.pprof")) + tg.mustHaveContent(tg.path("profiling/mem.pprof")) +} + 
func TestLinkXImportPathEscape(t *testing.T) { // golang.org/issue/16710 tg := testgo(t) @@ -4010,9 +4362,7 @@ func TestBuildTagsNoComma(t *testing.T) { defer tg.cleanup() tg.makeTempdir() tg.setenv("GOPATH", tg.path("go")) - tg.run("install", "-tags", "tag1 tag2", "math") - tg.runFail("install", "-tags", "tag1,tag2", "math") - tg.grepBoth("space-separated list contains comma", "-tags with a comma-separated list didn't error") + tg.run("build", "-tags", "tag1 tag2", "math") tg.runFail("build", "-tags", "tag1,tag2", "math") tg.grepBoth("space-separated list contains comma", "-tags with a comma-separated list didn't error") } @@ -4091,7 +4441,7 @@ func TestExecutableGOROOT(t *testing.T) { if err != nil { t.Fatal(err) } - m := regexp.MustCompile("const DefaultGoroot = `([^`]+)`").FindStringSubmatch(string(data)) + m := regexp.MustCompile("var DefaultGoroot = `([^`]+)`").FindStringSubmatch(string(data)) if m == nil { t.Fatal("cannot find DefaultGoroot in ../../runtime/internal/sys/zversion.go") } @@ -4116,6 +4466,43 @@ func TestExecutableGOROOT(t *testing.T) { } check(t, symGoTool, newRoot) }) + + tg.must(os.RemoveAll(tg.path("new/pkg"))) + + // Binaries built in the new tree should report the + // new tree when they call runtime.GOROOT(). + // This is implemented by having the go tool pass a -X option + // to the linker setting runtime/internal/sys.DefaultGoroot. + t.Run("RuntimeGoroot", func(t *testing.T) { + // Build a working GOROOT the easy way, with symlinks. 
+ testenv.MustHaveSymlink(t) + if err := os.Symlink(filepath.Join(testGOROOT, "src"), tg.path("new/src")); err != nil { + t.Fatal(err) + } + if err := os.Symlink(filepath.Join(testGOROOT, "pkg"), tg.path("new/pkg")); err != nil { + t.Fatal(err) + } + + cmd := exec.Command(newGoTool, "run", "testdata/print_goroot.go") + cmd.Env = env + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("%s run testdata/print_goroot.go: %v, %s", newGoTool, err, out) + } + goroot, err := filepath.EvalSymlinks(strings.TrimSpace(string(out))) + if err != nil { + t.Fatal(err) + } + want, err := filepath.EvalSymlinks(tg.path("new")) + if err != nil { + t.Fatal(err) + } + if !strings.EqualFold(goroot, want) { + t.Errorf("go run testdata/print_goroot.go:\nhave %s\nwant %s", goroot, want) + } else { + t.Logf("go run testdata/print_goroot.go: %s", goroot) + } + }) } func TestNeedVersion(t *testing.T) { @@ -4142,7 +4529,8 @@ func TestUserOverrideFlags(t *testing.T) { tg := testgo(t) defer tg.cleanup() - tg.parallel() + // Don't call tg.parallel, as creating override.h and override.a may + // confuse other tests. tg.tempFile("override.go", `package main import "C" @@ -4153,7 +4541,7 @@ func GoFunc() {} func main() {}`) tg.creatingTemp("override.a") tg.creatingTemp("override.h") - tg.run("build", "-x", "-buildmode=c-archive", "-gcflags=-shared=false", tg.path("override.go")) + tg.run("build", "-x", "-buildmode=c-archive", "-gcflags=all=-shared=false", tg.path("override.go")) tg.grepStderr("compile .*-shared .*-shared=false", "user can not override code generation flag") } @@ -4161,65 +4549,22 @@ func TestCgoFlagContainsSpace(t *testing.T) { if !canCgo { t.Skip("skipping because cgo not enabled") } - tg := testgo(t) defer tg.cleanup() - ccName := filepath.Base(testCC) - - tg.tempFile(fmt.Sprintf("src/%s/main.go", ccName), fmt.Sprintf(`package main - import ( - "os" - "os/exec" - "strings" - ) - - func main() { - cmd := exec.Command(%q, os.Args[1:]...) 
- cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - if err != nil { - panic(err) - } - - if os.Args[len(os.Args)-1] == "trivial.c" { - return - } - - var success bool - for _, arg := range os.Args { - switch { - case strings.Contains(arg, "c flags"): - if success { - panic("duplicate CFLAGS") - } - success = true - case strings.Contains(arg, "ld flags"): - if success { - panic("duplicate LDFLAGS") - } - success = true - } - } - if !success { - panic("args should contains '-Ic flags' or '-Lld flags'") - } - } - `, testCC)) - tg.cd(tg.path(fmt.Sprintf("src/%s", ccName))) - tg.run("build") - tg.setenv("CC", tg.path(fmt.Sprintf("src/%s/%s", ccName, ccName))) - - tg.tempFile("src/cgo/main.go", `package main + tg.makeTempdir() + tg.cd(tg.path(".")) + tg.tempFile("main.go", `package main // #cgo CFLAGS: -I"c flags" // #cgo LDFLAGS: -L"ld flags" import "C" func main() {} `) - tg.cd(tg.path("src/cgo")) - tg.run("run", "main.go") + tg.run("run", "-x", "main.go") + tg.grepStderr(`"-I[^"]+c flags"`, "did not find quoted c flags") + tg.grepStderrNot(`"-I[^"]+c flags".*"-I[^"]+c flags"`, "found too many quoted c flags") + tg.grepStderr(`"-L[^"]+ld flags"`, "did not find quoted ld flags") + tg.grepStderrNot(`"-L[^"]+c flags".*"-L[^"]+c flags"`, "found too many quoted ld flags") } // Issue #20435. 
@@ -4260,7 +4605,7 @@ func main() {}`) before() tg.run("install", "mycmd") after() - tg.wantStale("mycmd", "build ID mismatch", "should be stale after environment variable change") + tg.wantStale("mycmd", "stale dependency: runtime/internal/sys", "should be stale after environment variable change") } } @@ -4353,3 +4698,656 @@ func TestListTests(t *testing.T) { t.Run("Example1", testWith("Example", "ExampleSimple")) t.Run("Example2", testWith("Example", "ExampleWithEmptyOutput")) } + +func TestBuildmodePIE(t *testing.T) { + if runtime.Compiler == "gccgo" { + t.Skipf("skipping test because buildmode=pie is not supported on gccgo") + } + + platform := fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH) + switch platform { + case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x", + "android/amd64", "android/arm", "android/arm64", "android/386": + case "darwin/amd64": + default: + t.Skipf("skipping test because buildmode=pie is not supported on %s", platform) + } + + tg := testgo(t) + defer tg.cleanup() + + tg.tempFile("main.go", `package main; func main() { print("hello") }`) + src := tg.path("main.go") + obj := tg.path("main") + tg.run("build", "-buildmode=pie", "-o", obj, src) + + switch runtime.GOOS { + case "linux", "android": + f, err := elf.Open(obj) + if err != nil { + t.Fatal(err) + } + defer f.Close() + if f.Type != elf.ET_DYN { + t.Errorf("PIE type must be ET_DYN, but %s", f.Type) + } + case "darwin": + f, err := macho.Open(obj) + if err != nil { + t.Fatal(err) + } + defer f.Close() + if f.Flags&macho.FlagDyldLink == 0 { + t.Error("PIE must have DyldLink flag, but not") + } + if f.Flags&macho.FlagPIE == 0 { + t.Error("PIE must have PIE flag, but not") + } + default: + panic("unreachable") + } + + out, err := exec.Command(obj).CombinedOutput() + if err != nil { + t.Fatal(err) + } + + if string(out) != "hello" { + t.Errorf("got %q; want %q", out, "hello") + } +} + +func TestExecBuildX(t *testing.T) { + if !canCgo { + 
t.Skip("skipping because cgo not enabled") + } + + if runtime.GOOS == "plan9" || runtime.GOOS == "windows" { + t.Skipf("skipping because unix shell is not supported on %s", runtime.GOOS) + } + + tg := testgo(t) + defer tg.cleanup() + + tg.tempFile("main.go", `package main; import "C"; func main() { print("hello") }`) + src := tg.path("main.go") + obj := tg.path("main") + tg.run("build", "-x", "-o", obj, src) + sh := tg.path("test.sh") + err := ioutil.WriteFile(sh, []byte(tg.getStderr()), 0666) + if err != nil { + t.Fatal(err) + } + + out, err := exec.Command(obj).CombinedOutput() + if err != nil { + t.Fatal(err) + } + if string(out) != "hello" { + t.Fatalf("got %q; want %q", out, "hello") + } + + err = os.Remove(obj) + if err != nil { + t.Fatal(err) + } + + out, err = exec.Command("/usr/bin/env", "bash", "-x", sh).CombinedOutput() + if err != nil { + t.Fatalf("/bin/sh %s: %v\n%s", sh, err, out) + } + t.Logf("shell output:\n%s", out) + + out, err = exec.Command(obj).CombinedOutput() + if err != nil { + t.Fatal(err) + } + if string(out) != "hello" { + t.Fatalf("got %q; want %q", out, "hello") + } +} + +func TestParallelNumber(t *testing.T) { + for _, n := range [...]string{"-1", "0"} { + t.Run(n, func(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.runFail("test", "-parallel", n, "testdata/standalone_parallel_sub_test.go") + tg.grepBoth("-parallel can only be given", "go test -parallel with N<1 did not error") + }) + } +} + +func TestWrongGOOSErrorBeforeLoadError(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) + tg.setenv("GOOS", "windwos") + tg.runFail("build", "exclude") + tg.grepStderr("unsupported GOOS/GOARCH pair", "GOOS=windwos go build exclude did not report 'unsupported GOOS/GOARCH pair'") +} + +func TestUpxCompression(t *testing.T) { + if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" { + t.Skipf("skipping upx test on %s/%s", runtime.GOOS, runtime.GOARCH) + } + + out, err := 
exec.Command("upx", "--version").CombinedOutput() + if err != nil { + t.Skip("skipping because upx is not available") + } + + // upx --version prints `upx ` in the first line of output: + // upx 3.94 + // [...] + re := regexp.MustCompile(`([[:digit:]]+)\.([[:digit:]]+)`) + upxVersion := re.FindStringSubmatch(string(out)) + if len(upxVersion) != 3 { + t.Errorf("bad upx version string: %s", upxVersion) + } + + major, err1 := strconv.Atoi(upxVersion[1]) + minor, err2 := strconv.Atoi(upxVersion[2]) + if err1 != nil || err2 != nil { + t.Errorf("bad upx version string: %s", upxVersion[0]) + } + + // Anything below 3.94 is known not to work with go binaries + if (major < 3) || (major == 3 && minor < 94) { + t.Skipf("skipping because upx version %v.%v is too old", major, minor) + } + + tg := testgo(t) + defer tg.cleanup() + + tg.tempFile("main.go", `package main; import "fmt"; func main() { fmt.Print("hello upx") }`) + src := tg.path("main.go") + obj := tg.path("main") + tg.run("build", "-o", obj, src) + + out, err = exec.Command("upx", obj).CombinedOutput() + if err != nil { + t.Logf("executing upx\n%s\n", out) + t.Fatalf("upx failed with %v", err) + } + + out, err = exec.Command(obj).CombinedOutput() + if err != nil { + t.Logf("%s", out) + t.Fatalf("running compressed go binary failed with error %s", err) + } + if string(out) != "hello upx" { + t.Fatalf("bad output from compressed go binary:\ngot %q; want %q", out, "hello upx") + } +} + +func TestGOTMPDIR(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) + tg.makeTempdir() + tg.setenv("GOTMPDIR", tg.tempdir) + tg.setenv("GOCACHE", "off") + + // complex/x is a trivial non-main package. 
+ tg.run("build", "-work", "-x", "complex/w") + tg.grepStderr("WORK="+regexp.QuoteMeta(tg.tempdir), "did not work in $GOTMPDIR") +} + +func TestBuildCache(t *testing.T) { + if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") { + t.Skip("GODEBUG gocacheverify") + } + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) + tg.makeTempdir() + tg.setenv("GOCACHE", tg.tempdir) + + // complex/w is a trivial non-main package. + // It imports nothing, so there should be no Deps. + tg.run("list", "-f={{join .Deps \" \"}}", "complex/w") + tg.grepStdoutNot(".+", "complex/w depends on unexpected packages") + + tg.run("build", "-x", "complex/w") + tg.grepStderr(`[\\/]compile|gccgo`, "did not run compiler") + + tg.run("build", "-x", "complex/w") + tg.grepStderrNot(`[\\/]compile|gccgo`, "ran compiler incorrectly") + + tg.run("build", "-a", "-x", "complex/w") + tg.grepStderr(`[\\/]compile|gccgo`, "did not run compiler with -a") + + // complex is a non-trivial main package. + // the link step should not be cached. + tg.run("build", "-o", os.DevNull, "-x", "complex") + tg.grepStderr(`[\\/]link|gccgo`, "did not run linker") + + tg.run("build", "-o", os.DevNull, "-x", "complex") + tg.grepStderr(`[\\/]link|gccgo`, "did not run linker") +} + +func TestCacheOutput(t *testing.T) { + // Test that command output is cached and replayed too. 
+ if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") { + t.Skip("GODEBUG gocacheverify") + } + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) + tg.makeTempdir() + tg.setenv("GOCACHE", tg.tempdir) + + tg.run("build", "-gcflags=-m", "errors") + stdout1 := tg.getStdout() + stderr1 := tg.getStderr() + + tg.run("build", "-gcflags=-m", "errors") + stdout2 := tg.getStdout() + stderr2 := tg.getStderr() + + if stdout2 != stdout1 || stderr2 != stderr1 { + t.Errorf("cache did not reproduce output:\n\nstdout1:\n%s\n\nstdout2:\n%s\n\nstderr1:\n%s\n\nstderr2:\n%s", + stdout1, stdout2, stderr1, stderr2) + } +} + +func TestCacheCoverage(t *testing.T) { + if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") { + t.Skip("GODEBUG gocacheverify") + } + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) + tg.makeTempdir() + + tg.setenv("GOCACHE", filepath.Join(tg.tempdir, "c1")) + tg.run("test", "-cover", "strings") + tg.run("test", "-cover", "math", "strings") +} + +func TestIssue22588(t *testing.T) { + // Don't get confused by stderr coming from tools. 
+ tg := testgo(t) + defer tg.cleanup() + tg.parallel() + + if _, err := os.Stat("/usr/bin/time"); err != nil { + t.Skip(err) + } + + tg.run("list", "-f={{.Stale}}", "runtime") + tg.run("list", "-toolexec=/usr/bin/time", "-f={{.Stale}}", "runtime") + tg.grepStdout("false", "incorrectly reported runtime as stale") +} + +func TestIssue22531(t *testing.T) { + if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") { + t.Skip("GODEBUG gocacheverify") + } + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOPATH", tg.tempdir) + tg.setenv("GOCACHE", filepath.Join(tg.tempdir, "cache")) + tg.tempFile("src/m/main.go", "package main /* c1 */; func main() {}\n") + tg.run("install", "-x", "m") + tg.run("list", "-f", "{{.Stale}}", "m") + tg.grepStdout("false", "reported m as stale after install") + tg.run("tool", "buildid", filepath.Join(tg.tempdir, "bin/m"+exeSuffix)) + + // The link action ID did not include the full main build ID, + // even though the full main build ID is written into the + // eventual binary. That caused the following install to + // be a no-op, thinking the gofmt binary was up-to-date, + // even though .Stale could see it was not. 
+ tg.tempFile("src/m/main.go", "package main /* c2 */; func main() {}\n") + tg.run("install", "-x", "m") + tg.run("list", "-f", "{{.Stale}}", "m") + tg.grepStdout("false", "reported m as stale after reinstall") + tg.run("tool", "buildid", filepath.Join(tg.tempdir, "bin/m"+exeSuffix)) +} + +func TestIssue22596(t *testing.T) { + if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") { + t.Skip("GODEBUG gocacheverify") + } + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOCACHE", filepath.Join(tg.tempdir, "cache")) + tg.tempFile("gopath1/src/p/p.go", "package p; func F(){}\n") + tg.tempFile("gopath2/src/p/p.go", "package p; func F(){}\n") + + tg.setenv("GOPATH", filepath.Join(tg.tempdir, "gopath1")) + tg.run("list", "-f={{.Target}}", "p") + target1 := strings.TrimSpace(tg.getStdout()) + tg.run("install", "p") + tg.wantNotStale("p", "", "p stale after install") + + tg.setenv("GOPATH", filepath.Join(tg.tempdir, "gopath2")) + tg.run("list", "-f={{.Target}}", "p") + target2 := strings.TrimSpace(tg.getStdout()) + tg.must(os.MkdirAll(filepath.Dir(target2), 0777)) + tg.must(copyFile(target1, target2, 0666)) + tg.wantStale("p", "build ID mismatch", "p not stale after copy from gopath1") + tg.run("install", "p") + tg.wantNotStale("p", "", "p stale after install2") +} + +func TestTestCache(t *testing.T) { + if strings.Contains(os.Getenv("GODEBUG"), "gocacheverify") { + t.Skip("GODEBUG gocacheverify") + } + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOPATH", tg.tempdir) + tg.setenv("GOCACHE", filepath.Join(tg.tempdir, "cache")) + + // timeout here should not affect result being cached + // or being retrieved later. 
+ tg.run("test", "-x", "-timeout=10s", "errors") + tg.grepStderr(`[\\/]compile|gccgo`, "did not run compiler") + tg.grepStderr(`[\\/]link|gccgo`, "did not run linker") + tg.grepStderr(`errors\.test`, "did not run test") + + tg.run("test", "-x", "errors") + tg.grepStdout(`ok \terrors\t\(cached\)`, "did not report cached result") + tg.grepStderrNot(`[\\/]compile|gccgo`, "incorrectly ran compiler") + tg.grepStderrNot(`[\\/]link|gccgo`, "incorrectly ran linker") + tg.grepStderrNot(`errors\.test`, "incorrectly ran test") + tg.grepStderrNot("DO NOT USE", "poisoned action status leaked") + + // Even very low timeouts do not disqualify cached entries. + tg.run("test", "-timeout=1ns", "-x", "errors") + tg.grepStderrNot(`errors\.test`, "incorrectly ran test") + + tg.run("clean", "-testcache") + tg.run("test", "-x", "errors") + tg.grepStderr(`errors\.test`, "did not run test") + + // The -p=1 in the commands below just makes the -x output easier to read. + + t.Log("\n\nINITIAL\n\n") + + tg.tempFile("src/p1/p1.go", "package p1\nvar X = 1\n") + tg.tempFile("src/p2/p2.go", "package p2\nimport _ \"p1\"\nvar X = 1\n") + tg.tempFile("src/t/t1/t1_test.go", "package t\nimport \"testing\"\nfunc Test1(*testing.T) {}\n") + tg.tempFile("src/t/t2/t2_test.go", "package t\nimport _ \"p1\"\nimport \"testing\"\nfunc Test2(*testing.T) {}\n") + tg.tempFile("src/t/t3/t3_test.go", "package t\nimport \"p1\"\nimport \"testing\"\nfunc Test3(t *testing.T) {t.Log(p1.X)}\n") + tg.tempFile("src/t/t4/t4_test.go", "package t\nimport \"p2\"\nimport \"testing\"\nfunc Test4(t *testing.T) {t.Log(p2.X)}") + tg.run("test", "-x", "-v", "-short", "t/...") + + t.Log("\n\nREPEAT\n\n") + + tg.run("test", "-x", "-v", "-short", "t/...") + tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t1") + tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t2") + tg.grepStdout(`ok \tt/t3\t\(cached\)`, "did not cache t3") + tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t4") + tg.grepStderrNot(`[\\/]compile|gccgo`, 
"incorrectly ran compiler") + tg.grepStderrNot(`[\\/]link|gccgo`, "incorrectly ran linker") + tg.grepStderrNot(`p[0-9]\.test`, "incorrectly ran test") + + t.Log("\n\nCOMMENT\n\n") + + // Changing the program text without affecting the compiled package + // should result in the package being rebuilt but nothing more. + tg.tempFile("src/p1/p1.go", "package p1\nvar X = 01\n") + tg.run("test", "-p=1", "-x", "-v", "-short", "t/...") + tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t1") + tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t2") + tg.grepStdout(`ok \tt/t3\t\(cached\)`, "did not cache t3") + tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t4") + tg.grepStderrNot(`([\\/]compile|gccgo).*t[0-9]_test\.go`, "incorrectly ran compiler") + tg.grepStderrNot(`[\\/]link|gccgo`, "incorrectly ran linker") + tg.grepStderrNot(`t[0-9]\.test.*test\.short`, "incorrectly ran test") + + t.Log("\n\nCHANGE\n\n") + + // Changing the actual package should have limited effects. + tg.tempFile("src/p1/p1.go", "package p1\nvar X = 02\n") + tg.run("test", "-p=1", "-x", "-v", "-short", "t/...") + + // p2 should have been rebuilt. + tg.grepStderr(`([\\/]compile|gccgo).*p2.go`, "did not recompile p2") + + // t1 does not import anything, should not have been rebuilt. + tg.grepStderrNot(`([\\/]compile|gccgo).*t1_test.go`, "incorrectly recompiled t1") + tg.grepStderrNot(`([\\/]link|gccgo).*t1_test`, "incorrectly relinked t1_test") + tg.grepStdout(`ok \tt/t1\t\(cached\)`, "did not cache t/t1") + + // t2 imports p1 and must be rebuilt and relinked, + // but the change should not have any effect on the test binary, + // so the test should not have been rerun. + tg.grepStderr(`([\\/]compile|gccgo).*t2_test.go`, "did not recompile t2") + tg.grepStderr(`([\\/]link|gccgo).*t2\.test`, "did not relink t2_test") + tg.grepStdout(`ok \tt/t2\t\(cached\)`, "did not cache t/t2") + + // t3 imports p1, and changing X changes t3's test binary. 
+ tg.grepStderr(`([\\/]compile|gccgo).*t3_test.go`, "did not recompile t3") + tg.grepStderr(`([\\/]link|gccgo).*t3\.test`, "did not relink t3_test") + tg.grepStderr(`t3\.test.*-test.short`, "did not rerun t3_test") + tg.grepStdoutNot(`ok \tt/t3\t\(cached\)`, "reported cached t3_test result") + + // t4 imports p2, but p2 did not change, so t4 should be relinked, not recompiled, + // and not rerun. + tg.grepStderrNot(`([\\/]compile|gccgo).*t4_test.go`, "incorrectly recompiled t4") + tg.grepStderr(`([\\/]link|gccgo).*t4\.test`, "did not relink t4_test") + tg.grepStdout(`ok \tt/t4\t\(cached\)`, "did not cache t/t4") +} + +func TestTestVet(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + + tg.tempFile("p1_test.go", ` + package p + import "testing" + func Test(t *testing.T) { + t.Logf("%d") // oops + } + `) + + tg.runFail("test", filepath.Join(tg.tempdir, "p1_test.go")) + tg.grepStderr(`Logf format %d`, "did not diagnose bad Logf") + tg.run("test", "-vet=off", filepath.Join(tg.tempdir, "p1_test.go")) + tg.grepStdout(`^ok`, "did not print test summary") + + tg.tempFile("p1.go", ` + package p + import "fmt" + func F() { + fmt.Printf("%d") // oops + } + `) + tg.runFail("test", filepath.Join(tg.tempdir, "p1.go")) + tg.grepStderr(`Printf format %d`, "did not diagnose bad Printf") + tg.run("test", "-x", "-vet=shift", filepath.Join(tg.tempdir, "p1.go")) + tg.grepStderr(`[\\/]vet.*-shift`, "did not run vet with -shift") + tg.grepStdout(`\[no test files\]`, "did not print test summary") + tg.run("test", "-vet=off", filepath.Join(tg.tempdir, "p1.go")) + tg.grepStdout(`\[no test files\]`, "did not print test summary") + + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) + tg.run("test", "vetcycle") // must not fail; #22890 +} + +func TestInstallDeps(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOPATH", tg.tempdir) + + tg.tempFile("src/p1/p1.go", "package p1\nvar X = 1\n") + 
tg.tempFile("src/p2/p2.go", "package p2\nimport _ \"p1\"\n") + tg.tempFile("src/main1/main.go", "package main\nimport _ \"p2\"\nfunc main() {}\n") + + tg.run("list", "-f={{.Target}}", "p1") + p1 := strings.TrimSpace(tg.getStdout()) + tg.run("list", "-f={{.Target}}", "p2") + p2 := strings.TrimSpace(tg.getStdout()) + tg.run("list", "-f={{.Target}}", "main1") + main1 := strings.TrimSpace(tg.getStdout()) + + tg.run("install", "main1") + + tg.mustExist(main1) + tg.mustNotExist(p2) + tg.mustNotExist(p1) + + tg.run("install", "p2") + tg.mustExist(p2) + tg.mustNotExist(p1) + + // don't let install -i overwrite runtime + tg.wantNotStale("runtime", "", "must be non-stale before install -i") + + tg.run("install", "-i", "main1") + tg.mustExist(p1) + tg.must(os.Remove(p1)) + + tg.run("install", "-i", "p2") + tg.mustExist(p1) +} + +func TestFmtLoadErrors(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) + tg.runFail("fmt", "does-not-exist") + tg.run("fmt", "-n", "exclude") +} + +func TestRelativePkgdir(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.makeTempdir() + tg.setenv("GOCACHE", "off") + tg.cd(tg.tempdir) + + tg.run("build", "-i", "-pkgdir=.", "runtime") +} + +func TestGcflagsPatterns(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.setenv("GOPATH", "") + tg.setenv("GOCACHE", "off") + + tg.run("build", "-v", "-gcflags= \t\r\n -e", "fmt") + tg.grepStderr("fmt", "did not rebuild fmt") + tg.grepStderrNot("reflect", "incorrectly rebuilt reflect") + + tg.run("build", "-v", "-gcflags=-e", "fmt", "reflect") + tg.grepStderr("fmt", "did not rebuild fmt") + tg.grepStderr("reflect", "did not rebuild reflect") + tg.grepStderrNot("runtime", "incorrectly rebuilt runtime") + + tg.run("build", "-x", "-v", "-gcflags= \t\r\n reflect \t\r\n = \t\r\n -N", "fmt") + tg.grepStderr("fmt", "did not rebuild fmt") + tg.grepStderr("reflect", "did not rebuild reflect") + tg.grepStderr("compile.* -N .*-p reflect", 
"did not build reflect with -N flag") + tg.grepStderrNot("compile.* -N .*-p fmt", "incorrectly built fmt with -N flag") + + tg.run("test", "-c", "-n", "-gcflags=-N", "strings") + tg.grepStderr("compile.* -N .*compare_test.go", "did not build strings_test package with -N flag") + + tg.run("test", "-c", "-n", "-gcflags=strings=-N", "strings") + tg.grepStderr("compile.* -N .*compare_test.go", "did not build strings_test package with -N flag") +} + +func TestGoTestMinusN(t *testing.T) { + // Intent here is to verify that 'go test -n' works without crashing. + // This reuses flag_test.go, but really any test would do. + tg := testgo(t) + defer tg.cleanup() + tg.run("test", "testdata/flag_test.go", "-n", "-args", "-v=7") +} + +func TestGoTestJSON(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.makeTempdir() + tg.setenv("GOCACHE", tg.tempdir) + tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata")) + + // It would be nice to test that the output is interlaced + // but it seems to be impossible to do that in a short test + // that isn't also flaky. Just check that we get JSON output. 
+ tg.run("test", "-json", "-short", "-v", "errors", "empty/pkg", "skipper") + tg.grepStdout(`"Package":"errors"`, "did not see JSON output") + tg.grepStdout(`"Action":"run"`, "did not see JSON output") + + tg.grepStdout(`"Action":"output","Package":"empty/pkg","Output":".*no test files`, "did not see no test files print") + tg.grepStdout(`"Action":"skip","Package":"empty/pkg"`, "did not see skip") + + tg.grepStdout(`"Action":"output","Package":"skipper","Test":"Test","Output":"--- SKIP:`, "did not see SKIP output") + tg.grepStdout(`"Action":"skip","Package":"skipper","Test":"Test"`, "did not see skip result for Test") + + tg.run("test", "-json", "-bench=NONE", "-short", "-v", "errors") + tg.grepStdout(`"Package":"errors"`, "did not see JSON output") + tg.grepStdout(`"Action":"run"`, "did not see JSON output") + + tg.run("test", "-o", filepath.Join(tg.tempdir, "errors.test.exe"), "-c", "errors") + tg.run("tool", "test2json", "-p", "errors", filepath.Join(tg.tempdir, "errors.test.exe"), "-test.v", "-test.short") + tg.grepStdout(`"Package":"errors"`, "did not see JSON output") + tg.grepStdout(`"Action":"run"`, "did not see JSON output") + tg.grepStdout(`\{"Action":"pass","Package":"errors"\}`, "did not see final pass") +} + +func TestFailFast(t *testing.T) { + tg := testgo(t) + defer tg.cleanup() + + tests := []struct { + run string + failfast bool + nfail int + }{ + {"TestFailingA", true, 1}, + {"TestFailing[AB]", true, 1}, + {"TestFailing[AB]", false, 2}, + // mix with non-failing tests: + {"TestA|TestFailing[AB]", true, 1}, + {"TestA|TestFailing[AB]", false, 2}, + // mix with parallel tests: + {"TestFailingB|TestParallelFailingA", true, 2}, + {"TestFailingB|TestParallelFailingA", false, 2}, + {"TestFailingB|TestParallelFailing[AB]", true, 3}, + {"TestFailingB|TestParallelFailing[AB]", false, 3}, + // mix with parallel sub-tests + {"TestFailingB|TestParallelFailing[AB]|TestParallelFailingSubtestsA", true, 3}, + 
{"TestFailingB|TestParallelFailing[AB]|TestParallelFailingSubtestsA", false, 5}, + {"TestParallelFailingSubtestsA", true, 1}, + // only parallels: + {"TestParallelFailing[AB]", false, 2}, + // non-parallel subtests: + {"TestFailingSubtestsA", true, 1}, + {"TestFailingSubtestsA", false, 2}, + } + + for _, tt := range tests { + t.Run(tt.run, func(t *testing.T) { + tg.runFail("test", "./testdata/src/failfast_test.go", "-run="+tt.run, "-failfast="+strconv.FormatBool(tt.failfast)) + + nfail := strings.Count(tg.getStdout(), "FAIL - ") + + if nfail != tt.nfail { + t.Errorf("go test -run=%s -failfast=%t printed %d FAILs, want %d", tt.run, tt.failfast, nfail, tt.nfail) + } + }) + } +} diff --git a/src/cmd/go/go_windows_test.go b/src/cmd/go/go_windows_test.go index d8d04aaf497..aa68a195802 100644 --- a/src/cmd/go/go_windows_test.go +++ b/src/cmd/go/go_windows_test.go @@ -5,12 +5,14 @@ package main import ( + "fmt" "internal/testenv" "io/ioutil" "os" "os/exec" "path/filepath" "strings" + "syscall" "testing" ) @@ -54,3 +56,82 @@ func TestAbsolutePath(t *testing.T) { t.Fatalf("wrong output found: %v %v", err, string(output)) } } + +func isWindowsXP(t *testing.T) bool { + v, err := syscall.GetVersion() + if err != nil { + t.Fatalf("GetVersion failed: %v", err) + } + major := byte(v) + return major < 6 +} + +func runIcacls(t *testing.T, args ...string) string { + t.Helper() + out, err := exec.Command("icacls", args...).CombinedOutput() + if err != nil { + t.Fatalf("icacls failed: %v\n%v", err, string(out)) + } + return string(out) +} + +func runGetACL(t *testing.T, path string) string { + t.Helper() + cmd := fmt.Sprintf(`Get-Acl "%s" | Select -expand AccessToString`, path) + out, err := exec.Command("powershell", "-Command", cmd).CombinedOutput() + if err != nil { + t.Fatalf("Get-Acl failed: %v\n%v", err, string(out)) + } + return string(out) +} + +// For issue 22343: verify that executable file created by "go build" command +// has discretionary access control list (DACL) set as 
if the file +// was created in the destination directory. +func TestACL(t *testing.T) { + if isWindowsXP(t) { + t.Skip("Windows XP does not have powershell command") + } + + tmpdir, err := ioutil.TempDir("", "TestACL") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + newtmpdir := filepath.Join(tmpdir, "tmp") + err = os.Mkdir(newtmpdir, 0777) + if err != nil { + t.Fatal(err) + } + + // When TestACL/tmp directory is created, it will have + // the same security attributes as TestACL. + // Add Guest account full access to TestACL/tmp - this + // will make all files created in TestACL/tmp have different + // security attributes to the files created in TestACL. + runIcacls(t, newtmpdir, + "/grant", "guest:(oi)(ci)f", // add Guest user to have full access + ) + + src := filepath.Join(tmpdir, "main.go") + err = ioutil.WriteFile(src, []byte("package main; func main() { }\n"), 0644) + if err != nil { + t.Fatal(err) + } + exe := filepath.Join(tmpdir, "main.exe") + cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", exe, src) + cmd.Env = append(os.Environ(), + "TMP="+newtmpdir, + "TEMP="+newtmpdir, + ) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("go command failed: %v\n%v", err, string(out)) + } + + // exe file is expected to have the same security attributes as the src. 
+ if got, expected := runGetACL(t, exe), runGetACL(t, src); got != expected { + t.Fatalf("expected Get-Acl output of \n%v\n, got \n%v\n", expected, got) + } +} diff --git a/src/cmd/go/internal/base/base.go b/src/cmd/go/internal/base/base.go index aff33f70d8d..286efbc0410 100644 --- a/src/cmd/go/internal/base/base.go +++ b/src/cmd/go/internal/base/base.go @@ -62,8 +62,8 @@ func (c *Command) Name() string { } func (c *Command) Usage() { - fmt.Fprintf(os.Stderr, "usage: %s\n\n", c.UsageLine) - fmt.Fprintf(os.Stderr, "%s\n", strings.TrimSpace(c.Long)) + fmt.Fprintf(os.Stderr, "usage: %s\n", c.UsageLine) + fmt.Fprintf(os.Stderr, "Run 'go help %s' for details.\n", c.Name()) os.Exit(2) } diff --git a/src/cmd/go/internal/base/path.go b/src/cmd/go/internal/base/path.go index 4f12fa8c28c..7a51181c973 100644 --- a/src/cmd/go/internal/base/path.go +++ b/src/cmd/go/internal/base/path.go @@ -44,28 +44,6 @@ func RelPaths(paths []string) []string { return out } -// FilterDotUnderscoreFiles returns a slice containing all elements -// of path whose base name doesn't begin with "." or "_". -func FilterDotUnderscoreFiles(path []string) []string { - var out []string // lazily initialized - for i, p := range path { - base := filepath.Base(p) - if strings.HasPrefix(base, ".") || strings.HasPrefix(base, "_") { - if out == nil { - out = append(make([]string, 0, len(path)), path[:i]...) - } - continue - } - if out != nil { - out = append(out, p) - } - } - if out == nil { - return path - } - return out -} - // IsTestFile reports whether the source file is a set of tests and should therefore // be excluded from coverage analysis. func IsTestFile(file string) bool { diff --git a/src/cmd/go/internal/base/tool.go b/src/cmd/go/internal/base/tool.go index c907772c00a..d0da65e03ce 100644 --- a/src/cmd/go/internal/base/tool.go +++ b/src/cmd/go/internal/base/tool.go @@ -36,18 +36,9 @@ func Tool(toolName string) string { } // Give a nice message if there is no tool with that name. 
if _, err := os.Stat(toolPath); err != nil { - if isInGoToolsRepo(toolName) { - fmt.Fprintf(os.Stderr, "go tool: no such tool %q; to install:\n\tgo get golang.org/x/tools/cmd/%s\n", toolName, toolName) - } else { - fmt.Fprintf(os.Stderr, "go tool: no such tool %q\n", toolName) - } + fmt.Fprintf(os.Stderr, "go tool: no such tool %q\n", toolName) SetExitStatus(2) Exit() } return toolPath } - -// TODO: Delete. -func isInGoToolsRepo(toolName string) bool { - return false -} diff --git a/src/cmd/go/internal/cache/cache.go b/src/cmd/go/internal/cache/cache.go new file mode 100644 index 00000000000..311cd89f33d --- /dev/null +++ b/src/cmd/go/internal/cache/cache.go @@ -0,0 +1,370 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cache implements a build artifact cache. +package cache + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" +) + +// An ActionID is a cache action key, the hash of a complete description of a +// repeatable computation (command line, environment variables, +// input file contents, executable contents). +type ActionID [HashSize]byte + +// An OutputID is a cache output key, the hash of an output of a computation. +type OutputID [HashSize]byte + +// A Cache is a package cache, backed by a file system directory tree. +type Cache struct { + dir string + log *os.File + now func() time.Time +} + +// Open opens and returns the cache in the given directory. +// +// It is safe for multiple processes on a single machine to use the +// same cache directory in a local file system simultaneously. +// They will coordinate using operating system file locks and may +// duplicate effort but will not corrupt the cache. 
+// +// However, it is NOT safe for multiple processes on different machines +// to share a cache directory (for example, if the directory were stored +// in a network file system). File locking is notoriously unreliable in +// network file systems and may not suffice to protect the cache. +// +func Open(dir string) (*Cache, error) { + info, err := os.Stat(dir) + if err != nil { + return nil, err + } + if !info.IsDir() { + return nil, &os.PathError{Op: "open", Path: dir, Err: fmt.Errorf("not a directory")} + } + for i := 0; i < 256; i++ { + name := filepath.Join(dir, fmt.Sprintf("%02x", i)) + if err := os.MkdirAll(name, 0777); err != nil { + return nil, err + } + } + f, err := os.OpenFile(filepath.Join(dir, "log.txt"), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) + if err != nil { + return nil, err + } + c := &Cache{ + dir: dir, + log: f, + now: time.Now, + } + return c, nil +} + +// fileName returns the name of the file corresponding to the given id. +func (c *Cache) fileName(id [HashSize]byte, key string) string { + return filepath.Join(c.dir, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+"-"+key) +} + +var errMissing = errors.New("cache entry not found") + +const ( + // action entry file is "v1 \n" + hexSize = HashSize * 2 + entrySize = 2 + 1 + hexSize + 1 + hexSize + 1 + 20 + 1 + 20 + 1 +) + +// verify controls whether to run the cache in verify mode. +// In verify mode, the cache always returns errMissing from Get +// but then double-checks in Put that the data being written +// exactly matches any existing entry. This provides an easy +// way to detect program behavior that would have been different +// had the cache entry been returned from Get. +// +// verify is enabled by setting the environment variable +// GODEBUG=gocacheverify=1. 
+var verify = false + +func init() { initEnv() } + +func initEnv() { + verify = false + debugHash = false + debug := strings.Split(os.Getenv("GODEBUG"), ",") + for _, f := range debug { + if f == "gocacheverify=1" { + verify = true + } + if f == "gocachehash=1" { + debugHash = true + } + } +} + +// Get looks up the action ID in the cache, +// returning the corresponding output ID and file size, if any. +// Note that finding an output ID does not guarantee that the +// saved file for that output ID is still available. +func (c *Cache) Get(id ActionID) (Entry, error) { + if verify { + return Entry{}, errMissing + } + return c.get(id) +} + +type Entry struct { + OutputID OutputID + Size int64 + Time time.Time +} + +// get is Get but does not respect verify mode, so that Put can use it. +func (c *Cache) get(id ActionID) (Entry, error) { + missing := func() (Entry, error) { + fmt.Fprintf(c.log, "%d miss %x\n", c.now().Unix(), id) + return Entry{}, errMissing + } + f, err := os.Open(c.fileName(id, "a")) + if err != nil { + return missing() + } + defer f.Close() + entry := make([]byte, entrySize+1) // +1 to detect whether f is too long + if n, err := io.ReadFull(f, entry); n != entrySize || err != io.ErrUnexpectedEOF { + return missing() + } + if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' { + return missing() + } + eid, entry := entry[3:3+hexSize], entry[3+hexSize:] + eout, entry := entry[1:1+hexSize], entry[1+hexSize:] + esize, entry := entry[1:1+20], entry[1+20:] + etime, entry := entry[1:1+20], entry[1+20:] + var buf [HashSize]byte + if _, err := hex.Decode(buf[:], eid); err != nil || buf != id { + return missing() + } + if _, err := hex.Decode(buf[:], eout); err != nil { + return missing() + } + i := 0 + for i < len(esize) && esize[i] == ' ' { + i++ + } + size, err := strconv.ParseInt(string(esize[i:]), 10, 64) + if err != 
nil || size < 0 { + return missing() + } + i = 0 + for i < len(etime) && etime[i] == ' ' { + i++ + } + tm, err := strconv.ParseInt(string(etime[i:]), 10, 64) + if err != nil || tm < 0 { + return missing() + } + + fmt.Fprintf(c.log, "%d get %x\n", c.now().Unix(), id) + + // Best-effort attempt to update mtime on file, + // so that mtime reflects cache access time. + os.Chtimes(c.fileName(id, "a"), c.now(), c.now()) + + return Entry{buf, size, time.Unix(0, tm)}, nil +} + +// GetBytes looks up the action ID in the cache and returns +// the corresponding output bytes. +// GetBytes should only be used for data that can be expected to fit in memory. +func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) { + entry, err := c.Get(id) + if err != nil { + return nil, entry, err + } + data, _ := ioutil.ReadFile(c.OutputFile(entry.OutputID)) + if sha256.Sum256(data) != entry.OutputID { + return nil, entry, errMissing + } + return data, entry, nil +} + +// OutputFile returns the name of the cache file storing output with the given OutputID. +func (c *Cache) OutputFile(out OutputID) string { + file := c.fileName(out, "d") + + // Best-effort attempt to update mtime on file, + // so that mtime reflects cache access time. + os.Chtimes(file, c.now(), c.now()) + + return file +} + +// putIndexEntry adds an entry to the cache recording that executing the action +// with the given id produces an output with the given output id (hash) and size. +func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify bool) error { + // Note: We expect that for one reason or another it may happen + // that repeating an action produces a different output hash + // (for example, if the output contains a time stamp or temp dir name). + // While not ideal, this is also not a correctness problem, so we + // don't make a big deal about it. In particular, we leave the action + // cache entries writable specifically so that they can be overwritten. 
+ // + // Setting GODEBUG=gocacheverify=1 does make a big deal: + // in verify mode we are double-checking that the cache entries + // are entirely reproducible. As just noted, this may be unrealistic + // in some cases but the check is also useful for shaking out real bugs. + entry := []byte(fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano())) + if verify && allowVerify { + old, err := c.get(id) + if err == nil && (old.OutputID != out || old.Size != size) { + // panic to show stack trace, so we can see what code is generating this cache entry. + msg := fmt.Sprintf("go: internal cache error: cache verify failed: id=%x changed:<<<\n%s\n>>>\nold: %x %d\nnew: %x %d", id, reverseHash(id), old.OutputID, old.Size, out, size) + panic(msg) + } + } + file := c.fileName(id, "a") + if err := ioutil.WriteFile(file, entry, 0666); err != nil { + os.Remove(file) + return err + } + + fmt.Fprintf(c.log, "%d put %x %x %d\n", c.now().Unix(), id, out, size) + return nil +} + +// Put stores the given output in the cache as the output for the action ID. +// It may read file twice. The content of file must not change between the two passes. +func (c *Cache) Put(id ActionID, file io.ReadSeeker) (OutputID, int64, error) { + return c.put(id, file, true) +} + +// PutNoVerify is like Put but disables the verify check +// when GODEBUG=gocacheverify=1 is set. +// It is meant for data that is OK to cache but that we expect to vary slightly from run to run, +// like test output containing times and the like. +func (c *Cache) PutNoVerify(id ActionID, file io.ReadSeeker) (OutputID, int64, error) { + return c.put(id, file, false) +} + +func (c *Cache) put(id ActionID, file io.ReadSeeker, allowVerify bool) (OutputID, int64, error) { + // Compute output ID. 
+ h := sha256.New() + if _, err := file.Seek(0, 0); err != nil { + return OutputID{}, 0, err + } + size, err := io.Copy(h, file) + if err != nil { + return OutputID{}, 0, err + } + var out OutputID + h.Sum(out[:0]) + + // Copy to cached output file (if not already present). + if err := c.copyFile(file, out, size); err != nil { + return out, size, err + } + + // Add to cache index. + return out, size, c.putIndexEntry(id, out, size, allowVerify) +} + +// PutBytes stores the given bytes in the cache as the output for the action ID. +func (c *Cache) PutBytes(id ActionID, data []byte) error { + _, _, err := c.Put(id, bytes.NewReader(data)) + return err +} + +// copyFile copies file into the cache, expecting it to have the given +// output ID and size, if that file is not present already. +func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error { + name := c.fileName(out, "d") + info, err := os.Stat(name) + if err == nil && info.Size() == size { + // Check hash. + if f, err := os.Open(name); err == nil { + h := sha256.New() + io.Copy(h, f) + f.Close() + var out2 OutputID + h.Sum(out2[:0]) + if out == out2 { + return nil + } + } + // Hash did not match. Fall through and rewrite file. + } + + // Copy file to cache directory. + mode := os.O_RDWR | os.O_CREATE + if err == nil && info.Size() > size { // shouldn't happen but fix in case + mode |= os.O_TRUNC + } + f, err := os.OpenFile(name, mode, 0666) + if err != nil { + return err + } + defer f.Close() + if size == 0 { + // File now exists with correct size. + // Only one possible zero-length file, so contents are OK too. + // Early return here makes sure there's a "last byte" for code below. + return nil + } + + // From here on, if any of the I/O writing the file fails, + // we make a best-effort attempt to truncate the file f + // before returning, to avoid leaving bad bytes in the file. + + // Copy file to f, but also into h to double-check hash. 
+ if _, err := file.Seek(0, 0); err != nil { + f.Truncate(0) + return err + } + h := sha256.New() + w := io.MultiWriter(f, h) + if _, err := io.CopyN(w, file, size-1); err != nil { + f.Truncate(0) + return err + } + // Check last byte before writing it; writing it will make the size match + // what other processes expect to find and might cause them to start + // using the file. + buf := make([]byte, 1) + if _, err := file.Read(buf); err != nil { + f.Truncate(0) + return err + } + h.Write(buf) + sum := h.Sum(nil) + if !bytes.Equal(sum, out[:]) { + f.Truncate(0) + return fmt.Errorf("file content changed underfoot") + } + + // Commit cache file entry. + if _, err := f.Write(buf); err != nil { + f.Truncate(0) + return err + } + if err := f.Close(); err != nil { + // Data might not have been written, + // but file may look like it is the right size. + // To be extra careful, remove cached file. + os.Remove(name) + return err + } + + return nil +} diff --git a/src/cmd/go/internal/cache/cache_test.go b/src/cmd/go/internal/cache/cache_test.go new file mode 100644 index 00000000000..84749a6965c --- /dev/null +++ b/src/cmd/go/internal/cache/cache_test.go @@ -0,0 +1,198 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cache + +import ( + "encoding/binary" + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" +) + +func init() { + verify = false // even if GODEBUG is set +} + +func TestBasic(t *testing.T) { + dir, err := ioutil.TempDir("", "cachetest-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + _, err = Open(filepath.Join(dir, "notexist")) + if err == nil { + t.Fatal(`Open("tmp/notexist") succeeded, want failure`) + } + + cdir := filepath.Join(dir, "c1") + if err := os.Mkdir(cdir, 0777); err != nil { + t.Fatal(err) + } + + c1, err := Open(cdir) + if err != nil { + t.Fatalf("Open(c1) (create): %v", err) + } + if err := c1.putIndexEntry(dummyID(1), dummyID(12), 13, true); err != nil { + t.Fatalf("addIndexEntry: %v", err) + } + if err := c1.putIndexEntry(dummyID(1), dummyID(2), 3, true); err != nil { // overwrite entry + t.Fatalf("addIndexEntry: %v", err) + } + if entry, err := c1.Get(dummyID(1)); err != nil || entry.OutputID != dummyID(2) || entry.Size != 3 { + t.Fatalf("c1.Get(1) = %x, %v, %v, want %x, %v, nil", entry.OutputID, entry.Size, err, dummyID(2), 3) + } + + c2, err := Open(cdir) + if err != nil { + t.Fatalf("Open(c2) (reuse): %v", err) + } + if entry, err := c2.Get(dummyID(1)); err != nil || entry.OutputID != dummyID(2) || entry.Size != 3 { + t.Fatalf("c2.Get(1) = %x, %v, %v, want %x, %v, nil", entry.OutputID, entry.Size, err, dummyID(2), 3) + } + if err := c2.putIndexEntry(dummyID(2), dummyID(3), 4, true); err != nil { + t.Fatalf("addIndexEntry: %v", err) + } + if entry, err := c1.Get(dummyID(2)); err != nil || entry.OutputID != dummyID(3) || entry.Size != 4 { + t.Fatalf("c1.Get(2) = %x, %v, %v, want %x, %v, nil", entry.OutputID, entry.Size, err, dummyID(3), 4) + } +} + +func TestGrowth(t *testing.T) { + dir, err := ioutil.TempDir("", "cachetest-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + c, err := Open(dir) + if err != nil { + t.Fatalf("Open: %v", err) + } + + n := 10000 + if testing.Short() { + n = 1000 
+ } + + for i := 0; i < n; i++ { + if err := c.putIndexEntry(dummyID(i), dummyID(i*99), int64(i)*101, true); err != nil { + t.Fatalf("addIndexEntry: %v", err) + } + id := ActionID(dummyID(i)) + entry, err := c.Get(id) + if err != nil { + t.Fatalf("Get(%x): %v", id, err) + } + if entry.OutputID != dummyID(i*99) || entry.Size != int64(i)*101 { + t.Errorf("Get(%x) = %x, %d, want %x, %d", id, entry.OutputID, entry.Size, dummyID(i*99), int64(i)*101) + } + } + for i := 0; i < n; i++ { + id := ActionID(dummyID(i)) + entry, err := c.Get(id) + if err != nil { + t.Fatalf("Get2(%x): %v", id, err) + } + if entry.OutputID != dummyID(i*99) || entry.Size != int64(i)*101 { + t.Errorf("Get2(%x) = %x, %d, want %x, %d", id, entry.OutputID, entry.Size, dummyID(i*99), int64(i)*101) + } + } +} + +func TestVerifyPanic(t *testing.T) { + os.Setenv("GODEBUG", "gocacheverify=1") + initEnv() + defer func() { + os.Unsetenv("GODEBUG") + verify = false + }() + + if !verify { + t.Fatal("initEnv did not set verify") + } + + dir, err := ioutil.TempDir("", "cachetest-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + c, err := Open(dir) + if err != nil { + t.Fatalf("Open: %v", err) + } + + id := ActionID(dummyID(1)) + if err := c.PutBytes(id, []byte("abc")); err != nil { + t.Fatal(err) + } + + defer func() { + if err := recover(); err != nil { + t.Log(err) + return + } + }() + c.PutBytes(id, []byte("def")) + t.Fatal("mismatched Put did not panic in verify mode") +} + +func TestCacheLog(t *testing.T) { + dir, err := ioutil.TempDir("", "cachetest-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + c, err := Open(dir) + if err != nil { + t.Fatalf("Open: %v", err) + } + c.now = func() time.Time { return time.Unix(1e9, 0) } + + id := ActionID(dummyID(1)) + c.Get(id) + c.PutBytes(id, []byte("abc")) + c.Get(id) + + c, err = Open(dir) + if err != nil { + t.Fatalf("Open #2: %v", err) + } + c.now = func() time.Time { return time.Unix(1e9+1, 0) } + c.Get(id) + + id2 := 
ActionID(dummyID(2)) + c.Get(id2) + c.PutBytes(id2, []byte("abc")) + c.Get(id2) + c.Get(id) + + data, err := ioutil.ReadFile(filepath.Join(dir, "log.txt")) + if err != nil { + t.Fatal(err) + } + want := `1000000000 miss 0100000000000000000000000000000000000000000000000000000000000000 +1000000000 put 0100000000000000000000000000000000000000000000000000000000000000 ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad 3 +1000000000 get 0100000000000000000000000000000000000000000000000000000000000000 +1000000001 get 0100000000000000000000000000000000000000000000000000000000000000 +1000000001 miss 0200000000000000000000000000000000000000000000000000000000000000 +1000000001 put 0200000000000000000000000000000000000000000000000000000000000000 ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad 3 +1000000001 get 0200000000000000000000000000000000000000000000000000000000000000 +1000000001 get 0100000000000000000000000000000000000000000000000000000000000000 +` + if string(data) != want { + t.Fatalf("log:\n%s\nwant:\n%s", string(data), want) + } +} + +func dummyID(x int) [HashSize]byte { + var out [HashSize]byte + binary.LittleEndian.PutUint64(out[:], uint64(x)) + return out +} diff --git a/src/cmd/go/internal/cache/default.go b/src/cmd/go/internal/cache/default.go new file mode 100644 index 00000000000..6411ec7a563 --- /dev/null +++ b/src/cmd/go/internal/cache/default.go @@ -0,0 +1,100 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "cmd/go/internal/base" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sync" +) + +// Default returns the default cache to use, or nil if no cache should be used. 
+func Default() *Cache { + defaultOnce.Do(initDefaultCache) + return defaultCache +} + +var ( + defaultOnce sync.Once + defaultCache *Cache +) + +// cacheREADME is a message stored in a README in the cache directory. +// Because the cache lives outside the normal Go trees, we leave the +// README as a courtesy to explain where it came from. +const cacheREADME = `This directory holds cached build artifacts from the Go build system. +Run "go clean -cache" if the directory is getting too large. +See golang.org to learn more about Go. +` + +// initDefaultCache does the work of finding the default cache +// the first time Default is called. +func initDefaultCache() { + dir := DefaultDir() + if dir == "off" { + return + } + if err := os.MkdirAll(dir, 0777); err != nil { + base.Fatalf("initializing cache in $GOCACHE: %s", err) + } + if _, err := os.Stat(filepath.Join(dir, "README")); err != nil { + // Best effort. + ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666) + } + + c, err := Open(dir) + if err != nil { + base.Fatalf("initializing cache in $GOCACHE: %s", err) + } + defaultCache = c +} + +// DefaultDir returns the effective GOCACHE setting. +// It returns "off" if the cache is disabled. +func DefaultDir() string { + dir := os.Getenv("GOCACHE") + if dir != "" { + return dir + } + + // Compute default location. + // TODO(rsc): This code belongs somewhere else, + // like maybe ioutil.CacheDir or os.CacheDir. + switch runtime.GOOS { + case "windows": + dir = os.Getenv("LocalAppData") + + case "darwin": + dir = os.Getenv("HOME") + if dir == "" { + return "off" + } + dir += "/Library/Caches" + + case "plan9": + dir = os.Getenv("home") + if dir == "" { + return "off" + } + // Plan 9 has no established per-user cache directory, + // but $home/lib/xyz is the usual equivalent of $HOME/.xyz on Unix. 
+ dir += "/lib/cache" + + default: // Unix + // https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html + dir = os.Getenv("XDG_CACHE_HOME") + if dir == "" { + dir = os.Getenv("HOME") + if dir == "" { + return "off" + } + dir += "/.cache" + } + } + return filepath.Join(dir, "go-build") +} diff --git a/src/cmd/go/internal/cache/hash.go b/src/cmd/go/internal/cache/hash.go new file mode 100644 index 00000000000..0e45e7db547 --- /dev/null +++ b/src/cmd/go/internal/cache/hash.go @@ -0,0 +1,174 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "bytes" + "crypto/sha256" + "fmt" + "hash" + "io" + "os" + "runtime" + "sync" +) + +var debugHash = false // set when GODEBUG=gocachehash=1 + +// HashSize is the number of bytes in a hash. +const HashSize = 32 + +// A Hash provides access to the canonical hash function used to index the cache. +// The current implementation uses salted SHA256, but clients must not assume this. +type Hash struct { + h hash.Hash + name string // for debugging + buf *bytes.Buffer // for verify +} + +// hashSalt is a salt string added to the beginning of every hash +// created by NewHash. Using the Go version makes sure that different +// versions of the go command (or even different Git commits during +// work on the development branch) do not address the same cache +// entries, so that a bug in one version does not affect the execution +// of other versions. This salt will result in additional ActionID files +// in the cache, but not additional copies of the large output files, +// which are still addressed by unsalted SHA256. +var hashSalt = []byte(runtime.Version()) + +// Subkey returns an action ID corresponding to mixing a parent +// action ID with a string description of the subkey. 
+func Subkey(parent ActionID, desc string) ActionID { + h := sha256.New() + h.Write([]byte("subkey:")) + h.Write(parent[:]) + h.Write([]byte(desc)) + var out ActionID + h.Sum(out[:0]) + if debugHash { + fmt.Fprintf(os.Stderr, "HASH subkey %x %q = %x\n", parent, desc, out) + } + if verify { + hashDebug.Lock() + hashDebug.m[out] = fmt.Sprintf("subkey %x %q", parent, desc) + hashDebug.Unlock() + } + return out +} + +// NewHash returns a new Hash. +// The caller is expected to Write data to it and then call Sum. +func NewHash(name string) *Hash { + h := &Hash{h: sha256.New(), name: name} + if debugHash { + fmt.Fprintf(os.Stderr, "HASH[%s]\n", h.name) + } + h.Write(hashSalt) + if verify { + h.buf = new(bytes.Buffer) + } + return h +} + +// Write writes data to the running hash. +func (h *Hash) Write(b []byte) (int, error) { + if debugHash { + fmt.Fprintf(os.Stderr, "HASH[%s]: %q\n", h.name, b) + } + if h.buf != nil { + h.buf.Write(b) + } + return h.h.Write(b) +} + +// Sum returns the hash of the data written previously. +func (h *Hash) Sum() [HashSize]byte { + var out [HashSize]byte + h.h.Sum(out[:0]) + if debugHash { + fmt.Fprintf(os.Stderr, "HASH[%s]: %x\n", h.name, out) + } + if h.buf != nil { + hashDebug.Lock() + if hashDebug.m == nil { + hashDebug.m = make(map[[HashSize]byte]string) + } + hashDebug.m[out] = h.buf.String() + hashDebug.Unlock() + } + return out +} + +// In GODEBUG=gocacheverify=1 mode, +// hashDebug holds the input to every computed hash ID, +// so that we can work backward from the ID involved in a +// cache entry mismatch to a description of what should be there. +var hashDebug = struct { + sync.Mutex + m map[[HashSize]byte]string +}{m: make(map[[HashSize]byte]string)} + +// reverseHash returns the input used to compute the hash id. +func reverseHash(id [HashSize]byte) string { + hashDebug.Lock() + s := hashDebug.m[id] + hashDebug.Unlock() + return s +} + +var hashFileCache struct { + sync.Mutex + m map[string][HashSize]byte +} + +// FileHash returns the hash of the named file. 
+// It caches repeated lookups for a given file, +// and the cache entry for a file can be initialized +// using SetFileHash. +// The hash used by FileHash is not the same as +// the hash used by NewHash. +func FileHash(file string) ([HashSize]byte, error) { + hashFileCache.Lock() + out, ok := hashFileCache.m[file] + hashFileCache.Unlock() + + if ok { + return out, nil + } + + h := sha256.New() + f, err := os.Open(file) + if err != nil { + if debugHash { + fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err) + } + return [HashSize]byte{}, err + } + _, err = io.Copy(h, f) + f.Close() + if err != nil { + if debugHash { + fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err) + } + return [HashSize]byte{}, err + } + h.Sum(out[:0]) + if debugHash { + fmt.Fprintf(os.Stderr, "HASH %s: %x\n", file, out) + } + + SetFileHash(file, out) + return out, nil +} + +// SetFileHash sets the hash returned by FileHash for file. +func SetFileHash(file string, sum [HashSize]byte) { + hashFileCache.Lock() + if hashFileCache.m == nil { + hashFileCache.m = make(map[string][HashSize]byte) + } + hashFileCache.m[file] = sum + hashFileCache.Unlock() +} diff --git a/src/cmd/go/internal/cache/hash_test.go b/src/cmd/go/internal/cache/hash_test.go new file mode 100644 index 00000000000..3bf7143039c --- /dev/null +++ b/src/cmd/go/internal/cache/hash_test.go @@ -0,0 +1,52 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package cache + +import ( + "fmt" + "io/ioutil" + "os" + "testing" +) + +func TestHash(t *testing.T) { + oldSalt := hashSalt + hashSalt = nil + defer func() { + hashSalt = oldSalt + }() + + h := NewHash("alice") + h.Write([]byte("hello world")) + sum := fmt.Sprintf("%x", h.Sum()) + want := "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9" + if sum != want { + t.Errorf("hash(hello world) = %v, want %v", sum, want) + } +} + +func TestHashFile(t *testing.T) { + f, err := ioutil.TempFile("", "cmd-go-test-") + if err != nil { + t.Fatal(err) + } + name := f.Name() + fmt.Fprintf(f, "hello world") + defer os.Remove(name) + if err := f.Close(); err != nil { + t.Fatal(err) + } + + var h ActionID // make sure hash result is assignable to ActionID + h, err = FileHash(name) + if err != nil { + t.Fatal(err) + } + sum := fmt.Sprintf("%x", h) + want := "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9" + if sum != want { + t.Errorf("hash(hello world) = %v, want %v", sum, want) + } +} diff --git a/src/cmd/go/internal/cfg/cfg.go b/src/cmd/go/internal/cfg/cfg.go index b3ad1ce71ec..dfab20a8de3 100644 --- a/src/cmd/go/internal/cfg/cfg.go +++ b/src/cmd/go/internal/cfg/cfg.go @@ -22,7 +22,6 @@ var ( BuildBuildmode string // -buildmode flag BuildContext = build.Default BuildI bool // -i flag - BuildLdflags []string // -ldflags flag BuildLinkshared bool // -linkshared flag BuildMSan bool // -msan flag BuildN bool // -n flag @@ -37,6 +36,10 @@ var ( BuildV bool // -v flag BuildWork bool // -work flag BuildX bool // -x flag + + CmdName string // "build", "install", "list", etc. + + DebugActiongraph string // -debug-actiongraph flag (undocumented, unstable) ) func init() { @@ -80,8 +83,9 @@ var ( GOROOTsrc = filepath.Join(GOROOT, "src") // Used in envcmd.MkEnv and build ID computations. 
- GOARM = fmt.Sprint(objabi.GOARM) - GO386 = objabi.GO386 + GOARM = fmt.Sprint(objabi.GOARM) + GO386 = objabi.GO386 + GOMIPS = objabi.GOMIPS ) // Update build context to use our computed GOROOT. @@ -98,22 +102,41 @@ func findGOROOT() string { if env := os.Getenv("GOROOT"); env != "" { return filepath.Clean(env) } + def := filepath.Clean(runtime.GOROOT()) exe, err := os.Executable() if err == nil { exe, err = filepath.Abs(exe) if err == nil { if dir := filepath.Join(exe, "../.."); isGOROOT(dir) { + // If def (runtime.GOROOT()) and dir are the same + // directory, prefer the spelling used in def. + if isSameDir(def, dir) { + return def + } return dir } exe, err = filepath.EvalSymlinks(exe) if err == nil { if dir := filepath.Join(exe, "../.."); isGOROOT(dir) { + if isSameDir(def, dir) { + return def + } return dir } } } } - return filepath.Clean(runtime.GOROOT()) + return def +} + +// isSameDir reports whether dir1 and dir2 are the same directory. +func isSameDir(dir1, dir2 string) bool { + if dir1 == dir2 { + return true + } + info1, err1 := os.Stat(dir1) + info2, err2 := os.Stat(dir2) + return err1 == nil && err2 == nil && os.SameFile(info1, info2) } // isGOROOT reports whether path looks like a GOROOT. diff --git a/src/cmd/go/internal/clean/clean.go b/src/cmd/go/internal/clean/clean.go index 454cac1f47b..fa5af944af6 100644 --- a/src/cmd/go/internal/clean/clean.go +++ b/src/cmd/go/internal/clean/clean.go @@ -11,16 +11,18 @@ import ( "os" "path/filepath" "strings" + "time" "cmd/go/internal/base" + "cmd/go/internal/cache" "cmd/go/internal/cfg" "cmd/go/internal/load" "cmd/go/internal/work" ) var CmdClean = &base.Command{ - UsageLine: "clean [-i] [-r] [-n] [-x] [build flags] [packages]", - Short: "remove object files", + UsageLine: "clean [-i] [-r] [-n] [-x] [-cache] [-testcache] [build flags] [packages]", + Short: "remove object files and cached files", Long: ` Clean removes object files from package source directories. 
The go command builds most objects in a temporary directory, @@ -58,14 +60,23 @@ dependencies of the packages named by the import paths. The -x flag causes clean to print remove commands as it executes them. +The -cache flag causes clean to remove the entire go build cache. + +The -testcache flag causes clean to expire all test results in the +go build cache. + For more about build flags, see 'go help build'. For more about specifying packages, see 'go help packages'. `, } -var cleanI bool // clean -i flag -var cleanR bool // clean -r flag +var ( + cleanI bool // clean -i flag + cleanR bool // clean -r flag + cleanCache bool // clean -cache flag + cleanTestcache bool // clean -testcache flag +) func init() { // break init cycle @@ -73,6 +84,9 @@ func init() { CmdClean.Flag.BoolVar(&cleanI, "i", false, "") CmdClean.Flag.BoolVar(&cleanR, "r", false, "") + CmdClean.Flag.BoolVar(&cleanCache, "cache", false, "") + CmdClean.Flag.BoolVar(&cleanTestcache, "testcache", false, "") + // -n and -x are important enough to be // mentioned explicitly in the docs but they // are part of the build flags. @@ -84,6 +98,46 @@ func runClean(cmd *base.Command, args []string) { for _, pkg := range load.PackagesAndErrors(args) { clean(pkg) } + + if cleanCache { + var b work.Builder + b.Print = fmt.Print + dir := cache.DefaultDir() + if dir != "off" { + // Remove the cache subdirectories but not the top cache directory. + // The top cache directory may have been created with special permissions + // and not something that we want to remove. Also, we'd like to preserve + // the access log for future analysis, even if the cache is cleared. + subdirs, _ := filepath.Glob(filepath.Join(dir, "[0-9a-f][0-9a-f]")) + if len(subdirs) > 0 { + if cfg.BuildN || cfg.BuildX { + b.Showcmd("", "rm -r %s", strings.Join(subdirs, " ")) + } + printedErrors := false + for _, d := range subdirs { + // Only print the first error - there may be many. + // This also mimics what os.RemoveAll(dir) would do. 
+ if err := os.RemoveAll(d); err != nil && !printedErrors { + printedErrors = true + base.Errorf("go clean -cache: %v", err) + } + } + } + } + } + + if cleanTestcache && !cleanCache { + // Instead of walking through the entire cache looking for test results, + // we write a file to the cache indicating that all test results from before + // right now are to be ignored. + dir := cache.DefaultDir() + if dir != "off" { + err := ioutil.WriteFile(filepath.Join(dir, "testexpire.txt"), []byte(fmt.Sprintf("%d\n", time.Now().UnixNano())), 0666) + if err != nil { + base.Errorf("go clean -testcache: %v", err) + } + } + } } var cleaned = map[*load.Package]bool{} @@ -213,12 +267,12 @@ func clean(p *load.Package) { } } - if cleanI && p.Internal.Target != "" { + if cleanI && p.Target != "" { if cfg.BuildN || cfg.BuildX { - b.Showcmd("", "rm -f %s", p.Internal.Target) + b.Showcmd("", "rm -f %s", p.Target) } if !cfg.BuildN { - removeFile(p.Internal.Target) + removeFile(p.Target) } } diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go index 43d4334f060..fa19bebe218 100644 --- a/src/cmd/go/internal/envcmd/env.go +++ b/src/cmd/go/internal/envcmd/env.go @@ -13,6 +13,7 @@ import ( "strings" "cmd/go/internal/base" + "cmd/go/internal/cache" "cmd/go/internal/cfg" "cmd/go/internal/load" "cmd/go/internal/work" @@ -31,6 +32,8 @@ each named variable on its own line. The -json flag prints the environment in JSON format instead of as a shell script. + +For more about environment variables, see 'go help environment'. 
`, } @@ -47,6 +50,7 @@ func MkEnv() []cfg.EnvVar { env := []cfg.EnvVar{ {Name: "GOARCH", Value: cfg.Goarch}, {Name: "GOBIN", Value: cfg.GOBIN}, + {Name: "GOCACHE", Value: cache.DefaultDir()}, {Name: "GOEXE", Value: cfg.ExeSuffix}, {Name: "GOHOSTARCH", Value: runtime.GOARCH}, {Name: "GOHOSTOS", Value: runtime.GOOS}, @@ -54,6 +58,7 @@ func MkEnv() []cfg.EnvVar { {Name: "GOPATH", Value: cfg.BuildContext.GOPATH}, {Name: "GORACE", Value: os.Getenv("GORACE")}, {Name: "GOROOT", Value: cfg.GOROOT}, + {Name: "GOTMPDIR", Value: os.Getenv("GOTMPDIR")}, {Name: "GOTOOLDIR", Value: base.ToolDir}, // disable escape codes in clang errors @@ -71,13 +76,20 @@ func MkEnv() []cfg.EnvVar { env = append(env, cfg.EnvVar{Name: "GOARM", Value: cfg.GOARM}) case "386": env = append(env, cfg.EnvVar{Name: "GO386", Value: cfg.GO386}) + case "mips", "mipsle": + env = append(env, cfg.EnvVar{Name: "GOMIPS", Value: cfg.GOMIPS}) } - cmd := b.GccCmd(".") - env = append(env, cfg.EnvVar{Name: "CC", Value: cmd[0]}) - env = append(env, cfg.EnvVar{Name: "GOGCCFLAGS", Value: strings.Join(cmd[3:], " ")}) - cmd = b.GxxCmd(".") - env = append(env, cfg.EnvVar{Name: "CXX", Value: cmd[0]}) + cc := cfg.DefaultCC(cfg.Goos, cfg.Goarch) + if env := strings.Fields(os.Getenv("CC")); len(env) > 0 { + cc = env[0] + } + cxx := cfg.DefaultCXX(cfg.Goos, cfg.Goarch) + if env := strings.Fields(os.Getenv("CXX")); len(env) > 0 { + cxx = env[0] + } + env = append(env, cfg.EnvVar{Name: "CC", Value: cc}) + env = append(env, cfg.EnvVar{Name: "CXX", Value: cxx}) if cfg.BuildContext.CgoEnabled { env = append(env, cfg.EnvVar{Name: "CGO_ENABLED", Value: "1"}) @@ -102,19 +114,45 @@ func ExtraEnvVars() []cfg.EnvVar { var b work.Builder b.Init() cppflags, cflags, cxxflags, fflags, ldflags := b.CFlags(&load.Package{}) + cmd := b.GccCmd(".", "") return []cfg.EnvVar{ + // Note: Update the switch in runEnv below when adding to this list. 
{Name: "CGO_CFLAGS", Value: strings.Join(cflags, " ")}, {Name: "CGO_CPPFLAGS", Value: strings.Join(cppflags, " ")}, {Name: "CGO_CXXFLAGS", Value: strings.Join(cxxflags, " ")}, {Name: "CGO_FFLAGS", Value: strings.Join(fflags, " ")}, {Name: "CGO_LDFLAGS", Value: strings.Join(ldflags, " ")}, {Name: "PKG_CONFIG", Value: b.PkgconfigCmd()}, + {Name: "GOGCCFLAGS", Value: strings.Join(cmd[3:], " ")}, } } func runEnv(cmd *base.Command, args []string) { env := cfg.CmdEnv - env = append(env, ExtraEnvVars()...) + + // Do we need to call ExtraEnvVars, which is a bit expensive? + // Only if we're listing all environment variables ("go env") + // or the variables being requested are in the extra list. + needExtra := true + if len(args) > 0 { + needExtra = false + for _, arg := range args { + switch arg { + case "CGO_CFLAGS", + "CGO_CPPFLAGS", + "CGO_CXXFLAGS", + "CGO_FFLAGS", + "CGO_LDFLAGS", + "PKG_CONFIG", + "GOGCCFLAGS": + needExtra = true + } + } + } + if needExtra { + env = append(env, ExtraEnvVars()...) + } + if len(args) > 0 { if *envJson { var es []cfg.EnvVar diff --git a/src/cmd/go/internal/fix/fix.go b/src/cmd/go/internal/fix/fix.go index 788d49bcb60..99c7ca51acf 100644 --- a/src/cmd/go/internal/fix/fix.go +++ b/src/cmd/go/internal/fix/fix.go @@ -15,7 +15,7 @@ import ( var CmdFix = &base.Command{ Run: runFix, UsageLine: "fix [packages]", - Short: "run go tool fix on packages", + Short: "update packages to use new APIs", Long: ` Fix runs the Go fix command on the packages named by the import paths. @@ -33,7 +33,7 @@ func runFix(cmd *base.Command, args []string) { // Use pkg.gofiles instead of pkg.Dir so that // the command only applies to this package, // not to packages in subdirectories. 
- files := base.FilterDotUnderscoreFiles(base.RelPaths(pkg.Internal.AllGoFiles)) + files := base.RelPaths(pkg.InternalAllGoFiles()) base.Run(str.StringList(cfg.BuildToolexec, base.Tool("fix"), files)) } } diff --git a/src/cmd/go/internal/fmtcmd/fmt.go b/src/cmd/go/internal/fmtcmd/fmt.go index 0563a0410b3..eb96823fa6a 100644 --- a/src/cmd/go/internal/fmtcmd/fmt.go +++ b/src/cmd/go/internal/fmtcmd/fmt.go @@ -8,6 +8,9 @@ package fmtcmd import ( "os" "path/filepath" + "runtime" + "strings" + "sync" "cmd/go/internal/base" "cmd/go/internal/cfg" @@ -22,7 +25,7 @@ func init() { var CmdFmt = &base.Command{ Run: runFmt, UsageLine: "fmt [-n] [-x] [packages]", - Short: "run gofmt on package sources", + Short: "gofmt (reformat) package sources", Long: ` Fmt runs the command 'gofmt -l -w' on the packages named by the import paths. It prints the names of the files that are modified. @@ -41,13 +44,38 @@ See also: go fix, go vet. func runFmt(cmd *base.Command, args []string) { gofmt := gofmtPath() - for _, pkg := range load.Packages(args) { + procs := runtime.GOMAXPROCS(0) + var wg sync.WaitGroup + wg.Add(procs) + fileC := make(chan string, 2*procs) + for i := 0; i < procs; i++ { + go func() { + defer wg.Done() + for file := range fileC { + base.Run(str.StringList(gofmt, "-l", "-w", file)) + } + }() + } + for _, pkg := range load.PackagesAndErrors(args) { + if pkg.Error != nil { + if strings.HasPrefix(pkg.Error.Err, "build constraints exclude all Go files") { + // Skip this error, as we will format + // all files regardless. + } else { + base.Errorf("can't load package: %s", pkg.Error) + continue + } + } // Use pkg.gofiles instead of pkg.Dir so that // the command only applies to this package, // not to packages in subdirectories. 
- files := base.FilterDotUnderscoreFiles(base.RelPaths(pkg.Internal.AllGoFiles)) - base.Run(str.StringList(gofmt, "-l", "-w", files)) + files := base.RelPaths(pkg.InternalAllGoFiles()) + for _, file := range files { + fileC <- file + } } + close(fileC) + wg.Wait() } func gofmtPath() string { diff --git a/src/cmd/go/internal/generate/generate.go b/src/cmd/go/internal/generate/generate.go index d47c9b73236..75c0d3b09d6 100644 --- a/src/cmd/go/internal/generate/generate.go +++ b/src/cmd/go/internal/generate/generate.go @@ -153,7 +153,7 @@ func runGenerate(cmd *base.Command, args []string) { } // Even if the arguments are .go files, this loop suffices. for _, pkg := range load.Packages(args) { - for _, file := range pkg.Internal.GoFiles { + for _, file := range pkg.InternalGoFiles() { if !generate(pkg.Name, file) { break } @@ -385,7 +385,7 @@ func (g *Generator) setShorthand(words []string) { } command := words[1] if g.commands[command] != nil { - g.errorf("command %q defined multiply defined", command) + g.errorf("command %q multiply defined", command) } g.commands[command] = words[2:len(words):len(words)] // force later append to make copy } diff --git a/src/cmd/go/internal/get/get.go b/src/cmd/go/internal/get/get.go index e5dda643e40..d42dae6e61f 100644 --- a/src/cmd/go/internal/get/get.go +++ b/src/cmd/go/internal/get/get.go @@ -90,8 +90,7 @@ func init() { } func runGet(cmd *base.Command, args []string) { - work.InstrumentInit() - work.BuildModeInit() + work.BuildInit() if *getF && !*getU { base.Fatalf("go get: cannot use -f flag without -u") @@ -301,7 +300,7 @@ func download(arg string, parent *load.Package, stk *load.ImportStack, mode int) // due to wildcard expansion. for _, p := range pkgs { if *getFix { - files := base.FilterDotUnderscoreFiles(base.RelPaths(p.Internal.AllGoFiles)) + files := base.RelPaths(p.InternalAllGoFiles()) base.Run(cfg.BuildToolexec, str.StringList(base.Tool("fix"), files)) // The imports might have changed, so reload again. 
@@ -457,12 +456,8 @@ func downloadPackage(p *load.Package) error { // Check that this is an appropriate place for the repo to be checked out. // The target directory must either not exist or have a repo checked out already. meta := filepath.Join(root, "."+vcs.cmd) - st, err := os.Stat(meta) - if err == nil && !st.IsDir() { - return fmt.Errorf("%s exists but is not a directory", meta) - } - if err != nil { - // Metadata directory does not exist. Prepare to checkout new copy. + if _, err := os.Stat(meta); err != nil { + // Metadata file or directory does not exist. Prepare to checkout new copy. // Some version control tools require the target directory not to exist. // We require that too, just to avoid stepping on existing work. if _, err := os.Stat(root); err == nil { diff --git a/src/cmd/go/internal/get/vcs.go b/src/cmd/go/internal/get/vcs.go index 86d2e32efbc..26693b13a93 100644 --- a/src/cmd/go/internal/get/vcs.go +++ b/src/cmd/go/internal/get/vcs.go @@ -93,6 +93,7 @@ var vcsList = []*vcsCmd{ vcsGit, vcsSvn, vcsBzr, + vcsFossil, } // vcsByCmd returns the version control system for the given @@ -324,6 +325,34 @@ func svnRemoteRepo(vcsSvn *vcsCmd, rootDir string) (remoteRepo string, err error return strings.TrimSpace(out), nil } +// fossilRepoName is the name go get associates with a fossil repository. In the +// real world the file can be named anything. 
+const fossilRepoName = ".fossil" + +// vcsFossil describes how to use Fossil (fossil-scm.org) +var vcsFossil = &vcsCmd{ + name: "Fossil", + cmd: "fossil", + + createCmd: []string{"-go-internal-mkdir {dir} clone {repo} " + filepath.Join("{dir}", fossilRepoName), "-go-internal-cd {dir} open .fossil"}, + downloadCmd: []string{"up"}, + + tagCmd: []tagCmd{{"tag ls", `(.*)`}}, + tagSyncCmd: []string{"up tag:{tag}"}, + tagSyncDefault: []string{"up trunk"}, + + scheme: []string{"https", "http"}, + remoteRepo: fossilRemoteRepo, +} + +func fossilRemoteRepo(vcsFossil *vcsCmd, rootDir string) (remoteRepo string, err error) { + out, err := vcsFossil.runOutput(rootDir, "remote-url") + if err != nil { + return "", err + } + return strings.TrimSpace(string(out)), nil +} + func (v *vcsCmd) String() string { return v.name } @@ -362,6 +391,19 @@ func (v *vcsCmd) run1(dir string, cmdline string, keyval []string, verbose bool) args[i] = expand(m, arg) } + if len(args) >= 2 && args[0] == "-go-internal-mkdir" { + var err error + if filepath.IsAbs(args[1]) { + err = os.Mkdir(args[1], os.ModePerm) + } else { + err = os.Mkdir(filepath.Join(dir, args[1]), os.ModePerm) + } + if err != nil { + return nil, err + } + args = args[2:] + } + if len(args) >= 2 && args[0] == "-go-internal-cd" { if filepath.IsAbs(args[1]) { dir = args[1] @@ -928,7 +970,7 @@ var vcsPaths = []*vcsPath{ // IBM DevOps Services (JazzHub) { - prefix: "hub.jazz.net/git", + prefix: "hub.jazz.net/git/", re: `^(?Phub.jazz.net/git/[a-z0-9]+/[A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)*$`, vcs: "git", repo: "https://{root}", @@ -937,7 +979,7 @@ var vcsPaths = []*vcsPath{ // Git at Apache { - prefix: "git.apache.org", + prefix: "git.apache.org/", re: `^(?Pgit.apache.org/[a-z0-9_.\-]+\.git)(/[A-Za-z0-9_.\-]+)*$`, vcs: "git", repo: "https://{root}", @@ -945,16 +987,24 @@ var vcsPaths = []*vcsPath{ // Git at OpenStack { - prefix: "git.openstack.org", + prefix: "git.openstack.org/", re: 
`^(?Pgit\.openstack\.org/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(\.git)?(/[A-Za-z0-9_.\-]+)*$`, vcs: "git", repo: "https://{root}", }, + // chiselapp.com for fossil + { + prefix: "chiselapp.com/", + re: `^(?Pchiselapp\.com/user/[A-Za-z0-9]+/repository/[A-Za-z0-9_.\-]+)$`, + vcs: "fossil", + repo: "https://{root}", + }, + // General syntax for any server. // Must be last. { - re: `^(?P(?P([a-z0-9.\-]+\.)+[a-z0-9.\-]+(:[0-9]+)?(/~?[A-Za-z0-9_.\-]+)+?)\.(?Pbzr|git|hg|svn))(/~?[A-Za-z0-9_.\-]+)*$`, + re: `^(?P(?P([a-z0-9.\-]+\.)+[a-z0-9.\-]+(:[0-9]+)?(/~?[A-Za-z0-9_.\-]+)+?)\.(?Pbzr|fossil|git|hg|svn))(/~?[A-Za-z0-9_.\-]+)*$`, ping: true, }, } diff --git a/src/cmd/go/internal/get/vcs_test.go b/src/cmd/go/internal/get/vcs_test.go index 62d352ae575..e29338aec19 100644 --- a/src/cmd/go/internal/get/vcs_test.go +++ b/src/cmd/go/internal/get/vcs_test.go @@ -154,6 +154,22 @@ func TestRepoRootForImportPath(t *testing.T) { repo: "https://git.apache.org/package-name_2.x.git", }, }, + { + "chiselapp.com/user/kyle/repository/fossilgg", + &repoRoot{ + vcs: vcsFossil, + repo: "https://chiselapp.com/user/kyle/repository/fossilgg", + }, + }, + { + // must have a user/$name/repository/$repo path + "chiselapp.com/kyle/repository/fossilgg", + nil, + }, + { + "chiselapp.com/user/kyle/fossilgg", + nil, + }, } for _, test := range tests { @@ -241,6 +257,8 @@ func TestIsSecure(t *testing.T) { {vcsGit, "example.com:path/to/repo.git", false}, {vcsGit, "path/that/contains/a:colon/repo.git", false}, {vcsHg, "ssh://user@example.com/path/to/repo.hg", true}, + {vcsFossil, "http://example.com/foo", false}, + {vcsFossil, "https://example.com/foo", true}, } for _, test := range tests { diff --git a/src/cmd/go/internal/help/helpdoc.go b/src/cmd/go/internal/help/helpdoc.go index b9349ee8894..43144db593a 100644 --- a/src/cmd/go/internal/help/helpdoc.go +++ b/src/cmd/go/internal/help/helpdoc.go @@ -471,6 +471,12 @@ General-purpose environment variables: See https://golang.org/doc/articles/race_detector.html. 
GOROOT The root of the go tree. + GOTMPDIR + The directory where the go command will write + temporary source files, packages, and binaries. + GOCACHE + The directory where the go command will store + cached information for reuse in future builds. Environment variables for use with cgo: @@ -505,6 +511,9 @@ Architecture-specific environment variables: GO386 For GOARCH=386, the floating point instruction set. Valid values are 387, sse2. + GOMIPS + For GOARCH=mips{,le}, whether to use floating point instructions. + Valid values are hardfloat (default), softfloat. Special-purpose environment variables: @@ -565,8 +574,10 @@ Non-test Go source files can also include a //go:binary-only-package comment, indicating that the package sources are included for documentation only and must not be used to build the package binary. This enables distribution of Go packages in -their compiled form alone. See the go/build package documentation -for more details. +their compiled form alone. Even binary-only packages require +accurate import blocks listing required dependencies, so that +those dependencies can be supplied when linking the resulting +command. `, } diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go index 241d0894c01..74352730004 100644 --- a/src/cmd/go/internal/list/list.go +++ b/src/cmd/go/internal/list/list.go @@ -152,7 +152,7 @@ var listJson = CmdList.Flag.Bool("json", false, "") var nl = []byte{'\n'} func runList(cmd *base.Command, args []string) { - work.BuildModeInit() + work.BuildInit() out := newTrackingWriter(os.Stdout) defer out.w.Flush() @@ -194,12 +194,29 @@ func runList(cmd *base.Command, args []string) { } } - loadpkgs := load.Packages + var pkgs []*load.Package if *listE { - loadpkgs = load.PackagesAndErrors + pkgs = load.PackagesAndErrors(args) + } else { + pkgs = load.Packages(args) } - for _, pkg := range loadpkgs(args) { + // Estimate whether staleness information is needed, + // since it's a little bit of work to compute. 
+ needStale := *listJson || strings.Contains(*listFmt, ".Stale") + if needStale { + var b work.Builder + b.Init() + b.ComputeStaleOnly = true + a := &work.Action{} + // TODO: Use pkgsFilter? + for _, p := range pkgs { + a.Deps = append(a.Deps, b.AutoAction(work.ModeInstall, work.ModeInstall, p)) + } + b.Do(a) + } + + for _, pkg := range pkgs { // Show vendor-expanded paths in listing pkg.TestImports = pkg.Vendored(pkg.TestImports) pkg.XTestImports = pkg.Vendored(pkg.XTestImports) diff --git a/src/cmd/go/internal/load/flag.go b/src/cmd/go/internal/load/flag.go new file mode 100644 index 00000000000..7ad4208ccc0 --- /dev/null +++ b/src/cmd/go/internal/load/flag.go @@ -0,0 +1,121 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package load + +import ( + "cmd/go/internal/base" + "cmd/go/internal/str" + "fmt" + "strings" +) + +var ( + BuildAsmflags PerPackageFlag // -asmflags + BuildGcflags PerPackageFlag // -gcflags + BuildLdflags PerPackageFlag // -ldflags + BuildGccgoflags PerPackageFlag // -gccgoflags +) + +// A PerPackageFlag is a command-line flag implementation (a flag.Value) +// that allows specifying different effective flags for different packages. +// See 'go help build' for more details about per-package flags. +type PerPackageFlag struct { + present bool + values []ppfValue +} + +// A ppfValue is a single = per-package flag value. +type ppfValue struct { + match func(*Package) bool // compiled pattern + flags []string +} + +// Set is called each time the flag is encountered on the command line. +func (f *PerPackageFlag) Set(v string) error { + return f.set(v, base.Cwd) +} + +// set is the implementation of Set, taking a cwd (current working directory) for easier testing. 
+func (f *PerPackageFlag) set(v, cwd string) error { + f.present = true + match := func(p *Package) bool { return p.Internal.CmdlinePkg || p.Internal.CmdlineFiles } // default predicate with no pattern + // For backwards compatibility with earlier flag splitting, ignore spaces around flags. + v = strings.TrimSpace(v) + if v == "" { + // Special case: -gcflags="" means no flags for command-line arguments + // (overrides previous -gcflags="-whatever"). + f.values = append(f.values, ppfValue{match, []string{}}) + return nil + } + if !strings.HasPrefix(v, "-") { + i := strings.Index(v, "=") + if i < 0 { + return fmt.Errorf("missing = in =") + } + if i == 0 { + return fmt.Errorf("missing in =") + } + pattern := strings.TrimSpace(v[:i]) + match = MatchPackage(pattern, cwd) + v = v[i+1:] + } + flags, err := str.SplitQuotedFields(v) + if err != nil { + return err + } + if flags == nil { + flags = []string{} + } + f.values = append(f.values, ppfValue{match, flags}) + return nil +} + +// String is required to implement flag.Value. +// It is not used, because cmd/go never calls flag.PrintDefaults. +func (f *PerPackageFlag) String() string { return "" } + +// Present reports whether the flag appeared on the command line. +func (f *PerPackageFlag) Present() bool { + return f.present +} + +// For returns the flags to use for the given package. +func (f *PerPackageFlag) For(p *Package) []string { + flags := []string{} + for _, v := range f.values { + if v.match(p) { + flags = v.flags + } + } + return flags +} + +var cmdlineMatchers []func(*Package) bool + +// SetCmdlinePatterns records the set of patterns given on the command line, +// for use by the PerPackageFlags. 
+func SetCmdlinePatterns(args []string) { + setCmdlinePatterns(args, base.Cwd) +} + +func setCmdlinePatterns(args []string, cwd string) { + if len(args) == 0 { + args = []string{"."} + } + cmdlineMatchers = nil // allow reset for testing + for _, arg := range args { + cmdlineMatchers = append(cmdlineMatchers, MatchPackage(arg, cwd)) + } +} + +// isCmdlinePkg reports whether p is a package listed on the command line. +func isCmdlinePkg(p *Package) bool { + for _, m := range cmdlineMatchers { + if m(p) { + return true + } + } + return false +} diff --git a/src/cmd/go/internal/load/flag_test.go b/src/cmd/go/internal/load/flag_test.go new file mode 100644 index 00000000000..d3223e12d52 --- /dev/null +++ b/src/cmd/go/internal/load/flag_test.go @@ -0,0 +1,135 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package load + +import ( + "fmt" + "path/filepath" + "reflect" + "testing" +) + +type ppfTestPackage struct { + path string + dir string + cmdline bool + flags []string +} + +type ppfTest struct { + args []string + pkgs []ppfTestPackage +} + +var ppfTests = []ppfTest{ + // -gcflags=-S applies only to packages on command line. + { + args: []string{"-S"}, + pkgs: []ppfTestPackage{ + {cmdline: true, flags: []string{"-S"}}, + {cmdline: false, flags: []string{}}, + }, + }, + + // -gcflags=-S -gcflags= overrides the earlier -S. 
+ { + args: []string{"-S", ""}, + pkgs: []ppfTestPackage{ + {cmdline: true, flags: []string{}}, + }, + }, + + // -gcflags=net=-S applies only to package net + { + args: []string{"net=-S"}, + pkgs: []ppfTestPackage{ + {path: "math", cmdline: true, flags: []string{}}, + {path: "net", flags: []string{"-S"}}, + }, + }, + + // -gcflags=net=-S -gcflags=net= also overrides the earlier -S + { + args: []string{"net=-S", "net="}, + pkgs: []ppfTestPackage{ + {path: "net", flags: []string{}}, + }, + }, + + // -gcflags=net/...=-S net math + // applies -S to net and net/http but not math + { + args: []string{"net/...=-S"}, + pkgs: []ppfTestPackage{ + {path: "net", flags: []string{"-S"}}, + {path: "net/http", flags: []string{"-S"}}, + {path: "math", flags: []string{}}, + }, + }, + + // -gcflags=net/...=-S -gcflags=-m net math + // applies -m to net and math and -S to other packages matching net/... + // (net matches too, but it was grabbed by the later -gcflags). + { + args: []string{"net/...=-S", "-m"}, + pkgs: []ppfTestPackage{ + {path: "net", cmdline: true, flags: []string{"-m"}}, + {path: "math", cmdline: true, flags: []string{"-m"}}, + {path: "net", cmdline: false, flags: []string{"-S"}}, + {path: "net/http", flags: []string{"-S"}}, + {path: "math", flags: []string{}}, + }, + }, + + // relative path patterns + // ppfDirTest(pattern, n, dirs...) says the first n dirs should match and the others should not. 
+ ppfDirTest(".", 1, "/my/test/dir", "/my/test", "/my/test/other", "/my/test/dir/sub"), + ppfDirTest("..", 1, "/my/test", "/my/test/dir", "/my/test/other", "/my/test/dir/sub"), + ppfDirTest("./sub", 1, "/my/test/dir/sub", "/my/test", "/my/test/dir", "/my/test/other", "/my/test/dir/sub/sub"), + ppfDirTest("../other", 1, "/my/test/other", "/my/test", "/my/test/dir", "/my/test/other/sub", "/my/test/dir/other", "/my/test/dir/sub"), + ppfDirTest("./...", 3, "/my/test/dir", "/my/test/dir/sub", "/my/test/dir/sub/sub", "/my/test/other", "/my/test/other/sub"), + ppfDirTest("../...", 4, "/my/test/dir", "/my/test/other", "/my/test/dir/sub", "/my/test/other/sub", "/my/other/test"), + ppfDirTest("../...sub...", 3, "/my/test/dir/sub", "/my/test/othersub", "/my/test/yellowsubmarine", "/my/other/test"), +} + +func ppfDirTest(pattern string, nmatch int, dirs ...string) ppfTest { + var pkgs []ppfTestPackage + for i, d := range dirs { + flags := []string{} + if i < nmatch { + flags = []string{"-S"} + } + pkgs = append(pkgs, ppfTestPackage{path: "p", dir: d, flags: flags}) + } + return ppfTest{args: []string{pattern + "=-S"}, pkgs: pkgs} +} + +func TestPerPackageFlag(t *testing.T) { + nativeDir := func(d string) string { + if filepath.Separator == '\\' { + return `C:` + filepath.FromSlash(d) + } + return d + } + + for i, tt := range ppfTests { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + ppFlags := new(PerPackageFlag) + for _, arg := range tt.args { + t.Logf("set(%s)", arg) + if err := ppFlags.set(arg, nativeDir("/my/test/dir")); err != nil { + t.Fatal(err) + } + } + for _, p := range tt.pkgs { + dir := nativeDir(p.dir) + flags := ppFlags.For(&Package{PackagePublic: PackagePublic{ImportPath: p.path, Dir: dir}, Internal: PackageInternal{CmdlinePkg: p.cmdline}}) + if !reflect.DeepEqual(flags, p.flags) { + t.Errorf("For(%v, %v, %v) = %v, want %v", p.path, dir, p.cmdline, flags, p.flags) + } + } + }) + } +} diff --git a/src/cmd/go/internal/load/icfg.go 
b/src/cmd/go/internal/load/icfg.go new file mode 100644 index 00000000000..0b346df0772 --- /dev/null +++ b/src/cmd/go/internal/load/icfg.go @@ -0,0 +1,75 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package load + +import ( + "bytes" + "encoding/json" + "errors" + "io/ioutil" +) + +// DebugDeprecatedImportcfg is installed as the undocumented -debug-deprecated-importcfg build flag. +// It is useful for debugging subtle problems in the go command logic but not something +// we want users to depend on. The hope is that the "deprecated" will make that clear. +// We intend to remove this flag in Go 1.11. +var DebugDeprecatedImportcfg debugDeprecatedImportcfgFlag + +type debugDeprecatedImportcfgFlag struct { + enabled bool + pkgs map[string]*debugDeprecatedImportcfgPkg +} + +type debugDeprecatedImportcfgPkg struct { + Dir string + Import map[string]string +} + +var ( + debugDeprecatedImportcfgMagic = []byte("# debug-deprecated-importcfg\n") + errImportcfgSyntax = errors.New("malformed syntax") +) + +func (f *debugDeprecatedImportcfgFlag) String() string { return "" } + +func (f *debugDeprecatedImportcfgFlag) Set(x string) error { + if x == "" { + *f = debugDeprecatedImportcfgFlag{} + return nil + } + data, err := ioutil.ReadFile(x) + if err != nil { + return err + } + + if !bytes.HasPrefix(data, debugDeprecatedImportcfgMagic) { + return errImportcfgSyntax + } + data = data[len(debugDeprecatedImportcfgMagic):] + + f.pkgs = nil + if err := json.Unmarshal(data, &f.pkgs); err != nil { + return errImportcfgSyntax + } + f.enabled = true + return nil +} + +func (f *debugDeprecatedImportcfgFlag) lookup(parent *Package, path string) (dir, newPath string) { + if parent == nil { + if p1 := f.pkgs[path]; p1 != nil { + return p1.Dir, path + } + return "", "" + } + if p1 := f.pkgs[parent.ImportPath]; p1 != nil { + if newPath := p1.Import[path]; newPath != "" { + 
if p2 := f.pkgs[newPath]; p2 != nil { + return p2.Dir, newPath + } + } + } + return "", "" +} diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go index 4fbde1de8ce..b19a9600367 100644 --- a/src/cmd/go/internal/load/pkg.go +++ b/src/cmd/go/internal/load/pkg.go @@ -6,7 +6,6 @@ package load import ( - "crypto/sha1" "fmt" "go/build" "go/token" @@ -14,13 +13,11 @@ import ( "os" pathpkg "path" "path/filepath" - "runtime" "sort" "strings" "unicode" "cmd/go/internal/base" - "cmd/go/internal/buildid" "cmd/go/internal/cfg" "cmd/go/internal/str" ) @@ -42,16 +39,20 @@ type PackagePublic struct { ImportComment string `json:",omitempty"` // path in import comment on package statement Name string `json:",omitempty"` // package name Doc string `json:",omitempty"` // package documentation string - Target string `json:",omitempty"` // install path + Target string `json:",omitempty"` // installed target for this package (may be executable) Shlib string `json:",omitempty"` // the shared library that contains this package (only set when -linkshared) Goroot bool `json:",omitempty"` // is this package found in the Go root? Standard bool `json:",omitempty"` // is this package part of the standard Go library? - Stale bool `json:",omitempty"` // would 'go install' do anything for this package? - StaleReason string `json:",omitempty"` // why is Stale true? Root string `json:",omitempty"` // Go root or Go path dir containing this package ConflictDir string `json:",omitempty"` // Dir is hidden by this other directory BinaryOnly bool `json:",omitempty"` // package cannot be recompiled + // Stale and StaleReason remain here *only* for the list command. + // They are only initialized in preparation for list execution. + // The regular build determines staleness on the fly during action execution. + Stale bool `json:",omitempty"` // would 'go install' do anything for this package? + StaleReason string `json:",omitempty"` // why is Stale true? 
+ // Source files GoFiles []string `json:",omitempty"` // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles) CgoFiles []string `json:",omitempty"` // .go sources files that import "C" @@ -93,25 +94,23 @@ type PackagePublic struct { type PackageInternal struct { // Unexported fields are not part of the public API. Build *build.Package - Pkgdir string // overrides build.PkgDir - Imports []*Package - Deps []*Package - GoFiles []string // GoFiles+CgoFiles+TestGoFiles+XTestGoFiles files, absolute paths - SFiles []string - AllGoFiles []string // gofiles + IgnoredGoFiles, absolute paths - Target string // installed file for this package (may be executable) - Fake bool // synthesized package - External bool // synthesized external test package + Imports []*Package // this package's direct imports + RawImports []string // this package's original imports as they appear in the text of the program ForceLibrary bool // this package is a library (even if named "main") - Cmdline bool // defined by files listed on command line + CmdlineFiles bool // package built from files listed on command line + CmdlinePkg bool // package listed on command line Local bool // imported via local path (./ or ../) LocalPrefix string // interpret ./ and ../ imports relative to this prefix ExeName string // desired name for temporary executable CoverMode string // preprocess Go source files with the coverage tool in this mode CoverVars map[string]*CoverVar // variables created by coverage analysis OmitDebug bool // tell linker not to write debug information - BuildID string // expected build ID for generated package GobinSubdir bool // install target would be subdir of GOBIN + + Asmflags []string // -asmflags for this package + Gcflags []string // -gcflags for this package + Ldflags []string // -ldflags for this package + Gccgoflags []string // -gccgoflags for this package } type NoGoError struct { @@ -222,6 +221,7 @@ func (p *Package) copyBuild(pp *build.Package) { // We modify 
p.Imports in place, so make copy now. p.Imports = make([]string, len(pp.Imports)) copy(p.Imports, pp.Imports) + p.Internal.RawImports = pp.Imports p.TestGoFiles = pp.TestGoFiles p.TestImports = pp.TestImports p.XTestGoFiles = pp.XTestGoFiles @@ -357,7 +357,7 @@ func makeImportValid(r rune) rune { // Mode flags for loadImport and download (in get.go). const ( - // useVendor means that loadImport should do vendor expansion + // UseVendor means that loadImport should do vendor expansion // (provided the vendoring experiment is enabled). // That is, useVendor means that the import path came from // a source file and has not been vendor-expanded yet. @@ -368,12 +368,12 @@ const ( // disallowVendor will reject direct use of paths containing /vendor/. UseVendor = 1 << iota - // getTestDeps is for download (part of "go get") and indicates + // GetTestDeps is for download (part of "go get") and indicates // that test dependencies should be fetched too. GetTestDeps ) -// loadImport scans the directory named by path, which must be an import path, +// LoadImport scans the directory named by path, which must be an import path, // but possibly a local import path (an absolute file system path or one beginning // with ./ or ../). A local relative path is interpreted relative to srcDir. // It returns a *Package describing the package found in that directory. 
@@ -389,8 +389,14 @@ func LoadImport(path, srcDir string, parent *Package, stk *ImportStack, importPo importPath := path origPath := path isLocal := build.IsLocalImport(path) + var debugDeprecatedImportcfgDir string if isLocal { importPath = dirToImportPath(filepath.Join(srcDir, path)) + } else if DebugDeprecatedImportcfg.enabled { + if d, i := DebugDeprecatedImportcfg.lookup(parent, path); d != "" { + debugDeprecatedImportcfgDir = d + importPath = i + } } else if mode&UseVendor != 0 { // We do our own vendor resolution, because we want to // find out the key to use in packageCache without the @@ -412,20 +418,23 @@ func LoadImport(path, srcDir string, parent *Package, stk *ImportStack, importPo // Load package. // Import always returns bp != nil, even if an error occurs, // in order to return partial information. - // - // TODO: After Go 1, decide when to pass build.AllowBinary here. - // See issue 3268 for mistakes to avoid. - buildMode := build.ImportComment - if mode&UseVendor == 0 || path != origPath { - // Not vendoring, or we already found the vendored path. - buildMode |= build.IgnoreVendor + var bp *build.Package + var err error + if debugDeprecatedImportcfgDir != "" { + bp, err = cfg.BuildContext.ImportDir(debugDeprecatedImportcfgDir, 0) + } else { + buildMode := build.ImportComment + if mode&UseVendor == 0 || path != origPath { + // Not vendoring, or we already found the vendored path. 
+ buildMode |= build.IgnoreVendor + } + bp, err = cfg.BuildContext.Import(path, srcDir, buildMode) } - bp, err := cfg.BuildContext.Import(path, srcDir, buildMode) bp.ImportPath = importPath if cfg.GOBIN != "" { bp.BinDir = cfg.GOBIN } - if err == nil && !isLocal && bp.ImportComment != "" && bp.ImportComment != path && + if debugDeprecatedImportcfgDir == "" && err == nil && !isLocal && bp.ImportComment != "" && bp.ImportComment != path && !strings.Contains(path, "/vendor/") && !strings.HasPrefix(path, "vendor/") { err = fmt.Errorf("code in directory %s expects import %q", bp.Dir, bp.ImportComment) } @@ -434,7 +443,7 @@ func LoadImport(path, srcDir string, parent *Package, stk *ImportStack, importPo p = setErrorPos(p, importPos) } - if origPath != cleanImport(origPath) { + if debugDeprecatedImportcfgDir == "" && origPath != cleanImport(origPath) { p.Error = &PackageError{ ImportStack: stk.Copy(), Err: fmt.Sprintf("non-canonical import path: %q should be %q", origPath, pathpkg.Clean(origPath)), @@ -809,45 +818,27 @@ func FindVendor(path string) (index int, ok bool) { return 0, false } -type targetDir int +type TargetDir int const ( - ToRoot targetDir = iota // to bin dir inside package root (default) - ToTool // GOROOT/pkg/tool - StalePath // the old import path; fail to build + ToTool TargetDir = iota // to GOROOT/pkg/tool (default for cmd/*) + ToBin // to bin dir inside package root (default for non-cmd/*) + StalePath // an old import path; fail to build ) -// goTools is a map of Go program import path to install target directory. 
-var GoTools = map[string]targetDir{ - "cmd/addr2line": ToTool, - "cmd/api": ToTool, - "cmd/asm": ToTool, - "cmd/compile": ToTool, - "cmd/cgo": ToTool, - "cmd/cover": ToTool, - "cmd/dist": ToTool, - "cmd/doc": ToTool, - "cmd/fix": ToTool, - "cmd/link": ToTool, - "cmd/newlink": ToTool, - "cmd/nm": ToTool, - "cmd/objdump": ToTool, - "cmd/pack": ToTool, - "cmd/pprof": ToTool, - "cmd/trace": ToTool, - "cmd/vet": ToTool, - "code.google.com/p/go.tools/cmd/cover": StalePath, - "code.google.com/p/go.tools/cmd/godoc": StalePath, - "code.google.com/p/go.tools/cmd/vet": StalePath, -} - -var raceExclude = map[string]bool{ - "runtime/race": true, - "runtime/msan": true, - "runtime/cgo": true, - "cmd/cgo": true, - "syscall": true, - "errors": true, +// InstallTargetDir reports the target directory for installing the command p. +func InstallTargetDir(p *Package) TargetDir { + if strings.HasPrefix(p.ImportPath, "code.google.com/p/go.tools/cmd/") { + return StalePath + } + if p.Goroot && strings.HasPrefix(p.ImportPath, "cmd/") && p.Name == "main" { + switch p.ImportPath { + case "cmd/go", "cmd/gofmt": + return ToBin + } + return ToTool + } + return ToBin } var cgoExclude = map[string]bool{ @@ -864,12 +855,34 @@ var foldPath = make(map[string]string) // load populates p using information from bp, err, which should // be the result of calling build.Context.Import. -func (p *Package) load(stk *ImportStack, bp *build.Package, err error) *Package { +func (p *Package) load(stk *ImportStack, bp *build.Package, err error) { p.copyBuild(bp) + // Decide whether p was listed on the command line. + // Given that load is called while processing the command line, + // you might think we could simply pass a flag down into load + // saying whether we are loading something named on the command + // line or something to satisfy an import. 
But the first load of a + // package named on the command line may be as a dependency + // of an earlier package named on the command line, not when we + // get to that package during command line processing. + // For example "go test fmt reflect" will load reflect as a dependency + // of fmt before it attempts to load as a command-line argument. + // Because loads are cached, the later load will be a no-op, + // so it is important that the first load can fill in CmdlinePkg correctly. + // Hence the call to an explicit matching check here. + p.Internal.CmdlinePkg = isCmdlinePkg(p) + + p.Internal.Asmflags = BuildAsmflags.For(p) + p.Internal.Gcflags = BuildGcflags.For(p) + p.Internal.Ldflags = BuildLdflags.For(p) + p.Internal.Gccgoflags = BuildGccgoflags.For(p) + // The localPrefix is the path we interpret ./ imports relative to. // Synthesized main packages sometimes override this. - p.Internal.LocalPrefix = dirToImportPath(p.Dir) + if p.Internal.Local { + p.Internal.LocalPrefix = dirToImportPath(p.Dir) + } if err != nil { if _, ok := err.(*build.NoGoError); ok { @@ -881,7 +894,7 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) *Package ImportStack: stk.Copy(), Err: err.Error(), } - return p + return } useBindir := p.Name == "main" @@ -894,11 +907,11 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) *Package if useBindir { // Report an error when the old code.google.com/p/go.tools paths are used. 
- if GoTools[p.ImportPath] == StalePath { + if InstallTargetDir(p) == StalePath { newPath := strings.Replace(p.ImportPath, "code.google.com/p/go.", "golang.org/x/", 1) e := fmt.Sprintf("the %v command has moved; use %v instead.", p.ImportPath, newPath) p.Error = &PackageError{Err: e} - return p + return } _, elem := filepath.Split(p.Dir) full := cfg.BuildContext.GOOS + "_" + cfg.BuildContext.GOARCH + "/" + elem @@ -908,125 +921,82 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) *Package } if p.Internal.Build.BinDir != "" { // Install to GOBIN or bin of GOPATH entry. - p.Internal.Target = filepath.Join(p.Internal.Build.BinDir, elem) + p.Target = filepath.Join(p.Internal.Build.BinDir, elem) if !p.Goroot && strings.Contains(elem, "/") && cfg.GOBIN != "" { // Do not create $GOBIN/goos_goarch/elem. - p.Internal.Target = "" + p.Target = "" p.Internal.GobinSubdir = true } } - if GoTools[p.ImportPath] == ToTool { + if InstallTargetDir(p) == ToTool { // This is for 'go tool'. // Override all the usual logic and force it into the tool directory. - p.Internal.Target = filepath.Join(cfg.GOROOTpkg, "tool", full) + p.Target = filepath.Join(cfg.GOROOTpkg, "tool", full) } - if p.Internal.Target != "" && cfg.BuildContext.GOOS == "windows" { - p.Internal.Target += ".exe" + if p.Target != "" && cfg.BuildContext.GOOS == "windows" { + p.Target += ".exe" } } else if p.Internal.Local { // Local import turned into absolute path. // No permanent install target. 
- p.Internal.Target = "" + p.Target = "" } else { - p.Internal.Target = p.Internal.Build.PkgObj + p.Target = p.Internal.Build.PkgObj if cfg.BuildLinkshared { - shlibnamefile := p.Internal.Target[:len(p.Internal.Target)-2] + ".shlibname" + shlibnamefile := p.Target[:len(p.Target)-2] + ".shlibname" shlib, err := ioutil.ReadFile(shlibnamefile) + if err != nil && !os.IsNotExist(err) { + base.Fatalf("reading shlibname: %v", err) + } if err == nil { libname := strings.TrimSpace(string(shlib)) if cfg.BuildContext.Compiler == "gccgo" { p.Shlib = filepath.Join(p.Internal.Build.PkgTargetRoot, "shlibs", libname) } else { p.Shlib = filepath.Join(p.Internal.Build.PkgTargetRoot, libname) - } - } else if !os.IsNotExist(err) { - base.Fatalf("unexpected error reading %s: %v", shlibnamefile, err) } } } - ImportPaths := p.Imports - // Packages that use cgo import runtime/cgo implicitly. - // Packages that use cgo also import syscall implicitly, - // to wrap errno. - // Exclude certain packages to avoid circular dependencies. - if len(p.CgoFiles) > 0 && (!p.Standard || !cgoExclude[p.ImportPath]) { - ImportPaths = append(ImportPaths, "runtime/cgo") - } - if len(p.CgoFiles) > 0 && (!p.Standard || !cgoSyscallExclude[p.ImportPath]) { - ImportPaths = append(ImportPaths, "syscall") - } - - if cfg.BuildContext.CgoEnabled && p.Name == "main" && !p.Goroot { - // Currently build modes c-shared, pie (on systems that do not - // support PIE with internal linking mode), plugin, and - // -linkshared force external linking mode, as of course does - // -ldflags=-linkmode=external. External linking mode forces - // an import of runtime/cgo. 
- pieCgo := cfg.BuildBuildmode == "pie" && (cfg.BuildContext.GOOS != "linux" || cfg.BuildContext.GOARCH != "amd64") - linkmodeExternal := false - for i, a := range cfg.BuildLdflags { - if a == "-linkmode=external" { - linkmodeExternal = true - } - if a == "-linkmode" && i+1 < len(cfg.BuildLdflags) && cfg.BuildLdflags[i+1] == "external" { - linkmodeExternal = true + // Build augmented import list to add implicit dependencies. + // Be careful not to add imports twice, just to avoid confusion. + importPaths := p.Imports + addImport := func(path string) { + for _, p := range importPaths { + if path == p { + return } } - if cfg.BuildBuildmode == "c-shared" || cfg.BuildBuildmode == "plugin" || pieCgo || cfg.BuildLinkshared || linkmodeExternal { - ImportPaths = append(ImportPaths, "runtime/cgo") + importPaths = append(importPaths, path) + } + + // Cgo translation adds imports of "runtime/cgo" and "syscall", + // except for certain packages, to avoid circular dependencies. + if p.UsesCgo() && (!p.Standard || !cgoExclude[p.ImportPath]) { + addImport("runtime/cgo") + } + if p.UsesCgo() && (!p.Standard || !cgoSyscallExclude[p.ImportPath]) { + addImport("syscall") + } + + // SWIG adds imports of some standard packages. + if p.UsesSwig() { + addImport("runtime/cgo") + addImport("syscall") + addImport("sync") + + // TODO: The .swig and .swigcxx files can use + // %go_import directives to import other packages. + } + + // The linker loads implicit dependencies. + if p.Name == "main" && !p.Internal.ForceLibrary { + for _, dep := range LinkerDeps(p) { + addImport(dep) } } - // Everything depends on runtime, except runtime, its internal - // subpackages, and unsafe. - if !p.Standard || (p.ImportPath != "runtime" && !strings.HasPrefix(p.ImportPath, "runtime/internal/") && p.ImportPath != "unsafe") { - ImportPaths = append(ImportPaths, "runtime") - // When race detection enabled everything depends on runtime/race. - // Exclude certain packages to avoid circular dependencies. 
- if cfg.BuildRace && (!p.Standard || !raceExclude[p.ImportPath]) { - ImportPaths = append(ImportPaths, "runtime/race") - } - // MSan uses runtime/msan. - if cfg.BuildMSan && (!p.Standard || !raceExclude[p.ImportPath]) { - ImportPaths = append(ImportPaths, "runtime/msan") - } - // On ARM with GOARM=5, everything depends on math for the link. - if p.Name == "main" && cfg.Goarch == "arm" { - ImportPaths = append(ImportPaths, "math") - } - } - - // Runtime and its internal packages depend on runtime/internal/sys, - // so that they pick up the generated zversion.go file. - // This can be an issue particularly for runtime/internal/atomic; - // see issue 13655. - if p.Standard && (p.ImportPath == "runtime" || strings.HasPrefix(p.ImportPath, "runtime/internal/")) && p.ImportPath != "runtime/internal/sys" { - ImportPaths = append(ImportPaths, "runtime/internal/sys") - } - - // Build list of full paths to all Go files in the package, - // for use by commands like go fmt. - p.Internal.GoFiles = str.StringList(p.GoFiles, p.CgoFiles, p.TestGoFiles, p.XTestGoFiles) - for i := range p.Internal.GoFiles { - p.Internal.GoFiles[i] = filepath.Join(p.Dir, p.Internal.GoFiles[i]) - } - sort.Strings(p.Internal.GoFiles) - - p.Internal.SFiles = str.StringList(p.SFiles) - for i := range p.Internal.SFiles { - p.Internal.SFiles[i] = filepath.Join(p.Dir, p.Internal.SFiles[i]) - } - sort.Strings(p.Internal.SFiles) - - p.Internal.AllGoFiles = str.StringList(p.IgnoredGoFiles) - for i := range p.Internal.AllGoFiles { - p.Internal.AllGoFiles[i] = filepath.Join(p.Dir, p.Internal.AllGoFiles[i]) - } - p.Internal.AllGoFiles = append(p.Internal.AllGoFiles, p.Internal.GoFiles...) - sort.Strings(p.Internal.AllGoFiles) - // Check for case-insensitive collision of input files. 
// To avoid problems on case-insensitive files, we reject any package // where two different input files have equal names under a case-insensitive @@ -1052,23 +1022,12 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) *Package ImportStack: stk.Copy(), Err: fmt.Sprintf("case-insensitive file name collision: %q and %q", f1, f2), } - return p + return } // Build list of imported packages and full dependency list. imports := make([]*Package, 0, len(p.Imports)) - deps := make(map[string]*Package) - save := func(path string, p1 *Package) { - // The same import path could produce an error or not, - // depending on what tries to import it. - // Prefer to record entries with errors, so we can report them. - p0 := deps[path] - if p0 == nil || p1.Error != nil && (p0.Error == nil || len(p0.Error.ImportStack) > len(p1.Error.ImportStack)) { - deps[path] = p1 - } - } - - for i, path := range ImportPaths { + for i, path := range importPaths { if path == "C" { continue } @@ -1085,22 +1044,38 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) *Package } path = p1.ImportPath - ImportPaths[i] = path + importPaths[i] = path if i < len(p.Imports) { p.Imports[i] = path } - save(path, p1) imports = append(imports, p1) - for _, dep := range p1.Internal.Deps { - save(dep.ImportPath, dep) - } if p1.Incomplete { p.Incomplete = true } } p.Internal.Imports = imports + deps := make(map[string]*Package) + var q []*Package + q = append(q, imports...) + for i := 0; i < len(q); i++ { + p1 := q[i] + path := p1.ImportPath + // The same import path could produce an error or not, + // depending on what tries to import it. + // Prefer to record entries with errors, so we can report them. 
+ p0 := deps[path] + if p0 == nil || p1.Error != nil && (p0.Error == nil || len(p0.Error.ImportStack) > len(p1.Error.ImportStack)) { + deps[path] = p1 + for _, p2 := range p1.Internal.Imports { + if deps[p2.ImportPath] != p2 { + q = append(q, p2) + } + } + } + } + p.Deps = make([]string, 0, len(deps)) for dep := range deps { p.Deps = append(p.Deps, dep) @@ -1111,7 +1086,6 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) *Package if p1 == nil { panic("impossible: missing entry in package cache for " + dep + " imported by " + p.ImportPath) } - p.Internal.Deps = append(p.Internal.Deps, p1) if p1.Error != nil { p.DepsErrors = append(p.DepsErrors, p1.Error) } @@ -1119,9 +1093,8 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) *Package // unsafe is a fake package. if p.Standard && (p.ImportPath == "unsafe" || cfg.BuildContext.Compiler == "gccgo") { - p.Internal.Target = "" + p.Target = "" } - p.Target = p.Internal.Target // If cgo is not enabled, ignore cgo supporting sources // just as we ignore go files containing import "C". @@ -1137,13 +1110,32 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) *Package // code; see issue #16050). } - // The gc toolchain only permits C source files with cgo. - if len(p.CFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() && cfg.BuildContext.Compiler == "gc" { + setError := func(msg string) { p.Error = &PackageError{ ImportStack: stk.Copy(), - Err: fmt.Sprintf("C source files not allowed when not using cgo or SWIG: %s", strings.Join(p.CFiles, " ")), + Err: msg, } - return p + } + + // The gc toolchain only permits C source files with cgo or SWIG. 
+ if len(p.CFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() && cfg.BuildContext.Compiler == "gc" { + setError(fmt.Sprintf("C source files not allowed when not using cgo or SWIG: %s", strings.Join(p.CFiles, " "))) + return + } + + // C++, Objective-C, and Fortran source files are permitted only with cgo or SWIG, + // regardless of toolchain. + if len(p.CXXFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() { + setError(fmt.Sprintf("C++ source files not allowed when not using cgo or SWIG: %s", strings.Join(p.CXXFiles, " "))) + return + } + if len(p.MFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() { + setError(fmt.Sprintf("Objective-C source files not allowed when not using cgo or SWIG: %s", strings.Join(p.MFiles, " "))) + return + } + if len(p.FFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() { + setError(fmt.Sprintf("Fortran source files not allowed when not using cgo or SWIG: %s", strings.Join(p.FFiles, " "))) + return } // Check for case-insensitive collisions of import paths. @@ -1151,23 +1143,104 @@ func (p *Package) load(stk *ImportStack, bp *build.Package, err error) *Package if other := foldPath[fold]; other == "" { foldPath[fold] = p.ImportPath } else if other != p.ImportPath { - p.Error = &PackageError{ - ImportStack: stk.Copy(), - Err: fmt.Sprintf("case-insensitive import collision: %q and %q", p.ImportPath, other), - } - return p + setError(fmt.Sprintf("case-insensitive import collision: %q and %q", p.ImportPath, other)) + return + } +} + +// LinkerDeps returns the list of linker-induced dependencies for main package p. +func LinkerDeps(p *Package) []string { + // Everything links runtime. + deps := []string{"runtime"} + + // External linking mode forces an import of runtime/cgo. + if externalLinkingForced(p) { + deps = append(deps, "runtime/cgo") + } + // On ARM with GOARM=5, it forces an import of math, for soft floating point. + if cfg.Goarch == "arm" { + deps = append(deps, "math") + } + // Using the race detector forces an import of runtime/race. 
+ if cfg.BuildRace { + deps = append(deps, "runtime/race") + } + // Using memory sanitizer forces an import of runtime/msan. + if cfg.BuildMSan { + deps = append(deps, "runtime/msan") } - if p.BinaryOnly { - // For binary-only package, use build ID from supplied package binary. - buildID, err := buildid.ReadBuildID(p.Name, p.Target) - if err == nil { - p.Internal.BuildID = buildID + return deps +} + +// externalLinkingForced reports whether external linking is being +// forced even for programs that do not use cgo. +func externalLinkingForced(p *Package) bool { + // Some targets must use external linking even inside GOROOT. + switch cfg.BuildContext.GOOS { + case "android": + return true + case "darwin": + switch cfg.BuildContext.GOARCH { + case "arm", "arm64": + return true } - } else { - computeBuildID(p) } - return p + + if !cfg.BuildContext.CgoEnabled { + return false + } + // Currently build modes c-shared, pie (on systems that do not + // support PIE with internal linking mode (currently all + // systems: issue #18968)), plugin, and -linkshared force + // external linking mode, as of course does + // -ldflags=-linkmode=external. External linking mode forces + // an import of runtime/cgo. + pieCgo := cfg.BuildBuildmode == "pie" + linkmodeExternal := false + if p != nil { + ldflags := BuildLdflags.For(p) + for i, a := range ldflags { + if a == "-linkmode=external" { + linkmodeExternal = true + } + if a == "-linkmode" && i+1 < len(ldflags) && ldflags[i+1] == "external" { + linkmodeExternal = true + } + } + } + + return cfg.BuildBuildmode == "c-shared" || cfg.BuildBuildmode == "plugin" || pieCgo || cfg.BuildLinkshared || linkmodeExternal +} + +// mkAbs rewrites list, which must be paths relative to p.Dir, +// into a sorted list of absolute paths. It edits list in place but for +// convenience also returns list back to its caller. 
+func (p *Package) mkAbs(list []string) []string { + for i, f := range list { + list[i] = filepath.Join(p.Dir, f) + } + sort.Strings(list) + return list +} + +// InternalGoFiles returns the list of Go files being built for the package, +// using absolute paths. +func (p *Package) InternalGoFiles() []string { + return p.mkAbs(str.StringList(p.GoFiles, p.CgoFiles, p.TestGoFiles, p.XTestGoFiles)) +} + +// InternalGoFiles returns the list of all Go files possibly relevant for the package, +// using absolute paths. "Possibly relevant" means that files are not excluded +// due to build tags, but files with names beginning with . or _ are still excluded. +func (p *Package) InternalAllGoFiles() []string { + var extra []string + for _, f := range p.IgnoredGoFiles { + if f != "" && f[0] != '.' || f[0] != '_' { + extra = append(extra, f) + } + } + return p.mkAbs(str.StringList(extra, p.GoFiles, p.CgoFiles, p.TestGoFiles, p.XTestGoFiles)) } // usesSwig reports whether the package needs to run SWIG. @@ -1202,517 +1275,6 @@ func PackageList(roots []*Package) []*Package { return all } -// computeStale computes the Stale flag in the package dag that starts -// at the named pkgs (command-line arguments). -func ComputeStale(pkgs ...*Package) { - for _, p := range PackageList(pkgs) { - p.Stale, p.StaleReason = isStale(p) - } -} - -// The runtime version string takes one of two forms: -// "go1.X[.Y]" for Go releases, and "devel +hash" at tip. -// Determine whether we are in a released copy by -// inspecting the version. -var isGoRelease = strings.HasPrefix(runtime.Version(), "go1") - -// isStale and computeBuildID -// -// Theory of Operation -// -// There is an installed copy of the package (or binary). -// Can we reuse the installed copy, or do we need to build a new one? -// -// We can use the installed copy if it matches what we'd get -// by building a new one. The hard part is predicting that without -// actually running a build. 
-// -// To start, we must know the set of inputs to the build process that can -// affect the generated output. At a minimum, that includes the source -// files for the package and also any compiled packages imported by those -// source files. The *Package has these, and we use them. One might also -// argue for including in the input set: the build tags, whether the race -// detector is in use, the target operating system and architecture, the -// compiler and linker binaries being used, the additional flags being -// passed to those, the cgo binary being used, the additional flags cgo -// passes to the host C compiler, the host C compiler being used, the set -// of host C include files and installed C libraries, and so on. -// We include some but not all of this information. -// -// Once we have decided on a set of inputs, we must next decide how to -// tell whether the content of that set has changed since the last build -// of p. If there have been no changes, then we assume a new build would -// produce the same result and reuse the installed package or binary. -// But if there have been changes, then we assume a new build might not -// produce the same result, so we rebuild. -// -// There are two common ways to decide whether the content of the set has -// changed: modification times and content hashes. We use a mixture of both. -// -// The use of modification times (mtimes) was pioneered by make: -// assuming that a file's mtime is an accurate record of when that file was last written, -// and assuming that the modification time of an installed package or -// binary is the time that it was built, if the mtimes of the inputs -// predate the mtime of the installed object, then the build of that -// object saw those versions of the files, and therefore a rebuild using -// those same versions would produce the same object. 
In contrast, if any -// mtime of an input is newer than the mtime of the installed object, a -// change has occurred since the build, and the build should be redone. -// -// Modification times are attractive because the logic is easy to -// understand and the file system maintains the mtimes automatically -// (less work for us). Unfortunately, there are a variety of ways in -// which the mtime approach fails to detect a change and reuses a stale -// object file incorrectly. (Making the opposite mistake, rebuilding -// unnecessarily, is only a performance problem and not a correctness -// problem, so we ignore that one.) -// -// As a warmup, one problem is that to be perfectly precise, we need to -// compare the input mtimes against the time at the beginning of the -// build, but the object file time is the time at the end of the build. -// If an input file changes after being read but before the object is -// written, the next build will see an object newer than the input and -// will incorrectly decide that the object is up to date. We make no -// attempt to detect or solve this problem. -// -// Another problem is that due to file system imprecision, an input and -// output that are actually ordered in time have the same mtime. -// This typically happens on file systems with 1-second (or, worse, -// 2-second) mtime granularity and with automated scripts that write an -// input and then immediately run a build, or vice versa. If an input and -// an output have the same mtime, the conservative behavior is to treat -// the output as out-of-date and rebuild. This can cause one or more -// spurious rebuilds, but only for 1 second, until the object finally has -// an mtime later than the input. -// -// Another problem is that binary distributions often set the mtime on -// all files to the same time. If the distribution includes both inputs -// and cached build outputs, the conservative solution to the previous -// problem will cause unnecessary rebuilds. 
Worse, in such a binary -// distribution, those rebuilds might not even have permission to update -// the cached build output. To avoid these write errors, if an input and -// output have the same mtime, we assume the output is up-to-date. -// This is the opposite of what the previous problem would have us do, -// but binary distributions are more common than instances of the -// previous problem. -// -// A variant of the last problem is that some binary distributions do not -// set the mtime on all files to the same time. Instead they let the file -// system record mtimes as the distribution is unpacked. If the outputs -// are unpacked before the inputs, they'll be older and a build will try -// to rebuild them. That rebuild might hit the same write errors as in -// the last scenario. We don't make any attempt to solve this, and we -// haven't had many reports of it. Perhaps the only time this happens is -// when people manually unpack the distribution, and most of the time -// that's done as the same user who will be using it, so an initial -// rebuild on first use succeeds quietly. -// -// More generally, people and programs change mtimes on files. The last -// few problems were specific examples of this, but it's a general problem. -// For example, instead of a binary distribution, copying a home -// directory from one directory or machine to another might copy files -// but not preserve mtimes. If the inputs are new than the outputs on the -// first machine but copied first, they end up older than the outputs on -// the second machine. -// -// Because many other build systems have the same sensitivity to mtimes, -// most programs manipulating source code take pains not to break the -// mtime assumptions. For example, Git does not set the mtime of files -// during a checkout operation, even when checking out an old version of -// the code. This decision was made specifically to work well with -// mtime-based build systems. 
-// -// The killer problem, though, for mtime-based build systems is that the -// build only has access to the mtimes of the inputs that still exist. -// If it is possible to remove an input without changing any other inputs, -// a later build will think the object is up-to-date when it is not. -// This happens for Go because a package is made up of all source -// files in a directory. If a source file is removed, there is no newer -// mtime available recording that fact. The mtime on the directory could -// be used, but it also changes when unrelated files are added to or -// removed from the directory, so including the directory mtime would -// cause unnecessary rebuilds, possibly many. It would also exacerbate -// the problems mentioned earlier, since even programs that are careful -// to maintain mtimes on files rarely maintain mtimes on directories. -// -// A variant of the last problem is when the inputs change for other -// reasons. For example, Go 1.4 and Go 1.5 both install $GOPATH/src/mypkg -// into the same target, $GOPATH/pkg/$GOOS_$GOARCH/mypkg.a. -// If Go 1.4 has built mypkg into mypkg.a, a build using Go 1.5 must -// rebuild mypkg.a, but from mtimes alone mypkg.a looks up-to-date. -// If Go 1.5 has just been installed, perhaps the compiler will have a -// newer mtime; since the compiler is considered an input, that would -// trigger a rebuild. But only once, and only the last Go 1.4 build of -// mypkg.a happened before Go 1.5 was installed. If a user has the two -// versions installed in different locations and flips back and forth, -// mtimes alone cannot tell what to do. Changing the toolchain is -// changing the set of inputs, without affecting any mtimes. -// -// To detect the set of inputs changing, we turn away from mtimes and to -// an explicit data comparison. Specifically, we build a list of the -// inputs to the build, compute its SHA1 hash, and record that as the -// ``build ID'' in the generated object. 
At the next build, we can -// recompute the build ID and compare it to the one in the generated -// object. If they differ, the list of inputs has changed, so the object -// is out of date and must be rebuilt. -// -// Because this build ID is computed before the build begins, the -// comparison does not have the race that mtime comparison does. -// -// Making the build sensitive to changes in other state is -// straightforward: include the state in the build ID hash, and if it -// changes, so does the build ID, triggering a rebuild. -// -// To detect changes in toolchain, we include the toolchain version in -// the build ID hash for package runtime, and then we include the build -// IDs of all imported packages in the build ID for p. -// -// It is natural to think about including build tags in the build ID, but -// the naive approach of just dumping the tags into the hash would cause -// spurious rebuilds. For example, 'go install' and 'go install -tags neverusedtag' -// produce the same binaries (assuming neverusedtag is never used). -// A more precise approach would be to include only tags that have an -// effect on the build. But the effect of a tag on the build is to -// include or exclude a file from the compilation, and that file list is -// already in the build ID hash. So the build ID is already tag-sensitive -// in a perfectly precise way. So we do NOT explicitly add build tags to -// the build ID hash. -// -// We do not include as part of the build ID the operating system, -// architecture, or whether the race detector is enabled, even though all -// three have an effect on the output, because that information is used -// to decide the install location. Binaries for linux and binaries for -// darwin are written to different directory trees; including that -// information in the build ID is unnecessary (although it would be -// harmless). 
-// -// TODO(rsc): Investigate the cost of putting source file content into -// the build ID hash as a replacement for the use of mtimes. Using the -// file content would avoid all the mtime problems, but it does require -// reading all the source files, something we avoid today (we read the -// beginning to find the build tags and the imports, but we stop as soon -// as we see the import block is over). If the package is stale, the compiler -// is going to read the files anyway. But if the package is up-to-date, the -// read is overhead. -// -// TODO(rsc): Investigate the complexity of making the build more -// precise about when individual results are needed. To be fully precise, -// there are two results of a compilation: the entire .a file used by the link -// and the subpiece used by later compilations (__.PKGDEF only). -// If a rebuild is needed but produces the previous __.PKGDEF, then -// no more recompilation due to the rebuilt package is needed, only -// relinking. To date, there is nothing in the Go command to express this. -// -// Special Cases -// -// When the go command makes the wrong build decision and does not -// rebuild something it should, users fall back to adding the -a flag. -// Any common use of the -a flag should be considered prima facie evidence -// that isStale is returning an incorrect false result in some important case. -// Bugs reported in the behavior of -a itself should prompt the question -// ``Why is -a being used at all? What bug does that indicate?'' -// -// There is a long history of changes to isStale to try to make -a into a -// suitable workaround for bugs in the mtime-based decisions. -// It is worth recording that history to inform (and, as much as possible, deter) future changes. -// -// (1) Before the build IDs were introduced, building with alternate tags -// would happily reuse installed objects built without those tags. 
-// For example, "go build -tags netgo myprog.go" would use the installed -// copy of package net, even if that copy had been built without netgo. -// (The netgo tag controls whether package net uses cgo or pure Go for -// functionality such as name resolution.) -// Using the installed non-netgo package defeats the purpose. -// -// Users worked around this with "go build -tags netgo -a myprog.go". -// -// Build IDs have made that workaround unnecessary: -// "go build -tags netgo myprog.go" -// cannot use a non-netgo copy of package net. -// -// (2) Before the build IDs were introduced, building with different toolchains, -// especially changing between toolchains, tried to reuse objects stored in -// $GOPATH/pkg, resulting in link-time errors about object file mismatches. -// -// Users worked around this with "go install -a ./...". -// -// Build IDs have made that workaround unnecessary: -// "go install ./..." will rebuild any objects it finds that were built against -// a different toolchain. -// -// (3) The common use of "go install -a ./..." led to reports of problems -// when the -a forced the rebuild of the standard library, which for some -// users was not writable. Because we didn't understand that the real -// problem was the bug -a was working around, we changed -a not to -// apply to the standard library. -// -// (4) The common use of "go build -tags netgo -a myprog.go" broke -// when we changed -a not to apply to the standard library, because -// if go build doesn't rebuild package net, it uses the non-netgo version. -// -// Users worked around this with "go build -tags netgo -installsuffix barf myprog.go". -// The -installsuffix here is making the go command look for packages -// in pkg/$GOOS_$GOARCH_barf instead of pkg/$GOOS_$GOARCH. -// Since the former presumably doesn't exist, go build decides to rebuild -// everything, including the standard library. 
Since go build doesn't -// install anything it builds, nothing is ever written to pkg/$GOOS_$GOARCH_barf, -// so repeated invocations continue to work. -// -// If the use of -a wasn't a red flag, the use of -installsuffix to point to -// a non-existent directory in a command that installs nothing should -// have been. -// -// (5) Now that (1) and (2) no longer need -a, we have removed the kludge -// introduced in (3): once again, -a means ``rebuild everything,'' not -// ``rebuild everything except the standard library.'' Only Go 1.4 had -// the restricted meaning. -// -// In addition to these cases trying to trigger rebuilds, there are -// special cases trying NOT to trigger rebuilds. The main one is that for -// a variety of reasons (see above), the install process for a Go release -// cannot be relied upon to set the mtimes such that the go command will -// think the standard library is up to date. So the mtime evidence is -// ignored for the standard library if we find ourselves in a release -// version of Go. Build ID-based staleness checks still apply to the -// standard library, even in release versions. This makes -// 'go build -tags netgo' work, among other things. - -// isStale reports whether package p needs to be rebuilt, -// along with the reason why. -func isStale(p *Package) (bool, string) { - if p.Standard && (p.ImportPath == "unsafe" || cfg.BuildContext.Compiler == "gccgo") { - // fake, builtin package - return false, "builtin package" - } - if p.Error != nil { - return true, "errors loading package" - } - if p.Stale { - return true, p.StaleReason - } - - // If this is a package with no source code, it cannot be rebuilt. - // If the binary is missing, we mark the package stale so that - // if a rebuild is needed, that rebuild attempt will produce a useful error. - // (Some commands, such as 'go list', do not attempt to rebuild.) - if p.BinaryOnly { - if p.Internal.Target == "" { - // Fail if a build is attempted. 
- return true, "no source code for package, but no install target" - } - if _, err := os.Stat(p.Internal.Target); err != nil { - // Fail if a build is attempted. - return true, "no source code for package, but cannot access install target: " + err.Error() - } - return false, "no source code for package" - } - - // If the -a flag is given, rebuild everything. - if cfg.BuildA { - return true, "build -a flag in use" - } - - // If there's no install target, we have to rebuild. - if p.Internal.Target == "" { - return true, "no install target" - } - - // Package is stale if completely unbuilt. - fi, err := os.Stat(p.Internal.Target) - if err != nil { - return true, "cannot stat install target" - } - - // Package is stale if the expected build ID differs from the - // recorded build ID. This catches changes like a source file - // being removed from a package directory. See issue 3895. - // It also catches changes in build tags that affect the set of - // files being compiled. See issue 9369. - // It also catches changes in toolchain, like when flipping between - // two versions of Go compiling a single GOPATH. - // See issue 8290 and issue 10702. - targetBuildID, err := buildid.ReadBuildID(p.Name, p.Target) - if err == nil && targetBuildID != p.Internal.BuildID { - return true, "build ID mismatch" - } - - // Package is stale if a dependency is. - for _, p1 := range p.Internal.Deps { - if p1.Stale { - return true, "stale dependency" - } - } - - // The checks above are content-based staleness. - // We assume they are always accurate. - // - // The checks below are mtime-based staleness. - // We hope they are accurate, but we know that they fail in the case of - // prebuilt Go installations that don't preserve the build mtimes - // (for example, if the pkg/ mtimes are before the src/ mtimes). - // See the large comment above isStale for details. 
- - // If we are running a release copy of Go and didn't find a content-based - // reason to rebuild the standard packages, do not rebuild them. - // They may not be writable anyway, but they are certainly not changing. - // This makes 'go build' skip the standard packages when - // using an official release, even when the mtimes have been changed. - // See issue 3036, issue 3149, issue 4106, issue 8290. - // (If a change to a release tree must be made by hand, the way to force the - // install is to run make.bash, which will remove the old package archives - // before rebuilding.) - if p.Standard && isGoRelease { - return false, "standard package in Go release distribution" - } - - // Time-based staleness. - - built := fi.ModTime() - - olderThan := func(file string) bool { - fi, err := os.Stat(file) - return err != nil || fi.ModTime().After(built) - } - - // Package is stale if a dependency is, or if a dependency is newer. - for _, p1 := range p.Internal.Deps { - if p1.Internal.Target != "" && olderThan(p1.Internal.Target) { - return true, "newer dependency" - } - } - - // As a courtesy to developers installing new versions of the compiler - // frequently, define that packages are stale if they are - // older than the compiler, and commands if they are older than - // the linker. This heuristic will not work if the binaries are - // back-dated, as some binary distributions may do, but it does handle - // a very common case. - // See issue 3036. - // Exclude $GOROOT, under the assumption that people working on - // the compiler may want to control when everything gets rebuilt, - // and people updating the Go repository will run make.bash or all.bash - // and get a full rebuild anyway. - // Excluding $GOROOT used to also fix issue 4106, but that's now - // taken care of above (at least when the installed Go is a released version). 
- if p.Root != cfg.GOROOT { - if olderThan(cfg.BuildToolchainCompiler()) { - return true, "newer compiler" - } - if p.Internal.Build.IsCommand() && olderThan(cfg.BuildToolchainLinker()) { - return true, "newer linker" - } - } - - // Note: Until Go 1.5, we had an additional shortcut here. - // We built a list of the workspace roots ($GOROOT, each $GOPATH) - // containing targets directly named on the command line, - // and if p were not in any of those, it would be treated as up-to-date - // as long as it is built. The goal was to avoid rebuilding a system-installed - // $GOROOT, unless something from $GOROOT were explicitly named - // on the command line (like go install math). - // That's now handled by the isGoRelease clause above. - // The other effect of the shortcut was to isolate different entries in - // $GOPATH from each other. This had the unfortunate effect that - // if you had (say), GOPATH listing two entries, one for commands - // and one for libraries, and you did a 'git pull' in the library one - // and then tried 'go install commands/...', it would build the new libraries - // during the first build (because they wouldn't have been installed at all) - // but then subsequent builds would not rebuild the libraries, even if the - // mtimes indicate they are stale, because the different GOPATH entries - // were treated differently. This behavior was confusing when using - // non-trivial GOPATHs, which were particularly common with some - // code management conventions, like the original godep. - // Since the $GOROOT case (the original motivation) is handled separately, - // we no longer put a barrier between the different $GOPATH entries. - // - // One implication of this is that if there is a system directory for - // non-standard Go packages that is included in $GOPATH, the mtimes - // on those compiled packages must be no earlier than the mtimes - // on the source files. 
Since most distributions use the same mtime - // for all files in a tree, they will be unaffected. People using plain - // tar x to extract system-installed packages will need to adjust mtimes, - // but it's better to force them to get the mtimes right than to ignore - // the mtimes and thereby do the wrong thing in common use cases. - // - // So there is no GOPATH vs GOPATH shortcut here anymore. - // - // If something needs to come back here, we could try writing a dummy - // file with a random name to the $GOPATH/pkg directory (and removing it) - // to test for write access, and then skip GOPATH roots we don't have write - // access to. But hopefully we can just use the mtimes always. - - srcs := str.StringList(p.GoFiles, p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.CgoFiles, p.SysoFiles, p.SwigFiles, p.SwigCXXFiles) - for _, src := range srcs { - if olderThan(filepath.Join(p.Dir, src)) { - return true, "newer source file" - } - } - - return false, "" -} - -// computeBuildID computes the build ID for p, leaving it in p.Internal.BuildID. -// Build ID is a hash of the information we want to detect changes in. -// See the long comment in isStale for details. -func computeBuildID(p *Package) { - h := sha1.New() - - // Include the list of files compiled as part of the package. - // This lets us detect removed files. See issue 3895. - inputFiles := str.StringList( - p.GoFiles, - p.CgoFiles, - p.CFiles, - p.CXXFiles, - p.FFiles, - p.MFiles, - p.HFiles, - p.SFiles, - p.SysoFiles, - p.SwigFiles, - p.SwigCXXFiles, - ) - for _, file := range inputFiles { - fmt.Fprintf(h, "file %s\n", file) - } - - // Include the content of runtime/internal/sys/zversion.go in the hash - // for package runtime. This will give package runtime a - // different build ID in each Go release. 
- if p.Standard && p.ImportPath == "runtime/internal/sys" && cfg.BuildContext.Compiler != "gccgo" { - data, err := ioutil.ReadFile(filepath.Join(p.Dir, "zversion.go")) - if os.IsNotExist(err) { - p.Stale = true - p.StaleReason = fmt.Sprintf("missing zversion.go") - } else if err != nil { - base.Fatalf("go: %s", err) - } - fmt.Fprintf(h, "zversion %q\n", string(data)) - - // Add environment variables that affect code generation. - switch cfg.BuildContext.GOARCH { - case "arm": - fmt.Fprintf(h, "GOARM=%s\n", cfg.GOARM) - case "386": - fmt.Fprintf(h, "GO386=%s\n", cfg.GO386) - } - } - - // Include the build IDs of any dependencies in the hash. - // This, combined with the runtime/zversion content, - // will cause packages to have different build IDs when - // compiled with different Go releases. - // This helps the go command know to recompile when - // people use the same GOPATH but switch between - // different Go releases. See issue 10702. - // This is also a better fix for issue 8290. - for _, p1 := range p.Internal.Deps { - fmt.Fprintf(h, "dep %s %s\n", p1.ImportPath, p1.Internal.BuildID) - } - - p.Internal.BuildID = fmt.Sprintf("%x", h.Sum(nil)) -} - var cmdCache = map[string]*Package{} func ClearCmdCache() { @@ -1831,7 +1393,6 @@ func PackagesAndErrors(args []string) []*Package { seenPkg[pkg] = true pkgs = append(pkgs, pkg) } - ComputeStale(pkgs...) 
return pkgs } @@ -1932,13 +1493,13 @@ func GoFilesPackage(gofiles []string) *Package { bp, err := ctxt.ImportDir(dir, 0) pkg := new(Package) pkg.Internal.Local = true - pkg.Internal.Cmdline = true + pkg.Internal.CmdlineFiles = true stk.Push("main") pkg.load(&stk, bp, err) stk.Pop() pkg.Internal.LocalPrefix = dirToImportPath(dir) pkg.ImportPath = "command-line-arguments" - pkg.Internal.Target = "" + pkg.Target = "" if pkg.Name == "main" { _, elem := filepath.Split(gofiles[0]) @@ -1947,14 +1508,9 @@ func GoFilesPackage(gofiles []string) *Package { cfg.BuildO = exe } if cfg.GOBIN != "" { - pkg.Internal.Target = filepath.Join(cfg.GOBIN, exe) + pkg.Target = filepath.Join(cfg.GOBIN, exe) } } - pkg.Target = pkg.Internal.Target - pkg.Stale = true - pkg.StaleReason = "files named on command line" - - ComputeStale(pkg) return pkg } diff --git a/src/cmd/go/internal/load/search.go b/src/cmd/go/internal/load/search.go index 0c7d9ce0e6c..595de079046 100644 --- a/src/cmd/go/internal/load/search.go +++ b/src/cmd/go/internal/load/search.go @@ -266,6 +266,50 @@ func matchPattern(pattern string) func(name string) bool { } } +// MatchPackage(pattern, cwd)(p) reports whether package p matches pattern in the working directory cwd. +func MatchPackage(pattern, cwd string) func(*Package) bool { + switch { + case strings.HasPrefix(pattern, "./") || strings.HasPrefix(pattern, "../") || pattern == "." || pattern == "..": + // Split pattern into leading pattern-free directory path + // (including all . and .. elements) and the final pattern. + var dir string + i := strings.Index(pattern, "...") + if i < 0 { + dir, pattern = pattern, "" + } else { + j := strings.LastIndex(pattern[:i], "/") + dir, pattern = pattern[:j], pattern[j+1:] + } + dir = filepath.Join(cwd, dir) + if pattern == "" { + return func(p *Package) bool { return p.Dir == dir } + } + matchPath := matchPattern(pattern) + return func(p *Package) bool { + // Compute relative path to dir and see if it matches the pattern. 
+ rel, err := filepath.Rel(dir, p.Dir) + if err != nil { + // Cannot make relative - e.g. different drive letters on Windows. + return false + } + rel = filepath.ToSlash(rel) + if rel == ".." || strings.HasPrefix(rel, "../") { + return false + } + return matchPath(rel) + } + case pattern == "all": + return func(p *Package) bool { return true } + case pattern == "std": + return func(p *Package) bool { return p.Standard } + case pattern == "cmd": + return func(p *Package) bool { return p.Standard && strings.HasPrefix(p.ImportPath, "cmd/") } + default: + matchPath := matchPattern(pattern) + return func(p *Package) bool { return matchPath(p.ImportPath) } + } +} + // replaceVendor returns the result of replacing // non-trailing vendor path elements in x with repl. func replaceVendor(x, repl string) string { @@ -302,6 +346,9 @@ func ImportPaths(args []string) []string { // ImportPathsNoDotExpansion returns the import paths to use for the given // command line, but it does no ... expansion. func ImportPathsNoDotExpansion(args []string) []string { + if cmdlineMatchers == nil { + SetCmdlinePatterns(args) + } if len(args) == 0 { return []string{"."} } @@ -332,7 +379,7 @@ func ImportPathsNoDotExpansion(args []string) []string { return out } -// isMetaPackage checks if name is a reserved package name that expands to multiple packages. +// IsMetaPackage checks if name is a reserved package name that expands to multiple packages. func IsMetaPackage(name string) bool { return name == "std" || name == "cmd" || name == "all" } diff --git a/src/cmd/go/internal/load/testgo.go b/src/cmd/go/internal/load/testgo.go deleted file mode 100644 index 7734048f5c9..00000000000 --- a/src/cmd/go/internal/load/testgo.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file contains extra hooks for testing the go command. 
-// It is compiled into the Go binary only when building the -// test copy; it does not get compiled into the standard go -// command, so these testing hooks are not present in the -// go command that everyone uses. - -// +build testgo - -package load - -import "os" - -func init() { - if v := os.Getenv("TESTGO_IS_GO_RELEASE"); v != "" { - isGoRelease = v == "1" - } -} diff --git a/src/cmd/go/internal/run/run.go b/src/cmd/go/internal/run/run.go index 6e276c28ec1..ce24748f4e3 100644 --- a/src/cmd/go/internal/run/run.go +++ b/src/cmd/go/internal/run/run.go @@ -52,8 +52,7 @@ func printStderr(args ...interface{}) (int, error) { } func runRun(cmd *base.Command, args []string) { - work.InstrumentInit() - work.BuildModeInit() + work.BuildInit() var b work.Builder b.Init() b.Print = printStderr @@ -94,7 +93,7 @@ func runRun(cmd *base.Command, args []string) { if p.Name != "main" { base.Fatalf("go run: cannot run non-main package") } - p.Internal.Target = "" // must build - not up to date + p.Target = "" // must build - not up to date var src string if len(p.GoFiles) > 0 { src = p.GoFiles[0] @@ -110,8 +109,8 @@ func runRun(cmd *base.Command, args []string) { base.Fatalf("go run: no suitable source files%s", hint) } p.Internal.ExeName = src[:len(src)-len(".go")] // name temporary executable for first go file - a1 := b.Action(work.ModeBuild, work.ModeBuild, p) - a := &work.Action{Func: buildRunProgram, Args: cmdArgs, Deps: []*work.Action{a1}} + a1 := b.LinkAction(work.ModeBuild, work.ModeBuild, p) + a := &work.Action{Mode: "go run", Func: buildRunProgram, Args: cmdArgs, Deps: []*work.Action{a1}} b.Do(a) } diff --git a/src/cmd/go/internal/test/cover.go b/src/cmd/go/internal/test/cover.go new file mode 100644 index 00000000000..12538b46564 --- /dev/null +++ b/src/cmd/go/internal/test/cover.go @@ -0,0 +1,84 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package test + +import ( + "cmd/go/internal/base" + "fmt" + "io" + "os" + "path/filepath" + "sync" +) + +var coverMerge struct { + f *os.File + sync.Mutex // for f.Write +} + +// initCoverProfile initializes the test coverage profile. +// It must be run before any calls to mergeCoverProfile or closeCoverProfile. +// Using this function clears the profile in case it existed from a previous run, +// or in case it doesn't exist and the test is going to fail to create it (or not run). +func initCoverProfile() { + if testCoverProfile == "" { + return + } + if !filepath.IsAbs(testCoverProfile) && testOutputDir != "" { + testCoverProfile = filepath.Join(testOutputDir, testCoverProfile) + } + + // No mutex - caller's responsibility to call with no racing goroutines. + f, err := os.Create(testCoverProfile) + if err != nil { + base.Fatalf("%v", err) + } + _, err = fmt.Fprintf(f, "mode: %s\n", testCoverMode) + if err != nil { + base.Fatalf("%v", err) + } + coverMerge.f = f +} + +// mergeCoverProfile merges file into the profile stored in testCoverProfile. +// It prints any errors it encounters to ew. +func mergeCoverProfile(ew io.Writer, file string) { + if coverMerge.f == nil { + return + } + coverMerge.Lock() + defer coverMerge.Unlock() + + expect := fmt.Sprintf("mode: %s\n", testCoverMode) + buf := make([]byte, len(expect)) + r, err := os.Open(file) + if err != nil { + // Test did not create profile, which is OK. 
+ return + } + defer r.Close() + + n, err := io.ReadFull(r, buf) + if n == 0 { + return + } + if err != nil || string(buf) != expect { + fmt.Fprintf(ew, "error: test wrote malformed coverage profile.\n") + return + } + _, err = io.Copy(coverMerge.f, r) + if err != nil { + fmt.Fprintf(ew, "error: saving coverage profile: %v\n", err) + } +} + +func closeCoverProfile() { + if coverMerge.f == nil { + return + } + if err := coverMerge.f.Close(); err != nil { + base.Errorf("closing coverage profile: %v", err) + } +} diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go index ebebffd7777..74a34ad9c49 100644 --- a/src/cmd/go/internal/test/test.go +++ b/src/cmd/go/internal/test/test.go @@ -13,24 +13,29 @@ import ( "go/doc" "go/parser" "go/token" + "io" + "io/ioutil" "os" "os/exec" "path" "path/filepath" "regexp" - "runtime" "sort" + "strconv" "strings" + "sync" "text/template" "time" "unicode" "unicode/utf8" "cmd/go/internal/base" + "cmd/go/internal/cache" "cmd/go/internal/cfg" "cmd/go/internal/load" "cmd/go/internal/str" "cmd/go/internal/work" + "cmd/internal/test2json" ) // Break init loop. @@ -57,10 +62,10 @@ followed by detailed output for each failed package. 'Go test' recompiles each package along with any files with names matching the file pattern "*_test.go". -Files whose names begin with "_" (including "_test.go") or "." are ignored. These additional files can contain test functions, benchmark functions, and example functions. See 'go help testfunc' for more. Each listed package causes the execution of a separate test binary. +Files whose names begin with "_" (including "_test.go") or "." are ignored. Test files that declare a package with the suffix "_test" will be compiled as a separate package, and then linked and run with the main test binary. @@ -68,11 +73,46 @@ separate package, and then linked and run with the main test binary. 
The go tool will ignore a directory named "testdata", making it available to hold ancillary data needed by the tests. -By default, go test needs no arguments. It compiles and tests the package -with source in the current directory, including tests, and runs the tests. +As part of building a test binary, go test runs go vet on the package +and its test source files to identify significant problems. If go vet +finds any problems, go test reports those and does not run the test binary. +Only a high-confidence subset of the default go vet checks are used. +To disable the running of go vet, use the -vet=off flag. -The package is built in a temporary directory so it does not interfere with the -non-test installation. +Go test runs in two different modes: local directory mode when invoked with +no package arguments (for example, 'go test'), and package list mode when +invoked with package arguments (for example 'go test math', 'go test ./...', +and even 'go test .'). + +In local directory mode, go test compiles and tests the package sources +found in the current directory and then runs the resulting test binary. +In this mode, caching (discussed below) is disabled. After the package test +finishes, go test prints a summary line showing the test status ('ok' or 'FAIL'), +package name, and elapsed time. + +In package list mode, go test compiles and tests each of the packages +listed on the command line. If a package test passes, go test prints only +the final 'ok' summary line. If a package test fails, go test prints the +full test output. If invoked with the -bench or -v flag, go test prints +the full output even for passing package tests, in order to display the +requested benchmark results or verbose logging. + +All test output and summary lines are printed to the go command's standard +output, even if the test printed them to its own standard error. +(The go command's standard error is reserved for printing errors building +the tests.) 
+ +In package list mode, go test also caches successful package test results. +If go test has cached a previous test run using the same test binary and +the same command line consisting entirely of cacheable test flags +(defined as -cpu, -list, -parallel, -run, -short, and -v), +go test will redisplay the previous output instead of running the test +binary again. In the summary line, go test prints '(cached)' in place of +the elapsed time. To disable test caching, use any test flag or argument +other than the cacheable flags. The idiomatic way to disable test caching +explicitly is to use -count=1. A cached result is treated as executing in +no time at all, so a successful package test result will be cached and reused +regardless of -timeout setting. ` + strings.TrimSpace(testFlag1) + ` See 'go help testflag' for details. @@ -105,6 +145,10 @@ In addition to the build flags, the flags handled by 'go test' itself are: Install packages that are dependencies of the test. Do not run the test. + -json + Convert test output to JSON suitable for automated processing. + See 'go doc test2json' for the encoding details. + -o file Compile the test binary to the named file. The test still runs (unless -c or -i is specified). @@ -143,7 +187,7 @@ control the execution of any test: const testFlag2 = ` -bench regexp Run only those benchmarks matching a regular expression. - By default, no benchmarks are run. + By default, no benchmarks are run. To run all benchmarks, use '-bench .' or '-bench=.'. The regular expression is split by unbracketed slash (/) characters into a sequence of regular expressions, and each @@ -182,10 +226,10 @@ const testFlag2 = ` significantly more expensive. Sets -cover. - -coverpkg pkg1,pkg2,pkg3 - Apply coverage analysis in each test to the given list of packages. + -coverpkg pattern1,pattern2,pattern3 + Apply coverage analysis in each test to packages matching the patterns. The default is for each test to analyze only the package being tested. 
- Packages are specified as import paths. + See 'go help packages' for a description of package patterns. Sets -cover. -cpu 1,2,4 @@ -193,6 +237,9 @@ const testFlag2 = ` benchmarks should be executed. The default is the current value of GOMAXPROCS. + -failfast + Do not start new tests after the first test failure. + -list regexp List tests, benchmarks, or examples matching the regular expression. No tests, benchmarks or examples will be run. This will only @@ -225,12 +272,20 @@ const testFlag2 = ` -timeout d If a test binary runs longer than duration d, panic. + If d is 0, the timeout is disabled. The default is 10 minutes (10m). -v Verbose output: log all tests as they are run. Also print all text from Log and Logf calls even if the test succeeds. + -vet list + Configure the invocation of "go vet" during "go test" + to use the comma-separated list of vet checks. + If list is empty, "go test" runs "go vet" with a curated list of + checks believed to be always worth addressing. + If list is "off", "go test" does not run "go vet" at all. 
+ The following flags are also recognized by 'go test' and can be used to profile the tests during execution: @@ -411,36 +466,68 @@ var ( testCoverMode string // -covermode flag testCoverPaths []string // -coverpkg flag testCoverPkgs []*load.Package // -coverpkg flag + testCoverProfile string // -coverprofile flag + testOutputDir string // -outputdir flag testO string // -o flag - testProfile bool // some profiling flag + testProfile string // profiling flag that limits test to one package testNeedBinary bool // profile needs to keep binary around + testJSON bool // -json flag testV bool // -v flag testTimeout string // -timeout flag testArgs []string testBench bool testList bool - testStreamOutput bool // show output as it is generated - testShowPass bool // show passing output + testShowPass bool // show passing output + testVetList string // -vet flag + pkgArgs []string + pkgs []*load.Package testKillTimeout = 10 * time.Minute + testCacheExpire time.Time // ignore cached test results before this time ) -var testMainDeps = map[string]bool{ +var testMainDeps = []string{ // Dependencies for testmain. - "testing": true, - "testing/internal/testdeps": true, - "os": true, + "os", + "testing", + "testing/internal/testdeps", +} + +// testVetFlags is the list of flags to pass to vet when invoked automatically during go test. +var testVetFlags = []string{ + // TODO(rsc): Decide which tests are enabled by default. + // See golang.org/issue/18085. 
+ // "-asmdecl", + // "-assign", + "-atomic", + "-bool", + "-buildtags", + // "-cgocall", + // "-composites", + // "-copylocks", + // "-httpresponse", + // "-lostcancel", + // "-methods", + "-nilfunc", + "-printf", + // "-rangeloops", + // "-shift", + // "-structtags", + // "-tests", + // "-unreachable", + // "-unsafeptr", + // "-unusedresult", } func runTest(cmd *base.Command, args []string) { - var pkgArgs []string pkgArgs, testArgs = testFlags(args) work.FindExecCmd() // initialize cached result - work.InstrumentInit() - work.BuildModeInit() - pkgs := load.PackagesForBuild(pkgArgs) + work.BuildInit() + work.VetFlags = testVetFlags + + pkgs = load.PackagesForBuild(pkgArgs) if len(pkgs) == 0 { base.Fatalf("no packages to test") } @@ -451,9 +538,11 @@ func runTest(cmd *base.Command, args []string) { if testO != "" && len(pkgs) != 1 { base.Fatalf("cannot use -o flag with multiple packages") } - if testProfile && len(pkgs) != 1 { - base.Fatalf("cannot use test profile flag with multiple packages") + if testProfile != "" && len(pkgs) != 1 { + base.Fatalf("cannot use %s flag with multiple packages", testProfile) } + initCoverProfile() + defer closeCoverProfile() // If a test timeout was given and is parseable, set our kill timeout // to that timeout plus one minute. This is a backup alarm in case @@ -461,6 +550,10 @@ func runTest(cmd *base.Command, args []string) { // timer does not get a chance to fire. if dt, err := time.ParseDuration(testTimeout); err == nil && dt > 0 { testKillTimeout = dt + 1*time.Minute + } else if err == nil && dt == 0 { + // An explicit zero disables the test timeout. + // Let it have one century (almost) before we kill it. + testKillTimeout = 100 * 365 * 24 * time.Hour } // show passing test output (after buffering) with -v flag. @@ -468,21 +561,22 @@ func runTest(cmd *base.Command, args []string) { // otherwise the output will get mixed. 
testShowPass = testV || testList - // stream test output (no buffering) when no package has - // been given on the command line (implicit current directory) - // or when benchmarking. - // Also stream if we're showing output anyway with a - // single package under test or if parallelism is set to 1. - // In these cases, streaming the output produces the same result - // as not streaming, just more immediately. - testStreamOutput = len(pkgArgs) == 0 || testBench || - (testShowPass && (len(pkgs) == 1 || cfg.BuildP == 1)) - // For 'go test -i -o x.test', we want to build x.test. Imply -c to make the logic easier. if cfg.BuildI && testO != "" { testC = true } + // Read testcache expiration time, if present. + // (We implement go clean -testcache by writing an expiration date + // instead of searching out and deleting test result cache entries.) + if dir := cache.DefaultDir(); dir != "off" { + if data, _ := ioutil.ReadFile(filepath.Join(dir, "testexpire.txt")); len(data) > 0 && data[len(data)-1] == '\n' { + if t, err := strconv.ParseInt(string(data[:len(data)-1]), 10, 64); err == nil { + testCacheExpire = time.Unix(0, t) + } + } + } + var b work.Builder b.Init() @@ -490,7 +584,7 @@ func runTest(cmd *base.Command, args []string) { cfg.BuildV = testV deps := make(map[string]bool) - for dep := range testMainDeps { + for _, dep := range testMainDeps { deps[dep] = true } @@ -511,9 +605,6 @@ func runTest(cmd *base.Command, args []string) { if deps["C"] { delete(deps, "C") deps["runtime/cgo"] = true - if cfg.Goos == runtime.GOOS && cfg.Goarch == runtime.GOARCH && !cfg.BuildRace && !cfg.BuildMSan { - deps["cmd/cgo"] = true - } } // Ignore pseudo-packages. 
delete(deps, "unsafe") @@ -526,9 +617,9 @@ func runTest(cmd *base.Command, args []string) { } sort.Strings(all) - a := &work.Action{} + a := &work.Action{Mode: "go test -i"} for _, p := range load.PackagesForBuild(all) { - a.Deps = append(a.Deps, b.Action(work.ModeInstall, work.ModeInstall, p)) + a.Deps = append(a.Deps, b.CompileAction(work.ModeInstall, work.ModeInstall, p)) } b.Do(a) if !testC || a.Failed { @@ -540,21 +631,30 @@ func runTest(cmd *base.Command, args []string) { var builds, runs, prints []*work.Action if testCoverPaths != nil { - // Load packages that were asked about for coverage. - // packagesForBuild exits if the packages cannot be loaded. - testCoverPkgs = load.PackagesForBuild(testCoverPaths) + match := make([]func(*load.Package) bool, len(testCoverPaths)) + matched := make([]bool, len(testCoverPaths)) + for i := range testCoverPaths { + match[i] = load.MatchPackage(testCoverPaths[i], base.Cwd) + } - // Warn about -coverpkg arguments that are not actually used. - used := make(map[string]bool) - for _, p := range pkgs { - used[p.ImportPath] = true - for _, dep := range p.Deps { - used[dep] = true + // Select for coverage all dependencies matching the testCoverPaths patterns. + for _, p := range load.PackageList(pkgs) { + haveMatch := false + for i := range testCoverPaths { + if match[i](p) { + matched[i] = true + haveMatch = true + } + } + if haveMatch { + testCoverPkgs = append(testCoverPkgs, p) } } - for _, p := range testCoverPkgs { - if !used[p.ImportPath] { - fmt.Fprintf(os.Stderr, "warning: no packages being tested depend on %s\n", p.ImportPath) + + // Warn about -coverpkg arguments that are not actually used. 
+ for i := range testCoverPaths { + if !matched[i] { + fmt.Fprintf(os.Stderr, "warning: no packages being tested depend on matches for pattern %s\n", testCoverPaths[i]) } } @@ -564,15 +664,15 @@ func runTest(cmd *base.Command, args []string) { if p.ImportPath == "unsafe" { continue } - p.Stale = true // rebuild - p.StaleReason = "rebuild for coverage" - p.Internal.Fake = true // do not warn about rebuild p.Internal.CoverMode = testCoverMode var coverFiles []string coverFiles = append(coverFiles, p.GoFiles...) coverFiles = append(coverFiles, p.CgoFiles...) coverFiles = append(coverFiles, p.TestGoFiles...) p.Internal.CoverVars = declareCoverVars(p.ImportPath, coverFiles...) + if testCover && testCoverMode == "atomic" { + ensureImport(p, "sync/atomic") + } } } @@ -604,7 +704,7 @@ func runTest(cmd *base.Command, args []string) { } // Ultimately the goal is to print the output. - root := &work.Action{Deps: prints} + root := &work.Action{Mode: "go test", Deps: prints} // Force the printing of results to happen in order, // one at a time. @@ -627,68 +727,23 @@ func runTest(cmd *base.Command, args []string) { } } - // If we are building any out-of-date packages other - // than those under test, warn. - okBuild := map[*load.Package]bool{} - for _, p := range pkgs { - okBuild[p] = true - } - warned := false - for _, a := range work.ActionList(root) { - if a.Package == nil || okBuild[a.Package] { - continue - } - okBuild[a.Package] = true // warn at most once - - // Don't warn about packages being rebuilt because of - // things like coverage analysis. 
- for _, p1 := range a.Package.Internal.Imports { - if p1.Internal.Fake { - a.Package.Internal.Fake = true - } - } - - if a.Func != nil && !okBuild[a.Package] && !a.Package.Internal.Fake && !a.Package.Internal.Local { - if !warned { - fmt.Fprintf(os.Stderr, "warning: building out-of-date packages:\n") - warned = true - } - fmt.Fprintf(os.Stderr, "\t%s\n", a.Package.ImportPath) - } - } - if warned { - args := strings.Join(pkgArgs, " ") - if args != "" { - args = " " + args - } - extraOpts := "" - if cfg.BuildRace { - extraOpts = "-race " - } - if cfg.BuildMSan { - extraOpts = "-msan " - } - fmt.Fprintf(os.Stderr, "installing these packages with 'go test %s-i%s' will speed future tests.\n\n", extraOpts, args) - } - b.Do(root) } // ensures that package p imports the named package func ensureImport(p *load.Package, pkg string) { - for _, d := range p.Internal.Deps { + for _, d := range p.Internal.Imports { if d.Name == pkg { return } } - a := load.LoadPackage(pkg, &load.ImportStack{}) - if a.Error != nil { - base.Fatalf("load %s: %v", pkg, a.Error) + p1 := load.LoadPackage(pkg, &load.ImportStack{}) + if p1.Error != nil { + base.Fatalf("load %s: %v", pkg, p1.Error) } - load.ComputeStale(a) - p.Internal.Imports = append(p.Internal.Imports, a) + p.Internal.Imports = append(p.Internal.Imports, p1) } var windowsBadWords = []string{ @@ -700,9 +755,10 @@ var windowsBadWords = []string{ func builderTest(b *work.Builder, p *load.Package) (buildAction, runAction, printAction *work.Action, err error) { if len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 { - build := b.Action(work.ModeBuild, work.ModeBuild, p) - run := &work.Action{Package: p, Deps: []*work.Action{build}} - print := &work.Action{Func: builderNoTest, Package: p, Deps: []*work.Action{run}} + build := b.CompileAction(work.ModeBuild, work.ModeBuild, p) + run := &work.Action{Mode: "test run", Package: p, Deps: []*work.Action{build}} + addTestVet(b, p, run, nil) + print := &work.Action{Mode: "test print", Func: 
builderNoTest, Package: p, Deps: []*work.Action{run}} return build, run, print, nil } @@ -715,6 +771,7 @@ func builderTest(b *work.Builder, p *load.Package) (buildAction, runAction, prin var imports, ximports []*load.Package var stk load.ImportStack stk.Push(p.ImportPath + " (test)") + rawTestImports := str.StringList(p.TestImports) for i, path := range p.TestImports { p1 := load.LoadImport(path, p.Dir, p, &stk, p.Internal.Build.TestImportPos[path], load.UseVendor) if p1.Error != nil { @@ -742,6 +799,7 @@ func builderTest(b *work.Builder, p *load.Package) (buildAction, runAction, prin stk.Pop() stk.Push(p.ImportPath + "_test") pxtestNeedsPtest := false + rawXTestImports := str.StringList(p.XTestImports) for i, path := range p.XTestImports { p1 := load.LoadImport(path, p.Dir, p, &stk, p.Internal.Build.XTestImportPos[path], load.UseVendor) if p1.Error != nil { @@ -773,29 +831,6 @@ func builderTest(b *work.Builder, p *load.Package) (buildAction, runAction, prin } testBinary := elem + ".test" - // The ptest package needs to be importable under the - // same import path that p has, but we cannot put it in - // the usual place in the temporary tree, because then - // other tests will see it as the real package. - // Instead we make a _test directory under the import path - // and then repeat the import path there. We tell the - // compiler and linker to look in that _test directory first. - // - // That is, if the package under test is unicode/utf8, - // then the normal place to write the package archive is - // $WORK/unicode/utf8.a, but we write the test package archive to - // $WORK/unicode/utf8/_test/unicode/utf8.a. - // We write the external test package archive to - // $WORK/unicode/utf8/_test/unicode/utf8_test.a. - testDir := filepath.Join(b.WorkDir, filepath.FromSlash(p.ImportPath+"/_test")) - ptestObj := work.BuildToolchain.Pkgpath(testDir, p) - - // Create the directory for the .a files. 
- ptestDir, _ := filepath.Split(ptestObj) - if err := b.Mkdir(ptestDir); err != nil { - return nil, nil, nil, err - } - // Should we apply coverage analysis locally, // only for this package and only for this test? // Yes, if -cover is on but -coverpkg has not specified @@ -809,14 +844,22 @@ func builderTest(b *work.Builder, p *load.Package) (buildAction, runAction, prin ptest.GoFiles = nil ptest.GoFiles = append(ptest.GoFiles, p.GoFiles...) ptest.GoFiles = append(ptest.GoFiles, p.TestGoFiles...) - ptest.Internal.Target = "" - ptest.Imports = str.StringList(p.Imports, p.TestImports) - ptest.Internal.Imports = append(append([]*load.Package{}, p.Internal.Imports...), imports...) - ptest.Internal.Pkgdir = testDir - ptest.Internal.Fake = true + ptest.Target = "" + // Note: The preparation of the vet config requires that common + // indexes in ptest.Imports, ptest.Internal.Imports, and ptest.Internal.RawImports + // all line up (but RawImports can be shorter than the others). + // That is, for 0 ≤ i < len(RawImports), + // RawImports[i] is the import string in the program text, + // Imports[i] is the expanded import string (vendoring applied or relative path expanded away), + // and Internal.Imports[i] is the corresponding *Package. + // Any implicitly added imports appear in Imports and Internal.Imports + // but not RawImports (because they were not in the source code). + // We insert TestImports, imports, and rawTestImports at the start of + // these lists to preserve the alignment. + ptest.Imports = str.StringList(p.TestImports, p.Imports) + ptest.Internal.Imports = append(imports, p.Internal.Imports...) 
+ ptest.Internal.RawImports = str.StringList(rawTestImports, p.Internal.RawImports) ptest.Internal.ForceLibrary = true - ptest.Stale = true - ptest.StaleReason = "rebuild for test" ptest.Internal.Build = new(build.Package) *ptest.Internal.Build = *p.Internal.Build m := map[string][]token.Position{} @@ -849,17 +892,19 @@ func builderTest(b *work.Builder, p *load.Package) (buildAction, runAction, prin Dir: p.Dir, GoFiles: p.XTestGoFiles, Imports: p.XTestImports, - Stale: true, }, Internal: load.PackageInternal{ LocalPrefix: p.Internal.LocalPrefix, Build: &build.Package{ ImportPos: p.Internal.Build.XTestImportPos, }, - Imports: ximports, - Pkgdir: testDir, - Fake: true, - External: true, + Imports: ximports, + RawImports: rawXTestImports, + + Asmflags: p.Internal.Asmflags, + Gcflags: p.Internal.Gcflags, + Ldflags: p.Internal.Ldflags, + Gccgoflags: p.Internal.Gccgoflags, }, } if pxtestNeedsPtest { @@ -867,27 +912,34 @@ func builderTest(b *work.Builder, p *load.Package) (buildAction, runAction, prin } } + testDir := b.NewObjdir() + if err := b.Mkdir(testDir); err != nil { + return nil, nil, nil, err + } + // Action for building pkg.test. pmain = &load.Package{ PackagePublic: load.PackagePublic{ Name: "main", Dir: testDir, GoFiles: []string{"_testmain.go"}, - ImportPath: "testmain", + ImportPath: p.ImportPath + " (testmain)", Root: p.Root, - Stale: true, }, Internal: load.PackageInternal{ Build: &build.Package{Name: "main"}, - Pkgdir: testDir, - Fake: true, OmitDebug: !testC && !testNeedBinary, }, } // The generated main also imports testing, regexp, and os. + // Also the linker introduces implicit dependencies reported by LinkerDeps. 
stk.Push("testmain") - for dep := range testMainDeps { + deps := testMainDeps // cap==len, so safe for append + for _, d := range load.LinkerDeps(p) { + deps = append(deps, d) + } + for _, dep := range deps { if dep == ptest.ImportPath { pmain.Internal.Imports = append(pmain.Internal.Imports, ptest) } else { @@ -932,24 +984,21 @@ func builderTest(b *work.Builder, p *load.Package) (buildAction, runAction, prin if ptest != p && localCover { // We have made modifications to the package p being tested - // and are rebuilding p (as ptest), writing it to the testDir tree. - // Arrange to rebuild, writing to that same tree, all packages q - // such that the test depends on q, and q depends on p. + // and are rebuilding p (as ptest). + // Arrange to rebuild all packages q such that + // the test depends on q and q depends on p. // This makes sure that q sees the modifications to p. // Strictly speaking, the rebuild is only necessary if the // modifications to p change its export metadata, but // determining that is a bit tricky, so we rebuild always. + // TODO(rsc): Once we get export metadata changes + // handled properly, look into the expense of dropping + // "&& localCover" above. // // This will cause extra compilation, so for now we only do it // when testCover is set. The conditions are more general, though, // and we may find that we need to do it always in the future. - recompileForTest(pmain, p, ptest, testDir) - } - - if cfg.BuildContext.GOOS == "darwin" { - if cfg.BuildContext.GOARCH == "arm" || cfg.BuildContext.GOARCH == "arm64" { - t.NeedCgo = true - } + recompileForTest(pmain, p, ptest) } for _, cp := range pmain.Internal.Imports { @@ -959,34 +1008,19 @@ func builderTest(b *work.Builder, p *load.Package) (buildAction, runAction, prin } if !cfg.BuildN { - // writeTestmain writes _testmain.go. This must happen after recompileForTest, - // because recompileForTest modifies XXX. 
- if err := writeTestmain(filepath.Join(testDir, "_testmain.go"), t); err != nil { + // writeTestmain writes _testmain.go, + // using the test description gathered in t. + if err := writeTestmain(testDir+"_testmain.go", t); err != nil { return nil, nil, nil, err } } - load.ComputeStale(pmain) + // Set compile objdir to testDir we've already created, + // so that the default file path stripping applies to _testmain.go. + b.CompileAction(work.ModeBuild, work.ModeBuild, pmain).Objdir = testDir - if ptest != p { - a := b.Action(work.ModeBuild, work.ModeBuild, ptest) - a.Objdir = testDir + string(filepath.Separator) + "_obj_test" + string(filepath.Separator) - a.Objpkg = ptestObj - a.Target = ptestObj - a.Link = false - } - - if pxtest != nil { - a := b.Action(work.ModeBuild, work.ModeBuild, pxtest) - a.Objdir = testDir + string(filepath.Separator) + "_obj_xtest" + string(filepath.Separator) - a.Objpkg = work.BuildToolchain.Pkgpath(testDir, pxtest) - a.Target = a.Objpkg - } - - a := b.Action(work.ModeBuild, work.ModeBuild, pmain) - a.Objdir = testDir + string(filepath.Separator) - a.Objpkg = filepath.Join(testDir, "main.a") - a.Target = filepath.Join(testDir, testBinary) + cfg.ExeSuffix + a := b.LinkAction(work.ModeBuild, work.ModeBuild, pmain) + a.Target = testDir + testBinary + cfg.ExeSuffix if cfg.Goos == "windows" { // There are many reserved words on Windows that, // if used in the name of an executable, cause Windows @@ -1012,12 +1046,13 @@ func builderTest(b *work.Builder, p *load.Package) (buildAction, runAction, prin // we could just do this always on Windows. for _, bad := range windowsBadWords { if strings.Contains(testBinary, bad) { - a.Target = filepath.Join(testDir, "test.test") + cfg.ExeSuffix + a.Target = testDir + "test.test" + cfg.ExeSuffix break } } } buildAction = a + var installAction *work.Action if testC || testNeedBinary { // -c or profiling flag: create action to copy binary to ./test.out. 
@@ -1028,30 +1063,46 @@ func builderTest(b *work.Builder, p *load.Package) (buildAction, runAction, prin target = filepath.Join(base.Cwd, target) } } - buildAction = &work.Action{ + pmain.Target = target + installAction = &work.Action{ + Mode: "test build", Func: work.BuildInstallFunc, Deps: []*work.Action{buildAction}, Package: pmain, Target: target, } - runAction = buildAction // make sure runAction != nil even if not running test + buildAction = installAction + runAction = installAction // make sure runAction != nil even if not running test } if testC { - printAction = &work.Action{Package: p, Deps: []*work.Action{runAction}} // nop + printAction = &work.Action{Mode: "test print (nop)", Package: p, Deps: []*work.Action{runAction}} // nop } else { // run test + c := new(runCache) runAction = &work.Action{ - Func: builderRunTest, + Mode: "test run", + Func: c.builderRunTest, Deps: []*work.Action{buildAction}, Package: p, IgnoreFail: true, + TryCache: c.tryCache, + Objdir: testDir, + } + if len(ptest.GoFiles)+len(ptest.CgoFiles) > 0 { + addTestVet(b, ptest, runAction, installAction) + } + if pxtest != nil { + addTestVet(b, pxtest, runAction, installAction) } cleanAction := &work.Action{ + Mode: "test clean", Func: builderCleanTest, Deps: []*work.Action{runAction}, Package: p, + Objdir: testDir, } printAction = &work.Action{ + Mode: "test print", Func: builderPrintTest, Deps: []*work.Action{cleanAction}, Package: p, @@ -1061,6 +1112,22 @@ func builderTest(b *work.Builder, p *load.Package) (buildAction, runAction, prin return buildAction, runAction, printAction, nil } +func addTestVet(b *work.Builder, p *load.Package, runAction, installAction *work.Action) { + if testVetList == "off" { + return + } + + vet := b.VetAction(work.ModeBuild, work.ModeBuild, p) + runAction.Deps = append(runAction.Deps, vet) + // Install will clean the build directory. + // Make sure vet runs first. 
+ // The install ordering in b.VetAction does not apply here + // because we are using a custom installAction (created above). + if installAction != nil { + installAction.Deps = append(installAction.Deps, vet) + } +} + func testImportStack(top string, p *load.Package, target string) []string { stk := []string{top, p.ImportPath} Search: @@ -1079,7 +1146,7 @@ Search: return stk } -func recompileForTest(pmain, preal, ptest *load.Package, testDir string) { +func recompileForTest(pmain, preal, ptest *load.Package) { // The "test copy" of preal is ptest. // For each package that depends on preal, make a "test copy" // that depends on ptest. And so on, up the dependency tree. @@ -1092,28 +1159,19 @@ func recompileForTest(pmain, preal, ptest *load.Package, testDir string) { return } didSplit = true - if p.Internal.Pkgdir != testDir { - p1 := new(load.Package) - testCopy[p] = p1 - *p1 = *p - p1.Internal.Imports = make([]*load.Package, len(p.Internal.Imports)) - copy(p1.Internal.Imports, p.Internal.Imports) - p = p1 - p.Internal.Pkgdir = testDir - p.Internal.Target = "" - p.Internal.Fake = true - p.Stale = true - p.StaleReason = "depends on package being tested" + if testCopy[p] != nil { + panic("recompileForTest loop") } + p1 := new(load.Package) + testCopy[p] = p1 + *p1 = *p + p1.Internal.Imports = make([]*load.Package, len(p.Internal.Imports)) + copy(p1.Internal.Imports, p.Internal.Imports) + p = p1 + p.Target = "" } - // Update p.Deps and p.Internal.Imports to use at test copies. - for i, dep := range p.Internal.Deps { - if p1 := testCopy[dep]; p1 != nil && p1 != dep { - split() - p.Internal.Deps[i] = p1 - } - } + // Update p.Internal.Imports to use test copies. 
for i, imp := range p.Internal.Imports { if p1 := testCopy[imp]; p1 != nil && p1 != imp { split() @@ -1123,8 +1181,6 @@ func recompileForTest(pmain, preal, ptest *load.Package, testDir string) { } } -var coverIndex = 0 - // isTestFile reports whether the source file is a set of tests and should therefore // be excluded from coverage analysis. func isTestFile(file string) bool { @@ -1136,6 +1192,7 @@ func isTestFile(file string) bool { // to the files, to be used when annotating the files. func declareCoverVars(importPath string, files ...string) map[string]*load.CoverVar { coverVars := make(map[string]*load.CoverVar) + coverIndex := 0 for _, file := range files { if isTestFile(file) { continue @@ -1151,10 +1208,67 @@ func declareCoverVars(importPath string, files ...string) map[string]*load.Cover var noTestsToRun = []byte("\ntesting: warning: no tests to run\n") +type runCache struct { + disableCache bool // cache should be disabled for this run + + buf *bytes.Buffer + id1 cache.ActionID + id2 cache.ActionID +} + +// stdoutMu and lockedStdout provide a locked standard output +// that guarantees never to interlace writes from multiple +// goroutines, so that we can have multiple JSON streams writing +// to a lockedStdout simultaneously and know that events will +// still be intelligible. +var stdoutMu sync.Mutex + +type lockedStdout struct{} + +func (lockedStdout) Write(b []byte) (int, error) { + stdoutMu.Lock() + defer stdoutMu.Unlock() + return os.Stdout.Write(b) +} + // builderRunTest is the action for running a test binary. -func builderRunTest(b *work.Builder, a *work.Action) error { +func (c *runCache) builderRunTest(b *work.Builder, a *work.Action) error { + if c.buf == nil { + // We did not find a cached result using the link step action ID, + // so we ran the link step. Try again now with the link output + // content ID. 
The attempt using the action ID makes sure that + // if the link inputs don't change, we reuse the cached test + // result without even rerunning the linker. The attempt using + // the link output (test binary) content ID makes sure that if + // we have different link inputs but the same final binary, + // we still reuse the cached test result. + // c.saveOutput will store the result under both IDs. + c.tryCacheWithID(b, a, a.Deps[0].BuildContentID()) + } + if c.buf != nil { + a.TestOutput = c.buf + return nil + } + + if a.Failed { + // We were unable to build the binary. + a.Failed = false + a.TestOutput = new(bytes.Buffer) + fmt.Fprintf(a.TestOutput, "FAIL\t%s [build failed]\n", a.Package.ImportPath) + base.SetExitStatus(1) + return nil + } + args := str.StringList(work.FindExecCmd(), a.Deps[0].Target, testArgs) - a.TestOutput = new(bytes.Buffer) + + if testCoverProfile != "" { + // Write coverage to temporary profile, for merging later. + for i, arg := range args { + if strings.HasPrefix(arg, "-test.coverprofile=") { + args[i] = "-test.coverprofile=" + a.Objdir + "_cover_.out" + } + } + } if cfg.BuildN || cfg.BuildX { b.Showcmd("", "%s", strings.Join(args, " ")) @@ -1163,25 +1277,45 @@ func builderRunTest(b *work.Builder, a *work.Action) error { } } - if a.Failed { - // We were unable to build the binary. - a.Failed = false - fmt.Fprintf(a.TestOutput, "FAIL\t%s [build failed]\n", a.Package.ImportPath) - base.SetExitStatus(1) - return nil - } - cmd := exec.Command(args[0], args[1:]...) 
cmd.Dir = a.Package.Dir cmd.Env = base.EnvForDir(cmd.Dir, cfg.OrigEnv) var buf bytes.Buffer - if testStreamOutput { - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - } else { - cmd.Stdout = &buf - cmd.Stderr = &buf + var stdout io.Writer = os.Stdout + if testJSON { + json := test2json.NewConverter(lockedStdout{}, a.Package.ImportPath, test2json.Timestamp) + defer json.Close() + stdout = json } + if len(pkgArgs) == 0 || testBench { + // Stream test output (no buffering) when no package has + // been given on the command line (implicit current directory) + // or when benchmarking. + cmd.Stdout = stdout + } else { + // If we're only running a single package under test or if parallelism is + // set to 1, and if we're displaying all output (testShowPass), we can + // hurry the output along, echoing it as soon as it comes in. + // We still have to copy to &buf for caching the result. This special + // case was introduced in Go 1.5 and is intentionally undocumented: + // the exact details of output buffering are up to the go command and + // subject to change. It would be nice to remove this special case + // entirely, but it is surely very helpful to see progress being made + // when tests are run on slow single-CPU ARM systems. + // + // If we're showing JSON output, then display output as soon as + // possible even when multiple tests are being run: the JSON output + // events are attributed to specific package tests, so interlacing them + // is OK. + if testShowPass && (len(pkgs) == 1 || cfg.BuildP == 1) || testJSON { + // Write both to stdout and buf, for possible saving + // to cache, and for looking for the "no tests to run" message. + cmd.Stdout = io.MultiWriter(stdout, &buf) + } else { + cmd.Stdout = &buf + } + } + cmd.Stderr = cmd.Stdout // If there are any local SWIG dependencies, we want to load // the shared library from the build directory. 
@@ -1227,43 +1361,156 @@ func builderRunTest(b *work.Builder, a *work.Action) error { cmd.Process.Signal(base.SignalTrace) select { case err = <-done: - fmt.Fprintf(&buf, "*** Test killed with %v: ran too long (%v).\n", base.SignalTrace, testKillTimeout) + fmt.Fprintf(cmd.Stdout, "*** Test killed with %v: ran too long (%v).\n", base.SignalTrace, testKillTimeout) break Outer case <-time.After(5 * time.Second): } } cmd.Process.Kill() err = <-done - fmt.Fprintf(&buf, "*** Test killed: ran too long (%v).\n", testKillTimeout) + fmt.Fprintf(cmd.Stdout, "*** Test killed: ran too long (%v).\n", testKillTimeout) } tick.Stop() } out := buf.Bytes() + a.TestOutput = &buf t := fmt.Sprintf("%.3fs", time.Since(t0).Seconds()) + + mergeCoverProfile(cmd.Stdout, a.Objdir+"_cover_.out") + if err == nil { norun := "" - if testShowPass { - a.TestOutput.Write(out) + if !testShowPass { + buf.Reset() } if bytes.HasPrefix(out, noTestsToRun[1:]) || bytes.Contains(out, noTestsToRun) { norun = " [no tests to run]" } - fmt.Fprintf(a.TestOutput, "ok \t%s\t%s%s%s\n", a.Package.ImportPath, t, coveragePercentage(out), norun) - return nil - } - - base.SetExitStatus(1) - if len(out) > 0 { - a.TestOutput.Write(out) - // assume printing the test binary's exit status is superfluous + fmt.Fprintf(cmd.Stdout, "ok \t%s\t%s%s%s\n", a.Package.ImportPath, t, coveragePercentage(out), norun) + c.saveOutput(a) } else { - fmt.Fprintf(a.TestOutput, "%s\n", err) + base.SetExitStatus(1) + // If there was test output, assume we don't need to print the exit status. + // But there's no test output, do print the exit status.
+ if len(out) == 0 { + fmt.Fprintf(cmd.Stdout, "%s\n", err) + } + fmt.Fprintf(cmd.Stdout, "FAIL\t%s\t%s\n", a.Package.ImportPath, t) } - fmt.Fprintf(a.TestOutput, "FAIL\t%s\t%s\n", a.Package.ImportPath, t) + if cmd.Stdout != &buf { + buf.Reset() // cmd.Stdout was going to os.Stdout already + } return nil } +// tryCache is called just before the link attempt, +// to see if the test result is cached and therefore the link is unneeded. +// It reports whether the result can be satisfied from cache. +func (c *runCache) tryCache(b *work.Builder, a *work.Action) bool { + return c.tryCacheWithID(b, a, a.Deps[0].BuildActionID()) +} + +func (c *runCache) tryCacheWithID(b *work.Builder, a *work.Action, id string) bool { + if len(pkgArgs) == 0 { + // Caching does not apply to "go test", + // only to "go test foo" (including "go test ."). + c.disableCache = true + return false + } + + var cacheArgs []string + for _, arg := range testArgs { + i := strings.Index(arg, "=") + if i < 0 || !strings.HasPrefix(arg, "-test.") { + c.disableCache = true + return false + } + switch arg[:i] { + case "-test.cpu", + "-test.list", + "-test.parallel", + "-test.run", + "-test.short", + "-test.v": + // These are cacheable. + // Note that this list is documented above, + // so if you add to this list, update the docs too. + cacheArgs = append(cacheArgs, arg) + + case "-test.timeout": + // Special case: this is cacheable but ignored during the hash. + // Do not add to cacheArgs. + + default: + // nothing else is cacheable + c.disableCache = true + return false + } + } + + if cache.Default() == nil { + c.disableCache = true + return false + } + + h := cache.NewHash("testResult") + fmt.Fprintf(h, "test binary %s args %q execcmd %q", id, cacheArgs, work.ExecCmd) + // TODO(rsc): How to handle other test dependencies like environment variables or input files? 
+ // We could potentially add new API like testing.UsedEnv(envName string) + // or testing.UsedFile(inputFile string) to let tests declare what external inputs + // they consulted. These could be recorded and rechecked. + // The lookup here would become a two-step lookup: first use the binary+args + // to fetch the list of other inputs, then add the other inputs to produce a + // second key for fetching the results. + // For now, we'll assume that users will use -count=1 (or "go test") to bypass the test result + // cache when modifying those things. + testID := h.Sum() + if c.id1 == (cache.ActionID{}) { + c.id1 = testID + } else { + c.id2 = testID + } + + // Parse cached result in preparation for changing run time to "(cached)". + // If we can't parse the cached result, don't use it. + data, entry, _ := cache.Default().GetBytes(testID) + if len(data) == 0 || data[len(data)-1] != '\n' { + return false + } + if entry.Time.Before(testCacheExpire) { + return false + } + i := bytes.LastIndexByte(data[:len(data)-1], '\n') + 1 + if !bytes.HasPrefix(data[i:], []byte("ok \t")) { + return false + } + j := bytes.IndexByte(data[i+len("ok \t"):], '\t') + if j < 0 { + return false + } + j += i + len("ok \t") + 1 + + // Committed to printing. + c.buf = new(bytes.Buffer) + c.buf.Write(data[:j]) + c.buf.WriteString("(cached)") + for j < len(data) && ('0' <= data[j] && data[j] <= '9' || data[j] == '.' || data[j] == 's') { + j++ + } + c.buf.Write(data[j:]) + return true +} + +func (c *runCache) saveOutput(a *work.Action) { + if c.id1 != (cache.ActionID{}) { + cache.Default().PutNoVerify(c.id1, bytes.NewReader(a.TestOutput.Bytes())) + } + if c.id2 != (cache.ActionID{}) { + cache.Default().PutNoVerify(c.id2, bytes.NewReader(a.TestOutput.Bytes())) + } +} + // coveragePercentage returns the coverage results (if enabled) for the // test. It uncovers the data by scanning the output from the test run. 
func coveragePercentage(out []byte) string { @@ -1288,9 +1535,10 @@ func builderCleanTest(b *work.Builder, a *work.Action) error { if cfg.BuildWork { return nil } - run := a.Deps[0] - testDir := filepath.Join(b.WorkDir, filepath.FromSlash(run.Package.ImportPath+"/_test")) - os.RemoveAll(testDir) + if cfg.BuildX { + b.Showcmd("", "rm -r %s", a.Objdir) + } + os.RemoveAll(a.Objdir) return nil } @@ -1298,14 +1546,22 @@ func builderCleanTest(b *work.Builder, a *work.Action) error { func builderPrintTest(b *work.Builder, a *work.Action) error { clean := a.Deps[0] run := clean.Deps[0] - os.Stdout.Write(run.TestOutput.Bytes()) - run.TestOutput = nil + if run.TestOutput != nil { + os.Stdout.Write(run.TestOutput.Bytes()) + run.TestOutput = nil + } return nil } // builderNoTest is the action for testing a package with no test files. func builderNoTest(b *work.Builder, a *work.Action) error { - fmt.Printf("? \t%s\t[no test files]\n", a.Package.ImportPath) + var stdout io.Writer = os.Stdout + if testJSON { + json := test2json.NewConverter(lockedStdout{}, a.Package.ImportPath, test2json.Timestamp) + defer json.Close() + stdout = json + } + fmt.Fprintf(stdout, "? 
\t%s\t[no test files]\n", a.Package.ImportPath) return nil } @@ -1397,7 +1653,6 @@ type testFuncs struct { NeedTest bool ImportXtest bool NeedXtest bool - NeedCgo bool Cover []coverInfo } @@ -1462,7 +1717,16 @@ func (t *testFuncs) load(filename, pkg string, doImport, seen *bool) error { } name := n.Name.String() switch { - case name == "TestMain" && isTestFunc(n, "M"): + case name == "TestMain": + if isTestFunc(n, "T") { + t.Tests = append(t.Tests, testFunc{pkg, name, "", false}) + *doImport, *seen = true, true + continue + } + err := checkTestFunc(n, "M") + if err != nil { + return err + } if t.TestMain != nil { return errors.New("multiple definitions of TestMain") } @@ -1526,10 +1790,6 @@ import ( {{range $i, $p := .Cover}} _cover{{$i}} {{$p.Package.ImportPath | printf "%q"}} {{end}} - -{{if .NeedCgo}} - _ "runtime/cgo" -{{end}} ) var tests = []testing.InternalTest{ diff --git a/src/cmd/go/internal/test/testflag.go b/src/cmd/go/internal/test/testflag.go index bff8656a4c1..8a908f7e215 100644 --- a/src/cmd/go/internal/test/testflag.go +++ b/src/cmd/go/internal/test/testflag.go @@ -33,20 +33,23 @@ var testFlagDefn = []*cmdflag.Defn{ {Name: "covermode"}, {Name: "coverpkg"}, {Name: "exec"}, + {Name: "json", BoolVar: &testJSON}, + {Name: "vet"}, // Passed to 6.out, adding a "test." prefix to the name if necessary: -v becomes -test.v. 
{Name: "bench", PassToTest: true}, {Name: "benchmem", BoolVar: new(bool), PassToTest: true}, {Name: "benchtime", PassToTest: true}, + {Name: "blockprofile", PassToTest: true}, + {Name: "blockprofilerate", PassToTest: true}, {Name: "count", PassToTest: true}, {Name: "coverprofile", PassToTest: true}, {Name: "cpu", PassToTest: true}, {Name: "cpuprofile", PassToTest: true}, + {Name: "failfast", BoolVar: new(bool), PassToTest: true}, {Name: "list", PassToTest: true}, {Name: "memprofile", PassToTest: true}, {Name: "memprofilerate", PassToTest: true}, - {Name: "blockprofile", PassToTest: true}, - {Name: "blockprofilerate", PassToTest: true}, {Name: "mutexprofile", PassToTest: true}, {Name: "mutexprofilefraction", PassToTest: true}, {Name: "outputdir", PassToTest: true}, @@ -85,7 +88,6 @@ func init() { // go test -x math func testFlags(args []string) (packageNames, passToTest []string) { inPkg := false - outputDir := "" var explicitArgs []string for i := 0; i < len(args); i++ { if !strings.HasPrefix(args[i], "-") { @@ -132,8 +134,11 @@ func testFlags(args []string) (packageNames, passToTest []string) { // Arguably should be handled by f.Value, but aren't. switch f.Name { // bool flags. 
- case "c", "i", "v", "cover": + case "c", "i", "v", "cover", "json": cmdflag.SetBool(cmd, f.BoolVar, value) + if f.Name == "json" && testJSON { + passToTest = append(passToTest, "-test.v") + } case "o": testO = value testNeedBinary = true @@ -151,10 +156,10 @@ func testFlags(args []string) (packageNames, passToTest []string) { case "timeout": testTimeout = value case "blockprofile", "cpuprofile", "memprofile", "mutexprofile": - testProfile = true + testProfile = "-" + f.Name testNeedBinary = true case "trace": - testProfile = true + testProfile = "-trace" case "coverpkg": testCover = true if value == "" { @@ -164,7 +169,7 @@ func testFlags(args []string) (packageNames, passToTest []string) { } case "coverprofile": testCover = true - testProfile = true + testCoverProfile = value case "covermode": switch value { case "set", "count", "atomic": @@ -174,7 +179,9 @@ func testFlags(args []string) (packageNames, passToTest []string) { } testCover = true case "outputdir": - outputDir = value + testOutputDir = value + case "vet": + testVetList = value } } if extraWord { @@ -193,12 +200,26 @@ func testFlags(args []string) (packageNames, passToTest []string) { } } + if testVetList != "" && testVetList != "off" { + if strings.Contains(testVetList, "=") { + base.Fatalf("-vet argument cannot contain equal signs") + } + if strings.Contains(testVetList, " ") { + base.Fatalf("-vet argument is comma-separated list, cannot contain spaces") + } + list := strings.Split(testVetList, ",") + for i, arg := range list { + list[i] = "-" + arg + } + testVetFlags = list + } + if cfg.BuildRace && testCoverMode != "atomic" { base.Fatalf(`-covermode must be "atomic", not %q, when -race is enabled`, testCoverMode) } // Tell the test what directory we're running in, so it can write the profiles there. 
- if testProfile && outputDir == "" { + if testProfile != "" && testOutputDir == "" { dir, err := os.Getwd() if err != nil { base.Fatalf("error from os.Getwd: %s", err) diff --git a/src/cmd/go/internal/tool/tool.go b/src/cmd/go/internal/tool/tool.go index 51675262e59..db92884f6aa 100644 --- a/src/cmd/go/internal/tool/tool.go +++ b/src/cmd/go/internal/tool/tool.go @@ -27,7 +27,7 @@ With no arguments it prints the list of known tools. The -n flag causes tool to print the command that would be executed but not execute it. -For more about each tool command, see 'go tool command -h'. +For more about each tool command, see 'go doc cmd/'. `, } diff --git a/src/cmd/go/internal/vet/vet.go b/src/cmd/go/internal/vet/vet.go index ddacd085b06..db734c9d846 100644 --- a/src/cmd/go/internal/vet/vet.go +++ b/src/cmd/go/internal/vet/vet.go @@ -6,19 +6,16 @@ package vet import ( - "path/filepath" - "cmd/go/internal/base" - "cmd/go/internal/cfg" "cmd/go/internal/load" - "cmd/go/internal/str" + "cmd/go/internal/work" ) var CmdVet = &base.Command{ Run: runVet, CustomFlags: true, UsageLine: "vet [-n] [-x] [build flags] [vet flags] [packages]", - Short: "run go tool vet on packages", + Short: "report likely mistakes in packages", Long: ` Vet runs the Go vet command on the packages named by the import paths. @@ -28,29 +25,31 @@ For more about specifying packages, see 'go help packages'. The -n flag prints commands that would be executed. The -x flag prints commands as they are executed. -For more about build flags, see 'go help build'. +The build flags supported by go vet are those that control package resolution +and execution, such as -n, -x, -v, -tags, and -toolexec. +For more about these flags, see 'go help build'. See also: go fmt, go fix. `, } func runVet(cmd *base.Command, args []string) { - vetFlags, packages := vetFlags(args) - for _, p := range load.Packages(packages) { - // Vet expects to be given a set of files all from the same package. 
- // Run once for package p and once for package p_test. - if len(p.GoFiles)+len(p.CgoFiles)+len(p.TestGoFiles) > 0 { - runVetFiles(p, vetFlags, str.StringList(p.GoFiles, p.CgoFiles, p.TestGoFiles, p.SFiles)) - } - if len(p.XTestGoFiles) > 0 { - runVetFiles(p, vetFlags, str.StringList(p.XTestGoFiles)) - } - } -} + vetFlags, pkgArgs := vetFlags(args) -func runVetFiles(p *load.Package, flags, files []string) { - for i := range files { - files[i] = filepath.Join(p.Dir, files[i]) + work.BuildInit() + work.VetFlags = vetFlags + + pkgs := load.PackagesForBuild(pkgArgs) + if len(pkgs) == 0 { + base.Fatalf("no packages to vet") } - base.Run(cfg.BuildToolexec, base.Tool("vet"), flags, base.RelPaths(files)) + + var b work.Builder + b.Init() + + root := &work.Action{Mode: "go vet"} + for _, p := range pkgs { + root.Deps = append(root.Deps, b.VetAction(work.ModeBuild, work.ModeBuild, p)) + } + b.Do(root) } diff --git a/src/cmd/go/internal/vet/vetflag.go b/src/cmd/go/internal/vet/vetflag.go index 8cd21bb72b7..36ee04ede77 100644 --- a/src/cmd/go/internal/vet/vetflag.go +++ b/src/cmd/go/internal/vet/vetflag.go @@ -44,6 +44,7 @@ var vetFlagDefn = []*cmdflag.Defn{ {Name: "rangeloops", BoolVar: new(bool)}, {Name: "shadow", BoolVar: new(bool)}, {Name: "shadowstrict", BoolVar: new(bool)}, + {Name: "shift", BoolVar: new(bool)}, {Name: "source", BoolVar: new(bool)}, {Name: "structtags", BoolVar: new(bool)}, {Name: "tests", BoolVar: new(bool)}, diff --git a/src/cmd/go/internal/work/action.go b/src/cmd/go/internal/work/action.go new file mode 100644 index 00000000000..46ba3447c66 --- /dev/null +++ b/src/cmd/go/internal/work/action.go @@ -0,0 +1,747 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Action graph creation (planning). 
+ +package work + +import ( + "bufio" + "bytes" + "container/heap" + "debug/elf" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + + "cmd/go/internal/base" + "cmd/go/internal/cache" + "cmd/go/internal/cfg" + "cmd/go/internal/load" + "cmd/internal/buildid" +) + +// A Builder holds global state about a build. +// It does not hold per-package state, because we +// build packages in parallel, and the builder is shared. +type Builder struct { + WorkDir string // the temporary work directory (ends in filepath.Separator) + actionCache map[cacheKey]*Action // a cache of already-constructed actions + mkdirCache map[string]bool // a cache of created directories + flagCache map[[2]string]bool // a cache of supported compiler flags + Print func(args ...interface{}) (int, error) + + ComputeStaleOnly bool // compute staleness for go list; no actual build + + objdirSeq int // counter for NewObjdir + pkgSeq int + + output sync.Mutex + scriptDir string // current directory in printed script + + exec sync.Mutex + readySema chan bool + ready actionQueue + + id sync.Mutex + toolIDCache map[string]string // tool name -> tool ID + buildIDCache map[string]string // file name -> build ID +} + +// NOTE: Much of Action would not need to be exported if not for test. +// Maybe test functionality should move into this package too? + +// An Action represents a single action in the action graph. +type Action struct { + Mode string // description of action operation + Package *load.Package // the package this action works on + Deps []*Action // actions that must happen before this one + Func func(*Builder, *Action) error // the action itself (nil = no-op) + IgnoreFail bool // whether to run f even if dependencies fail + TestOutput *bytes.Buffer // test output buffer + Args []string // additional args for runProgram + + triggers []*Action // inverse of deps + + buggyInstall bool // is this a buggy install (see -linkshared)? 
+ + TryCache func(*Builder, *Action) bool // callback for cache bypass + + // Generated files, directories. + Objdir string // directory for intermediate objects + Target string // goal of the action: the created package or executable + built string // the actual created package or executable + actionID cache.ActionID // cache ID of action input + buildID string // build ID of action output + + needVet bool // Mode=="build": need to fill in vet config + vetCfg *vetConfig // vet config + output []byte // output redirect buffer (nil means use b.Print) + + // Execution state. + pending int // number of deps yet to complete + priority int // relative execution priority + Failed bool // whether the action failed +} + +// BuildActionID returns the action ID section of a's build ID. +func (a *Action) BuildActionID() string { return actionID(a.buildID) } + +// BuildContentID returns the content ID section of a's build ID. +func (a *Action) BuildContentID() string { return contentID(a.buildID) } + +// BuildID returns a's build ID. +func (a *Action) BuildID() string { return a.buildID } + +// An actionQueue is a priority queue of actions. 
+type actionQueue []*Action + +// Implement heap.Interface +func (q *actionQueue) Len() int { return len(*q) } +func (q *actionQueue) Swap(i, j int) { (*q)[i], (*q)[j] = (*q)[j], (*q)[i] } +func (q *actionQueue) Less(i, j int) bool { return (*q)[i].priority < (*q)[j].priority } +func (q *actionQueue) Push(x interface{}) { *q = append(*q, x.(*Action)) } +func (q *actionQueue) Pop() interface{} { + n := len(*q) - 1 + x := (*q)[n] + *q = (*q)[:n] + return x +} + +func (q *actionQueue) push(a *Action) { + heap.Push(q, a) +} + +func (q *actionQueue) pop() *Action { + return heap.Pop(q).(*Action) +} + +type actionJSON struct { + ID int + Mode string + Package string + Deps []int `json:",omitempty"` + IgnoreFail bool `json:",omitempty"` + Args []string `json:",omitempty"` + Link bool `json:",omitempty"` + Objdir string `json:",omitempty"` + Target string `json:",omitempty"` + Priority int `json:",omitempty"` + Failed bool `json:",omitempty"` + Built string `json:",omitempty"` +} + +// cacheKey is the key for the action cache. +type cacheKey struct { + mode string + p *load.Package +} + +func actionGraphJSON(a *Action) string { + var workq []*Action + var inWorkq = make(map[*Action]int) + + add := func(a *Action) { + if _, ok := inWorkq[a]; ok { + return + } + inWorkq[a] = len(workq) + workq = append(workq, a) + } + add(a) + + for i := 0; i < len(workq); i++ { + for _, dep := range workq[i].Deps { + add(dep) + } + } + + var list []*actionJSON + for id, a := range workq { + aj := &actionJSON{ + Mode: a.Mode, + ID: id, + IgnoreFail: a.IgnoreFail, + Args: a.Args, + Objdir: a.Objdir, + Target: a.Target, + Failed: a.Failed, + Priority: a.priority, + Built: a.built, + } + if a.Package != nil { + // TODO(rsc): Make this a unique key for a.Package somehow. 
+ aj.Package = a.Package.ImportPath + } + for _, a1 := range a.Deps { + aj.Deps = append(aj.Deps, inWorkq[a1]) + } + list = append(list, aj) + } + + js, err := json.MarshalIndent(list, "", "\t") + if err != nil { + fmt.Fprintf(os.Stderr, "go: writing debug action graph: %v\n", err) + return "" + } + return string(js) +} + +// BuildMode specifies the build mode: +// are we just building things or also installing the results? +type BuildMode int + +const ( + ModeBuild BuildMode = iota + ModeInstall + ModeBuggyInstall +) + +func (b *Builder) Init() { + var err error + b.Print = func(a ...interface{}) (int, error) { + return fmt.Fprint(os.Stderr, a...) + } + b.actionCache = make(map[cacheKey]*Action) + b.mkdirCache = make(map[string]bool) + b.toolIDCache = make(map[string]string) + b.buildIDCache = make(map[string]string) + + if cfg.BuildN { + b.WorkDir = "$WORK" + } else { + b.WorkDir, err = ioutil.TempDir(os.Getenv("GOTMPDIR"), "go-build") + if err != nil { + base.Fatalf("%s", err) + } + if cfg.BuildX || cfg.BuildWork { + fmt.Fprintf(os.Stderr, "WORK=%s\n", b.WorkDir) + } + if !cfg.BuildWork { + workdir := b.WorkDir + base.AtExit(func() { os.RemoveAll(workdir) }) + } + } + + if _, ok := cfg.OSArchSupportsCgo[cfg.Goos+"/"+cfg.Goarch]; !ok && cfg.BuildContext.Compiler == "gc" { + fmt.Fprintf(os.Stderr, "cmd/go: unsupported GOOS/GOARCH pair %s/%s\n", cfg.Goos, cfg.Goarch) + os.Exit(2) + } + for _, tag := range cfg.BuildContext.BuildTags { + if strings.Contains(tag, ",") { + fmt.Fprintf(os.Stderr, "cmd/go: -tags space-separated list contains comma\n") + os.Exit(2) + } + } +} + +// NewObjdir returns the name of a fresh object directory under b.WorkDir. +// It is up to the caller to call b.Mkdir on the result at an appropriate time. +// The result ends in a slash, so that file names in that directory +// can be constructed with direct string addition. 
+// +// NewObjdir must be called only from a single goroutine at a time, +// so it is safe to call during action graph construction, but it must not +// be called during action graph execution. +func (b *Builder) NewObjdir() string { + b.objdirSeq++ + return filepath.Join(b.WorkDir, fmt.Sprintf("b%03d", b.objdirSeq)) + string(filepath.Separator) +} + +// readpkglist returns the list of packages that were built into the shared library +// at shlibpath. For the native toolchain this list is stored, newline separated, in +// an ELF note with name "Go\x00\x00" and type 1. For GCCGO it is extracted from the +// .go_export section. +func readpkglist(shlibpath string) (pkgs []*load.Package) { + var stk load.ImportStack + if cfg.BuildToolchainName == "gccgo" { + f, _ := elf.Open(shlibpath) + sect := f.Section(".go_export") + data, _ := sect.Data() + scanner := bufio.NewScanner(bytes.NewBuffer(data)) + for scanner.Scan() { + t := scanner.Text() + if strings.HasPrefix(t, "pkgpath ") { + t = strings.TrimPrefix(t, "pkgpath ") + t = strings.TrimSuffix(t, ";") + pkgs = append(pkgs, load.LoadPackage(t, &stk)) + } + } + } else { + pkglistbytes, err := buildid.ReadELFNote(shlibpath, "Go\x00\x00", 1) + if err != nil { + base.Fatalf("readELFNote failed: %v", err) + } + scanner := bufio.NewScanner(bytes.NewBuffer(pkglistbytes)) + for scanner.Scan() { + t := scanner.Text() + pkgs = append(pkgs, load.LoadPackage(t, &stk)) + } + } + return +} + +// cacheAction looks up {mode, p} in the cache and returns the resulting action. +// If the cache has no such action, f() is recorded and returned. +// TODO(rsc): Change the second key from *load.Package to interface{}, +// to make the caching in linkShared less awkward? 
+func (b *Builder) cacheAction(mode string, p *load.Package, f func() *Action) *Action { + a := b.actionCache[cacheKey{mode, p}] + if a == nil { + a = f() + b.actionCache[cacheKey{mode, p}] = a + } + return a +} + +// AutoAction returns the "right" action for go build or go install of p. +func (b *Builder) AutoAction(mode, depMode BuildMode, p *load.Package) *Action { + if p.Name == "main" { + return b.LinkAction(mode, depMode, p) + } + return b.CompileAction(mode, depMode, p) +} + +// CompileAction returns the action for compiling and possibly installing +// (according to mode) the given package. The resulting action is only +// for building packages (archives), never for linking executables. +// depMode is the action (build or install) to use when building dependencies. +// To turn package main into an executable, call b.Link instead. +func (b *Builder) CompileAction(mode, depMode BuildMode, p *load.Package) *Action { + if mode != ModeBuild && p.Internal.Local && p.Target == "" { + // Imported via local path. No permanent target. + mode = ModeBuild + } + if mode != ModeBuild && p.Name == "main" { + // We never install the .a file for a main package. + mode = ModeBuild + } + + // Construct package build action. + a := b.cacheAction("build", p, func() *Action { + a := &Action{ + Mode: "build", + Package: p, + Func: (*Builder).build, + Objdir: b.NewObjdir(), + } + + for _, p1 := range p.Internal.Imports { + a.Deps = append(a.Deps, b.CompileAction(depMode, depMode, p1)) + } + + if p.Standard { + switch p.ImportPath { + case "builtin", "unsafe": + // Fake packages - nothing to build. + a.Mode = "built-in package" + a.Func = nil + return a + } + + // gccgo standard library is "fake" too. + if cfg.BuildToolchainName == "gccgo" { + // the target name is needed for cgo. + a.Mode = "gccgo stdlib" + a.Target = p.Target + a.Func = nil + return a + } + } + + return a + }) + + // Construct install action. 
+ if mode == ModeInstall || mode == ModeBuggyInstall { + a = b.installAction(a, mode) + } + + return a +} + +// VetAction returns the action for running go vet on package p. +// It depends on the action for compiling p. +// If the caller may be causing p to be installed, it is up to the caller +// to make sure that the install depends on (runs after) vet. +func (b *Builder) VetAction(mode, depMode BuildMode, p *load.Package) *Action { + // Construct vet action. + a := b.cacheAction("vet", p, func() *Action { + a1 := b.CompileAction(mode, depMode, p) + + // vet expects to be able to import "fmt". + var stk load.ImportStack + stk.Push("vet") + p1 := load.LoadPackage("fmt", &stk) + stk.Pop() + aFmt := b.CompileAction(ModeBuild, depMode, p1) + + a := &Action{ + Mode: "vet", + Package: p, + Deps: []*Action{a1, aFmt}, + Objdir: a1.Objdir, + } + if a1.Func == nil { + // Built-in packages like unsafe. + return a + } + a1.needVet = true + a.Func = (*Builder).vet + + return a + }) + return a +} + +// LinkAction returns the action for linking p into an executable +// and possibly installing the result (according to mode). +// depMode is the action (build or install) to use when compiling dependencies. +func (b *Builder) LinkAction(mode, depMode BuildMode, p *load.Package) *Action { + // Construct link action. + a := b.cacheAction("link", p, func() *Action { + a := &Action{ + Mode: "link", + Package: p, + } + + a1 := b.CompileAction(ModeBuild, depMode, p) + a.Func = (*Builder).link + a.Deps = []*Action{a1} + a.Objdir = a1.Objdir + + // An executable file. (This is the name of a temporary file.) + // Because we run the temporary file in 'go run' and 'go test', + // the name will show up in ps listings. If the caller has specified + // a name, use that instead of a.out. The binary is generated + // in an otherwise empty subdirectory named exe to avoid + // naming conflicts. The only possible conflict is if we were + // to create a top-level package named exe. 
+ name := "a.out" + if p.Internal.ExeName != "" { + name = p.Internal.ExeName + } else if (cfg.Goos == "darwin" || cfg.Goos == "windows") && cfg.BuildBuildmode == "c-shared" && p.Target != "" { + // On OS X, the linker output name gets recorded in the + // shared library's LC_ID_DYLIB load command. + // The code invoking the linker knows to pass only the final + // path element. Arrange that the path element matches what + // we'll install it as; otherwise the library is only loadable as "a.out". + // On Windows, DLL file name is recorded in PE file + // export section, so do like on OS X. + _, name = filepath.Split(p.Target) + } + a.Target = a.Objdir + filepath.Join("exe", name) + cfg.ExeSuffix + a.built = a.Target + b.addTransitiveLinkDeps(a, a1, "") + + // Sequence the build of the main package (a1) strictly after the build + // of all other dependencies that go into the link. It is likely to be after + // them anyway, but just make sure. This is required by the build ID-based + // shortcut in (*Builder).useCache(a1), which will call b.linkActionID(a). + // In order for that linkActionID call to compute the right action ID, all the + // dependencies of a (except a1) must have completed building and have + // recorded their build IDs. + a1.Deps = append(a1.Deps, &Action{Mode: "nop", Deps: a.Deps[1:]}) + return a + }) + + if mode == ModeInstall || mode == ModeBuggyInstall { + a = b.installAction(a, mode) + } + + return a +} + +// installAction returns the action for installing the result of a1. +func (b *Builder) installAction(a1 *Action, mode BuildMode) *Action { + // Because we overwrite the build action with the install action below, + // a1 may already be an install action fetched from the "build" cache key, + // and the caller just doesn't realize. + if strings.HasSuffix(a1.Mode, "-install") { + if a1.buggyInstall && mode == ModeInstall { + // Congratulations! The buggy install is now a proper install. 
+ a1.buggyInstall = false + } + return a1 + } + + // If there's no actual action to build a1, + // there's nothing to install either. + // This happens if a1 corresponds to reusing an already-built object. + if a1.Func == nil { + return a1 + } + + p := a1.Package + return b.cacheAction(a1.Mode+"-install", p, func() *Action { + // The install deletes the temporary build result, + // so we need all other actions, both past and future, + // that attempt to depend on the build to depend instead + // on the install. + + // Make a private copy of a1 (the build action), + // no longer accessible to any other rules. + buildAction := new(Action) + *buildAction = *a1 + + // Overwrite a1 with the install action. + // This takes care of updating past actions that + // point at a1 for the build action; now they will + // point at a1 and get the install action. + // We also leave a1 in the action cache as the result + // for "build", so that actions not yet created that + // try to depend on the build will instead depend + // on the install. + *a1 = Action{ + Mode: buildAction.Mode + "-install", + Func: BuildInstallFunc, + Package: p, + Objdir: buildAction.Objdir, + Deps: []*Action{buildAction}, + Target: p.Target, + built: p.Target, + + buggyInstall: mode == ModeBuggyInstall, + } + + b.addInstallHeaderAction(a1) + return a1 + }) +} + +// addTransitiveLinkDeps adds to the link action a all packages +// that are transitive dependencies of a1.Deps. +// That is, if a is a link of package main, a1 is the compile of package main +// and a1.Deps is the actions for building packages directly imported by +// package main (what the compiler needs). The linker needs all packages +// transitively imported by the whole program; addTransitiveLinkDeps +// makes sure those are present in a.Deps. +// If shlib is non-empty, then a corresponds to the build and installation of shlib, +// so any rebuild of shlib should not be added as a dependency. 
+func (b *Builder) addTransitiveLinkDeps(a, a1 *Action, shlib string) { + // Expand Deps to include all built packages, for the linker. + // Use breadth-first search to find rebuilt-for-test packages + // before the standard ones. + // TODO(rsc): Eliminate the standard ones from the action graph, + // which will require doing a little bit more rebuilding. + workq := []*Action{a1} + haveDep := map[string]bool{} + if a1.Package != nil { + haveDep[a1.Package.ImportPath] = true + } + for i := 0; i < len(workq); i++ { + a1 := workq[i] + for _, a2 := range a1.Deps { + // TODO(rsc): Find a better discriminator than the Mode strings, once the dust settles. + if a2.Package == nil || (a2.Mode != "build-install" && a2.Mode != "build") || haveDep[a2.Package.ImportPath] { + continue + } + haveDep[a2.Package.ImportPath] = true + a.Deps = append(a.Deps, a2) + if a2.Mode == "build-install" { + a2 = a2.Deps[0] // walk children of "build" action + } + workq = append(workq, a2) + } + } + + // If this is go build -linkshared, then the link depends on the shared libraries + // in addition to the packages themselves. (The compile steps do not.) + if cfg.BuildLinkshared { + haveShlib := map[string]bool{shlib: true} + for _, a1 := range a.Deps { + p1 := a1.Package + if p1 == nil || p1.Shlib == "" || haveShlib[filepath.Base(p1.Shlib)] { + continue + } + haveShlib[filepath.Base(p1.Shlib)] = true + // TODO(rsc): The use of ModeInstall here is suspect, but if we only do ModeBuild, + // we'll end up building an overall library or executable that depends at runtime + // on other libraries that are out-of-date, which is clearly not good either. + // We call it ModeBuggyInstall to make clear that this is not right. + a.Deps = append(a.Deps, b.linkSharedAction(ModeBuggyInstall, ModeBuggyInstall, p1.Shlib, nil)) + } + } +} + +// addInstallHeaderAction adds an install header action to a, if needed. 
+// The action a should be an install action as generated by either
+// b.CompileAction or b.LinkAction with mode=ModeInstall,
+// and so a.Deps[0] is the corresponding build action.
+func (b *Builder) addInstallHeaderAction(a *Action) {
+	// Install header for cgo in c-archive and c-shared modes.
+	p := a.Package
+	if p.UsesCgo() && (cfg.BuildBuildmode == "c-archive" || cfg.BuildBuildmode == "c-shared") {
+		hdrTarget := a.Target[:len(a.Target)-len(filepath.Ext(a.Target))] + ".h"
+		if cfg.BuildContext.Compiler == "gccgo" {
+			// For the header file, remove the "lib"
+			// added by go/build, so we generate pkg.h
+			// rather than libpkg.h.
+			dir, file := filepath.Split(hdrTarget)
+			file = strings.TrimPrefix(file, "lib")
+			hdrTarget = filepath.Join(dir, file)
+		}
+		ah := &Action{
+			Mode:    "install header",
+			Package: a.Package,
+			Deps:    []*Action{a.Deps[0]},
+			Func:    (*Builder).installHeader,
+			Objdir:  a.Deps[0].Objdir,
+			Target:  hdrTarget,
+		}
+		a.Deps = append(a.Deps, ah)
+	}
+}
+
+// buildmodeShared takes the "go build" action a1 into the building of a shared library of a1.Deps.
+// That is, the input a1 represents "go build pkgs" and the result represents "go build -buildmode=shared pkgs".
+func (b *Builder) buildmodeShared(mode, depMode BuildMode, args []string, pkgs []*load.Package, a1 *Action) *Action {
+	name, err := libname(args, pkgs)
+	if err != nil {
+		base.Fatalf("%v", err)
+	}
+	return b.linkSharedAction(mode, depMode, name, a1)
+}
+
+// linkSharedAction takes a grouping action a1 corresponding to a list of built packages
+// and returns an action that links them together into a shared library with the name shlib.
+// If a1 is nil, shlib should be an absolute path to an existing shared library,
+// and then linkSharedAction reads that library to find out the package list.
+func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Action) *Action { + fullShlib := shlib + shlib = filepath.Base(shlib) + a := b.cacheAction("build-shlib "+shlib, nil, func() *Action { + if a1 == nil { + // TODO(rsc): Need to find some other place to store config, + // not in pkg directory. See golang.org/issue/22196. + pkgs := readpkglist(fullShlib) + a1 = &Action{ + Mode: "shlib packages", + } + for _, p := range pkgs { + a1.Deps = append(a1.Deps, b.CompileAction(mode, depMode, p)) + } + } + + // Fake package to hold ldflags. + // As usual shared libraries are a kludgy, abstraction-violating special case: + // we let them use the flags specified for the command-line arguments. + p := &load.Package{} + p.Internal.CmdlinePkg = true + p.Internal.Ldflags = load.BuildLdflags.For(p) + p.Internal.Gccgoflags = load.BuildGccgoflags.For(p) + + // Add implicit dependencies to pkgs list. + // Currently buildmode=shared forces external linking mode, and + // external linking mode forces an import of runtime/cgo (and + // math on arm). So if it was not passed on the command line and + // it is not present in another shared library, add it here. + // TODO(rsc): Maybe this should only happen if "runtime" is in the original package set. + // TODO(rsc): This should probably be changed to use load.LinkerDeps(p). + // TODO(rsc): Find out and explain here why gccgo is excluded. + // If the answer is that gccgo is different in implicit linker deps, maybe + // load.LinkerDeps should be used and updated. + // Link packages into a shared library. 
+ + a := &Action{ + Mode: "go build -buildmode=shared", + Package: p, + Objdir: b.NewObjdir(), + Func: (*Builder).linkShared, + Deps: []*Action{a1}, + } + a.Target = filepath.Join(a.Objdir, shlib) + if cfg.BuildToolchainName != "gccgo" { + add := func(a1 *Action, pkg string, force bool) { + for _, a2 := range a1.Deps { + if a2.Package != nil && a2.Package.ImportPath == pkg { + return + } + } + var stk load.ImportStack + p := load.LoadPackage(pkg, &stk) + if p.Error != nil { + base.Fatalf("load %s: %v", pkg, p.Error) + } + // Assume that if pkg (runtime/cgo or math) + // is already accounted for in a different shared library, + // then that shared library also contains runtime, + // so that anything we do will depend on that library, + // so we don't need to include pkg in our shared library. + if force || p.Shlib == "" || filepath.Base(p.Shlib) == pkg { + a1.Deps = append(a1.Deps, b.CompileAction(depMode, depMode, p)) + } + } + add(a1, "runtime/cgo", false) + if cfg.Goarch == "arm" { + add(a1, "math", false) + } + + // The linker step still needs all the usual linker deps. + // (For example, the linker always opens runtime.a.) + for _, dep := range load.LinkerDeps(nil) { + add(a, dep, true) + } + } + b.addTransitiveLinkDeps(a, a1, shlib) + return a + }) + + // Install result. + if (mode == ModeInstall || mode == ModeBuggyInstall) && a.Func != nil { + buildAction := a + + a = b.cacheAction("install-shlib "+shlib, nil, func() *Action { + // Determine the eventual install target. + // The install target is root/pkg/shlib, where root is the source root + // in which all the packages lie. + // TODO(rsc): Perhaps this cross-root check should apply to the full + // transitive package dependency list, not just the ones named + // on the command line? 
+ pkgDir := a1.Deps[0].Package.Internal.Build.PkgTargetRoot + for _, a2 := range a1.Deps { + if dir := a2.Package.Internal.Build.PkgTargetRoot; dir != pkgDir { + base.Fatalf("installing shared library: cannot use packages %s and %s from different roots %s and %s", + a1.Deps[0].Package.ImportPath, + a2.Package.ImportPath, + pkgDir, + dir) + } + } + // TODO(rsc): Find out and explain here why gccgo is different. + if cfg.BuildToolchainName == "gccgo" { + pkgDir = filepath.Join(pkgDir, "shlibs") + } + target := filepath.Join(pkgDir, shlib) + + a := &Action{ + Mode: "go install -buildmode=shared", + Objdir: buildAction.Objdir, + Func: BuildInstallFunc, + Deps: []*Action{buildAction}, + Target: target, + } + for _, a2 := range buildAction.Deps[0].Deps { + p := a2.Package + if p.Target == "" { + continue + } + a.Deps = append(a.Deps, &Action{ + Mode: "shlibname", + Package: p, + Func: (*Builder).installShlibname, + Target: strings.TrimSuffix(p.Target, ".a") + ".shlibname", + Deps: []*Action{a.Deps[0]}, + }) + } + return a + }) + } + + return a +} diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go index 7d667ff552e..57b7b008791 100644 --- a/src/cmd/go/internal/work/build.go +++ b/src/cmd/go/internal/work/build.go @@ -5,33 +5,19 @@ package work import ( - "bufio" - "bytes" - "container/heap" - "debug/elf" "errors" - "flag" "fmt" "go/build" - "io" - "io/ioutil" - "log" "os" "os/exec" "path" "path/filepath" - "regexp" "runtime" - "strconv" "strings" - "sync" - "time" "cmd/go/internal/base" - "cmd/go/internal/buildid" "cmd/go/internal/cfg" "cmd/go/internal/load" - "cmd/go/internal/str" ) var CmdBuild = &base.Command{ @@ -89,15 +75,15 @@ and test commands: -x print the commands. - -asmflags 'flag list' + -asmflags '[pattern=]arg list' arguments to pass on each go tool asm invocation. -buildmode mode build mode to use. See 'go help buildmode' for more. -compiler name name of compiler to use, as in runtime.Compiler (gccgo or gc). 
- -gccgoflags 'arg list' + -gccgoflags '[pattern=]arg list' arguments to pass on each gccgo compiler/linker invocation. - -gcflags 'arg list' + -gcflags '[pattern=]arg list' arguments to pass on each go tool compile invocation. -installsuffix suffix a suffix to use in the name of the package installation directory, @@ -106,7 +92,7 @@ and test commands: or, if set explicitly, has _race appended to it. Likewise for the -msan flag. Using a -buildmode option that requires non-default compile flags has a similar effect. - -ldflags 'flag list' + -ldflags '[pattern=]arg list' arguments to pass on each go tool link invocation. -linkshared link against shared libraries previously created with @@ -124,9 +110,21 @@ and test commands: For example, instead of running asm, the go command will run 'cmd args /path/to/asm '. -All the flags that take a list of arguments accept a space-separated -list of strings. To embed spaces in an element in the list, surround -it with either single or double quotes. +The -asmflags, -gccgoflags, -gcflags, and -ldflags flags accept a +space-separated list of arguments to pass to an underlying tool +during the build. To embed spaces in an element in the list, surround +it with either single or double quotes. The argument list may be +preceded by a package pattern and an equal sign, which restricts +the use of that argument list to the building of packages matching +that pattern (see 'go help packages' for a description of package +patterns). Without a pattern, the argument list applies only to the +packages named on the command line. The flags may be repeated +with different patterns in order to specify different arguments for +different sets of packages. If a package matches patterns given in +multiple flags, the latest match on the command line wins. +For example, 'go build -gcflags=-S fmt' prints the disassembly +only for package fmt, while 'go build -gcflags=all=-S fmt' +prints the disassembly for fmt and all its dependencies. 
For more about specifying packages, see 'go help packages'. For more about where packages and binaries are installed, @@ -154,6 +152,8 @@ func init() { CmdBuild.Flag.BoolVar(&cfg.BuildI, "i", false, "") CmdBuild.Flag.StringVar(&cfg.BuildO, "o", "", "output file") + CmdInstall.Flag.BoolVar(&cfg.BuildI, "i", false, "") + AddBuildFlags(CmdBuild) AddBuildFlags(CmdInstall) } @@ -161,9 +161,12 @@ func init() { // Note that flags consulted by other parts of the code // (for example, buildV) are in cmd/go/internal/cfg. -var buildAsmflags []string // -asmflags flag -var buildGcflags []string // -gcflags flag -var buildGccgoflags []string // -gccgoflags flag +var ( + forcedAsmflags []string // internally-forced flags for cmd/asm + forcedGcflags []string // internally-forced flags for cmd/compile + forcedLdflags []string // internally-forced flags for cmd/link + forcedGccgoflags []string // internally-forced flags for gccgo +) var BuildToolchain toolchain = noToolchain{} var ldBuildmode string @@ -209,13 +212,13 @@ func AddBuildFlags(cmd *base.Command) { cmd.Flag.BoolVar(&cfg.BuildV, "v", false, "") cmd.Flag.BoolVar(&cfg.BuildX, "x", false, "") - cmd.Flag.Var((*base.StringsFlag)(&buildAsmflags), "asmflags", "") + cmd.Flag.Var(&load.BuildAsmflags, "asmflags", "") cmd.Flag.Var(buildCompiler{}, "compiler", "") cmd.Flag.StringVar(&cfg.BuildBuildmode, "buildmode", "default", "") - cmd.Flag.Var((*base.StringsFlag)(&buildGcflags), "gcflags", "") - cmd.Flag.Var((*base.StringsFlag)(&buildGccgoflags), "gccgoflags", "") + cmd.Flag.Var(&load.BuildGcflags, "gcflags", "") + cmd.Flag.Var(&load.BuildGccgoflags, "gccgoflags", "") cmd.Flag.StringVar(&cfg.BuildContext.InstallSuffix, "installsuffix", "", "") - cmd.Flag.Var((*base.StringsFlag)(&cfg.BuildLdflags), "ldflags", "") + cmd.Flag.Var(&load.BuildLdflags, "ldflags", "") cmd.Flag.BoolVar(&cfg.BuildLinkshared, "linkshared", false, "") cmd.Flag.StringVar(&cfg.BuildPkgdir, "pkgdir", "", "") cmd.Flag.BoolVar(&cfg.BuildRace, "race", false, "") 
@@ -223,6 +226,10 @@ func AddBuildFlags(cmd *base.Command) { cmd.Flag.Var((*base.StringsFlag)(&cfg.BuildContext.BuildTags), "tags", "") cmd.Flag.Var((*base.StringsFlag)(&cfg.BuildToolexec), "toolexec", "") cmd.Flag.BoolVar(&cfg.BuildWork, "work", false, "") + + // Undocumented, unstable debugging flags. + cmd.Flag.StringVar(&cfg.DebugActiongraph, "debug-actiongraph", "", "") + cmd.Flag.Var(&load.DebugDeprecatedImportcfg, "debug-deprecated-importcfg", "") } // fileExtSplit expects a filename and returns the name @@ -264,148 +271,10 @@ func oneMainPkg(pkgs []*load.Package) []*load.Package { var pkgsFilter = func(pkgs []*load.Package) []*load.Package { return pkgs } -func BuildModeInit() { - gccgo := cfg.BuildToolchainName == "gccgo" - var codegenArg string - platform := cfg.Goos + "/" + cfg.Goarch - switch cfg.BuildBuildmode { - case "archive": - pkgsFilter = pkgsNotMain - case "c-archive": - pkgsFilter = oneMainPkg - switch platform { - case "darwin/arm", "darwin/arm64": - codegenArg = "-shared" - default: - switch cfg.Goos { - case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": - // Use -shared so that the result is - // suitable for inclusion in a PIE or - // shared library. 
- codegenArg = "-shared" - } - } - cfg.ExeSuffix = ".a" - ldBuildmode = "c-archive" - case "c-shared": - pkgsFilter = oneMainPkg - if gccgo { - codegenArg = "-fPIC" - } else { - switch platform { - case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", - "android/amd64", "android/arm", "android/arm64", "android/386": - codegenArg = "-shared" - case "darwin/amd64", "darwin/386": - default: - base.Fatalf("-buildmode=c-shared not supported on %s\n", platform) - } - } - ldBuildmode = "c-shared" - case "default": - switch platform { - case "android/arm", "android/arm64", "android/amd64", "android/386": - codegenArg = "-shared" - ldBuildmode = "pie" - case "darwin/arm", "darwin/arm64": - codegenArg = "-shared" - fallthrough - default: - ldBuildmode = "exe" - } - case "exe": - pkgsFilter = pkgsMain - ldBuildmode = "exe" - case "pie": - if cfg.BuildRace { - base.Fatalf("-buildmode=pie not supported when -race is enabled") - } - if gccgo { - base.Fatalf("-buildmode=pie not supported by gccgo") - } else { - switch platform { - case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x", - "android/amd64", "android/arm", "android/arm64", "android/386": - codegenArg = "-shared" - default: - base.Fatalf("-buildmode=pie not supported on %s\n", platform) - } - } - ldBuildmode = "pie" - case "shared": - pkgsFilter = pkgsNotMain - if gccgo { - codegenArg = "-fPIC" - } else { - switch platform { - case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x": - default: - base.Fatalf("-buildmode=shared not supported on %s\n", platform) - } - codegenArg = "-dynlink" - } - if cfg.BuildO != "" { - base.Fatalf("-buildmode=shared and -o not supported together") - } - ldBuildmode = "shared" - case "plugin": - pkgsFilter = oneMainPkg - if gccgo { - codegenArg = "-fPIC" - } else { - switch platform { - case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/s390x", - "android/amd64", "android/arm", 
"android/arm64", "android/386": - default: - base.Fatalf("-buildmode=plugin not supported on %s\n", platform) - } - codegenArg = "-dynlink" - } - cfg.ExeSuffix = ".so" - ldBuildmode = "plugin" - default: - base.Fatalf("buildmode=%s not supported", cfg.BuildBuildmode) - } - if cfg.BuildLinkshared { - if gccgo { - codegenArg = "-fPIC" - } else { - switch platform { - case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x": - buildAsmflags = append(buildAsmflags, "-D=GOBUILDMODE_shared=1") - default: - base.Fatalf("-linkshared not supported on %s\n", platform) - } - codegenArg = "-dynlink" - // TODO(mwhudson): remove -w when that gets fixed in linker. - cfg.BuildLdflags = append(cfg.BuildLdflags, "-linkshared", "-w") - } - } - if codegenArg != "" { - if gccgo { - buildGccgoflags = append([]string{codegenArg}, buildGccgoflags...) - } else { - buildAsmflags = append([]string{codegenArg}, buildAsmflags...) - buildGcflags = append([]string{codegenArg}, buildGcflags...) - } - // Don't alter InstallSuffix when modifying default codegen args. 
- if cfg.BuildBuildmode != "default" || cfg.BuildLinkshared { - if cfg.BuildContext.InstallSuffix != "" { - cfg.BuildContext.InstallSuffix += "_" - } - cfg.BuildContext.InstallSuffix += codegenArg[1:] - } - } - if strings.HasPrefix(runtimeVersion, "go1") && !strings.Contains(os.Args[0], "go_bootstrap") { - buildGcflags = append(buildGcflags, "-goversion", runtimeVersion) - } -} - var runtimeVersion = runtime.Version() func runBuild(cmd *base.Command, args []string) { - InstrumentInit() - BuildModeInit() + BuildInit() var b Builder b.Init() @@ -424,14 +293,14 @@ func runBuild(cmd *base.Command, args []string) { // sanity check some often mis-used options switch cfg.BuildContext.Compiler { case "gccgo": - if len(buildGcflags) != 0 { + if load.BuildGcflags.Present() { fmt.Println("go build: when using gccgo toolchain, please pass compiler flags using -gccgoflags, not -gcflags") } - if len(cfg.BuildLdflags) != 0 { + if load.BuildLdflags.Present() { fmt.Println("go build: when using gccgo toolchain, please pass linker flags using -gccgoflags, not -ldflags") } case "gc": - if len(buildGccgoflags) != 0 { + if load.BuildGccgoflags.Present() { fmt.Println("go build: when using gc toolchain, please pass compile flags using -gcflags, and linker flags using -ldflags") } } @@ -441,6 +310,8 @@ func runBuild(cmd *base.Command, args []string) { depMode = ModeInstall } + pkgs = pkgsFilter(load.Packages(args)) + if cfg.BuildO != "" { if len(pkgs) > 1 { base.Fatalf("go build: cannot use -o with multiple packages") @@ -448,38 +319,31 @@ func runBuild(cmd *base.Command, args []string) { base.Fatalf("no packages to build") } p := pkgs[0] - p.Internal.Target = cfg.BuildO + p.Target = cfg.BuildO p.Stale = true // must build - not up to date p.StaleReason = "build -o flag in use" - a := b.Action(ModeInstall, depMode, p) + a := b.AutoAction(ModeInstall, depMode, p) b.Do(a) return } - pkgs = pkgsFilter(load.Packages(args)) - - var a *Action + a := &Action{Mode: "go build"} + for _, p := 
range pkgs { + a.Deps = append(a.Deps, b.AutoAction(ModeBuild, depMode, p)) + } if cfg.BuildBuildmode == "shared" { - if libName, err := libname(args, pkgs); err != nil { - base.Fatalf("%s", err.Error()) - } else { - a = b.libaction(libName, pkgs, ModeBuild, depMode) - } - } else { - a = &Action{} - for _, p := range pkgs { - a.Deps = append(a.Deps, b.Action(ModeBuild, depMode, p)) - } + a = b.buildmodeShared(ModeBuild, depMode, args, pkgs, a) } b.Do(a) } var CmdInstall = &base.Command{ - UsageLine: "install [build flags] [packages]", + UsageLine: "install [-i] [build flags] [packages]", Short: "compile and install packages and dependencies", Long: ` -Install compiles and installs the packages named by the import paths, -along with their dependencies. +Install compiles and installs the packages named by the import paths. + +The -i flag installs the dependencies of the named packages as well. For more about the build flags, see 'go help build'. For more about specifying packages, see 'go help packages'. 
@@ -545,8 +409,7 @@ func libname(args []string, pkgs []*load.Package) (string, error) { } func runInstall(cmd *base.Command, args []string) { - InstrumentInit() - BuildModeInit() + BuildInit() InstallPackages(args, false) } @@ -561,14 +424,14 @@ func InstallPackages(args []string, forGet bool) { if p.Target == "" && (!p.Standard || p.ImportPath != "unsafe") { switch { case p.Internal.GobinSubdir: - base.Errorf("go install: cannot install cross-compiled binaries when GOBIN is set") - case p.Internal.Cmdline: - base.Errorf("go install: no install location for .go files listed on command line (GOBIN not set)") + base.Errorf("go %s: cannot install cross-compiled binaries when GOBIN is set", cfg.CmdName) + case p.Internal.CmdlineFiles: + base.Errorf("go %s: no install location for .go files listed on command line (GOBIN not set)", cfg.CmdName) case p.ConflictDir != "": - base.Errorf("go install: no install location for %s: hidden by %s", p.Dir, p.ConflictDir) + base.Errorf("go %s: no install location for %s: hidden by %s", cfg.CmdName, p.Dir, p.ConflictDir) default: - base.Errorf("go install: no install location for directory %s outside GOPATH\n"+ - "\tFor more details see: 'go help gopath'", p.Dir) + base.Errorf("go %s: no install location for directory %s outside GOPATH\n"+ + "\tFor more details see: 'go help gopath'", cfg.CmdName, p.Dir) } } } @@ -576,42 +439,46 @@ func InstallPackages(args []string, forGet bool) { var b Builder b.Init() - var a *Action - if cfg.BuildBuildmode == "shared" { - if libName, err := libname(args, pkgs); err != nil { - base.Fatalf("%s", err.Error()) - } else { - a = b.libaction(libName, pkgs, ModeInstall, ModeInstall) + depMode := ModeBuild + if cfg.BuildI { + depMode = ModeInstall + } + a := &Action{Mode: "go install"} + var tools []*Action + for _, p := range pkgs { + // During 'go get', don't attempt (and fail) to install packages with only tests. + // TODO(rsc): It's not clear why 'go get' should be different from 'go install' here. 
See #20760. + if forGet && len(p.GoFiles)+len(p.CgoFiles) == 0 && len(p.TestGoFiles)+len(p.XTestGoFiles) > 0 { + continue } - } else { - a = &Action{} - var tools []*Action - for _, p := range pkgs { - // During 'go get', don't attempt (and fail) to install packages with only tests. - // TODO(rsc): It's not clear why 'go get' should be different from 'go install' here. See #20760. - if forGet && len(p.GoFiles)+len(p.CgoFiles) == 0 && len(p.TestGoFiles)+len(p.XTestGoFiles) > 0 { - continue - } - // If p is a tool, delay the installation until the end of the build. - // This avoids installing assemblers/compilers that are being executed - // by other steps in the build. - // cmd/cgo is handled specially in b.Action, so that we can - // both build and use it in the same 'go install'. - Action := b.Action(ModeInstall, ModeInstall, p) - if load.GoTools[p.ImportPath] == load.ToTool && p.ImportPath != "cmd/cgo" { - a.Deps = append(a.Deps, Action.Deps...) - Action.Deps = append(Action.Deps, a) - tools = append(tools, Action) - continue - } - a.Deps = append(a.Deps, Action) + // If p is a tool, delay the installation until the end of the build. + // This avoids installing assemblers/compilers that are being executed + // by other steps in the build. + a1 := b.AutoAction(ModeInstall, depMode, p) + if load.InstallTargetDir(p) == load.ToTool { + a.Deps = append(a.Deps, a1.Deps...) + a1.Deps = append(a1.Deps, a) + tools = append(tools, a1) + continue } - if len(tools) > 0 { - a = &Action{ - Deps: tools, - } + a.Deps = append(a.Deps, a1) + } + if len(tools) > 0 { + a = &Action{ + Mode: "go install (tools)", + Deps: tools, } } + + if cfg.BuildBuildmode == "shared" { + // Note: If buildmode=shared then only non-main packages + // are present in the pkgs list, so all the special case code about + // tools above did not apply, and a is just a simple Action + // with a list of Deps, one per package named in pkgs, + // the same as in runBuild. 
+ a = b.buildmodeShared(ModeInstall, ModeInstall, args, pkgs, a) + } + b.Do(a) base.ExitIfErrors() @@ -643,3208 +510,6 @@ func InstallPackages(args []string, forGet bool) { } } -// A Builder holds global state about a build. -// It does not hold per-package state, because we -// build packages in parallel, and the builder is shared. -type Builder struct { - WorkDir string // the temporary work directory (ends in filepath.Separator) - actionCache map[cacheKey]*Action // a cache of already-constructed actions - mkdirCache map[string]bool // a cache of created directories - flagCache map[string]bool // a cache of supported compiler flags - Print func(args ...interface{}) (int, error) - - output sync.Mutex - scriptDir string // current directory in printed script - - exec sync.Mutex - readySema chan bool - ready actionQueue -} - -// NOTE: Much of Action would not need to be exported if not for test. -// Maybe test functionality should move into this package too? - -// An Action represents a single action in the action graph. -type Action struct { - Package *load.Package // the package this action works on - Deps []*Action // actions that must happen before this one - Func func(*Builder, *Action) error // the action itself (nil = no-op) - IgnoreFail bool // whether to run f even if dependencies fail - TestOutput *bytes.Buffer // test output buffer - Args []string // additional args for runProgram - - triggers []*Action // inverse of deps - cgo *Action // action for cgo binary if needed - - // Generated files, directories. - Link bool // target is executable, not just package - Pkgdir string // the -I or -L argument to use when importing this package - Objdir string // directory for intermediate objects - Objpkg string // the intermediate package .a file created during the action - Target string // goal of the action: the created package or executable - - // Execution state. 
- pending int // number of deps yet to complete - priority int // relative execution priority - Failed bool // whether the action failed -} - -// cacheKey is the key for the action cache. -type cacheKey struct { - mode BuildMode - p *load.Package - shlib string -} - -// BuildMode specifies the build mode: -// are we just building things or also installing the results? -type BuildMode int - -const ( - ModeBuild BuildMode = iota - ModeInstall -) - -func (b *Builder) Init() { - var err error - b.Print = func(a ...interface{}) (int, error) { - return fmt.Fprint(os.Stderr, a...) - } - b.actionCache = make(map[cacheKey]*Action) - b.mkdirCache = make(map[string]bool) - - if cfg.BuildN { - b.WorkDir = "$WORK" - } else { - b.WorkDir, err = ioutil.TempDir("", "go-build") - if err != nil { - base.Fatalf("%s", err) - } - if cfg.BuildX || cfg.BuildWork { - fmt.Fprintf(os.Stderr, "WORK=%s\n", b.WorkDir) - } - if !cfg.BuildWork { - workdir := b.WorkDir - base.AtExit(func() { os.RemoveAll(workdir) }) - } - } -} - -// readpkglist returns the list of packages that were built into the shared library -// at shlibpath. For the native toolchain this list is stored, newline separated, in -// an ELF note with name "Go\x00\x00" and type 1. For GCCGO it is extracted from the -// .go_export section. 
-func readpkglist(shlibpath string) (pkgs []*load.Package) { - var stk load.ImportStack - if cfg.BuildToolchainName == "gccgo" { - f, _ := elf.Open(shlibpath) - sect := f.Section(".go_export") - data, _ := sect.Data() - scanner := bufio.NewScanner(bytes.NewBuffer(data)) - for scanner.Scan() { - t := scanner.Text() - if strings.HasPrefix(t, "pkgpath ") { - t = strings.TrimPrefix(t, "pkgpath ") - t = strings.TrimSuffix(t, ";") - pkgs = append(pkgs, load.LoadPackage(t, &stk)) - } - } - } else { - pkglistbytes, err := buildid.ReadELFNote(shlibpath, "Go\x00\x00", 1) - if err != nil { - base.Fatalf("readELFNote failed: %v", err) - } - scanner := bufio.NewScanner(bytes.NewBuffer(pkglistbytes)) - for scanner.Scan() { - t := scanner.Text() - pkgs = append(pkgs, load.LoadPackage(t, &stk)) - } - } - return -} - -// Action returns the action for applying the given operation (mode) to the package. -// depMode is the action to use when building dependencies. -// action never looks for p in a shared library, but may find p's dependencies in a -// shared library if buildLinkshared is true. -func (b *Builder) Action(mode BuildMode, depMode BuildMode, p *load.Package) *Action { - return b.action1(mode, depMode, p, false, "") -} - -// action1 returns the action for applying the given operation (mode) to the package. -// depMode is the action to use when building dependencies. -// action1 will look for p in a shared library if lookshared is true. -// forShlib is the shared library that p will become part of, if any. 
-func (b *Builder) action1(mode BuildMode, depMode BuildMode, p *load.Package, lookshared bool, forShlib string) *Action { - shlib := "" - if lookshared { - shlib = p.Shlib - } - key := cacheKey{mode, p, shlib} - - a := b.actionCache[key] - if a != nil { - return a - } - if shlib != "" { - key2 := cacheKey{ModeInstall, nil, shlib} - a = b.actionCache[key2] - if a != nil { - b.actionCache[key] = a - return a - } - pkgs := readpkglist(shlib) - a = b.libaction(filepath.Base(shlib), pkgs, ModeInstall, depMode) - b.actionCache[key2] = a - b.actionCache[key] = a - return a - } - - a = &Action{Package: p, Pkgdir: p.Internal.Build.PkgRoot} - if p.Internal.Pkgdir != "" { // overrides p.t - a.Pkgdir = p.Internal.Pkgdir - } - b.actionCache[key] = a - - for _, p1 := range p.Internal.Imports { - if forShlib != "" { - // p is part of a shared library. - if p1.Shlib != "" && p1.Shlib != forShlib { - // p1 is explicitly part of a different shared library. - // Put the action for that shared library into a.Deps. - a.Deps = append(a.Deps, b.action1(depMode, depMode, p1, true, p1.Shlib)) - } else { - // p1 is (implicitly or not) part of this shared library. - // Put the action for p1 into a.Deps. - a.Deps = append(a.Deps, b.action1(depMode, depMode, p1, false, forShlib)) - } - } else { - // p is not part of a shared library. - // If p1 is in a shared library, put the action for that into - // a.Deps, otherwise put the action for p1 into a.Deps. - a.Deps = append(a.Deps, b.action1(depMode, depMode, p1, cfg.BuildLinkshared, p1.Shlib)) - } - } - - // If we are not doing a cross-build, then record the binary we'll - // generate for cgo as a dependency of the build of any package - // using cgo, to make sure we do not overwrite the binary while - // a package is using it. If this is a cross-build, then the cgo we - // are writing is not the cgo we need to use. 
- if cfg.Goos == runtime.GOOS && cfg.Goarch == runtime.GOARCH && !cfg.BuildRace && !cfg.BuildMSan { - if (len(p.CgoFiles) > 0 || p.Standard && p.ImportPath == "runtime/cgo") && !cfg.BuildLinkshared && cfg.BuildBuildmode != "shared" { - var stk load.ImportStack - p1 := load.LoadPackage("cmd/cgo", &stk) - if p1.Error != nil { - base.Fatalf("load cmd/cgo: %v", p1.Error) - } - a.cgo = b.Action(depMode, depMode, p1) - a.Deps = append(a.Deps, a.cgo) - } - } - - if p.Standard { - switch p.ImportPath { - case "builtin", "unsafe": - // Fake packages - nothing to build. - return a - } - // gccgo standard library is "fake" too. - if cfg.BuildToolchainName == "gccgo" { - // the target name is needed for cgo. - a.Target = p.Internal.Target - return a - } - } - - if !p.Stale && p.Internal.Target != "" { - // p.Stale==false implies that p.Internal.Target is up-to-date. - // Record target name for use by actions depending on this one. - a.Target = p.Internal.Target - return a - } - - if p.Internal.Local && p.Internal.Target == "" { - // Imported via local path. No permanent target. - mode = ModeBuild - } - work := p.Internal.Pkgdir - if work == "" { - work = b.WorkDir - } - a.Objdir = filepath.Join(work, a.Package.ImportPath, "_obj") + string(filepath.Separator) - a.Objpkg = BuildToolchain.Pkgpath(work, a.Package) - a.Link = p.Name == "main" - - switch mode { - case ModeInstall: - a.Func = BuildInstallFunc - a.Deps = []*Action{b.action1(ModeBuild, depMode, p, lookshared, forShlib)} - a.Target = a.Package.Internal.Target - - // Install header for cgo in c-archive and c-shared modes. - if p.UsesCgo() && (cfg.BuildBuildmode == "c-archive" || cfg.BuildBuildmode == "c-shared") { - hdrTarget := a.Target[:len(a.Target)-len(filepath.Ext(a.Target))] + ".h" - if cfg.BuildContext.Compiler == "gccgo" { - // For the header file, remove the "lib" - // added by go/build, so we generate pkg.h - // rather than libpkg.h. 
- dir, file := filepath.Split(hdrTarget) - file = strings.TrimPrefix(file, "lib") - hdrTarget = filepath.Join(dir, file) - } - ah := &Action{ - Package: a.Package, - Deps: []*Action{a.Deps[0]}, - Func: (*Builder).installHeader, - Pkgdir: a.Pkgdir, - Objdir: a.Objdir, - Target: hdrTarget, - } - a.Deps = append(a.Deps, ah) - } - - case ModeBuild: - a.Func = (*Builder).build - a.Target = a.Objpkg - if a.Link { - // An executable file. (This is the name of a temporary file.) - // Because we run the temporary file in 'go run' and 'go test', - // the name will show up in ps listings. If the caller has specified - // a name, use that instead of a.out. The binary is generated - // in an otherwise empty subdirectory named exe to avoid - // naming conflicts. The only possible conflict is if we were - // to create a top-level package named exe. - name := "a.out" - if p.Internal.ExeName != "" { - name = p.Internal.ExeName - } else if cfg.Goos == "darwin" && cfg.BuildBuildmode == "c-shared" && p.Internal.Target != "" { - // On OS X, the linker output name gets recorded in the - // shared library's LC_ID_DYLIB load command. - // The code invoking the linker knows to pass only the final - // path element. Arrange that the path element matches what - // we'll install it as; otherwise the library is only loadable as "a.out". 
- _, name = filepath.Split(p.Internal.Target) - } - a.Target = a.Objdir + filepath.Join("exe", name) + cfg.ExeSuffix - } - } - - return a -} - -func (b *Builder) libaction(libname string, pkgs []*load.Package, mode, depMode BuildMode) *Action { - a := &Action{} - switch mode { - default: - base.Fatalf("unrecognized mode %v", mode) - - case ModeBuild: - a.Func = (*Builder).linkShared - a.Target = filepath.Join(b.WorkDir, libname) - for _, p := range pkgs { - if p.Internal.Target == "" { - continue - } - a.Deps = append(a.Deps, b.Action(depMode, depMode, p)) - } - - case ModeInstall: - // Currently build mode shared forces external linking mode, and - // external linking mode forces an import of runtime/cgo (and - // math on arm). So if it was not passed on the command line and - // it is not present in another shared library, add it here. - gccgo := cfg.BuildToolchainName == "gccgo" - if !gccgo { - seencgo := false - for _, p := range pkgs { - seencgo = seencgo || (p.Standard && p.ImportPath == "runtime/cgo") - } - if !seencgo { - var stk load.ImportStack - p := load.LoadPackage("runtime/cgo", &stk) - if p.Error != nil { - base.Fatalf("load runtime/cgo: %v", p.Error) - } - load.ComputeStale(p) - // If runtime/cgo is in another shared library, then that's - // also the shared library that contains runtime, so - // something will depend on it and so runtime/cgo's staleness - // will be checked when processing that library. - if p.Shlib == "" || p.Shlib == libname { - pkgs = append([]*load.Package{}, pkgs...) 
- pkgs = append(pkgs, p) - } - } - if cfg.Goarch == "arm" { - seenmath := false - for _, p := range pkgs { - seenmath = seenmath || (p.Standard && p.ImportPath == "math") - } - if !seenmath { - var stk load.ImportStack - p := load.LoadPackage("math", &stk) - if p.Error != nil { - base.Fatalf("load math: %v", p.Error) - } - load.ComputeStale(p) - // If math is in another shared library, then that's - // also the shared library that contains runtime, so - // something will depend on it and so math's staleness - // will be checked when processing that library. - if p.Shlib == "" || p.Shlib == libname { - pkgs = append([]*load.Package{}, pkgs...) - pkgs = append(pkgs, p) - } - } - } - } - - // Figure out where the library will go. - var libdir string - for _, p := range pkgs { - plibdir := p.Internal.Build.PkgTargetRoot - if gccgo { - plibdir = filepath.Join(plibdir, "shlibs") - } - if libdir == "" { - libdir = plibdir - } else if libdir != plibdir { - base.Fatalf("multiple roots %s & %s", libdir, plibdir) - } - } - a.Target = filepath.Join(libdir, libname) - - // Now we can check whether we need to rebuild it. 
- stale := false - var built time.Time - if fi, err := os.Stat(a.Target); err == nil { - built = fi.ModTime() - } - for _, p := range pkgs { - if p.Internal.Target == "" { - continue - } - stale = stale || p.Stale - lstat, err := os.Stat(p.Internal.Target) - if err != nil || lstat.ModTime().After(built) { - stale = true - } - a.Deps = append(a.Deps, b.action1(depMode, depMode, p, false, a.Target)) - } - - if stale { - a.Func = BuildInstallFunc - buildAction := b.libaction(libname, pkgs, ModeBuild, depMode) - a.Deps = []*Action{buildAction} - for _, p := range pkgs { - if p.Internal.Target == "" { - continue - } - shlibnameaction := &Action{} - shlibnameaction.Func = (*Builder).installShlibname - shlibnameaction.Target = p.Internal.Target[:len(p.Internal.Target)-2] + ".shlibname" - a.Deps = append(a.Deps, shlibnameaction) - shlibnameaction.Deps = append(shlibnameaction.Deps, buildAction) - } - } - } - return a -} - -// ActionList returns the list of actions in the dag rooted at root -// as visited in a depth-first post-order traversal. -func ActionList(root *Action) []*Action { - seen := map[*Action]bool{} - all := []*Action{} - var walk func(*Action) - walk = func(a *Action) { - if seen[a] { - return - } - seen[a] = true - for _, a1 := range a.Deps { - walk(a1) - } - all = append(all, a) - } - walk(root) - return all -} - -// allArchiveActions returns a list of the archive dependencies of root. -// This is needed because if package p depends on package q that is in libr.so, the -// action graph looks like p->libr.so->q and so just scanning through p's -// dependencies does not find the import dir for q. 
-func allArchiveActions(root *Action) []*Action { - seen := map[*Action]bool{} - r := []*Action{} - var walk func(*Action) - walk = func(a *Action) { - if seen[a] { - return - } - seen[a] = true - if strings.HasSuffix(a.Target, ".so") || a == root { - for _, a1 := range a.Deps { - walk(a1) - } - } else if strings.HasSuffix(a.Target, ".a") { - r = append(r, a) - } - } - walk(root) - return r -} - -// do runs the action graph rooted at root. -func (b *Builder) Do(root *Action) { - if _, ok := cfg.OSArchSupportsCgo[cfg.Goos+"/"+cfg.Goarch]; !ok && cfg.BuildContext.Compiler == "gc" { - fmt.Fprintf(os.Stderr, "cmd/go: unsupported GOOS/GOARCH pair %s/%s\n", cfg.Goos, cfg.Goarch) - os.Exit(2) - } - for _, tag := range cfg.BuildContext.BuildTags { - if strings.Contains(tag, ",") { - fmt.Fprintf(os.Stderr, "cmd/go: -tags space-separated list contains comma\n") - os.Exit(2) - } - } - - // Build list of all actions, assigning depth-first post-order priority. - // The original implementation here was a true queue - // (using a channel) but it had the effect of getting - // distracted by low-level leaf actions to the detriment - // of completing higher-level actions. The order of - // work does not matter much to overall execution time, - // but when running "go test std" it is nice to see each test - // results as soon as possible. The priorities assigned - // ensure that, all else being equal, the execution prefers - // to do what it would have done first in a simple depth-first - // dependency order traversal. - all := ActionList(root) - for i, a := range all { - a.priority = i - } - - b.readySema = make(chan bool, len(all)) - - // Initialize per-action execution state. - for _, a := range all { - for _, a1 := range a.Deps { - a1.triggers = append(a1.triggers, a) - } - a.pending = len(a.Deps) - if a.pending == 0 { - b.ready.push(a) - b.readySema <- true - } - } - - // Handle runs a single action and takes care of triggering - // any actions that are runnable as a result. 
- handle := func(a *Action) { - var err error - if a.Func != nil && (!a.Failed || a.IgnoreFail) { - err = a.Func(b, a) - } - - // The actions run in parallel but all the updates to the - // shared work state are serialized through b.exec. - b.exec.Lock() - defer b.exec.Unlock() - - if err != nil { - if err == errPrintedOutput { - base.SetExitStatus(2) - } else { - base.Errorf("%s", err) - } - a.Failed = true - } - - for _, a0 := range a.triggers { - if a.Failed { - a0.Failed = true - } - if a0.pending--; a0.pending == 0 { - b.ready.push(a0) - b.readySema <- true - } - } - - if a == root { - close(b.readySema) - } - } - - var wg sync.WaitGroup - - // Kick off goroutines according to parallelism. - // If we are using the -n flag (just printing commands) - // drop the parallelism to 1, both to make the output - // deterministic and because there is no real work anyway. - par := cfg.BuildP - if cfg.BuildN { - par = 1 - } - for i := 0; i < par; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case _, ok := <-b.readySema: - if !ok { - return - } - // Receiving a value from b.readySema entitles - // us to take from the ready queue. - b.exec.Lock() - a := b.ready.pop() - b.exec.Unlock() - handle(a) - case <-base.Interrupted: - base.SetExitStatus(1) - return - } - } - }() - } - - wg.Wait() -} - -// build is the action for building a single package or command. -func (b *Builder) build(a *Action) (err error) { - // Return an error for binary-only package. - // We only reach this if isStale believes the binary form is - // either not present or not usable. - if a.Package.BinaryOnly { - return fmt.Errorf("missing or invalid package binary for binary-only package %s", a.Package.ImportPath) - } - - // Return an error if the package has CXX files but it's not using - // cgo nor SWIG, since the CXX files can only be processed by cgo - // and SWIG. 
- if len(a.Package.CXXFiles) > 0 && !a.Package.UsesCgo() && !a.Package.UsesSwig() { - return fmt.Errorf("can't build package %s because it contains C++ files (%s) but it's not using cgo nor SWIG", - a.Package.ImportPath, strings.Join(a.Package.CXXFiles, ",")) - } - // Same as above for Objective-C files - if len(a.Package.MFiles) > 0 && !a.Package.UsesCgo() && !a.Package.UsesSwig() { - return fmt.Errorf("can't build package %s because it contains Objective-C files (%s) but it's not using cgo nor SWIG", - a.Package.ImportPath, strings.Join(a.Package.MFiles, ",")) - } - // Same as above for Fortran files - if len(a.Package.FFiles) > 0 && !a.Package.UsesCgo() && !a.Package.UsesSwig() { - return fmt.Errorf("can't build package %s because it contains Fortran files (%s) but it's not using cgo nor SWIG", - a.Package.ImportPath, strings.Join(a.Package.FFiles, ",")) - } - - defer func() { - if err != nil && err != errPrintedOutput { - err = fmt.Errorf("go build %s: %v", a.Package.ImportPath, err) - } - }() - if cfg.BuildN { - // In -n mode, print a banner between packages. - // The banner is five lines so that when changes to - // different sections of the bootstrap script have to - // be merged, the banners give patch something - // to use to find its context. - b.Print("\n#\n# " + a.Package.ImportPath + "\n#\n\n") - } - - if cfg.BuildV { - b.Print(a.Package.ImportPath + "\n") - } - - // Make build directory. - obj := a.Objdir - if err := b.Mkdir(obj); err != nil { - return err - } - - // make target directory - dir, _ := filepath.Split(a.Target) - if dir != "" { - if err := b.Mkdir(dir); err != nil { - return err - } - } - - var gofiles, cgofiles, objdirCgofiles, cfiles, sfiles, cxxfiles, objects, cgoObjects, pcCFLAGS, pcLDFLAGS []string - - gofiles = append(gofiles, a.Package.GoFiles...) - cgofiles = append(cgofiles, a.Package.CgoFiles...) - cfiles = append(cfiles, a.Package.CFiles...) - sfiles = append(sfiles, a.Package.SFiles...) 
- cxxfiles = append(cxxfiles, a.Package.CXXFiles...) - - if a.Package.UsesCgo() || a.Package.UsesSwig() { - if pcCFLAGS, pcLDFLAGS, err = b.getPkgConfigFlags(a.Package); err != nil { - return - } - } - - // Run SWIG on each .swig and .swigcxx file. - // Each run will generate two files, a .go file and a .c or .cxx file. - // The .go file will use import "C" and is to be processed by cgo. - if a.Package.UsesSwig() { - outGo, outC, outCXX, err := b.swig(a.Package, obj, pcCFLAGS) - if err != nil { - return err - } - objdirCgofiles = append(objdirCgofiles, outGo...) - cfiles = append(cfiles, outC...) - cxxfiles = append(cxxfiles, outCXX...) - } - - // Run cgo. - if a.Package.UsesCgo() || a.Package.UsesSwig() { - // In a package using cgo, cgo compiles the C, C++ and assembly files with gcc. - // There is one exception: runtime/cgo's job is to bridge the - // cgo and non-cgo worlds, so it necessarily has files in both. - // In that case gcc only gets the gcc_* files. - var gccfiles []string - gccfiles = append(gccfiles, cfiles...) - cfiles = nil - if a.Package.Standard && a.Package.ImportPath == "runtime/cgo" { - filter := func(files, nongcc, gcc []string) ([]string, []string) { - for _, f := range files { - if strings.HasPrefix(f, "gcc_") { - gcc = append(gcc, f) - } else { - nongcc = append(nongcc, f) - } - } - return nongcc, gcc - } - sfiles, gccfiles = filter(sfiles, sfiles[:0], gccfiles) - } else { - for _, sfile := range sfiles { - data, err := ioutil.ReadFile(filepath.Join(a.Package.Dir, sfile)) - if err == nil { - if bytes.HasPrefix(data, []byte("TEXT")) || bytes.Contains(data, []byte("\nTEXT")) || - bytes.HasPrefix(data, []byte("DATA")) || bytes.Contains(data, []byte("\nDATA")) || - bytes.HasPrefix(data, []byte("GLOBL")) || bytes.Contains(data, []byte("\nGLOBL")) { - return fmt.Errorf("package using cgo has Go assembly file %s", sfile) - } - } - } - gccfiles = append(gccfiles, sfiles...) 
- sfiles = nil - } - - var cgoExe string - if a.cgo != nil && a.cgo.Target != "" { - cgoExe = a.cgo.Target - } else { - cgoExe = base.Tool("cgo") - } - outGo, outObj, err := b.cgo(a, cgoExe, obj, pcCFLAGS, pcLDFLAGS, cgofiles, objdirCgofiles, gccfiles, cxxfiles, a.Package.MFiles, a.Package.FFiles) - if err != nil { - return err - } - if cfg.BuildToolchainName == "gccgo" { - cgoObjects = append(cgoObjects, filepath.Join(a.Objdir, "_cgo_flags")) - } - cgoObjects = append(cgoObjects, outObj...) - gofiles = append(gofiles, outGo...) - } - - if len(gofiles) == 0 { - return &load.NoGoError{Package: a.Package} - } - - // If we're doing coverage, preprocess the .go files and put them in the work directory - if a.Package.Internal.CoverMode != "" { - for i, file := range gofiles { - var sourceFile string - var coverFile string - var key string - if strings.HasSuffix(file, ".cgo1.go") { - // cgo files have absolute paths - base := filepath.Base(file) - sourceFile = file - coverFile = filepath.Join(obj, base) - key = strings.TrimSuffix(base, ".cgo1.go") + ".go" - } else { - sourceFile = filepath.Join(a.Package.Dir, file) - coverFile = filepath.Join(obj, file) - key = file - } - cover := a.Package.Internal.CoverVars[key] - if cover == nil || base.IsTestFile(file) { - // Not covering this file. - continue - } - if err := b.cover(a, coverFile, sourceFile, 0666, cover.Var); err != nil { - return err - } - gofiles[i] = coverFile - } - } - - // Prepare Go import path list. - inc := b.includeArgs("-I", allArchiveActions(a)) - - // Compile Go. - ofile, out, err := BuildToolchain.gc(b, a.Package, a.Objpkg, obj, len(sfiles) > 0, inc, gofiles) - if len(out) > 0 { - b.showOutput(a.Package.Dir, a.Package.ImportPath, b.processOutput(out)) - if err != nil { - return errPrintedOutput - } - } - if err != nil { - return err - } - if ofile != a.Objpkg { - objects = append(objects, ofile) - } - - // Copy .h files named for goos or goarch or goos_goarch - // to names using GOOS and GOARCH. 
- // For example, defs_linux_amd64.h becomes defs_GOOS_GOARCH.h. - _goos_goarch := "_" + cfg.Goos + "_" + cfg.Goarch - _goos := "_" + cfg.Goos - _goarch := "_" + cfg.Goarch - for _, file := range a.Package.HFiles { - name, ext := fileExtSplit(file) - switch { - case strings.HasSuffix(name, _goos_goarch): - targ := file[:len(name)-len(_goos_goarch)] + "_GOOS_GOARCH." + ext - if err := b.copyFile(a, obj+targ, filepath.Join(a.Package.Dir, file), 0666, true); err != nil { - return err - } - case strings.HasSuffix(name, _goarch): - targ := file[:len(name)-len(_goarch)] + "_GOARCH." + ext - if err := b.copyFile(a, obj+targ, filepath.Join(a.Package.Dir, file), 0666, true); err != nil { - return err - } - case strings.HasSuffix(name, _goos): - targ := file[:len(name)-len(_goos)] + "_GOOS." + ext - if err := b.copyFile(a, obj+targ, filepath.Join(a.Package.Dir, file), 0666, true); err != nil { - return err - } - } - } - - for _, file := range cfiles { - out := file[:len(file)-len(".c")] + ".o" - if err := BuildToolchain.cc(b, a.Package, obj, obj+out, file); err != nil { - return err - } - objects = append(objects, out) - } - - // Assemble .s files. - if len(sfiles) > 0 { - ofiles, err := BuildToolchain.asm(b, a.Package, obj, sfiles) - if err != nil { - return err - } - objects = append(objects, ofiles...) - } - - // NOTE(rsc): On Windows, it is critically important that the - // gcc-compiled objects (cgoObjects) be listed after the ordinary - // objects in the archive. I do not know why this is. - // https://golang.org/issue/2601 - objects = append(objects, cgoObjects...) - - // Add system object files. - for _, syso := range a.Package.SysoFiles { - objects = append(objects, filepath.Join(a.Package.Dir, syso)) - } - - // Pack into archive in obj directory. - // If the Go compiler wrote an archive, we only need to add the - // object files for non-Go sources to the archive. 
- // If the Go compiler wrote an archive and the package is entirely - // Go sources, there is no pack to execute at all. - if len(objects) > 0 { - if err := BuildToolchain.pack(b, a.Package, obj, a.Objpkg, objects); err != nil { - return err - } - } - - // Link if needed. - if a.Link { - // The compiler only cares about direct imports, but the - // linker needs the whole dependency tree. - all := ActionList(a) - all = all[:len(all)-1] // drop a - if err := BuildToolchain.ld(b, a, a.Target, all, a.Objpkg, objects); err != nil { - return err - } - } - - return nil -} - -// PkgconfigCmd returns a pkg-config binary name -// defaultPkgConfig is defined in zdefaultcc.go, written by cmd/dist. -func (b *Builder) PkgconfigCmd() string { - return envList("PKG_CONFIG", cfg.DefaultPkgConfig)[0] -} - -// splitPkgConfigOutput parses the pkg-config output into a slice of -// flags. pkg-config always uses \ to escape special characters. -func splitPkgConfigOutput(out []byte) []string { - if len(out) == 0 { - return nil - } - var flags []string - flag := make([]byte, len(out)) - r, w := 0, 0 - for r < len(out) { - switch out[r] { - case ' ', '\t', '\r', '\n': - if w > 0 { - flags = append(flags, string(flag[:w])) - } - w = 0 - case '\\': - r++ - fallthrough - default: - if r < len(out) { - flag[w] = out[r] - w++ - } - } - r++ - } - if w > 0 { - flags = append(flags, string(flag[:w])) - } - return flags -} - -// Calls pkg-config if needed and returns the cflags/ldflags needed to build the package. 
-func (b *Builder) getPkgConfigFlags(p *load.Package) (cflags, ldflags []string, err error) { - if pkgs := p.CgoPkgConfig; len(pkgs) > 0 { - var out []byte - out, err = b.runOut(p.Dir, p.ImportPath, nil, b.PkgconfigCmd(), "--cflags", pkgs) - if err != nil { - b.showOutput(p.Dir, b.PkgconfigCmd()+" --cflags "+strings.Join(pkgs, " "), string(out)) - b.Print(err.Error() + "\n") - err = errPrintedOutput - return - } - if len(out) > 0 { - cflags = splitPkgConfigOutput(out) - } - out, err = b.runOut(p.Dir, p.ImportPath, nil, b.PkgconfigCmd(), "--libs", pkgs) - if err != nil { - b.showOutput(p.Dir, b.PkgconfigCmd()+" --libs "+strings.Join(pkgs, " "), string(out)) - b.Print(err.Error() + "\n") - err = errPrintedOutput - return - } - if len(out) > 0 { - ldflags = strings.Fields(string(out)) - } - } - return -} - -func (b *Builder) installShlibname(a *Action) error { - a1 := a.Deps[0] - err := ioutil.WriteFile(a.Target, []byte(filepath.Base(a1.Target)+"\n"), 0666) - if err != nil { - return err - } - if cfg.BuildX { - b.Showcmd("", "echo '%s' > %s # internal", filepath.Base(a1.Target), a.Target) - } - return nil -} - -func (b *Builder) linkShared(a *Action) (err error) { - allactions := ActionList(a) - allactions = allactions[:len(allactions)-1] - return BuildToolchain.ldShared(b, a.Deps, a.Target, allactions) -} - -// BuildInstallFunc is the action for installing a single package or executable. -func BuildInstallFunc(b *Builder, a *Action) (err error) { - defer func() { - if err != nil && err != errPrintedOutput { - err = fmt.Errorf("go install %s: %v", a.Package.ImportPath, err) - } - }() - a1 := a.Deps[0] - perm := os.FileMode(0666) - if a1.Link { - switch cfg.BuildBuildmode { - case "c-archive", "c-shared", "plugin": - default: - perm = 0777 - } - } - - // make target directory - dir, _ := filepath.Split(a.Target) - if dir != "" { - if err := b.Mkdir(dir); err != nil { - return err - } - } - - // remove object dir to keep the amount of - // garbage down in a large build. 
On an operating system - // with aggressive buffering, cleaning incrementally like - // this keeps the intermediate objects from hitting the disk. - if !cfg.BuildWork { - defer os.RemoveAll(a1.Objdir) - defer os.Remove(a1.Target) - } - - return b.moveOrCopyFile(a, a.Target, a1.Target, perm, false) -} - -// includeArgs returns the -I or -L directory list for access -// to the results of the list of actions. -func (b *Builder) includeArgs(flag string, all []*Action) []string { - inc := []string{} - incMap := map[string]bool{ - b.WorkDir: true, // handled later - cfg.GOROOTpkg: true, - "": true, // ignore empty strings - } - - // Look in the temporary space for results of test-specific actions. - // This is the $WORK/my/package/_test directory for the - // package being built, so there are few of these. - for _, a1 := range all { - if a1.Package == nil { - continue - } - if dir := a1.Pkgdir; dir != a1.Package.Internal.Build.PkgRoot && !incMap[dir] { - incMap[dir] = true - inc = append(inc, flag, dir) - } - } - - // Also look in $WORK for any non-test packages that have - // been built but not installed. - inc = append(inc, flag, b.WorkDir) - - // Finally, look in the installed package directories for each action. - // First add the package dirs corresponding to GOPATH entries - // in the original GOPATH order. - need := map[string]*build.Package{} - for _, a1 := range all { - if a1.Package != nil && a1.Pkgdir == a1.Package.Internal.Build.PkgRoot { - need[a1.Package.Internal.Build.Root] = a1.Package.Internal.Build - } - } - for _, root := range cfg.Gopath { - if p := need[root]; p != nil && !incMap[p.PkgRoot] { - incMap[p.PkgRoot] = true - inc = append(inc, flag, p.PkgTargetRoot) - } - } - - // Then add anything that's left. 
- for _, a1 := range all { - if a1.Package == nil { - continue - } - if dir := a1.Pkgdir; dir == a1.Package.Internal.Build.PkgRoot && !incMap[dir] { - incMap[dir] = true - inc = append(inc, flag, a1.Package.Internal.Build.PkgTargetRoot) - } - } - - return inc -} - -// moveOrCopyFile is like 'mv src dst' or 'cp src dst'. -func (b *Builder) moveOrCopyFile(a *Action, dst, src string, perm os.FileMode, force bool) error { - if cfg.BuildN { - b.Showcmd("", "mv %s %s", src, dst) - return nil - } - - // If we can update the mode and rename to the dst, do it. - // Otherwise fall back to standard copy. - - // If the destination directory has the group sticky bit set, - // we have to copy the file to retain the correct permissions. - // https://golang.org/issue/18878 - if fi, err := os.Stat(filepath.Dir(dst)); err == nil { - if fi.IsDir() && (fi.Mode()&os.ModeSetgid) != 0 { - return b.copyFile(a, dst, src, perm, force) - } - } - - // The perm argument is meant to be adjusted according to umask, - // but we don't know what the umask is. - // Create a dummy file to find out. - // This avoids build tags and works even on systems like Plan 9 - // where the file mask computation incorporates other information. - mode := perm - f, err := os.OpenFile(filepath.Clean(dst)+"-go-tmp-umask", os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm) - if err == nil { - fi, err := f.Stat() - if err == nil { - mode = fi.Mode() & 0777 - } - name := f.Name() - f.Close() - os.Remove(name) - } - - if err := os.Chmod(src, mode); err == nil { - if err := os.Rename(src, dst); err == nil { - if cfg.BuildX { - b.Showcmd("", "mv %s %s", src, dst) - } - return nil - } - } - - return b.copyFile(a, dst, src, perm, force) -} - -// copyFile is like 'cp src dst'. 
-func (b *Builder) copyFile(a *Action, dst, src string, perm os.FileMode, force bool) error { - if cfg.BuildN || cfg.BuildX { - b.Showcmd("", "cp %s %s", src, dst) - if cfg.BuildN { - return nil - } - } - - sf, err := os.Open(src) - if err != nil { - return err - } - defer sf.Close() - - // Be careful about removing/overwriting dst. - // Do not remove/overwrite if dst exists and is a directory - // or a non-object file. - if fi, err := os.Stat(dst); err == nil { - if fi.IsDir() { - return fmt.Errorf("build output %q already exists and is a directory", dst) - } - if !force && fi.Mode().IsRegular() && !isObject(dst) { - return fmt.Errorf("build output %q already exists and is not an object file", dst) - } - } - - // On Windows, remove lingering ~ file from last attempt. - if base.ToolIsWindows { - if _, err := os.Stat(dst + "~"); err == nil { - os.Remove(dst + "~") - } - } - - mayberemovefile(dst) - df, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil && base.ToolIsWindows { - // Windows does not allow deletion of a binary file - // while it is executing. Try to move it out of the way. - // If the move fails, which is likely, we'll try again the - // next time we do an install of this binary. - if err := os.Rename(dst, dst+"~"); err == nil { - os.Remove(dst + "~") - } - df, err = os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - } - if err != nil { - return err - } - - _, err = io.Copy(df, sf) - df.Close() - if err != nil { - mayberemovefile(dst) - return fmt.Errorf("copying %s to %s: %v", src, dst, err) - } - return nil -} - -// Install the cgo export header file, if there is one. -func (b *Builder) installHeader(a *Action) error { - src := a.Objdir + "_cgo_install.h" - if _, err := os.Stat(src); os.IsNotExist(err) { - // If the file does not exist, there are no exported - // functions, and we do not install anything. 
- return nil - } - - dir, _ := filepath.Split(a.Target) - if dir != "" { - if err := b.Mkdir(dir); err != nil { - return err - } - } - - return b.moveOrCopyFile(a, a.Target, src, 0666, true) -} - -// cover runs, in effect, -// go tool cover -mode=b.coverMode -var="varName" -o dst.go src.go -func (b *Builder) cover(a *Action, dst, src string, perm os.FileMode, varName string) error { - return b.run(a.Objdir, "cover "+a.Package.ImportPath, nil, - cfg.BuildToolexec, - base.Tool("cover"), - "-mode", a.Package.Internal.CoverMode, - "-var", varName, - "-o", dst, - src) -} - -var objectMagic = [][]byte{ - {'!', '<', 'a', 'r', 'c', 'h', '>', '\n'}, // Package archive - {'\x7F', 'E', 'L', 'F'}, // ELF - {0xFE, 0xED, 0xFA, 0xCE}, // Mach-O big-endian 32-bit - {0xFE, 0xED, 0xFA, 0xCF}, // Mach-O big-endian 64-bit - {0xCE, 0xFA, 0xED, 0xFE}, // Mach-O little-endian 32-bit - {0xCF, 0xFA, 0xED, 0xFE}, // Mach-O little-endian 64-bit - {0x4d, 0x5a, 0x90, 0x00, 0x03, 0x00}, // PE (Windows) as generated by 6l/8l and gcc - {0x00, 0x00, 0x01, 0xEB}, // Plan 9 i386 - {0x00, 0x00, 0x8a, 0x97}, // Plan 9 amd64 - {0x00, 0x00, 0x06, 0x47}, // Plan 9 arm -} - -func isObject(s string) bool { - f, err := os.Open(s) - if err != nil { - return false - } - defer f.Close() - buf := make([]byte, 64) - io.ReadFull(f, buf) - for _, magic := range objectMagic { - if bytes.HasPrefix(buf, magic) { - return true - } - } - return false -} - -// mayberemovefile removes a file only if it is a regular file -// When running as a user with sufficient privileges, we may delete -// even device files, for example, which is not intended. -func mayberemovefile(s string) { - if fi, err := os.Lstat(s); err == nil && !fi.Mode().IsRegular() { - return - } - os.Remove(s) -} - -// fmtcmd formats a command in the manner of fmt.Sprintf but also: -// -// If dir is non-empty and the script is not in dir right now, -// fmtcmd inserts "cd dir\n" before the command. -// -// fmtcmd replaces the value of b.WorkDir with $WORK. 
-// fmtcmd replaces the value of goroot with $GOROOT. -// fmtcmd replaces the value of b.gobin with $GOBIN. -// -// fmtcmd replaces the name of the current directory with dot (.) -// but only when it is at the beginning of a space-separated token. -// -func (b *Builder) fmtcmd(dir string, format string, args ...interface{}) string { - cmd := fmt.Sprintf(format, args...) - if dir != "" && dir != "/" { - cmd = strings.Replace(" "+cmd, " "+dir, " .", -1)[1:] - if b.scriptDir != dir { - b.scriptDir = dir - cmd = "cd " + dir + "\n" + cmd - } - } - if b.WorkDir != "" { - cmd = strings.Replace(cmd, b.WorkDir, "$WORK", -1) - } - return cmd -} - -// showcmd prints the given command to standard output -// for the implementation of -n or -x. -func (b *Builder) Showcmd(dir string, format string, args ...interface{}) { - b.output.Lock() - defer b.output.Unlock() - b.Print(b.fmtcmd(dir, format, args...) + "\n") -} - -// showOutput prints "# desc" followed by the given output. -// The output is expected to contain references to 'dir', usually -// the source directory for the package that has failed to build. -// showOutput rewrites mentions of dir with a relative path to dir -// when the relative path is shorter. This is usually more pleasant. -// For example, if fmt doesn't compile and we are in src/html, -// the output is -// -// $ go build -// # fmt -// ../fmt/print.go:1090: undefined: asdf -// $ -// -// instead of -// -// $ go build -// # fmt -// /usr/gopher/go/src/fmt/print.go:1090: undefined: asdf -// $ -// -// showOutput also replaces references to the work directory with $WORK. 
-// -func (b *Builder) showOutput(dir, desc, out string) { - prefix := "# " + desc - suffix := "\n" + out - if reldir := base.ShortPath(dir); reldir != dir { - suffix = strings.Replace(suffix, " "+dir, " "+reldir, -1) - suffix = strings.Replace(suffix, "\n"+dir, "\n"+reldir, -1) - } - suffix = strings.Replace(suffix, " "+b.WorkDir, " $WORK", -1) - - b.output.Lock() - defer b.output.Unlock() - b.Print(prefix, suffix) -} - -// errPrintedOutput is a special error indicating that a command failed -// but that it generated output as well, and that output has already -// been printed, so there's no point showing 'exit status 1' or whatever -// the wait status was. The main executor, builder.do, knows not to -// print this error. -var errPrintedOutput = errors.New("already printed output - no need to show error") - -var cgoLine = regexp.MustCompile(`\[[^\[\]]+\.cgo1\.go:[0-9]+(:[0-9]+)?\]`) -var cgoTypeSigRe = regexp.MustCompile(`\b_Ctype_\B`) - -// run runs the command given by cmdline in the directory dir. -// If the command fails, run prints information about the failure -// and returns a non-nil error. -func (b *Builder) run(dir string, desc string, env []string, cmdargs ...interface{}) error { - out, err := b.runOut(dir, desc, env, cmdargs...) - if len(out) > 0 { - if desc == "" { - desc = b.fmtcmd(dir, "%s", strings.Join(str.StringList(cmdargs...), " ")) - } - b.showOutput(dir, desc, b.processOutput(out)) - if err != nil { - err = errPrintedOutput - } - } - return err -} - -// processOutput prepares the output of runOut to be output to the console. -func (b *Builder) processOutput(out []byte) string { - if out[len(out)-1] != '\n' { - out = append(out, '\n') - } - messages := string(out) - // Fix up output referring to cgo-generated code to be more readable. - // Replace x.go:19[/tmp/.../x.cgo1.go:18] with x.go:19. - // Replace *[100]_Ctype_foo with *[100]C.foo. - // If we're using -x, assume we're debugging and want the full dump, so disable the rewrite. 
- if !cfg.BuildX && cgoLine.MatchString(messages) { - messages = cgoLine.ReplaceAllString(messages, "") - messages = cgoTypeSigRe.ReplaceAllString(messages, "C.") - } - return messages -} - -// runOut runs the command given by cmdline in the directory dir. -// It returns the command output and any errors that occurred. -func (b *Builder) runOut(dir string, desc string, env []string, cmdargs ...interface{}) ([]byte, error) { - cmdline := str.StringList(cmdargs...) - if cfg.BuildN || cfg.BuildX { - var envcmdline string - for i := range env { - envcmdline += env[i] - envcmdline += " " - } - envcmdline += joinUnambiguously(cmdline) - b.Showcmd(dir, "%s", envcmdline) - if cfg.BuildN { - return nil, nil - } - } - - nbusy := 0 - for { - var buf bytes.Buffer - cmd := exec.Command(cmdline[0], cmdline[1:]...) - cmd.Stdout = &buf - cmd.Stderr = &buf - cmd.Dir = dir - cmd.Env = base.MergeEnvLists(env, base.EnvForDir(cmd.Dir, os.Environ())) - err := cmd.Run() - - // cmd.Run will fail on Unix if some other process has the binary - // we want to run open for writing. This can happen here because - // we build and install the cgo command and then run it. - // If another command was kicked off while we were writing the - // cgo binary, the child process for that command may be holding - // a reference to the fd, keeping us from running exec. - // - // But, you might reasonably wonder, how can this happen? - // The cgo fd, like all our fds, is close-on-exec, so that we need - // not worry about other processes inheriting the fd accidentally. - // The answer is that running a command is fork and exec. - // A child forked while the cgo fd is open inherits that fd. - // Until the child has called exec, it holds the fd open and the - // kernel will not let us run cgo. Even if the child were to close - // the fd explicitly, it would still be open from the time of the fork - // until the time of the explicit close, and the race would remain. 
- // - // On Unix systems, this results in ETXTBSY, which formats - // as "text file busy". Rather than hard-code specific error cases, - // we just look for that string. If this happens, sleep a little - // and try again. We let this happen three times, with increasing - // sleep lengths: 100+200+400 ms = 0.7 seconds. - // - // An alternate solution might be to split the cmd.Run into - // separate cmd.Start and cmd.Wait, and then use an RWLock - // to make sure that copyFile only executes when no cmd.Start - // call is in progress. However, cmd.Start (really syscall.forkExec) - // only guarantees that when it returns, the exec is committed to - // happen and succeed. It uses a close-on-exec file descriptor - // itself to determine this, so we know that when cmd.Start returns, - // at least one close-on-exec file descriptor has been closed. - // However, we cannot be sure that all of them have been closed, - // so the program might still encounter ETXTBSY even with such - // an RWLock. The race window would be smaller, perhaps, but not - // guaranteed to be gone. - // - // Sleeping when we observe the race seems to be the most reliable - // option we have. - // - // https://golang.org/issue/3001 - // - if err != nil && nbusy < 3 && strings.Contains(err.Error(), "text file busy") { - time.Sleep(100 * time.Millisecond << uint(nbusy)) - nbusy++ - continue - } - - // err can be something like 'exit status 1'. - // Add information about what program was running. - // Note that if buf.Bytes() is non-empty, the caller usually - // shows buf.Bytes() and does not print err at all, so the - // prefix here does not make most output any more verbose. - if err != nil { - err = errors.New(cmdline[0] + ": " + err.Error()) - } - return buf.Bytes(), err - } -} - -// joinUnambiguously prints the slice, quoting where necessary to make the -// output unambiguous. -// TODO: See issue 5279. The printing of commands needs a complete redo. 
-func joinUnambiguously(a []string) string { - var buf bytes.Buffer - for i, s := range a { - if i > 0 { - buf.WriteByte(' ') - } - q := strconv.Quote(s) - if s == "" || strings.Contains(s, " ") || len(q) > len(s)+2 { - buf.WriteString(q) - } else { - buf.WriteString(s) - } - } - return buf.String() -} - -// mkdir makes the named directory. -func (b *Builder) Mkdir(dir string) error { - b.exec.Lock() - defer b.exec.Unlock() - // We can be a little aggressive about being - // sure directories exist. Skip repeated calls. - if b.mkdirCache[dir] { - return nil - } - b.mkdirCache[dir] = true - - if cfg.BuildN || cfg.BuildX { - b.Showcmd("", "mkdir -p %s", dir) - if cfg.BuildN { - return nil - } - } - - if err := os.MkdirAll(dir, 0777); err != nil { - return err - } - return nil -} - -// mkAbs returns an absolute path corresponding to -// evaluating f in the directory dir. -// We always pass absolute paths of source files so that -// the error messages will include the full path to a file -// in need of attention. -func mkAbs(dir, f string) string { - // Leave absolute paths alone. - // Also, during -n mode we use the pseudo-directory $WORK - // instead of creating an actual work directory that won't be used. - // Leave paths beginning with $WORK alone too. - if filepath.IsAbs(f) || strings.HasPrefix(f, "$WORK") { - return f - } - return filepath.Join(dir, f) -} - -type toolchain interface { - // gc runs the compiler in a specific directory on a set of files - // and returns the name of the generated output file. - gc(b *Builder, p *load.Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, out []byte, err error) - // cc runs the toolchain's C compiler in a directory on a C file - // to produce an output file. - cc(b *Builder, p *load.Package, objdir, ofile, cfile string) error - // asm runs the assembler in a specific directory on specific files - // and returns a list of named output files. 
- asm(b *Builder, p *load.Package, obj string, sfiles []string) ([]string, error) - // pkgpath builds an appropriate path for a temporary package file. - Pkgpath(basedir string, p *load.Package) string - // pack runs the archive packer in a specific directory to create - // an archive from a set of object files. - // typically it is run in the object directory. - pack(b *Builder, p *load.Package, objDir, afile string, ofiles []string) error - // ld runs the linker to create an executable starting at mainpkg. - ld(b *Builder, root *Action, out string, allactions []*Action, mainpkg string, ofiles []string) error - // ldShared runs the linker to create a shared library containing the pkgs built by toplevelactions - ldShared(b *Builder, toplevelactions []*Action, out string, allactions []*Action) error - - compiler() string - linker() string -} - -type noToolchain struct{} - -func noCompiler() error { - log.Fatalf("unknown compiler %q", cfg.BuildContext.Compiler) - return nil -} - -func (noToolchain) compiler() string { - noCompiler() - return "" -} - -func (noToolchain) linker() string { - noCompiler() - return "" -} - -func (noToolchain) gc(b *Builder, p *load.Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, out []byte, err error) { - return "", nil, noCompiler() -} - -func (noToolchain) asm(b *Builder, p *load.Package, obj string, sfiles []string) ([]string, error) { - return nil, noCompiler() -} - -func (noToolchain) Pkgpath(basedir string, p *load.Package) string { - noCompiler() - return "" -} - -func (noToolchain) pack(b *Builder, p *load.Package, objDir, afile string, ofiles []string) error { - return noCompiler() -} - -func (noToolchain) ld(b *Builder, root *Action, out string, allactions []*Action, mainpkg string, ofiles []string) error { - return noCompiler() -} - -func (noToolchain) ldShared(b *Builder, toplevelactions []*Action, out string, allactions []*Action) error { - return noCompiler() -} - -func 
(noToolchain) cc(b *Builder, p *load.Package, objdir, ofile, cfile string) error { - return noCompiler() -} - -// The Go toolchain. -type gcToolchain struct{} - -func (gcToolchain) compiler() string { - return base.Tool("compile") -} - -func (gcToolchain) linker() string { - return base.Tool("link") -} - -func (gcToolchain) gc(b *Builder, p *load.Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, output []byte, err error) { - if archive != "" { - ofile = archive - } else { - out := "_go_.o" - ofile = obj + out - } - - gcargs := []string{"-p", p.ImportPath} - if p.Name == "main" { - gcargs[1] = "main" - } - if p.Standard { - gcargs = append(gcargs, "-std") - } - compilingRuntime := p.Standard && (p.ImportPath == "runtime" || strings.HasPrefix(p.ImportPath, "runtime/internal")) - if compilingRuntime { - // runtime compiles with a special gc flag to emit - // additional reflect type data. - gcargs = append(gcargs, "-+") - } - - // If we're giving the compiler the entire package (no C etc files), tell it that, - // so that it can give good error messages about forward declarations. - // Exceptions: a few standard packages have forward declarations for - // pieces supplied behind-the-scenes by package runtime. 
- extFiles := len(p.CgoFiles) + len(p.CFiles) + len(p.CXXFiles) + len(p.MFiles) + len(p.FFiles) + len(p.SFiles) + len(p.SysoFiles) + len(p.SwigFiles) + len(p.SwigCXXFiles) - if p.Standard { - switch p.ImportPath { - case "bytes", "internal/poll", "net", "os", "runtime/pprof", "sync", "syscall", "time": - extFiles++ - } - } - if extFiles == 0 { - gcargs = append(gcargs, "-complete") - } - if cfg.BuildContext.InstallSuffix != "" { - gcargs = append(gcargs, "-installsuffix", cfg.BuildContext.InstallSuffix) - } - if p.Internal.BuildID != "" { - gcargs = append(gcargs, "-buildid", p.Internal.BuildID) - } - platform := cfg.Goos + "/" + cfg.Goarch - if p.Internal.OmitDebug || platform == "nacl/amd64p32" || platform == "darwin/arm" || platform == "darwin/arm64" || cfg.Goos == "plan9" { - gcargs = append(gcargs, "-dwarf=false") - } - - for _, path := range p.Imports { - if i := strings.LastIndex(path, "/vendor/"); i >= 0 { - gcargs = append(gcargs, "-importmap", path[i+len("/vendor/"):]+"="+path) - } else if strings.HasPrefix(path, "vendor/") { - gcargs = append(gcargs, "-importmap", path[len("vendor/"):]+"="+path) - } - } - - gcflags := buildGcflags - if compilingRuntime { - // Remove -N, if present. - // It is not possible to build the runtime with no optimizations, - // because the compiler cannot eliminate enough write barriers. - gcflags = make([]string, len(buildGcflags)) - copy(gcflags, buildGcflags) - for i := 0; i < len(gcflags); i++ { - if gcflags[i] == "-N" { - copy(gcflags[i:], gcflags[i+1:]) - gcflags = gcflags[:len(gcflags)-1] - i-- - } - } - } - args := []interface{}{cfg.BuildToolexec, base.Tool("compile"), "-o", ofile, "-trimpath", b.WorkDir, gcflags, gcargs, "-D", p.Internal.LocalPrefix, importArgs} - if ofile == archive { - args = append(args, "-pack") - } - if asmhdr { - args = append(args, "-asmhdr", obj+"go_asm.h") - } - - // Add -c=N to use concurrent backend compilation, if possible. 
- if c := gcBackendConcurrency(gcflags); c > 1 { - args = append(args, fmt.Sprintf("-c=%d", c)) - } - - for _, f := range gofiles { - args = append(args, mkAbs(p.Dir, f)) - } - - output, err = b.runOut(p.Dir, p.ImportPath, nil, args...) - return ofile, output, err -} - -// gcBackendConcurrency returns the backend compiler concurrency level for a package compilation. -func gcBackendConcurrency(gcflags []string) int { - // First, check whether we can use -c at all for this compilation. - canDashC := concurrentGCBackendCompilationEnabledByDefault - - switch e := os.Getenv("GO19CONCURRENTCOMPILATION"); e { - case "0": - canDashC = false - case "1": - canDashC = true - case "": - // Not set. Use default. - default: - log.Fatalf("GO19CONCURRENTCOMPILATION must be 0, 1, or unset, got %q", e) - } - - if os.Getenv("GOEXPERIMENT") != "" { - // Concurrent compilation is presumed incompatible with GOEXPERIMENTs. - canDashC = false - } - -CheckFlags: - for _, flag := range gcflags { - // Concurrent compilation is presumed incompatible with any gcflags, - // except for a small whitelist of commonly used flags. - // If the user knows better, they can manually add their own -c to the gcflags. - switch flag { - case "-N", "-l", "-S", "-B", "-C", "-I": - // OK - default: - canDashC = false - break CheckFlags - } - } - - if !canDashC { - return 1 - } - - // Decide how many concurrent backend compilations to allow. - // - // If we allow too many, in theory we might end up with p concurrent processes, - // each with c concurrent backend compiles, all fighting over the same resources. - // However, in practice, that seems not to happen too much. - // Most build graphs are surprisingly serial, so p==1 for much of the build. - // Furthermore, concurrent backend compilation is only enabled for a part - // of the overall compiler execution, so c==1 for much of the build. - // So don't worry too much about that interaction for now. 
- // - // However, in practice, setting c above 4 tends not to help very much. - // See the analysis in CL 41192. - // - // TODO(josharian): attempt to detect whether this particular compilation - // is likely to be a bottleneck, e.g. when: - // - it has no successor packages to compile (usually package main) - // - all paths through the build graph pass through it - // - critical path scheduling says it is high priority - // and in such a case, set c to runtime.NumCPU. - // We do this now when p==1. - if cfg.BuildP == 1 { - // No process parallelism. Max out c. - return runtime.NumCPU() - } - // Some process parallelism. Set c to min(4, numcpu). - c := 4 - if ncpu := runtime.NumCPU(); ncpu < c { - c = ncpu - } - return c -} - -func (gcToolchain) asm(b *Builder, p *load.Package, obj string, sfiles []string) ([]string, error) { - // Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files. - inc := filepath.Join(cfg.GOROOT, "pkg", "include") - args := []interface{}{cfg.BuildToolexec, base.Tool("asm"), "-trimpath", b.WorkDir, "-I", obj, "-I", inc, "-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch, buildAsmflags} - if p.ImportPath == "runtime" && cfg.Goarch == "386" { - for _, arg := range buildAsmflags { - if arg == "-dynlink" { - args = append(args, "-D=GOBUILDMODE_shared=1") - } - } - } - var ofiles []string - for _, sfile := range sfiles { - ofile := obj + sfile[:len(sfile)-len(".s")] + ".o" - ofiles = append(ofiles, ofile) - a := append(args, "-o", ofile, mkAbs(p.Dir, sfile)) - if err := b.run(p.Dir, p.ImportPath, nil, a...); err != nil { - return nil, err - } - } - return ofiles, nil -} - -// toolVerify checks that the command line args writes the same output file -// if run using newTool instead. -// Unused now but kept around for future use. 
-func toolVerify(b *Builder, p *load.Package, newTool string, ofile string, args []interface{}) error { - newArgs := make([]interface{}, len(args)) - copy(newArgs, args) - newArgs[1] = base.Tool(newTool) - newArgs[3] = ofile + ".new" // x.6 becomes x.6.new - if err := b.run(p.Dir, p.ImportPath, nil, newArgs...); err != nil { - return err - } - data1, err := ioutil.ReadFile(ofile) - if err != nil { - return err - } - data2, err := ioutil.ReadFile(ofile + ".new") - if err != nil { - return err - } - if !bytes.Equal(data1, data2) { - return fmt.Errorf("%s and %s produced different output files:\n%s\n%s", filepath.Base(args[1].(string)), newTool, strings.Join(str.StringList(args...), " "), strings.Join(str.StringList(newArgs...), " ")) - } - os.Remove(ofile + ".new") - return nil -} - -func (gcToolchain) Pkgpath(basedir string, p *load.Package) string { - end := filepath.FromSlash(p.ImportPath + ".a") - return filepath.Join(basedir, end) -} - -func (gcToolchain) pack(b *Builder, p *load.Package, objDir, afile string, ofiles []string) error { - var absOfiles []string - for _, f := range ofiles { - absOfiles = append(absOfiles, mkAbs(objDir, f)) - } - absAfile := mkAbs(objDir, afile) - - // The archive file should have been created by the compiler. - // Since it used to not work that way, verify. 
- if !cfg.BuildN { - if _, err := os.Stat(absAfile); err != nil { - base.Fatalf("os.Stat of archive file failed: %v", err) - } - } - - if cfg.BuildN || cfg.BuildX { - cmdline := str.StringList("pack", "r", absAfile, absOfiles) - b.Showcmd(p.Dir, "%s # internal", joinUnambiguously(cmdline)) - } - if cfg.BuildN { - return nil - } - if err := packInternal(b, absAfile, absOfiles); err != nil { - b.showOutput(p.Dir, p.ImportPath, err.Error()+"\n") - return errPrintedOutput - } - return nil -} - -func packInternal(b *Builder, afile string, ofiles []string) error { - dst, err := os.OpenFile(afile, os.O_WRONLY|os.O_APPEND, 0) - if err != nil { - return err - } - defer dst.Close() // only for error returns or panics - w := bufio.NewWriter(dst) - - for _, ofile := range ofiles { - src, err := os.Open(ofile) - if err != nil { - return err - } - fi, err := src.Stat() - if err != nil { - src.Close() - return err - } - // Note: Not using %-16.16s format because we care - // about bytes, not runes. - name := fi.Name() - if len(name) > 16 { - name = name[:16] - } else { - name += strings.Repeat(" ", 16-len(name)) - } - size := fi.Size() - fmt.Fprintf(w, "%s%-12d%-6d%-6d%-8o%-10d`\n", - name, 0, 0, 0, 0644, size) - n, err := io.Copy(w, src) - src.Close() - if err == nil && n < size { - err = io.ErrUnexpectedEOF - } else if err == nil && n > size { - err = fmt.Errorf("file larger than size reported by stat") - } - if err != nil { - return fmt.Errorf("copying %s to %s: %v", ofile, afile, err) - } - if size&1 != 0 { - w.WriteByte(0) - } - } - - if err := w.Flush(); err != nil { - return err - } - return dst.Close() -} - -// setextld sets the appropriate linker flags for the specified compiler. 
-func setextld(ldflags []string, compiler []string) []string { - for _, f := range ldflags { - if f == "-extld" || strings.HasPrefix(f, "-extld=") { - // don't override -extld if supplied - return ldflags - } - } - ldflags = append(ldflags, "-extld="+compiler[0]) - if len(compiler) > 1 { - extldflags := false - add := strings.Join(compiler[1:], " ") - for i, f := range ldflags { - if f == "-extldflags" && i+1 < len(ldflags) { - ldflags[i+1] = add + " " + ldflags[i+1] - extldflags = true - break - } else if strings.HasPrefix(f, "-extldflags=") { - ldflags[i] = "-extldflags=" + add + " " + ldflags[i][len("-extldflags="):] - extldflags = true - break - } - } - if !extldflags { - ldflags = append(ldflags, "-extldflags="+add) - } - } - return ldflags -} - -func (gcToolchain) ld(b *Builder, root *Action, out string, allactions []*Action, mainpkg string, ofiles []string) error { - importArgs := b.includeArgs("-L", allactions) - cxx := len(root.Package.CXXFiles) > 0 || len(root.Package.SwigCXXFiles) > 0 - for _, a := range allactions { - if a.Package != nil && (len(a.Package.CXXFiles) > 0 || len(a.Package.SwigCXXFiles) > 0) { - cxx = true - } - } - var ldflags []string - if cfg.BuildContext.InstallSuffix != "" { - ldflags = append(ldflags, "-installsuffix", cfg.BuildContext.InstallSuffix) - } - if root.Package.Internal.OmitDebug { - ldflags = append(ldflags, "-s", "-w") - } - if cfg.BuildBuildmode == "plugin" { - pluginpath := root.Package.ImportPath - if pluginpath == "command-line-arguments" { - pluginpath = "plugin/unnamed-" + root.Package.Internal.BuildID - } - ldflags = append(ldflags, "-pluginpath", pluginpath) - } - - // If the user has not specified the -extld option, then specify the - // appropriate linker. In case of C++ code, use the compiler named - // by the CXX environment variable or defaultCXX if CXX is not set. - // Else, use the CC environment variable and defaultCC as fallback. 
- var compiler []string - if cxx { - compiler = envList("CXX", cfg.DefaultCXX) - } else { - compiler = envList("CC", cfg.DefaultCC) - } - ldflags = setextld(ldflags, compiler) - ldflags = append(ldflags, "-buildmode="+ldBuildmode) - if root.Package.Internal.BuildID != "" { - ldflags = append(ldflags, "-buildid="+root.Package.Internal.BuildID) - } - ldflags = append(ldflags, cfg.BuildLdflags...) - - // On OS X when using external linking to build a shared library, - // the argument passed here to -o ends up recorded in the final - // shared library in the LC_ID_DYLIB load command. - // To avoid putting the temporary output directory name there - // (and making the resulting shared library useless), - // run the link in the output directory so that -o can name - // just the final path element. - dir := "." - if cfg.Goos == "darwin" && cfg.BuildBuildmode == "c-shared" { - dir, out = filepath.Split(out) - } - - return b.run(dir, root.Package.ImportPath, nil, cfg.BuildToolexec, base.Tool("link"), "-o", out, importArgs, ldflags, mainpkg) -} - -func (gcToolchain) ldShared(b *Builder, toplevelactions []*Action, out string, allactions []*Action) error { - importArgs := b.includeArgs("-L", allactions) - ldflags := []string{"-installsuffix", cfg.BuildContext.InstallSuffix} - ldflags = append(ldflags, "-buildmode=shared") - ldflags = append(ldflags, cfg.BuildLdflags...) - cxx := false - for _, a := range allactions { - if a.Package != nil && (len(a.Package.CXXFiles) > 0 || len(a.Package.SwigCXXFiles) > 0) { - cxx = true - } - } - // If the user has not specified the -extld option, then specify the - // appropriate linker. In case of C++ code, use the compiler named - // by the CXX environment variable or defaultCXX if CXX is not set. - // Else, use the CC environment variable and defaultCC as fallback. 
- var compiler []string - if cxx { - compiler = envList("CXX", cfg.DefaultCXX) - } else { - compiler = envList("CC", cfg.DefaultCC) - } - ldflags = setextld(ldflags, compiler) - for _, d := range toplevelactions { - if !strings.HasSuffix(d.Target, ".a") { // omit unsafe etc and actions for other shared libraries - continue - } - ldflags = append(ldflags, d.Package.ImportPath+"="+d.Target) - } - return b.run(".", out, nil, cfg.BuildToolexec, base.Tool("link"), "-o", out, importArgs, ldflags) -} - -func (gcToolchain) cc(b *Builder, p *load.Package, objdir, ofile, cfile string) error { - return fmt.Errorf("%s: C source files not supported without cgo", mkAbs(p.Dir, cfile)) -} - -// The Gccgo toolchain. -type gccgoToolchain struct{} - -var GccgoName, GccgoBin string -var gccgoErr error - -func init() { - GccgoName = os.Getenv("GCCGO") - if GccgoName == "" { - GccgoName = "gccgo" - } - GccgoBin, gccgoErr = exec.LookPath(GccgoName) -} - -func (gccgoToolchain) compiler() string { - checkGccgoBin() - return GccgoBin -} - -func (gccgoToolchain) linker() string { - checkGccgoBin() - return GccgoBin -} - -func checkGccgoBin() { - if gccgoErr == nil { - return - } - fmt.Fprintf(os.Stderr, "cmd/go: gccgo: %s\n", gccgoErr) - os.Exit(2) -} - -func (tools gccgoToolchain) gc(b *Builder, p *load.Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, output []byte, err error) { - out := "_go_.o" - ofile = obj + out - gcargs := []string{"-g"} - gcargs = append(gcargs, b.gccArchArgs()...) 
- if pkgpath := gccgoPkgpath(p); pkgpath != "" { - gcargs = append(gcargs, "-fgo-pkgpath="+pkgpath) - } - if p.Internal.LocalPrefix != "" { - gcargs = append(gcargs, "-fgo-relative-import-path="+p.Internal.LocalPrefix) - } - - // Handle vendor directories - savedirs := []string{} - for _, incdir := range importArgs { - if incdir != "-I" { - savedirs = append(savedirs, incdir) - } - } - - for _, path := range p.Imports { - // If this is a new vendor path, add it to the list of importArgs - if i := strings.LastIndex(path, "/vendor"); i >= 0 { - for _, dir := range savedirs { - // Check if the vendor path is already included in dir - if strings.HasSuffix(dir, path[:i+len("/vendor")]) { - continue - } - // Make sure this vendor path is not already in the list for importArgs - vendorPath := dir + "/" + path[:i+len("/vendor")] - for _, imp := range importArgs { - if imp == "-I" { - continue - } - // This vendorPath is already in the list - if imp == vendorPath { - goto nextSuffixPath - } - } - // New vendorPath not yet in the importArgs list, so add it - importArgs = append(importArgs, "-I", vendorPath) - nextSuffixPath: - } - } else if strings.HasPrefix(path, "vendor/") { - for _, dir := range savedirs { - // Make sure this vendor path is not already in the list for importArgs - vendorPath := dir + "/" + path[len("/vendor"):] - for _, imp := range importArgs { - if imp == "-I" { - continue - } - if imp == vendorPath { - goto nextPrefixPath - } - } - // This vendor path is needed and not already in the list, so add it - importArgs = append(importArgs, "-I", vendorPath) - nextPrefixPath: - } - } - } - - args := str.StringList(tools.compiler(), importArgs, "-c", gcargs, "-o", ofile, buildGccgoflags) - for _, f := range gofiles { - args = append(args, mkAbs(p.Dir, f)) - } - - output, err = b.runOut(p.Dir, p.ImportPath, nil, args) - return ofile, output, err -} - -func (tools gccgoToolchain) asm(b *Builder, p *load.Package, obj string, sfiles []string) ([]string, error) { - 
var ofiles []string - for _, sfile := range sfiles { - ofile := obj + sfile[:len(sfile)-len(".s")] + ".o" - ofiles = append(ofiles, ofile) - sfile = mkAbs(p.Dir, sfile) - defs := []string{"-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch} - if pkgpath := gccgoCleanPkgpath(p); pkgpath != "" { - defs = append(defs, `-D`, `GOPKGPATH=`+pkgpath) - } - defs = tools.maybePIC(defs) - defs = append(defs, b.gccArchArgs()...) - err := b.run(p.Dir, p.ImportPath, nil, tools.compiler(), "-xassembler-with-cpp", "-I", obj, "-c", "-o", ofile, defs, sfile) - if err != nil { - return nil, err - } - } - return ofiles, nil -} - -func (gccgoToolchain) Pkgpath(basedir string, p *load.Package) string { - end := filepath.FromSlash(p.ImportPath + ".a") - afile := filepath.Join(basedir, end) - // add "lib" to the final element - return filepath.Join(filepath.Dir(afile), "lib"+filepath.Base(afile)) -} - -func (gccgoToolchain) pack(b *Builder, p *load.Package, objDir, afile string, ofiles []string) error { - var absOfiles []string - for _, f := range ofiles { - absOfiles = append(absOfiles, mkAbs(objDir, f)) - } - return b.run(p.Dir, p.ImportPath, nil, "ar", "rc", mkAbs(objDir, afile), absOfiles) -} - -func (tools gccgoToolchain) link(b *Builder, root *Action, out string, allactions []*Action, mainpkg string, ofiles []string, buildmode, desc string) error { - // gccgo needs explicit linking with all package dependencies, - // and all LDFLAGS from cgo dependencies. 
- apackagePathsSeen := make(map[string]bool) - afiles := []string{} - shlibs := []string{} - ldflags := b.gccArchArgs() - cgoldflags := []string{} - usesCgo := false - cxx := false - objc := false - fortran := false - if root.Package != nil { - cxx = len(root.Package.CXXFiles) > 0 || len(root.Package.SwigCXXFiles) > 0 - objc = len(root.Package.MFiles) > 0 - fortran = len(root.Package.FFiles) > 0 - } - - readCgoFlags := func(flagsFile string) error { - flags, err := ioutil.ReadFile(flagsFile) - if err != nil { - return err - } - const ldflagsPrefix = "_CGO_LDFLAGS=" - for _, line := range strings.Split(string(flags), "\n") { - if strings.HasPrefix(line, ldflagsPrefix) { - newFlags := strings.Fields(line[len(ldflagsPrefix):]) - for _, flag := range newFlags { - // Every _cgo_flags file has -g and -O2 in _CGO_LDFLAGS - // but they don't mean anything to the linker so filter - // them out. - if flag != "-g" && !strings.HasPrefix(flag, "-O") { - cgoldflags = append(cgoldflags, flag) - } - } - } - } - return nil - } - - readAndRemoveCgoFlags := func(archive string) (string, error) { - newa, err := ioutil.TempFile(b.WorkDir, filepath.Base(archive)) - if err != nil { - return "", err - } - olda, err := os.Open(archive) - if err != nil { - return "", err - } - _, err = io.Copy(newa, olda) - if err != nil { - return "", err - } - err = olda.Close() - if err != nil { - return "", err - } - err = newa.Close() - if err != nil { - return "", err - } - - newarchive := newa.Name() - err = b.run(b.WorkDir, desc, nil, "ar", "x", newarchive, "_cgo_flags") - if err != nil { - return "", err - } - err = b.run(".", desc, nil, "ar", "d", newarchive, "_cgo_flags") - if err != nil { - return "", err - } - err = readCgoFlags(filepath.Join(b.WorkDir, "_cgo_flags")) - if err != nil { - return "", err - } - return newarchive, nil - } - - actionsSeen := make(map[*Action]bool) - // Make a pre-order depth-first traversal of the action graph, taking note of - // whether a shared library action has 
been seen on the way to an action (the - // construction of the graph means that if any path to a node passes through - // a shared library action, they all do). - var walk func(a *Action, seenShlib bool) - var err error - walk = func(a *Action, seenShlib bool) { - if actionsSeen[a] { - return - } - actionsSeen[a] = true - if a.Package != nil && !seenShlib { - if a.Package.Standard { - return - } - // We record the target of the first time we see a .a file - // for a package to make sure that we prefer the 'install' - // rather than the 'build' location (which may not exist any - // more). We still need to traverse the dependencies of the - // build action though so saying - // if apackagePathsSeen[a.Package.ImportPath] { return } - // doesn't work. - if !apackagePathsSeen[a.Package.ImportPath] { - apackagePathsSeen[a.Package.ImportPath] = true - target := a.Target - if len(a.Package.CgoFiles) > 0 || a.Package.UsesSwig() { - target, err = readAndRemoveCgoFlags(target) - if err != nil { - return - } - } - afiles = append(afiles, target) - } - } - if strings.HasSuffix(a.Target, ".so") { - shlibs = append(shlibs, a.Target) - seenShlib = true - } - for _, a1 := range a.Deps { - walk(a1, seenShlib) - if err != nil { - return - } - } - } - for _, a1 := range root.Deps { - walk(a1, false) - if err != nil { - return err - } - } - - for _, a := range allactions { - // Gather CgoLDFLAGS, but not from standard packages. - // The go tool can dig up runtime/cgo from GOROOT and - // think that it should use its CgoLDFLAGS, but gccgo - // doesn't use runtime/cgo. - if a.Package == nil { - continue - } - if !a.Package.Standard { - cgoldflags = append(cgoldflags, a.Package.CgoLDFLAGS...) 
- } - if len(a.Package.CgoFiles) > 0 { - usesCgo = true - } - if a.Package.UsesSwig() { - usesCgo = true - } - if len(a.Package.CXXFiles) > 0 || len(a.Package.SwigCXXFiles) > 0 { - cxx = true - } - if len(a.Package.MFiles) > 0 { - objc = true - } - if len(a.Package.FFiles) > 0 { - fortran = true - } - } - - for i, o := range ofiles { - if filepath.Base(o) == "_cgo_flags" { - readCgoFlags(o) - ofiles = append(ofiles[:i], ofiles[i+1:]...) - break - } - } - - ldflags = append(ldflags, "-Wl,--whole-archive") - ldflags = append(ldflags, afiles...) - ldflags = append(ldflags, "-Wl,--no-whole-archive") - - ldflags = append(ldflags, cgoldflags...) - ldflags = append(ldflags, envList("CGO_LDFLAGS", "")...) - if root.Package != nil { - ldflags = append(ldflags, root.Package.CgoLDFLAGS...) - } - - ldflags = str.StringList("-Wl,-(", ldflags, "-Wl,-)") - - for _, shlib := range shlibs { - ldflags = append( - ldflags, - "-L"+filepath.Dir(shlib), - "-Wl,-rpath="+filepath.Dir(shlib), - "-l"+strings.TrimSuffix( - strings.TrimPrefix(filepath.Base(shlib), "lib"), - ".so")) - } - - var realOut string - switch buildmode { - case "exe": - if usesCgo && cfg.Goos == "linux" { - ldflags = append(ldflags, "-Wl,-E") - } - - case "c-archive": - // Link the Go files into a single .o, and also link - // in -lgolibbegin. - // - // We need to use --whole-archive with -lgolibbegin - // because it doesn't define any symbols that will - // cause the contents to be pulled in; it's just - // initialization code. - // - // The user remains responsible for linking against - // -lgo -lpthread -lm in the final link. We can't use - // -r to pick them up because we can't combine - // split-stack and non-split-stack code in a single -r - // link, and libgo picks up non-split-stack code from - // libffi. 
- ldflags = append(ldflags, "-Wl,-r", "-nostdlib", "-Wl,--whole-archive", "-lgolibbegin", "-Wl,--no-whole-archive") - - if nopie := b.gccNoPie(); nopie != "" { - ldflags = append(ldflags, nopie) - } - - // We are creating an object file, so we don't want a build ID. - ldflags = b.disableBuildID(ldflags) - - realOut = out - out = out + ".o" - - case "c-shared": - ldflags = append(ldflags, "-shared", "-nostdlib", "-Wl,--whole-archive", "-lgolibbegin", "-Wl,--no-whole-archive", "-lgo", "-lgcc_s", "-lgcc", "-lc", "-lgcc") - case "shared": - ldflags = append(ldflags, "-zdefs", "-shared", "-nostdlib", "-lgo", "-lgcc_s", "-lgcc", "-lc") - - default: - base.Fatalf("-buildmode=%s not supported for gccgo", buildmode) - } - - switch buildmode { - case "exe", "c-shared": - if cxx { - ldflags = append(ldflags, "-lstdc++") - } - if objc { - ldflags = append(ldflags, "-lobjc") - } - if fortran { - fc := os.Getenv("FC") - if fc == "" { - fc = "gfortran" - } - // support gfortran out of the box and let others pass the correct link options - // via CGO_LDFLAGS - if strings.Contains(fc, "gfortran") { - ldflags = append(ldflags, "-lgfortran") - } - } - } - - if err := b.run(".", desc, nil, tools.linker(), "-o", out, ofiles, ldflags, buildGccgoflags); err != nil { - return err - } - - switch buildmode { - case "c-archive": - if err := b.run(".", desc, nil, "ar", "rc", realOut, out); err != nil { - return err - } - } - return nil -} - -func (tools gccgoToolchain) ld(b *Builder, root *Action, out string, allactions []*Action, mainpkg string, ofiles []string) error { - return tools.link(b, root, out, allactions, mainpkg, ofiles, ldBuildmode, root.Package.ImportPath) -} - -func (tools gccgoToolchain) ldShared(b *Builder, toplevelactions []*Action, out string, allactions []*Action) error { - fakeRoot := &Action{} - fakeRoot.Deps = toplevelactions - return tools.link(b, fakeRoot, out, allactions, "", nil, "shared", out) -} - -func (tools gccgoToolchain) cc(b *Builder, p *load.Package, 
objdir, ofile, cfile string) error { - inc := filepath.Join(cfg.GOROOT, "pkg", "include") - cfile = mkAbs(p.Dir, cfile) - defs := []string{"-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch} - defs = append(defs, b.gccArchArgs()...) - if pkgpath := gccgoCleanPkgpath(p); pkgpath != "" { - defs = append(defs, `-D`, `GOPKGPATH="`+pkgpath+`"`) - } - switch cfg.Goarch { - case "386", "amd64": - defs = append(defs, "-fsplit-stack") - } - defs = tools.maybePIC(defs) - return b.run(p.Dir, p.ImportPath, nil, envList("CC", cfg.DefaultCC), "-Wall", "-g", - "-I", objdir, "-I", inc, "-o", ofile, defs, "-c", cfile) -} - -// maybePIC adds -fPIC to the list of arguments if needed. -func (tools gccgoToolchain) maybePIC(args []string) []string { - switch cfg.BuildBuildmode { - case "c-shared", "shared", "plugin": - args = append(args, "-fPIC") - } - return args -} - -func gccgoPkgpath(p *load.Package) string { - if p.Internal.Build.IsCommand() && !p.Internal.ForceLibrary { - return "" - } - return p.ImportPath -} - -func gccgoCleanPkgpath(p *load.Package) string { - clean := func(r rune) rune { - switch { - case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z', - '0' <= r && r <= '9': - return r - } - return '_' - } - return strings.Map(clean, gccgoPkgpath(p)) -} - -// gcc runs the gcc C compiler to create an object from a single C file. -func (b *Builder) gcc(p *load.Package, out string, flags []string, cfile string) error { - return b.ccompile(p, out, flags, cfile, b.GccCmd(p.Dir)) -} - -// gxx runs the g++ C++ compiler to create an object from a single C++ file. -func (b *Builder) gxx(p *load.Package, out string, flags []string, cxxfile string) error { - return b.ccompile(p, out, flags, cxxfile, b.GxxCmd(p.Dir)) -} - -// gfortran runs the gfortran Fortran compiler to create an object from a single Fortran file. 
-func (b *Builder) gfortran(p *load.Package, out string, flags []string, ffile string) error { - return b.ccompile(p, out, flags, ffile, b.gfortranCmd(p.Dir)) -} - -// ccompile runs the given C or C++ compiler and creates an object from a single source file. -func (b *Builder) ccompile(p *load.Package, outfile string, flags []string, file string, compiler []string) error { - file = mkAbs(p.Dir, file) - desc := p.ImportPath - if !filepath.IsAbs(outfile) { - outfile = filepath.Join(p.Dir, outfile) - } - output, err := b.runOut(filepath.Dir(file), desc, nil, compiler, flags, "-o", outfile, "-c", filepath.Base(file)) - if len(output) > 0 { - // On FreeBSD 11, when we pass -g to clang 3.8 it - // invokes its internal assembler with -dwarf-version=2. - // When it sees .section .note.GNU-stack, it warns - // "DWARF2 only supports one section per compilation unit". - // This warning makes no sense, since the section is empty, - // but it confuses people. - // We work around the problem by detecting the warning - // and dropping -g and trying again. - if bytes.Contains(output, []byte("DWARF2 only supports one section per compilation unit")) { - newFlags := make([]string, 0, len(flags)) - for _, f := range flags { - if !strings.HasPrefix(f, "-g") { - newFlags = append(newFlags, f) - } - } - if len(newFlags) < len(flags) { - return b.ccompile(p, outfile, newFlags, file, compiler) - } - } - - b.showOutput(p.Dir, desc, b.processOutput(output)) - if err != nil { - err = errPrintedOutput - } else if os.Getenv("GO_BUILDER_NAME") != "" { - return errors.New("C compiler warning promoted to error on Go builders") - } - } - return err -} - -// gccld runs the gcc linker to create an executable from a set of object files. 
-func (b *Builder) gccld(p *load.Package, out string, flags []string, obj []string) error { - var cmd []string - if len(p.CXXFiles) > 0 || len(p.SwigCXXFiles) > 0 { - cmd = b.GxxCmd(p.Dir) - } else { - cmd = b.GccCmd(p.Dir) - } - return b.run(p.Dir, p.ImportPath, nil, cmd, "-o", out, obj, flags) -} - -// gccCmd returns a gcc command line prefix -// defaultCC is defined in zdefaultcc.go, written by cmd/dist. -func (b *Builder) GccCmd(objdir string) []string { - return b.ccompilerCmd("CC", cfg.DefaultCC, objdir) -} - -// gxxCmd returns a g++ command line prefix -// defaultCXX is defined in zdefaultcc.go, written by cmd/dist. -func (b *Builder) GxxCmd(objdir string) []string { - return b.ccompilerCmd("CXX", cfg.DefaultCXX, objdir) -} - -// gfortranCmd returns a gfortran command line prefix. -func (b *Builder) gfortranCmd(objdir string) []string { - return b.ccompilerCmd("FC", "gfortran", objdir) -} - -// ccompilerCmd returns a command line prefix for the given environment -// variable and using the default command when the variable is empty. -func (b *Builder) ccompilerCmd(envvar, defcmd, objdir string) []string { - // NOTE: env.go's mkEnv knows that the first three - // strings returned are "gcc", "-I", objdir (and cuts them off). - - compiler := envList(envvar, defcmd) - a := []string{compiler[0], "-I", objdir} - a = append(a, compiler[1:]...) - - // Definitely want -fPIC but on Windows gcc complains - // "-fPIC ignored for target (all code is position independent)" - if cfg.Goos != "windows" { - a = append(a, "-fPIC") - } - a = append(a, b.gccArchArgs()...) - // gcc-4.5 and beyond require explicit "-pthread" flag - // for multithreading with pthread library. 
- if cfg.BuildContext.CgoEnabled { - switch cfg.Goos { - case "windows": - a = append(a, "-mthreads") - default: - a = append(a, "-pthread") - } - } - - if strings.Contains(a[0], "clang") { - // disable ASCII art in clang errors, if possible - a = append(a, "-fno-caret-diagnostics") - // clang is too smart about command-line arguments - a = append(a, "-Qunused-arguments") - } - - // disable word wrapping in error messages - a = append(a, "-fmessage-length=0") - - // Tell gcc not to include the work directory in object files. - if b.gccSupportsFlag("-fdebug-prefix-map=a=b") { - a = append(a, "-fdebug-prefix-map="+b.WorkDir+"=/tmp/go-build") - } - - // Tell gcc not to include flags in object files, which defeats the - // point of -fdebug-prefix-map above. - if b.gccSupportsFlag("-gno-record-gcc-switches") { - a = append(a, "-gno-record-gcc-switches") - } - - // On OS X, some of the compilers behave as if -fno-common - // is always set, and the Mach-O linker in 6l/8l assumes this. - // See https://golang.org/issue/3253. - if cfg.Goos == "darwin" { - a = append(a, "-fno-common") - } - - return a -} - -// gccNoPie returns the flag to use to request non-PIE. On systems -// with PIE (position independent executables) enabled by default, -// -no-pie must be passed when doing a partial link with -Wl,-r. -// But -no-pie is not supported by all compilers, and clang spells it -nopie. -func (b *Builder) gccNoPie() string { - if b.gccSupportsFlag("-no-pie") { - return "-no-pie" - } - if b.gccSupportsFlag("-nopie") { - return "-nopie" - } - return "" -} - -// gccSupportsFlag checks to see if the compiler supports a flag. 
-func (b *Builder) gccSupportsFlag(flag string) bool { - b.exec.Lock() - defer b.exec.Unlock() - if b, ok := b.flagCache[flag]; ok { - return b - } - if b.flagCache == nil { - src := filepath.Join(b.WorkDir, "trivial.c") - if err := ioutil.WriteFile(src, []byte{}, 0666); err != nil { - return false - } - b.flagCache = make(map[string]bool) - } - cmdArgs := append(envList("CC", cfg.DefaultCC), flag, "-c", "trivial.c") - if cfg.BuildN || cfg.BuildX { - b.Showcmd(b.WorkDir, "%s", joinUnambiguously(cmdArgs)) - if cfg.BuildN { - return false - } - } - cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...) - cmd.Dir = b.WorkDir - cmd.Env = base.MergeEnvLists([]string{"LC_ALL=C"}, base.EnvForDir(cmd.Dir, os.Environ())) - out, err := cmd.CombinedOutput() - supported := err == nil && !bytes.Contains(out, []byte("unrecognized")) - b.flagCache[flag] = supported - return supported -} - -// gccArchArgs returns arguments to pass to gcc based on the architecture. -func (b *Builder) gccArchArgs() []string { - switch cfg.Goarch { - case "386": - return []string{"-m32"} - case "amd64", "amd64p32": - return []string{"-m64"} - case "arm": - return []string{"-marm"} // not thumb - case "s390x": - return []string{"-m64", "-march=z196"} - case "mips64", "mips64le": - return []string{"-mabi=64"} - case "mips", "mipsle": - return []string{"-mabi=32", "-march=mips32"} - } - return nil -} - -// envList returns the value of the given environment variable broken -// into fields, using the default value when the variable is empty. -func envList(key, def string) []string { - v := os.Getenv(key) - if v == "" { - v = def - } - return strings.Fields(v) -} - -// CFlags returns the flags to use when invoking the C, C++ or Fortran compilers, or cgo. 
-func (b *Builder) CFlags(p *load.Package) (cppflags, cflags, cxxflags, fflags, ldflags []string) { - defaults := "-g -O2" - - cppflags = str.StringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS) - cflags = str.StringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS) - cxxflags = str.StringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS) - fflags = str.StringList(envList("CGO_FFLAGS", defaults), p.CgoFFLAGS) - ldflags = str.StringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS) - return -} - -var cgoRe = regexp.MustCompile(`[/\\:]`) - -func (b *Builder) cgo(a *Action, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofiles, objdirCgofiles, gccfiles, gxxfiles, mfiles, ffiles []string) (outGo, outObj []string, err error) { - p := a.Package - cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, cgoFFLAGS, cgoLDFLAGS := b.CFlags(p) - cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...) - cgoLDFLAGS = append(cgoLDFLAGS, pcLDFLAGS...) - // If we are compiling Objective-C code, then we need to link against libobjc - if len(mfiles) > 0 { - cgoLDFLAGS = append(cgoLDFLAGS, "-lobjc") - } - - // Likewise for Fortran, except there are many Fortran compilers. - // Support gfortran out of the box and let others pass the correct link options - // via CGO_LDFLAGS - if len(ffiles) > 0 { - fc := os.Getenv("FC") - if fc == "" { - fc = "gfortran" - } - if strings.Contains(fc, "gfortran") { - cgoLDFLAGS = append(cgoLDFLAGS, "-lgfortran") - } - } - - if cfg.BuildMSan { - cgoCFLAGS = append([]string{"-fsanitize=memory"}, cgoCFLAGS...) - cgoLDFLAGS = append([]string{"-fsanitize=memory"}, cgoLDFLAGS...) - } - - // Allows including _cgo_export.h from .[ch] files in the package. - cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", obj) - - // If we have cgo files in the object directory, then copy any - // other cgo files into the object directory, and pass a - // -srcdir option to cgo. 
- var srcdirarg []string - if len(objdirCgofiles) > 0 { - for _, fn := range cgofiles { - if err := b.copyFile(a, obj+filepath.Base(fn), filepath.Join(p.Dir, fn), 0666, false); err != nil { - return nil, nil, err - } - } - cgofiles = append(cgofiles, objdirCgofiles...) - srcdirarg = []string{"-srcdir", obj} - } - - // cgo - // TODO: CGO_FLAGS? - gofiles := []string{obj + "_cgo_gotypes.go"} - cfiles := []string{"_cgo_export.c"} - for _, fn := range cgofiles { - f := cgoRe.ReplaceAllString(fn[:len(fn)-2], "_") - gofiles = append(gofiles, obj+f+"cgo1.go") - cfiles = append(cfiles, f+"cgo2.c") - } - - // TODO: make cgo not depend on $GOARCH? - - cgoflags := []string{} - if p.Standard && p.ImportPath == "runtime/cgo" { - cgoflags = append(cgoflags, "-import_runtime_cgo=false") - } - if p.Standard && (p.ImportPath == "runtime/race" || p.ImportPath == "runtime/msan" || p.ImportPath == "runtime/cgo") { - cgoflags = append(cgoflags, "-import_syscall=false") - } - - // Update $CGO_LDFLAGS with p.CgoLDFLAGS. - var cgoenv []string - if len(cgoLDFLAGS) > 0 { - flags := make([]string, len(cgoLDFLAGS)) - for i, f := range cgoLDFLAGS { - flags[i] = strconv.Quote(f) - } - cgoenv = []string{"CGO_LDFLAGS=" + strings.Join(flags, " ")} - } - - if cfg.BuildToolchainName == "gccgo" { - switch cfg.Goarch { - case "386", "amd64": - cgoCFLAGS = append(cgoCFLAGS, "-fsplit-stack") - } - cgoflags = append(cgoflags, "-gccgo") - if pkgpath := gccgoPkgpath(p); pkgpath != "" { - cgoflags = append(cgoflags, "-gccgopkgpath="+pkgpath) - } - } - - switch cfg.BuildBuildmode { - case "c-archive", "c-shared": - // Tell cgo that if there are any exported functions - // it should generate a header file that C code can - // #include. 
- cgoflags = append(cgoflags, "-exportheader="+obj+"_cgo_install.h") - } - - if err := b.run(p.Dir, p.ImportPath, cgoenv, cfg.BuildToolexec, cgoExe, srcdirarg, "-objdir", obj, "-importpath", p.ImportPath, cgoflags, "--", cgoCPPFLAGS, cgoCFLAGS, cgofiles); err != nil { - return nil, nil, err - } - outGo = append(outGo, gofiles...) - - // gcc - cflags := str.StringList(cgoCPPFLAGS, cgoCFLAGS) - for _, cfile := range cfiles { - ofile := obj + cfile[:len(cfile)-1] + "o" - if err := b.gcc(p, ofile, cflags, obj+cfile); err != nil { - return nil, nil, err - } - outObj = append(outObj, ofile) - } - - for _, file := range gccfiles { - base := filepath.Base(file) - ofile := obj + cgoRe.ReplaceAllString(base[:len(base)-1], "_") + "o" - if err := b.gcc(p, ofile, cflags, file); err != nil { - return nil, nil, err - } - outObj = append(outObj, ofile) - } - - cxxflags := str.StringList(cgoCPPFLAGS, cgoCXXFLAGS) - for _, file := range gxxfiles { - // Append .o to the file, just in case the pkg has file.c and file.cpp - ofile := obj + cgoRe.ReplaceAllString(filepath.Base(file), "_") + ".o" - if err := b.gxx(p, ofile, cxxflags, file); err != nil { - return nil, nil, err - } - outObj = append(outObj, ofile) - } - - for _, file := range mfiles { - // Append .o to the file, just in case the pkg has file.c and file.m - ofile := obj + cgoRe.ReplaceAllString(filepath.Base(file), "_") + ".o" - if err := b.gcc(p, ofile, cflags, file); err != nil { - return nil, nil, err - } - outObj = append(outObj, ofile) - } - - fflags := str.StringList(cgoCPPFLAGS, cgoFFLAGS) - for _, file := range ffiles { - // Append .o to the file, just in case the pkg has file.c and file.f - ofile := obj + cgoRe.ReplaceAllString(filepath.Base(file), "_") + ".o" - if err := b.gfortran(p, ofile, fflags, file); err != nil { - return nil, nil, err - } - outObj = append(outObj, ofile) - } - - switch cfg.BuildToolchainName { - case "gc": - importGo := obj + "_cgo_import.go" - if err := b.dynimport(p, obj, importGo, cgoExe, 
cflags, cgoLDFLAGS, outObj); err != nil { - return nil, nil, err - } - outGo = append(outGo, importGo) - - ofile := obj + "_all.o" - if err := b.collect(p, obj, ofile, cgoLDFLAGS, outObj); err != nil { - return nil, nil, err - } - outObj = []string{ofile} - - case "gccgo": - defunC := obj + "_cgo_defun.c" - defunObj := obj + "_cgo_defun.o" - if err := BuildToolchain.cc(b, p, obj, defunObj, defunC); err != nil { - return nil, nil, err - } - outObj = append(outObj, defunObj) - - default: - noCompiler() - } - - return outGo, outObj, nil -} - -// dynimport creates a Go source file named importGo containing -// //go:cgo_import_dynamic directives for each symbol or library -// dynamically imported by the object files outObj. -func (b *Builder) dynimport(p *load.Package, obj, importGo, cgoExe string, cflags, cgoLDFLAGS, outObj []string) error { - cfile := obj + "_cgo_main.c" - ofile := obj + "_cgo_main.o" - if err := b.gcc(p, ofile, cflags, cfile); err != nil { - return err - } - - linkobj := str.StringList(ofile, outObj, p.SysoFiles) - dynobj := obj + "_cgo_.o" - - // we need to use -pie for Linux/ARM to get accurate imported sym - ldflags := cgoLDFLAGS - if (cfg.Goarch == "arm" && cfg.Goos == "linux") || cfg.Goos == "android" { - ldflags = append(ldflags, "-pie") - } - if err := b.gccld(p, dynobj, ldflags, linkobj); err != nil { - return err - } - - // cgo -dynimport - var cgoflags []string - if p.Standard && p.ImportPath == "runtime/cgo" { - cgoflags = []string{"-dynlinker"} // record path to dynamic linker - } - return b.run(p.Dir, p.ImportPath, nil, cfg.BuildToolexec, cgoExe, "-dynpackage", p.Name, "-dynimport", dynobj, "-dynout", importGo, cgoflags) -} - -// collect partially links the object files outObj into a single -// relocatable object file named ofile. 
-func (b *Builder) collect(p *load.Package, obj, ofile string, cgoLDFLAGS, outObj []string) error { - // When linking relocatable objects, various flags need to be - // filtered out as they are inapplicable and can cause some linkers - // to fail. - var ldflags []string - for i := 0; i < len(cgoLDFLAGS); i++ { - f := cgoLDFLAGS[i] - switch { - // skip "-lc" or "-l somelib" - case strings.HasPrefix(f, "-l"): - if f == "-l" { - i++ - } - // skip "-framework X" on Darwin - case cfg.Goos == "darwin" && f == "-framework": - i++ - // skip "*.{dylib,so,dll,o,a}" - case strings.HasSuffix(f, ".dylib"), - strings.HasSuffix(f, ".so"), - strings.HasSuffix(f, ".dll"), - strings.HasSuffix(f, ".o"), - strings.HasSuffix(f, ".a"): - // Remove any -fsanitize=foo flags. - // Otherwise the compiler driver thinks that we are doing final link - // and links sanitizer runtime into the object file. But we are not doing - // the final link, we will link the resulting object file again. And - // so the program ends up with two copies of sanitizer runtime. - // See issue 8788 for details. - case strings.HasPrefix(f, "-fsanitize="): - continue - // runpath flags not applicable unless building a shared - // object or executable; see issue 12115 for details. This - // is necessary as Go currently does not offer a way to - // specify the set of LDFLAGS that only apply to shared - // objects. - case strings.HasPrefix(f, "-Wl,-rpath"): - if f == "-Wl,-rpath" || f == "-Wl,-rpath-link" { - // Skip following argument to -rpath* too. - i++ - } - default: - ldflags = append(ldflags, f) - } - } - - ldflags = append(ldflags, "-Wl,-r", "-nostdlib") - - if flag := b.gccNoPie(); flag != "" { - ldflags = append(ldflags, flag) - } - - // We are creating an object file, so we don't want a build ID. - ldflags = b.disableBuildID(ldflags) - - return b.gccld(p, ofile, ldflags, outObj) -} - -// Run SWIG on all SWIG input files. 
-// TODO: Don't build a shared library, once SWIG emits the necessary -// pragmas for external linking. -func (b *Builder) swig(p *load.Package, obj string, pcCFLAGS []string) (outGo, outC, outCXX []string, err error) { - if err := b.swigVersionCheck(); err != nil { - return nil, nil, nil, err - } - - intgosize, err := b.swigIntSize(obj) - if err != nil { - return nil, nil, nil, err - } - - for _, f := range p.SwigFiles { - goFile, cFile, err := b.swigOne(p, f, obj, pcCFLAGS, false, intgosize) - if err != nil { - return nil, nil, nil, err - } - if goFile != "" { - outGo = append(outGo, goFile) - } - if cFile != "" { - outC = append(outC, cFile) - } - } - for _, f := range p.SwigCXXFiles { - goFile, cxxFile, err := b.swigOne(p, f, obj, pcCFLAGS, true, intgosize) - if err != nil { - return nil, nil, nil, err - } - if goFile != "" { - outGo = append(outGo, goFile) - } - if cxxFile != "" { - outCXX = append(outCXX, cxxFile) - } - } - return outGo, outC, outCXX, nil -} - -// Make sure SWIG is new enough. -var ( - swigCheckOnce sync.Once - swigCheck error -) - -func (b *Builder) swigDoVersionCheck() error { - out, err := b.runOut("", "", nil, "swig", "-version") - if err != nil { - return err - } - re := regexp.MustCompile(`[vV]ersion +([\d]+)([.][\d]+)?([.][\d]+)?`) - matches := re.FindSubmatch(out) - if matches == nil { - // Can't find version number; hope for the best. - return nil - } - - major, err := strconv.Atoi(string(matches[1])) - if err != nil { - // Can't find version number; hope for the best. - return nil - } - const errmsg = "must have SWIG version >= 3.0.6" - if major < 3 { - return errors.New(errmsg) - } - if major > 3 { - // 4.0 or later - return nil - } - - // We have SWIG version 3.x. - if len(matches[2]) > 0 { - minor, err := strconv.Atoi(string(matches[2][1:])) - if err != nil { - return nil - } - if minor > 0 { - // 3.1 or later - return nil - } - } - - // We have SWIG version 3.0.x. 
- if len(matches[3]) > 0 { - patch, err := strconv.Atoi(string(matches[3][1:])) - if err != nil { - return nil - } - if patch < 6 { - // Before 3.0.6. - return errors.New(errmsg) - } - } - - return nil -} - -func (b *Builder) swigVersionCheck() error { - swigCheckOnce.Do(func() { - swigCheck = b.swigDoVersionCheck() - }) - return swigCheck -} - -// Find the value to pass for the -intgosize option to swig. -var ( - swigIntSizeOnce sync.Once - swigIntSize string - swigIntSizeError error -) - -// This code fails to build if sizeof(int) <= 32 -const swigIntSizeCode = ` -package main -const i int = 1 << 32 -` - -// Determine the size of int on the target system for the -intgosize option -// of swig >= 2.0.9. Run only once. -func (b *Builder) swigDoIntSize(obj string) (intsize string, err error) { - if cfg.BuildN { - return "$INTBITS", nil - } - src := filepath.Join(b.WorkDir, "swig_intsize.go") - if err = ioutil.WriteFile(src, []byte(swigIntSizeCode), 0666); err != nil { - return - } - srcs := []string{src} - - p := load.GoFilesPackage(srcs) - - if _, _, e := BuildToolchain.gc(b, p, "", obj, false, nil, srcs); e != nil { - return "32", nil - } - return "64", nil -} - -// Determine the size of int on the target system for the -intgosize option -// of swig >= 2.0.9. -func (b *Builder) swigIntSize(obj string) (intsize string, err error) { - swigIntSizeOnce.Do(func() { - swigIntSize, swigIntSizeError = b.swigDoIntSize(obj) - }) - return swigIntSize, swigIntSizeError -} - -// Run SWIG on one SWIG input file. 
-func (b *Builder) swigOne(p *load.Package, file, obj string, pcCFLAGS []string, cxx bool, intgosize string) (outGo, outC string, err error) { - cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _, _ := b.CFlags(p) - var cflags []string - if cxx { - cflags = str.StringList(cgoCPPFLAGS, pcCFLAGS, cgoCXXFLAGS) - } else { - cflags = str.StringList(cgoCPPFLAGS, pcCFLAGS, cgoCFLAGS) - } - - n := 5 // length of ".swig" - if cxx { - n = 8 // length of ".swigcxx" - } - base := file[:len(file)-n] - goFile := base + ".go" - gccBase := base + "_wrap." - gccExt := "c" - if cxx { - gccExt = "cxx" - } - - gccgo := cfg.BuildToolchainName == "gccgo" - - // swig - args := []string{ - "-go", - "-cgo", - "-intgosize", intgosize, - "-module", base, - "-o", obj + gccBase + gccExt, - "-outdir", obj, - } - - for _, f := range cflags { - if len(f) > 3 && f[:2] == "-I" { - args = append(args, f) - } - } - - if gccgo { - args = append(args, "-gccgo") - if pkgpath := gccgoPkgpath(p); pkgpath != "" { - args = append(args, "-go-pkgpath", pkgpath) - } - } - if cxx { - args = append(args, "-c++") - } - - out, err := b.runOut(p.Dir, p.ImportPath, nil, "swig", args, file) - if err != nil { - if len(out) > 0 { - if bytes.Contains(out, []byte("-intgosize")) || bytes.Contains(out, []byte("-cgo")) { - return "", "", errors.New("must have SWIG version >= 3.0.6") - } - b.showOutput(p.Dir, p.ImportPath, b.processOutput(out)) // swig error - return "", "", errPrintedOutput - } - return "", "", err - } - if len(out) > 0 { - b.showOutput(p.Dir, p.ImportPath, b.processOutput(out)) // swig warning - } - - return goFile, obj + gccBase + gccExt, nil -} - -// disableBuildID adjusts a linker command line to avoid creating a -// build ID when creating an object file rather than an executable or -// shared library. Some systems, such as Ubuntu, always add -// --build-id to every link, but we don't want a build ID when we are -// producing an object file. 
On some of those system a plain -r (not -// -Wl,-r) will turn off --build-id, but clang 3.0 doesn't support a -// plain -r. I don't know how to turn off --build-id when using clang -// other than passing a trailing --build-id=none. So that is what we -// do, but only on systems likely to support it, which is to say, -// systems that normally use gold or the GNU linker. -func (b *Builder) disableBuildID(ldflags []string) []string { - switch cfg.Goos { - case "android", "dragonfly", "linux", "netbsd": - ldflags = append(ldflags, "-Wl,--build-id=none") - } - return ldflags -} - -// An actionQueue is a priority queue of actions. -type actionQueue []*Action - -// Implement heap.Interface -func (q *actionQueue) Len() int { return len(*q) } -func (q *actionQueue) Swap(i, j int) { (*q)[i], (*q)[j] = (*q)[j], (*q)[i] } -func (q *actionQueue) Less(i, j int) bool { return (*q)[i].priority < (*q)[j].priority } -func (q *actionQueue) Push(x interface{}) { *q = append(*q, x.(*Action)) } -func (q *actionQueue) Pop() interface{} { - n := len(*q) - 1 - x := (*q)[n] - *q = (*q)[:n] - return x -} - -func (q *actionQueue) push(a *Action) { - heap.Push(q, a) -} - -func (q *actionQueue) pop() *Action { - return heap.Pop(q).(*Action) -} - -func InstrumentInit() { - if !cfg.BuildRace && !cfg.BuildMSan { - return - } - if cfg.BuildRace && cfg.BuildMSan { - fmt.Fprintf(os.Stderr, "go %s: may not use -race and -msan simultaneously\n", flag.Args()[0]) - os.Exit(2) - } - if cfg.BuildMSan && (cfg.Goos != "linux" || cfg.Goarch != "amd64") { - fmt.Fprintf(os.Stderr, "-msan is not supported on %s/%s\n", cfg.Goos, cfg.Goarch) - os.Exit(2) - } - if cfg.Goarch != "amd64" || cfg.Goos != "linux" && cfg.Goos != "freebsd" && cfg.Goos != "darwin" && cfg.Goos != "windows" { - fmt.Fprintf(os.Stderr, "go %s: -race and -msan are only supported on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64\n", flag.Args()[0]) - os.Exit(2) - } - if !cfg.BuildContext.CgoEnabled { - fmt.Fprintf(os.Stderr, "go %s: 
-race requires cgo; enable cgo by setting CGO_ENABLED=1\n", flag.Args()[0]) - os.Exit(2) - } - if cfg.BuildRace { - buildGcflags = append(buildGcflags, "-race") - cfg.BuildLdflags = append(cfg.BuildLdflags, "-race") - } else { - buildGcflags = append(buildGcflags, "-msan") - cfg.BuildLdflags = append(cfg.BuildLdflags, "-msan") - } - if cfg.BuildContext.InstallSuffix != "" { - cfg.BuildContext.InstallSuffix += "_" - } - - if cfg.BuildRace { - cfg.BuildContext.InstallSuffix += "race" - cfg.BuildContext.BuildTags = append(cfg.BuildContext.BuildTags, "race") - } else { - cfg.BuildContext.InstallSuffix += "msan" - cfg.BuildContext.BuildTags = append(cfg.BuildContext.BuildTags, "msan") - } -} - // ExecCmd is the command to use to run user binaries. // Normally it is empty, meaning run the binaries directly. // If cross-compiling and running on a remote system or diff --git a/src/cmd/go/internal/work/build_test.go b/src/cmd/go/internal/work/build_test.go index 294b83c6b2a..3f5ba37c641 100644 --- a/src/cmd/go/internal/work/build_test.go +++ b/src/cmd/go/internal/work/build_test.go @@ -175,8 +175,13 @@ func pkgImportPath(pkgpath string) *load.Package { // directory. // See https://golang.org/issue/18878. func TestRespectSetgidDir(t *testing.T) { - if runtime.GOOS == "nacl" { + switch runtime.GOOS { + case "nacl": t.Skip("can't set SetGID bit with chmod on nacl") + case "darwin": + if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" { + t.Skip("can't set SetGID bit with chmod on iOS") + } } var b Builder diff --git a/src/cmd/go/internal/work/buildid.go b/src/cmd/go/internal/work/buildid.go new file mode 100644 index 00000000000..3c90c15a701 --- /dev/null +++ b/src/cmd/go/internal/work/buildid.go @@ -0,0 +1,487 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package work + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "strings" + + "cmd/go/internal/base" + "cmd/go/internal/cache" + "cmd/go/internal/cfg" + "cmd/go/internal/load" + "cmd/go/internal/str" + "cmd/internal/buildid" +) + +// Build IDs +// +// Go packages and binaries are stamped with build IDs that record both +// the action ID, which is a hash of the inputs to the action that produced +// the packages or binary, and the content ID, which is a hash of the action +// output, namely the archive or binary itself. The hash is the same one +// used by the build artifact cache (see cmd/go/internal/cache), but +// truncated when stored in packages and binaries, as the full length is not +// needed and is a bit unwieldy. The precise form is +// +// actionID/[.../]contentID +// +// where the actionID and contentID are prepared by hashToString below. +// and are found by looking for the first or last slash. +// Usually the buildID is simply actionID/contentID, but see below for an +// exception. +// +// The build ID serves two primary purposes. +// +// 1. The action ID half allows installed packages and binaries to serve as +// one-element cache entries. If we intend to build math.a with a given +// set of inputs summarized in the action ID, and the installed math.a already +// has that action ID, we can reuse the installed math.a instead of rebuilding it. +// +// 2. The content ID half allows the easy preparation of action IDs for steps +// that consume a particular package or binary. The content hash of every +// input file for a given action must be included in the action ID hash. +// Storing the content ID in the build ID lets us read it from the file with +// minimal I/O, instead of reading and hashing the entire file. +// This is especially effective since packages and binaries are typically +// the largest inputs to an action. +// +// Separating action ID from content ID is important for reproducible builds. +// The compiler is compiled with itself. 
If an output were represented by its +// own action ID (instead of content ID) when computing the action ID of +// the next step in the build process, then the compiler could never have its +// own input action ID as its output action ID (short of a miraculous hash collision). +// Instead we use the content IDs to compute the next action ID, and because +// the content IDs converge, so too do the action IDs and therefore the +// build IDs and the overall compiler binary. See cmd/dist's cmdbootstrap +// for the actual convergence sequence. +// +// The “one-element cache” purpose is a bit more complex for installed +// binaries. For a binary, like cmd/gofmt, there are two steps: compile +// cmd/gofmt/*.go into main.a, and then link main.a into the gofmt binary. +// We do not install gofmt's main.a, only the gofmt binary. Being able to +// decide that the gofmt binary is up-to-date means computing the action ID +// for the final link of the gofmt binary and comparing it against the +// already-installed gofmt binary. But computing the action ID for the link +// means knowing the content ID of main.a, which we did not keep. +// To sidestep this problem, each binary actually stores an expanded build ID: +// +// actionID(binary)/actionID(main.a)/contentID(main.a)/contentID(binary) +// +// (Note that this can be viewed equivalently as: +// +// actionID(binary)/buildID(main.a)/contentID(binary) +// +// Storing the buildID(main.a) in the middle lets the computations that care +// about the prefix or suffix halves ignore the middle and preserves the +// original build ID as a contiguous string.) 
+// +// During the build, when it's time to build main.a, the gofmt binary has the +// information needed to decide whether the eventual link would produce +// the same binary: if the action ID for main.a's inputs matches and then +// the action ID for the link step matches when assuming the given main.a +// content ID, then the binary as a whole is up-to-date and need not be rebuilt. +// +// This is all a bit complex and may be simplified once we can rely on the +// main cache, but at least at the start we will be using the content-based +// staleness determination without a cache beyond the usual installed +// package and binary locations. + +const buildIDSeparator = "/" + +// actionID returns the action ID half of a build ID. +func actionID(buildID string) string { + i := strings.Index(buildID, buildIDSeparator) + if i < 0 { + return buildID + } + return buildID[:i] +} + +// contentID returns the content ID half of a build ID. +func contentID(buildID string) string { + return buildID[strings.LastIndex(buildID, buildIDSeparator)+1:] +} + +// hashToString converts the hash h to a string to be recorded +// in package archives and binaries as part of the build ID. +// We use the first 96 bits of the hash and encode it in base64, +// resulting in a 16-byte string. Because this is only used for +// detecting the need to rebuild installed files (not for lookups +// in the object file cache), 96 bits are sufficient to drive the +// probability of a false "do not need to rebuild" decision to effectively zero. +// We embed two different hashes in archives and four in binaries, +// so cutting to 16 bytes is a significant savings when build IDs are displayed. +// (16*4+3 = 67 bytes compared to 64*4+3 = 259 bytes for the +// more straightforward option of printing the entire h in hex). 
+func hashToString(h [cache.HashSize]byte) string { + const b64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" + const chunks = 5 + var dst [chunks * 4]byte + for i := 0; i < chunks; i++ { + v := uint32(h[3*i])<<16 | uint32(h[3*i+1])<<8 | uint32(h[3*i+2]) + dst[4*i+0] = b64[(v>>18)&0x3F] + dst[4*i+1] = b64[(v>>12)&0x3F] + dst[4*i+2] = b64[(v>>6)&0x3F] + dst[4*i+3] = b64[v&0x3F] + } + return string(dst[:]) +} + +// toolID returns the unique ID to use for the current copy of the +// named tool (asm, compile, cover, link). +// +// It is important that if the tool changes (for example a compiler bug is fixed +// and the compiler reinstalled), toolID returns a different string, so that old +// package archives look stale and are rebuilt (with the fixed compiler). +// This suggests using a content hash of the tool binary, as stored in the build ID. +// +// Unfortunately, we can't just open the tool binary, because the tool might be +// invoked via a wrapper program specified by -toolexec and we don't know +// what the wrapper program does. In particular, we want "-toolexec toolstash" +// to continue working: it does no good if "-toolexec toolstash" is executing a +// stashed copy of the compiler but the go command is acting as if it will run +// the standard copy of the compiler. The solution is to ask the tool binary to tell +// us its own build ID using the "-V=full" flag now supported by all tools. +// Then we know we're getting the build ID of the compiler that will actually run +// during the build. (How does the compiler binary know its own content hash? +// We store it there using updateBuildID after the standard link step.) +// +// A final twist is that we'd prefer to have reproducible builds for release toolchains. +// It should be possible to cross-compile for Windows from either Linux or Mac +// or Windows itself and produce the same binaries, bit for bit. 
If the tool ID,
+// which influences the action ID half of the build ID, is based on the content ID,
+// then the Linux compiler binary and Mac compiler binary will have different tool IDs
+// and therefore produce executables with different action IDs.
+// To avoid this problem, for releases we use the release version string instead
+// of the compiler binary's content hash. This assumes that all compilers built
+// on all different systems are semantically equivalent, which is of course only true
+// modulo bugs. (Producing the exact same executables also requires that the different
+// build setups agree on details like $GOROOT and file name paths, but at least the
+// tool IDs do not make it impossible.)
+func (b *Builder) toolID(name string) string {
+	b.id.Lock()
+	id := b.toolIDCache[name]
+	b.id.Unlock()
+
+	if id != "" {
+		return id
+	}
+
+	cmdline := str.StringList(cfg.BuildToolexec, base.Tool(name), "-V=full")
+	cmd := exec.Command(cmdline[0], cmdline[1:]...)
+	cmd.Env = base.EnvForDir(cmd.Dir, os.Environ())
+	var stdout, stderr bytes.Buffer
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+	if err := cmd.Run(); err != nil {
+		base.Fatalf("go tool %s: %v\n%s%s", name, err, stdout.Bytes(), stderr.Bytes())
+	}
+
+	line := stdout.String()
+	f := strings.Fields(line)
+	if len(f) < 3 || f[0] != name || f[1] != "version" || f[2] == "devel" && !strings.HasPrefix(f[len(f)-1], "buildID=") {
+		base.Fatalf("go tool %s -V=full: unexpected output:\n\t%s", name, line)
+	}
+	if f[2] == "devel" {
+		// On the development branch, use the content ID part of the build ID.
+		id = contentID(f[len(f)-1])
+	} else {
+		// For a release, the output is like: "compile version go1.9.1". Use the whole line.
+		id = f[2]
+	}
+
+	b.id.Lock()
+	b.toolIDCache[name] = id
+	b.id.Unlock()
+
+	return id
+}
+
+// buildID returns the build ID found in the given file.
+// If no build ID is found, buildID returns the content hash of the file.
+func (b *Builder) buildID(file string) string { + b.id.Lock() + id := b.buildIDCache[file] + b.id.Unlock() + + if id != "" { + return id + } + + id, err := buildid.ReadFile(file) + if err != nil { + id = b.fileHash(file) + } + + b.id.Lock() + b.buildIDCache[file] = id + b.id.Unlock() + + return id +} + +// fileHash returns the content hash of the named file. +func (b *Builder) fileHash(file string) string { + sum, err := cache.FileHash(file) + if err != nil { + return "" + } + return hashToString(sum) +} + +// useCache tries to satisfy the action a, which has action ID actionHash, +// by using a cached result from an earlier build. At the moment, the only +// cached result is the installed package or binary at target. +// If useCache decides that the cache can be used, it sets a.buildID +// and a.built for use by parent actions and then returns true. +// Otherwise it sets a.buildID to a temporary build ID for use in the build +// and returns false. When useCache returns false the expectation is that +// the caller will build the target and then call updateBuildID to finish the +// build ID computation. +// When useCache returns false, it may have initiated buffering of output +// during a's work. The caller should defer b.flushOutput(a), to make sure +// that flushOutput is eventually called regardless of whether the action +// succeeds. The flushOutput call must happen after updateBuildID. +func (b *Builder) useCache(a *Action, p *load.Package, actionHash cache.ActionID, target string) bool { + // The second half of the build ID here is a placeholder for the content hash. + // It's important that the overall buildID be unlikely verging on impossible + // to appear in the output by chance, but that should be taken care of by + // the actionID half; if it also appeared in the input that would be like an + // engineered 96-bit partial SHA256 collision. 
+ a.actionID = actionHash + actionID := hashToString(actionHash) + contentID := actionID // temporary placeholder, likely unique + a.buildID = actionID + buildIDSeparator + contentID + + // Executable binaries also record the main build ID in the middle. + // See "Build IDs" comment above. + if a.Mode == "link" { + mainpkg := a.Deps[0] + a.buildID = actionID + buildIDSeparator + mainpkg.buildID + buildIDSeparator + contentID + } + + // Check to see if target exists and matches the expected action ID. + // If so, it's up to date and we can reuse it instead of rebuilding it. + var buildID string + if target != "" && !cfg.BuildA { + var err error + buildID, err = buildid.ReadFile(target) + if err != nil && b.ComputeStaleOnly { + if p != nil && !p.Stale { + p.Stale = true + p.StaleReason = "target missing" + } + return true + } + if strings.HasPrefix(buildID, actionID+buildIDSeparator) { + a.buildID = buildID + a.built = target + // Poison a.Target to catch uses later in the build. + a.Target = "DO NOT USE - " + a.Mode + return true + } + } + + // Special case for building a main package: if the only thing we + // want the package for is to link a binary, and the binary is + // already up-to-date, then to avoid a rebuild, report the package + // as up-to-date as well. See "Build IDs" comment above. + // TODO(rsc): Rewrite this code to use a TryCache func on the link action. + if target != "" && !cfg.BuildA && a.Mode == "build" && len(a.triggers) == 1 && a.triggers[0].Mode == "link" { + buildID, err := buildid.ReadFile(target) + if err == nil { + id := strings.Split(buildID, buildIDSeparator) + if len(id) == 4 && id[1] == actionID { + // Temporarily assume a.buildID is the package build ID + // stored in the installed binary, and see if that makes + // the upcoming link action ID a match. If so, report that + // we built the package, safe in the knowledge that the + // link step will not ask us for the actual package file. 
+ // Note that (*Builder).LinkAction arranged that all of + // a.triggers[0]'s dependencies other than a are also + // dependencies of a, so that we can be sure that, + // other than a.buildID, b.linkActionID is only accessing + // build IDs of completed actions. + oldBuildID := a.buildID + a.buildID = id[1] + buildIDSeparator + id[2] + linkID := hashToString(b.linkActionID(a.triggers[0])) + if id[0] == linkID { + // Poison a.Target to catch uses later in the build. + a.Target = "DO NOT USE - main build pseudo-cache Target" + a.built = "DO NOT USE - main build pseudo-cache built" + return true + } + // Otherwise restore old build ID for main build. + a.buildID = oldBuildID + } + } + } + + // Special case for linking a test binary: if the only thing we + // want the binary for is to run the test, and the test result is cached, + // then to avoid the link step, report the link as up-to-date. + // We avoid the nested build ID problem in the previous special case + // by recording the test results in the cache under the action ID half. + if !cfg.BuildA && len(a.triggers) == 1 && a.triggers[0].TryCache != nil && a.triggers[0].TryCache(b, a.triggers[0]) { + a.Target = "DO NOT USE - pseudo-cache Target" + a.built = "DO NOT USE - pseudo-cache built" + return true + } + + if b.ComputeStaleOnly { + // Invoked during go list only to compute and record staleness. + if p := a.Package; p != nil && !p.Stale { + p.Stale = true + if cfg.BuildA { + p.StaleReason = "build -a flag in use" + } else { + p.StaleReason = "build ID mismatch" + for _, p1 := range p.Internal.Imports { + if p1.Stale && p1.StaleReason != "" { + if strings.HasPrefix(p1.StaleReason, "stale dependency: ") { + p.StaleReason = p1.StaleReason + break + } + if strings.HasPrefix(p.StaleReason, "build ID mismatch") { + p.StaleReason = "stale dependency: " + p1.ImportPath + } + } + } + } + } + return true + } + + // Check the build artifact cache. 
+ // We treat hits in this cache as being "stale" for the purposes of go list + // (in effect, "stale" means whether p.Target is up-to-date), + // but we're still happy to use results from the build artifact cache. + if c := cache.Default(); c != nil { + if !cfg.BuildA { + entry, err := c.Get(actionHash) + if err == nil { + file := c.OutputFile(entry.OutputID) + info, err1 := os.Stat(file) + buildID, err2 := buildid.ReadFile(file) + if err1 == nil && err2 == nil && info.Size() == entry.Size { + stdout, stdoutEntry, err := c.GetBytes(cache.Subkey(a.actionID, "stdout")) + if err == nil { + if len(stdout) > 0 { + if cfg.BuildX || cfg.BuildN { + b.Showcmd("", "%s # internal", joinUnambiguously(str.StringList("cat", c.OutputFile(stdoutEntry.OutputID)))) + } + if !cfg.BuildN { + b.Print(string(stdout)) + } + } + a.built = file + a.Target = "DO NOT USE - using cache" + a.buildID = buildID + return true + } + } + } + } + + // Begin saving output for later writing to cache. + a.output = []byte{} + } + + return false +} + +// flushOutput flushes the output being queued in a. +func (b *Builder) flushOutput(a *Action) { + b.Print(string(a.output)) + a.output = nil +} + +// updateBuildID updates the build ID in the target written by action a. +// It requires that useCache was called for action a and returned false, +// and that the build was then carried out and given the temporary +// a.buildID to record as the build ID in the resulting package or binary. +// updateBuildID computes the final content ID and updates the build IDs +// in the binary. +func (b *Builder) updateBuildID(a *Action, target string, rewrite bool) error { + if cfg.BuildX || cfg.BuildN { + if rewrite { + b.Showcmd("", "%s # internal", joinUnambiguously(str.StringList(base.Tool("buildid"), "-w", target))) + } + if cfg.BuildN { + return nil + } + } + + // Find occurrences of old ID and compute new content-based ID. 
+ r, err := os.Open(target) + if err != nil { + return err + } + matches, hash, err := buildid.FindAndHash(r, a.buildID, 0) + r.Close() + if err != nil { + return err + } + newID := a.buildID[:strings.LastIndex(a.buildID, buildIDSeparator)] + buildIDSeparator + hashToString(hash) + if len(newID) != len(a.buildID) { + return fmt.Errorf("internal error: build ID length mismatch %q vs %q", a.buildID, newID) + } + + // Replace with new content-based ID. + a.buildID = newID + if len(matches) == 0 { + // Assume the user specified -buildid= to override what we were going to choose. + return nil + } + + if rewrite { + w, err := os.OpenFile(target, os.O_WRONLY, 0) + if err != nil { + return err + } + err = buildid.Rewrite(w, matches, newID) + if err != nil { + w.Close() + return err + } + if err := w.Close(); err != nil { + return err + } + } + + // Cache package builds, but not binaries (link steps). + // The expectation is that binaries are not reused + // nearly as often as individual packages, and they're + // much larger, so the cache-footprint-to-utility ratio + // of binaries is much lower for binaries. + // Not caching the link step also makes sure that repeated "go run" at least + // always rerun the linker, so that they don't get too fast. + // (We don't want people thinking go is a scripting language.) + // Note also that if we start caching binaries, then we will + // copy the binaries out of the cache to run them, and then + // that will mean the go process is itself writing a binary + // and then executing it, so we will need to defend against + // ETXTBSY problems as discussed in exec.go and golang.org/issue/22220. 
+ if c := cache.Default(); c != nil && a.Mode == "build" { + r, err := os.Open(target) + if err == nil { + if a.output == nil { + panic("internal error: a.output not set") + } + outputID, _, err := c.Put(a.actionID, r) + if err == nil && cfg.BuildX { + b.Showcmd("", "%s # internal", joinUnambiguously(str.StringList("cp", target, c.OutputFile(outputID)))) + } + c.PutBytes(cache.Subkey(a.actionID, "stdout"), a.output) + r.Close() + } + } + + return nil +} diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go new file mode 100644 index 00000000000..43409de764f --- /dev/null +++ b/src/cmd/go/internal/work/exec.go @@ -0,0 +1,2302 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Action graph execution. + +package work + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "cmd/go/internal/base" + "cmd/go/internal/cache" + "cmd/go/internal/cfg" + "cmd/go/internal/load" + "cmd/go/internal/str" +) + +// actionList returns the list of actions in the dag rooted at root +// as visited in a depth-first post-order traversal. +func actionList(root *Action) []*Action { + seen := map[*Action]bool{} + all := []*Action{} + var walk func(*Action) + walk = func(a *Action) { + if seen[a] { + return + } + seen[a] = true + for _, a1 := range a.Deps { + walk(a1) + } + all = append(all, a) + } + walk(root) + return all +} + +// do runs the action graph rooted at root. +func (b *Builder) Do(root *Action) { + // Build list of all actions, assigning depth-first post-order priority. + // The original implementation here was a true queue + // (using a channel) but it had the effect of getting + // distracted by low-level leaf actions to the detriment + // of completing higher-level actions. 
The order of + // work does not matter much to overall execution time, + // but when running "go test std" it is nice to see each test + // results as soon as possible. The priorities assigned + // ensure that, all else being equal, the execution prefers + // to do what it would have done first in a simple depth-first + // dependency order traversal. + all := actionList(root) + for i, a := range all { + a.priority = i + } + + if cfg.DebugActiongraph != "" { + js := actionGraphJSON(root) + if err := ioutil.WriteFile(cfg.DebugActiongraph, []byte(js), 0666); err != nil { + fmt.Fprintf(os.Stderr, "go: writing action graph: %v\n", err) + base.SetExitStatus(1) + } + } + + b.readySema = make(chan bool, len(all)) + + // Initialize per-action execution state. + for _, a := range all { + for _, a1 := range a.Deps { + a1.triggers = append(a1.triggers, a) + } + a.pending = len(a.Deps) + if a.pending == 0 { + b.ready.push(a) + b.readySema <- true + } + } + + // Handle runs a single action and takes care of triggering + // any actions that are runnable as a result. + handle := func(a *Action) { + var err error + + if a.Func != nil && (!a.Failed || a.IgnoreFail) { + if err == nil { + err = a.Func(b, a) + } + } + + // The actions run in parallel but all the updates to the + // shared work state are serialized through b.exec. + b.exec.Lock() + defer b.exec.Unlock() + + if err != nil { + if err == errPrintedOutput { + base.SetExitStatus(2) + } else { + base.Errorf("%s", err) + } + a.Failed = true + } + + for _, a0 := range a.triggers { + if a.Failed { + a0.Failed = true + } + if a0.pending--; a0.pending == 0 { + b.ready.push(a0) + b.readySema <- true + } + } + + if a == root { + close(b.readySema) + } + } + + var wg sync.WaitGroup + + // Kick off goroutines according to parallelism. + // If we are using the -n flag (just printing commands) + // drop the parallelism to 1, both to make the output + // deterministic and because there is no real work anyway. 
+ par := cfg.BuildP + if cfg.BuildN { + par = 1 + } + for i := 0; i < par; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case _, ok := <-b.readySema: + if !ok { + return + } + // Receiving a value from b.readySema entitles + // us to take from the ready queue. + b.exec.Lock() + a := b.ready.pop() + b.exec.Unlock() + handle(a) + case <-base.Interrupted: + base.SetExitStatus(1) + return + } + } + }() + } + + wg.Wait() +} + +// buildActionID computes the action ID for a build action. +func (b *Builder) buildActionID(a *Action) cache.ActionID { + p := a.Package + h := cache.NewHash("build " + p.ImportPath) + + // Configuration independent of compiler toolchain. + // Note: buildmode has already been accounted for in buildGcflags + // and should not be inserted explicitly. Most buildmodes use the + // same compiler settings and can reuse each other's results. + // If not, the reason is already recorded in buildGcflags. + fmt.Fprintf(h, "compile\n") + // The compiler hides the exact value of $GOROOT + // when building things in GOROOT, + // but it does not hide the exact value of $GOPATH. + // Include the full dir in that case. + // Assume b.WorkDir is being trimmed properly. 
+ if !p.Goroot && !strings.HasPrefix(p.Dir, b.WorkDir) { + fmt.Fprintf(h, "dir %s\n", p.Dir) + } + fmt.Fprintf(h, "goos %s goarch %s\n", cfg.Goos, cfg.Goarch) + fmt.Fprintf(h, "import %q\n", p.ImportPath) + fmt.Fprintf(h, "omitdebug %v standard %v local %v prefix %q\n", p.Internal.OmitDebug, p.Standard, p.Internal.Local, p.Internal.LocalPrefix) + if len(p.CgoFiles)+len(p.SwigFiles) > 0 { + fmt.Fprintf(h, "cgo %q\n", b.toolID("cgo")) + cppflags, cflags, cxxflags, fflags, _ := b.CFlags(p) + fmt.Fprintf(h, "CC=%q %q %q\n", b.ccExe(), cppflags, cflags) + if len(p.CXXFiles)+len(p.SwigFiles) > 0 { + fmt.Fprintf(h, "CXX=%q %q\n", b.cxxExe(), cxxflags) + } + if len(p.FFiles) > 0 { + fmt.Fprintf(h, "FC=%q %q\n", b.fcExe(), fflags) + } + // TODO(rsc): Should we include the SWIG version or Fortran/GCC/G++/Objective-C compiler versions? + } + if p.Internal.CoverMode != "" { + fmt.Fprintf(h, "cover %q %q\n", p.Internal.CoverMode, b.toolID("cover")) + } + + // Configuration specific to compiler toolchain. + switch cfg.BuildToolchainName { + default: + base.Fatalf("buildActionID: unknown build toolchain %q", cfg.BuildToolchainName) + case "gc": + fmt.Fprintf(h, "compile %s %q %q\n", b.toolID("compile"), forcedGcflags, p.Internal.Gcflags) + if len(p.SFiles) > 0 { + fmt.Fprintf(h, "asm %q %q %q\n", b.toolID("asm"), forcedAsmflags, p.Internal.Asmflags) + } + fmt.Fprintf(h, "GO$GOARCH=%s\n", os.Getenv("GO"+strings.ToUpper(cfg.BuildContext.GOARCH))) // GO386, GOARM, etc + + // TODO(rsc): Convince compiler team not to add more magic environment variables, + // or perhaps restrict the environment variables passed to subprocesses. 
+ magic := []string{ + "GOCLOBBERDEADHASH", + "GOSSAFUNC", + "GO_SSA_PHI_LOC_CUTOFF", + "GOSSAHASH", + } + for _, env := range magic { + if x := os.Getenv(env); x != "" { + fmt.Fprintf(h, "magic %s=%s\n", env, x) + } + } + if os.Getenv("GOSSAHASH") != "" { + for i := 0; ; i++ { + env := fmt.Sprintf("GOSSAHASH%d", i) + x := os.Getenv(env) + if x == "" { + break + } + fmt.Fprintf(h, "magic %s=%s\n", env, x) + } + } + if os.Getenv("GSHS_LOGFILE") != "" { + // Clumsy hack. Compiler writes to this log file, + // so do not allow use of cache at all. + // We will still write to the cache but it will be + // essentially unfindable. + fmt.Fprintf(h, "nocache %d\n", time.Now().UnixNano()) + } + } + + // Input files. + inputFiles := str.StringList( + p.GoFiles, + p.CgoFiles, + p.CFiles, + p.CXXFiles, + p.FFiles, + p.MFiles, + p.HFiles, + p.SFiles, + p.SysoFiles, + p.SwigFiles, + p.SwigCXXFiles, + ) + for _, file := range inputFiles { + fmt.Fprintf(h, "file %s %s\n", file, b.fileHash(filepath.Join(p.Dir, file))) + } + for _, a1 := range a.Deps { + p1 := a1.Package + if p1 != nil { + fmt.Fprintf(h, "import %s %s\n", p1.ImportPath, contentID(a1.buildID)) + } + } + + return h.Sum() +} + +// build is the action for building a single package. +// Note that any new influence on this logic must be reported in b.buildActionID above as well. +func (b *Builder) build(a *Action) (err error) { + p := a.Package + cached := false + if !p.BinaryOnly { + if b.useCache(a, p, b.buildActionID(a), p.Target) { + // If this build triggers a header install, run cgo to get the header. + // TODO(rsc): Once we can cache multiple file outputs from an action, + // the header should be cached, and then this awful test can be deleted. + // Need to look for install header actions depending on this action, + // or depending on a link that depends on this action. 
+ needHeader := false + if (a.Package.UsesCgo() || a.Package.UsesSwig()) && (cfg.BuildBuildmode == "c-archive" || cfg.BuildBuildmode == "c-header") { + for _, t1 := range a.triggers { + if t1.Mode == "install header" { + needHeader = true + goto CheckedHeader + } + } + for _, t1 := range a.triggers { + for _, t2 := range t1.triggers { + if t2.Mode == "install header" { + needHeader = true + goto CheckedHeader + } + } + } + } + CheckedHeader: + if b.ComputeStaleOnly || !a.needVet && !needHeader { + return nil + } + cached = true + } + defer b.flushOutput(a) + } + + defer func() { + if err != nil && err != errPrintedOutput { + err = fmt.Errorf("go build %s: %v", a.Package.ImportPath, err) + } + }() + if cfg.BuildN { + // In -n mode, print a banner between packages. + // The banner is five lines so that when changes to + // different sections of the bootstrap script have to + // be merged, the banners give patch something + // to use to find its context. + b.Print("\n#\n# " + a.Package.ImportPath + "\n#\n\n") + } + + if cfg.BuildV { + b.Print(a.Package.ImportPath + "\n") + } + + if a.Package.BinaryOnly { + _, err := os.Stat(a.Package.Target) + if err == nil { + a.built = a.Package.Target + a.Target = a.Package.Target + a.buildID = b.fileHash(a.Package.Target) + a.Package.Stale = false + a.Package.StaleReason = "binary-only package" + return nil + } + if b.ComputeStaleOnly { + a.Package.Stale = true + a.Package.StaleReason = "missing or invalid binary-only package" + return nil + } + return fmt.Errorf("missing or invalid binary-only package") + } + + if err := b.Mkdir(a.Objdir); err != nil { + return err + } + objdir := a.Objdir + + // make target directory + dir, _ := filepath.Split(a.Target) + if dir != "" { + if err := b.Mkdir(dir); err != nil { + return err + } + } + + var gofiles, cgofiles, cfiles, sfiles, cxxfiles, objects, cgoObjects, pcCFLAGS, pcLDFLAGS []string + + gofiles = append(gofiles, a.Package.GoFiles...) 
+ cgofiles = append(cgofiles, a.Package.CgoFiles...) + cfiles = append(cfiles, a.Package.CFiles...) + sfiles = append(sfiles, a.Package.SFiles...) + cxxfiles = append(cxxfiles, a.Package.CXXFiles...) + + if a.Package.UsesCgo() || a.Package.UsesSwig() { + if pcCFLAGS, pcLDFLAGS, err = b.getPkgConfigFlags(a.Package); err != nil { + return + } + } + + // Run SWIG on each .swig and .swigcxx file. + // Each run will generate two files, a .go file and a .c or .cxx file. + // The .go file will use import "C" and is to be processed by cgo. + if a.Package.UsesSwig() { + outGo, outC, outCXX, err := b.swig(a, a.Package, objdir, pcCFLAGS) + if err != nil { + return err + } + cgofiles = append(cgofiles, outGo...) + cfiles = append(cfiles, outC...) + cxxfiles = append(cxxfiles, outCXX...) + } + + // If we're doing coverage, preprocess the .go files and put them in the work directory + if a.Package.Internal.CoverMode != "" { + for i, file := range str.StringList(gofiles, cgofiles) { + var sourceFile string + var coverFile string + var key string + if strings.HasSuffix(file, ".cgo1.go") { + // cgo files have absolute paths + base := filepath.Base(file) + sourceFile = file + coverFile = objdir + base + key = strings.TrimSuffix(base, ".cgo1.go") + ".go" + } else { + sourceFile = filepath.Join(a.Package.Dir, file) + coverFile = objdir + file + key = file + } + coverFile = strings.TrimSuffix(coverFile, ".go") + ".cover.go" + cover := a.Package.Internal.CoverVars[key] + if cover == nil || base.IsTestFile(file) { + // Not covering this file. + continue + } + if err := b.cover(a, coverFile, sourceFile, 0666, cover.Var); err != nil { + return err + } + if i < len(gofiles) { + gofiles[i] = coverFile + } else { + cgofiles[i-len(gofiles)] = coverFile + } + } + } + + // Run cgo. + if a.Package.UsesCgo() || a.Package.UsesSwig() { + // In a package using cgo, cgo compiles the C, C++ and assembly files with gcc. 
+ // There is one exception: runtime/cgo's job is to bridge the + // cgo and non-cgo worlds, so it necessarily has files in both. + // In that case gcc only gets the gcc_* files. + var gccfiles []string + gccfiles = append(gccfiles, cfiles...) + cfiles = nil + if a.Package.Standard && a.Package.ImportPath == "runtime/cgo" { + filter := func(files, nongcc, gcc []string) ([]string, []string) { + for _, f := range files { + if strings.HasPrefix(f, "gcc_") { + gcc = append(gcc, f) + } else { + nongcc = append(nongcc, f) + } + } + return nongcc, gcc + } + sfiles, gccfiles = filter(sfiles, sfiles[:0], gccfiles) + } else { + for _, sfile := range sfiles { + data, err := ioutil.ReadFile(filepath.Join(a.Package.Dir, sfile)) + if err == nil { + if bytes.HasPrefix(data, []byte("TEXT")) || bytes.Contains(data, []byte("\nTEXT")) || + bytes.HasPrefix(data, []byte("DATA")) || bytes.Contains(data, []byte("\nDATA")) || + bytes.HasPrefix(data, []byte("GLOBL")) || bytes.Contains(data, []byte("\nGLOBL")) { + return fmt.Errorf("package using cgo has Go assembly file %s", sfile) + } + } + } + gccfiles = append(gccfiles, sfiles...) + sfiles = nil + } + + outGo, outObj, err := b.cgo(a, base.Tool("cgo"), objdir, pcCFLAGS, pcLDFLAGS, mkAbsFiles(a.Package.Dir, cgofiles), gccfiles, cxxfiles, a.Package.MFiles, a.Package.FFiles) + if err != nil { + return err + } + if cfg.BuildToolchainName == "gccgo" { + cgoObjects = append(cgoObjects, a.Objdir+"_cgo_flags") + } + cgoObjects = append(cgoObjects, outObj...) + gofiles = append(gofiles, outGo...) + } + if cached && !a.needVet { + return nil + } + + // Sanity check only, since Package.load already checked as well. + if len(gofiles) == 0 { + return &load.NoGoError{Package: a.Package} + } + + // Prepare Go vet config if needed. 
+	var vcfg *vetConfig
+	if a.needVet {
+		// Pass list of absolute paths to vet,
+		// so that vet's error messages will use absolute paths,
+		// so that we can reformat them relative to the directory
+		// in which the go command is invoked.
+		vcfg = &vetConfig{
+			Compiler:    cfg.BuildToolchainName,
+			Dir:         a.Package.Dir,
+			GoFiles:     mkAbsFiles(a.Package.Dir, gofiles),
+			ImportMap:   make(map[string]string),
+			PackageFile: make(map[string]string),
+		}
+		a.vetCfg = vcfg
+		for i, raw := range a.Package.Internal.RawImports {
+			final := a.Package.Imports[i]
+			vcfg.ImportMap[raw] = final
+		}
+	}
+
+	// Prepare Go import config.
+	var icfg bytes.Buffer
+	for i, raw := range a.Package.Internal.RawImports {
+		final := a.Package.Imports[i]
+		if final != raw {
+			fmt.Fprintf(&icfg, "importmap %s=%s\n", raw, final)
+		}
+	}
+
+	// Compute the list of mapped imports in the vet config
+	// so that we can add any missing mappings below.
+	var vcfgMapped map[string]bool
+	if vcfg != nil {
+		vcfgMapped = make(map[string]bool)
+		for _, p := range vcfg.ImportMap {
+			vcfgMapped[p] = true
+		}
+	}
+
+	for _, a1 := range a.Deps {
+		p1 := a1.Package
+		if p1 == nil || p1.ImportPath == "" || a1.built == "" {
+			continue
+		}
+		fmt.Fprintf(&icfg, "packagefile %s=%s\n", p1.ImportPath, a1.built)
+		if vcfg != nil {
+			// Add import mapping if needed
+			// (for imports like "runtime/cgo" that appear only in generated code).
+			if !vcfgMapped[p1.ImportPath] {
+				vcfg.ImportMap[p1.ImportPath] = p1.ImportPath
+			}
+			vcfg.PackageFile[p1.ImportPath] = a1.built
+		}
+	}
+
+	if cached {
+		// The cached package file is OK, so we don't need to run the compile.
+		// We're only going through the motions to prepare the vet configuration,
+		// which is now complete.
+		return nil
+	}
+
+	// Compile Go.
+ objpkg := objdir + "_pkg_.a" + ofile, out, err := BuildToolchain.gc(b, a, objpkg, icfg.Bytes(), len(sfiles) > 0, gofiles) + if len(out) > 0 { + b.showOutput(a, a.Package.Dir, a.Package.ImportPath, b.processOutput(out)) + if err != nil { + return errPrintedOutput + } + } + if err != nil { + return err + } + if ofile != objpkg { + objects = append(objects, ofile) + } + + // Copy .h files named for goos or goarch or goos_goarch + // to names using GOOS and GOARCH. + // For example, defs_linux_amd64.h becomes defs_GOOS_GOARCH.h. + _goos_goarch := "_" + cfg.Goos + "_" + cfg.Goarch + _goos := "_" + cfg.Goos + _goarch := "_" + cfg.Goarch + for _, file := range a.Package.HFiles { + name, ext := fileExtSplit(file) + switch { + case strings.HasSuffix(name, _goos_goarch): + targ := file[:len(name)-len(_goos_goarch)] + "_GOOS_GOARCH." + ext + if err := b.copyFile(a, objdir+targ, filepath.Join(a.Package.Dir, file), 0666, true); err != nil { + return err + } + case strings.HasSuffix(name, _goarch): + targ := file[:len(name)-len(_goarch)] + "_GOARCH." + ext + if err := b.copyFile(a, objdir+targ, filepath.Join(a.Package.Dir, file), 0666, true); err != nil { + return err + } + case strings.HasSuffix(name, _goos): + targ := file[:len(name)-len(_goos)] + "_GOOS." + ext + if err := b.copyFile(a, objdir+targ, filepath.Join(a.Package.Dir, file), 0666, true); err != nil { + return err + } + } + } + + for _, file := range cfiles { + out := file[:len(file)-len(".c")] + ".o" + if err := BuildToolchain.cc(b, a, objdir+out, file); err != nil { + return err + } + objects = append(objects, out) + } + + // Assemble .s files. + if len(sfiles) > 0 { + ofiles, err := BuildToolchain.asm(b, a, sfiles) + if err != nil { + return err + } + objects = append(objects, ofiles...) + } + + // NOTE(rsc): On Windows, it is critically important that the + // gcc-compiled objects (cgoObjects) be listed after the ordinary + // objects in the archive. I do not know why this is. 
+ // https://golang.org/issue/2601 + objects = append(objects, cgoObjects...) + + // Add system object files. + for _, syso := range a.Package.SysoFiles { + objects = append(objects, filepath.Join(a.Package.Dir, syso)) + } + + // Pack into archive in objdir directory. + // If the Go compiler wrote an archive, we only need to add the + // object files for non-Go sources to the archive. + // If the Go compiler wrote an archive and the package is entirely + // Go sources, there is no pack to execute at all. + if len(objects) > 0 { + if err := BuildToolchain.pack(b, a, objpkg, objects); err != nil { + return err + } + } + + if err := b.updateBuildID(a, objpkg, true); err != nil { + return err + } + + a.built = objpkg + return nil +} + +type vetConfig struct { + Compiler string + Dir string + GoFiles []string + ImportMap map[string]string + PackageFile map[string]string + + SucceedOnTypecheckFailure bool +} + +// VetFlags are the flags to pass to vet. +// The caller is expected to set them before executing any vet actions. +var VetFlags []string + +func (b *Builder) vet(a *Action) error { + // a.Deps[0] is the build of the package being vetted. + // a.Deps[1] is the build of the "fmt" package. + + vcfg := a.Deps[0].vetCfg + if vcfg == nil { + // Vet config should only be missing if the build failed. + if !a.Deps[0].Failed { + return fmt.Errorf("vet config not found") + } + return nil + } + + if vcfg.ImportMap["fmt"] == "" { + a1 := a.Deps[1] + vcfg.ImportMap["fmt"] = "fmt" + vcfg.PackageFile["fmt"] = a1.built + } + + // During go test, ignore type-checking failures during vet. + // We only run vet if the compilation has succeeded, + // so at least for now assume the bug is in vet. + // We know of at least #18395. + // TODO(rsc,gri): Try to remove this for Go 1.11. 
+ vcfg.SucceedOnTypecheckFailure = cfg.CmdName == "test" + + js, err := json.MarshalIndent(vcfg, "", "\t") + if err != nil { + return fmt.Errorf("internal error marshaling vet config: %v", err) + } + js = append(js, '\n') + if err := b.writeFile(a.Objdir+"vet.cfg", js); err != nil { + return err + } + + p := a.Package + return b.run(a, p.Dir, p.ImportPath, nil, cfg.BuildToolexec, base.Tool("vet"), VetFlags, a.Objdir+"vet.cfg") +} + +// linkActionID computes the action ID for a link action. +func (b *Builder) linkActionID(a *Action) cache.ActionID { + p := a.Package + h := cache.NewHash("link " + p.ImportPath) + + // Toolchain-independent configuration. + fmt.Fprintf(h, "link\n") + fmt.Fprintf(h, "buildmode %s goos %s goarch %s\n", cfg.BuildBuildmode, cfg.Goos, cfg.Goarch) + fmt.Fprintf(h, "import %q\n", p.ImportPath) + fmt.Fprintf(h, "omitdebug %v standard %v local %v prefix %q\n", p.Internal.OmitDebug, p.Standard, p.Internal.Local, p.Internal.LocalPrefix) + + // Toolchain-dependent configuration, shared with b.linkSharedActionID. + b.printLinkerConfig(h, p) + + // Input files. + for _, a1 := range a.Deps { + p1 := a1.Package + if p1 != nil { + if a1.built != "" || a1.buildID != "" { + buildID := a1.buildID + if buildID == "" { + buildID = b.buildID(a1.built) + } + fmt.Fprintf(h, "packagefile %s=%s\n", p1.ImportPath, contentID(buildID)) + } + // Because we put package main's full action ID into the binary's build ID, + // we must also put the full action ID into the binary's action ID hash. + if p1.Name == "main" { + fmt.Fprintf(h, "packagemain %s\n", a1.buildID) + } + if p1.Shlib != "" { + fmt.Fprintf(h, "packageshlib %s=%s\n", p1.ImportPath, contentID(b.buildID(p1.Shlib))) + } + } + } + + return h.Sum() +} + +// printLinkerConfig prints the linker config into the hash h, +// as part of the computation of a linker-related action ID. 
+func (b *Builder) printLinkerConfig(h io.Writer, p *load.Package) { + switch cfg.BuildToolchainName { + default: + base.Fatalf("linkActionID: unknown toolchain %q", cfg.BuildToolchainName) + + case "gc": + fmt.Fprintf(h, "link %s %q %s\n", b.toolID("link"), forcedLdflags, ldBuildmode) + if p != nil { + fmt.Fprintf(h, "linkflags %q\n", p.Internal.Ldflags) + } + fmt.Fprintf(h, "GO$GOARCH=%s\n", os.Getenv("GO"+strings.ToUpper(cfg.BuildContext.GOARCH))) // GO386, GOARM, etc + + /* + // TODO(rsc): Enable this code. + // golang.org/issue/22475. + goroot := cfg.BuildContext.GOROOT + if final := os.Getenv("GOROOT_FINAL"); final != "" { + goroot = final + } + fmt.Fprintf(h, "GOROOT=%s\n", goroot) + */ + + // TODO(rsc): Convince linker team not to add more magic environment variables, + // or perhaps restrict the environment variables passed to subprocesses. + magic := []string{ + "GO_EXTLINK_ENABLED", + } + for _, env := range magic { + if x := os.Getenv(env); x != "" { + fmt.Fprintf(h, "magic %s=%s\n", env, x) + } + } + + // TODO(rsc): Do cgo settings and flags need to be included? + // Or external linker settings and flags? + } +} + +// link is the action for linking a single command. +// Note that any new influence on this logic must be reported in b.linkActionID above as well. +func (b *Builder) link(a *Action) (err error) { + if b.useCache(a, a.Package, b.linkActionID(a), a.Package.Target) { + return nil + } + defer b.flushOutput(a) + + if err := b.Mkdir(a.Objdir); err != nil { + return err + } + + importcfg := a.Objdir + "importcfg.link" + if err := b.writeLinkImportcfg(a, importcfg); err != nil { + return err + } + + // make target directory + dir, _ := filepath.Split(a.Target) + if dir != "" { + if err := b.Mkdir(dir); err != nil { + return err + } + } + + if err := BuildToolchain.ld(b, a, a.Target, importcfg, a.Deps[0].built); err != nil { + return err + } + + // Update the binary with the final build ID. 
+ // But if OmitDebug is set, don't rewrite the binary, because we set OmitDebug + // on binaries that we are going to run and then delete. + // There's no point in doing work on such a binary. + // Worse, opening the binary for write here makes it + // essentially impossible to safely fork+exec due to a fundamental + // incompatibility between ETXTBSY and threads on modern Unix systems. + // See golang.org/issue/22220. + // We still call updateBuildID to update a.buildID, which is important + // for test result caching, but passing rewrite=false (final arg) + // means we don't actually rewrite the binary, nor store the + // result into the cache. + // Not calling updateBuildID means we also don't insert these + // binaries into the build object cache. That's probably a net win: + // less cache space wasted on large binaries we are not likely to + // need again. (On the other hand it does make repeated go test slower.) + if err := b.updateBuildID(a, a.Target, !a.Package.Internal.OmitDebug); err != nil { + return err + } + + a.built = a.Target + return nil +} + +func (b *Builder) writeLinkImportcfg(a *Action, file string) error { + // Prepare Go import cfg. + var icfg bytes.Buffer + for _, a1 := range a.Deps { + p1 := a1.Package + if p1 == nil { + continue + } + fmt.Fprintf(&icfg, "packagefile %s=%s\n", p1.ImportPath, a1.built) + if p1.Shlib != "" { + fmt.Fprintf(&icfg, "packageshlib %s=%s\n", p1.ImportPath, p1.Shlib) + } + } + return b.writeFile(file, icfg.Bytes()) +} + +// PkgconfigCmd returns a pkg-config binary name +// defaultPkgConfig is defined in zdefaultcc.go, written by cmd/dist. +func (b *Builder) PkgconfigCmd() string { + return envList("PKG_CONFIG", cfg.DefaultPkgConfig)[0] +} + +// splitPkgConfigOutput parses the pkg-config output into a slice of +// flags. pkg-config always uses \ to escape special characters. 
+func splitPkgConfigOutput(out []byte) []string { + if len(out) == 0 { + return nil + } + var flags []string + flag := make([]byte, len(out)) + r, w := 0, 0 + for r < len(out) { + switch out[r] { + case ' ', '\t', '\r', '\n': + if w > 0 { + flags = append(flags, string(flag[:w])) + } + w = 0 + case '\\': + r++ + fallthrough + default: + if r < len(out) { + flag[w] = out[r] + w++ + } + } + r++ + } + if w > 0 { + flags = append(flags, string(flag[:w])) + } + return flags +} + +// Calls pkg-config if needed and returns the cflags/ldflags needed to build the package. +func (b *Builder) getPkgConfigFlags(p *load.Package) (cflags, ldflags []string, err error) { + if pkgs := p.CgoPkgConfig; len(pkgs) > 0 { + var out []byte + out, err = b.runOut(p.Dir, p.ImportPath, nil, b.PkgconfigCmd(), "--cflags", pkgs) + if err != nil { + b.showOutput(nil, p.Dir, b.PkgconfigCmd()+" --cflags "+strings.Join(pkgs, " "), string(out)) + b.Print(err.Error() + "\n") + err = errPrintedOutput + return + } + if len(out) > 0 { + cflags = splitPkgConfigOutput(out) + } + out, err = b.runOut(p.Dir, p.ImportPath, nil, b.PkgconfigCmd(), "--libs", pkgs) + if err != nil { + b.showOutput(nil, p.Dir, b.PkgconfigCmd()+" --libs "+strings.Join(pkgs, " "), string(out)) + b.Print(err.Error() + "\n") + err = errPrintedOutput + return + } + if len(out) > 0 { + ldflags = strings.Fields(string(out)) + } + } + return +} + +func (b *Builder) installShlibname(a *Action) error { + // TODO: BuildN + a1 := a.Deps[0] + err := ioutil.WriteFile(a.Target, []byte(filepath.Base(a1.Target)+"\n"), 0666) + if err != nil { + return err + } + if cfg.BuildX { + b.Showcmd("", "echo '%s' > %s # internal", filepath.Base(a1.Target), a.Target) + } + return nil +} + +func (b *Builder) linkSharedActionID(a *Action) cache.ActionID { + h := cache.NewHash("linkShared") + + // Toolchain-independent configuration. 
+ fmt.Fprintf(h, "linkShared\n") + fmt.Fprintf(h, "goos %s goarch %s\n", cfg.Goos, cfg.Goarch) + + // Toolchain-dependent configuration, shared with b.linkActionID. + b.printLinkerConfig(h, nil) + + // Input files. + for _, a1 := range a.Deps { + p1 := a1.Package + if a1.built == "" { + continue + } + if p1 != nil { + fmt.Fprintf(h, "packagefile %s=%s\n", p1.ImportPath, contentID(b.buildID(a1.built))) + if p1.Shlib != "" { + fmt.Fprintf(h, "packageshlib %s=%s\n", p1.ImportPath, contentID(b.buildID(p1.Shlib))) + } + } + } + // Files named on command line are special. + for _, a1 := range a.Deps[0].Deps { + p1 := a1.Package + fmt.Fprintf(h, "top %s=%s\n", p1.ImportPath, contentID(b.buildID(a1.built))) + } + + return h.Sum() +} + +func (b *Builder) linkShared(a *Action) (err error) { + if b.useCache(a, nil, b.linkSharedActionID(a), a.Target) { + return nil + } + defer b.flushOutput(a) + + if err := b.Mkdir(a.Objdir); err != nil { + return err + } + + importcfg := a.Objdir + "importcfg.link" + if err := b.writeLinkImportcfg(a, importcfg); err != nil { + return err + } + + // TODO(rsc): There is a missing updateBuildID here, + // but we have to decide where to store the build ID in these files. + a.built = a.Target + return BuildToolchain.ldShared(b, a, a.Deps[0].Deps, a.Target, importcfg, a.Deps) +} + +// BuildInstallFunc is the action for installing a single package or executable. +func BuildInstallFunc(b *Builder, a *Action) (err error) { + defer func() { + if err != nil && err != errPrintedOutput { + // a.Package == nil is possible for the go install -buildmode=shared + // action that installs libmangledname.so, which corresponds to + // a list of packages, not just one. 
+ sep, path := "", "" + if a.Package != nil { + sep, path = " ", a.Package.ImportPath + } + err = fmt.Errorf("go %s%s%s: %v", cfg.CmdName, sep, path, err) + } + }() + + a1 := a.Deps[0] + a.buildID = a1.buildID + + // If we are using the eventual install target as an up-to-date + // cached copy of the thing we built, then there's no need to + // copy it into itself (and that would probably fail anyway). + // In this case a1.built == a.Target because a1.built == p.Target, + // so the built target is not in the a1.Objdir tree that b.cleanup(a1) removes. + if a1.built == a.Target { + a.built = a.Target + b.cleanup(a1) + // Whether we're smart enough to avoid a complete rebuild + // depends on exactly what the staleness and rebuild algorithms + // are, as well as potentially the state of the Go build cache. + // We don't really want users to be able to infer (or worse start depending on) + // those details from whether the modification time changes during + // "go install", so do a best-effort update of the file times to make it + // look like we rewrote a.Target even if we did not. Updating the mtime + // may also help other mtime-based systems that depend on our + // previous mtime updates that happened more often. + // This is still not perfect - we ignore the error result, and if the file was + // unwritable for some reason then pretending to have written it is also + // confusing - but it's probably better than not doing the mtime update. + // + // But don't do that for the special case where building an executable + // with -linkshared implicitly installs all its dependent libraries. + // We want to hide that awful detail as much as possible, so don't + // advertise it by touching the mtimes (usually the libraries are up + // to date). 
+ if !a.buggyInstall { + now := time.Now() + os.Chtimes(a.Target, now, now) + } + return nil + } + if b.ComputeStaleOnly { + return nil + } + + if err := b.Mkdir(a.Objdir); err != nil { + return err + } + + perm := os.FileMode(0666) + if a1.Mode == "link" { + switch cfg.BuildBuildmode { + case "c-archive", "c-shared", "plugin": + default: + perm = 0777 + } + } + + // make target directory + dir, _ := filepath.Split(a.Target) + if dir != "" { + if err := b.Mkdir(dir); err != nil { + return err + } + } + + defer b.cleanup(a1) + + return b.moveOrCopyFile(a, a.Target, a1.built, perm, false) +} + +// cleanup removes a's object dir to keep the amount of +// on-disk garbage down in a large build. On an operating system +// with aggressive buffering, cleaning incrementally like +// this keeps the intermediate objects from hitting the disk. +func (b *Builder) cleanup(a *Action) { + if !cfg.BuildWork { + if cfg.BuildX { + b.Showcmd("", "rm -r %s", a.Objdir) + } + os.RemoveAll(a.Objdir) + } +} + +// moveOrCopyFile is like 'mv src dst' or 'cp src dst'. +func (b *Builder) moveOrCopyFile(a *Action, dst, src string, perm os.FileMode, force bool) error { + if cfg.BuildN { + b.Showcmd("", "mv %s %s", src, dst) + return nil + } + + // If we can update the mode and rename to the dst, do it. + // Otherwise fall back to standard copy. + + // If the source is in the build cache, we need to copy it. + if strings.HasPrefix(src, cache.DefaultDir()) { + return b.copyFile(a, dst, src, perm, force) + } + + // On Windows, always copy the file, so that we respect the NTFS + // permissions of the parent folder. https://golang.org/issue/22343. + // What matters here is not cfg.Goos (the system we are building + // for) but runtime.GOOS (the system we are building on). + if runtime.GOOS == "windows" { + return b.copyFile(a, dst, src, perm, force) + } + + // If the destination directory has the group sticky bit set, + // we have to copy the file to retain the correct permissions. 
+ // https://golang.org/issue/18878 + if fi, err := os.Stat(filepath.Dir(dst)); err == nil { + if fi.IsDir() && (fi.Mode()&os.ModeSetgid) != 0 { + return b.copyFile(a, dst, src, perm, force) + } + } + + // The perm argument is meant to be adjusted according to umask, + // but we don't know what the umask is. + // Create a dummy file to find out. + // This avoids build tags and works even on systems like Plan 9 + // where the file mask computation incorporates other information. + mode := perm + f, err := os.OpenFile(filepath.Clean(dst)+"-go-tmp-umask", os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm) + if err == nil { + fi, err := f.Stat() + if err == nil { + mode = fi.Mode() & 0777 + } + name := f.Name() + f.Close() + os.Remove(name) + } + + if err := os.Chmod(src, mode); err == nil { + if err := os.Rename(src, dst); err == nil { + if cfg.BuildX { + b.Showcmd("", "mv %s %s", src, dst) + } + return nil + } + } + + return b.copyFile(a, dst, src, perm, force) +} + +// copyFile is like 'cp src dst'. +func (b *Builder) copyFile(a *Action, dst, src string, perm os.FileMode, force bool) error { + if cfg.BuildN || cfg.BuildX { + b.Showcmd("", "cp %s %s", src, dst) + if cfg.BuildN { + return nil + } + } + + sf, err := os.Open(src) + if err != nil { + return err + } + defer sf.Close() + + // Be careful about removing/overwriting dst. + // Do not remove/overwrite if dst exists and is a directory + // or a non-object file. + if fi, err := os.Stat(dst); err == nil { + if fi.IsDir() { + return fmt.Errorf("build output %q already exists and is a directory", dst) + } + if !force && fi.Mode().IsRegular() && !isObject(dst) { + return fmt.Errorf("build output %q already exists and is not an object file", dst) + } + } + + // On Windows, remove lingering ~ file from last attempt. 
+ if base.ToolIsWindows { + if _, err := os.Stat(dst + "~"); err == nil { + os.Remove(dst + "~") + } + } + + mayberemovefile(dst) + df, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil && base.ToolIsWindows { + // Windows does not allow deletion of a binary file + // while it is executing. Try to move it out of the way. + // If the move fails, which is likely, we'll try again the + // next time we do an install of this binary. + if err := os.Rename(dst, dst+"~"); err == nil { + os.Remove(dst + "~") + } + df, err = os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + } + if err != nil { + return err + } + + _, err = io.Copy(df, sf) + df.Close() + if err != nil { + mayberemovefile(dst) + return fmt.Errorf("copying %s to %s: %v", src, dst, err) + } + return nil +} + +// writeFile writes the text to file. +func (b *Builder) writeFile(file string, text []byte) error { + if cfg.BuildN || cfg.BuildX { + b.Showcmd("", "cat >%s << 'EOF' # internal\n%sEOF", file, text) + } + if cfg.BuildN { + return nil + } + return ioutil.WriteFile(file, text, 0666) +} + +// Install the cgo export header file, if there is one. +func (b *Builder) installHeader(a *Action) error { + src := a.Objdir + "_cgo_install.h" + if _, err := os.Stat(src); os.IsNotExist(err) { + // If the file does not exist, there are no exported + // functions, and we do not install anything. + // TODO(rsc): Once we know that caching is rebuilding + // at the right times (not missing rebuilds), here we should + // probably delete the installed header, if any. 
+ if cfg.BuildX { + b.Showcmd("", "# %s not created", src) + } + return nil + } + + dir, _ := filepath.Split(a.Target) + if dir != "" { + if err := b.Mkdir(dir); err != nil { + return err + } + } + + return b.moveOrCopyFile(a, a.Target, src, 0666, true) +} + +// cover runs, in effect, +// go tool cover -mode=b.coverMode -var="varName" -o dst.go src.go +func (b *Builder) cover(a *Action, dst, src string, perm os.FileMode, varName string) error { + return b.run(a, a.Objdir, "cover "+a.Package.ImportPath, nil, + cfg.BuildToolexec, + base.Tool("cover"), + "-mode", a.Package.Internal.CoverMode, + "-var", varName, + "-o", dst, + src) +} + +var objectMagic = [][]byte{ + {'!', '<', 'a', 'r', 'c', 'h', '>', '\n'}, // Package archive + {'\x7F', 'E', 'L', 'F'}, // ELF + {0xFE, 0xED, 0xFA, 0xCE}, // Mach-O big-endian 32-bit + {0xFE, 0xED, 0xFA, 0xCF}, // Mach-O big-endian 64-bit + {0xCE, 0xFA, 0xED, 0xFE}, // Mach-O little-endian 32-bit + {0xCF, 0xFA, 0xED, 0xFE}, // Mach-O little-endian 64-bit + {0x4d, 0x5a, 0x90, 0x00, 0x03, 0x00}, // PE (Windows) as generated by 6l/8l and gcc + {0x00, 0x00, 0x01, 0xEB}, // Plan 9 i386 + {0x00, 0x00, 0x8a, 0x97}, // Plan 9 amd64 + {0x00, 0x00, 0x06, 0x47}, // Plan 9 arm +} + +func isObject(s string) bool { + f, err := os.Open(s) + if err != nil { + return false + } + defer f.Close() + buf := make([]byte, 64) + io.ReadFull(f, buf) + for _, magic := range objectMagic { + if bytes.HasPrefix(buf, magic) { + return true + } + } + return false +} + +// mayberemovefile removes a file only if it is a regular file +// When running as a user with sufficient privileges, we may delete +// even device files, for example, which is not intended. 
+func mayberemovefile(s string) { + if fi, err := os.Lstat(s); err == nil && !fi.Mode().IsRegular() { + return + } + os.Remove(s) +} + +// fmtcmd formats a command in the manner of fmt.Sprintf but also: +// +// If dir is non-empty and the script is not in dir right now, +// fmtcmd inserts "cd dir\n" before the command. +// +// fmtcmd replaces the value of b.WorkDir with $WORK. +// fmtcmd replaces the value of goroot with $GOROOT. +// fmtcmd replaces the value of b.gobin with $GOBIN. +// +// fmtcmd replaces the name of the current directory with dot (.) +// but only when it is at the beginning of a space-separated token. +// +func (b *Builder) fmtcmd(dir string, format string, args ...interface{}) string { + cmd := fmt.Sprintf(format, args...) + if dir != "" && dir != "/" { + cmd = strings.Replace(" "+cmd, " "+dir, " .", -1)[1:] + if b.scriptDir != dir { + b.scriptDir = dir + cmd = "cd " + dir + "\n" + cmd + } + } + if b.WorkDir != "" { + cmd = strings.Replace(cmd, b.WorkDir, "$WORK", -1) + } + return cmd +} + +// showcmd prints the given command to standard output +// for the implementation of -n or -x. +func (b *Builder) Showcmd(dir string, format string, args ...interface{}) { + b.output.Lock() + defer b.output.Unlock() + b.Print(b.fmtcmd(dir, format, args...) + "\n") +} + +// showOutput prints "# desc" followed by the given output. +// The output is expected to contain references to 'dir', usually +// the source directory for the package that has failed to build. +// showOutput rewrites mentions of dir with a relative path to dir +// when the relative path is shorter. This is usually more pleasant. +// For example, if fmt doesn't compile and we are in src/html, +// the output is +// +// $ go build +// # fmt +// ../fmt/print.go:1090: undefined: asdf +// $ +// +// instead of +// +// $ go build +// # fmt +// /usr/gopher/go/src/fmt/print.go:1090: undefined: asdf +// $ +// +// showOutput also replaces references to the work directory with $WORK. 
+// +// If a is not nil and a.output is not nil, showOutput appends to that slice instead of +// printing to b.Print. +// +func (b *Builder) showOutput(a *Action, dir, desc, out string) { + prefix := "# " + desc + suffix := "\n" + out + if reldir := base.ShortPath(dir); reldir != dir { + suffix = strings.Replace(suffix, " "+dir, " "+reldir, -1) + suffix = strings.Replace(suffix, "\n"+dir, "\n"+reldir, -1) + } + suffix = strings.Replace(suffix, " "+b.WorkDir, " $WORK", -1) + + if a != nil && a.output != nil { + a.output = append(a.output, prefix...) + a.output = append(a.output, suffix...) + return + } + + b.output.Lock() + defer b.output.Unlock() + b.Print(prefix, suffix) +} + +// errPrintedOutput is a special error indicating that a command failed +// but that it generated output as well, and that output has already +// been printed, so there's no point showing 'exit status 1' or whatever +// the wait status was. The main executor, builder.do, knows not to +// print this error. +var errPrintedOutput = errors.New("already printed output - no need to show error") + +var cgoLine = regexp.MustCompile(`\[[^\[\]]+\.(cgo1|cover)\.go:[0-9]+(:[0-9]+)?\]`) +var cgoTypeSigRe = regexp.MustCompile(`\b_C2?(type|func|var|macro)_\B`) + +// run runs the command given by cmdline in the directory dir. +// If the command fails, run prints information about the failure +// and returns a non-nil error. +func (b *Builder) run(a *Action, dir string, desc string, env []string, cmdargs ...interface{}) error { + out, err := b.runOut(dir, desc, env, cmdargs...) + if len(out) > 0 { + if desc == "" { + desc = b.fmtcmd(dir, "%s", strings.Join(str.StringList(cmdargs...), " ")) + } + b.showOutput(a, dir, desc, b.processOutput(out)) + if err != nil { + err = errPrintedOutput + } + } + return err +} + +// processOutput prepares the output of runOut to be output to the console. 
+func (b *Builder) processOutput(out []byte) string { + if out[len(out)-1] != '\n' { + out = append(out, '\n') + } + messages := string(out) + // Fix up output referring to cgo-generated code to be more readable. + // Replace x.go:19[/tmp/.../x.cgo1.go:18] with x.go:19. + // Replace *[100]_Ctype_foo with *[100]C.foo. + // If we're using -x, assume we're debugging and want the full dump, so disable the rewrite. + if !cfg.BuildX && cgoLine.MatchString(messages) { + messages = cgoLine.ReplaceAllString(messages, "") + messages = cgoTypeSigRe.ReplaceAllString(messages, "C.") + } + return messages +} + +// runOut runs the command given by cmdline in the directory dir. +// It returns the command output and any errors that occurred. +func (b *Builder) runOut(dir string, desc string, env []string, cmdargs ...interface{}) ([]byte, error) { + cmdline := str.StringList(cmdargs...) + if cfg.BuildN || cfg.BuildX { + var envcmdline string + for _, e := range env { + if j := strings.IndexByte(e, '='); j != -1 { + if strings.ContainsRune(e[j+1:], '\'') { + envcmdline += fmt.Sprintf("%s=%q", e[:j], e[j+1:]) + } else { + envcmdline += fmt.Sprintf("%s='%s'", e[:j], e[j+1:]) + } + envcmdline += " " + } + } + envcmdline += joinUnambiguously(cmdline) + b.Showcmd(dir, "%s", envcmdline) + if cfg.BuildN { + return nil, nil + } + } + + var buf bytes.Buffer + cmd := exec.Command(cmdline[0], cmdline[1:]...) + cmd.Stdout = &buf + cmd.Stderr = &buf + cmd.Dir = dir + cmd.Env = base.MergeEnvLists(env, base.EnvForDir(cmd.Dir, os.Environ())) + err := cmd.Run() + + // err can be something like 'exit status 1'. + // Add information about what program was running. + // Note that if buf.Bytes() is non-empty, the caller usually + // shows buf.Bytes() and does not print err at all, so the + // prefix here does not make most output any more verbose. 
+	if err != nil {
+		err = errors.New(cmdline[0] + ": " + err.Error())
+	}
+	return buf.Bytes(), err
+}
+
+// joinUnambiguously prints the slice, quoting where necessary to make the
+// output unambiguous.
+// TODO: See issue 5279. The printing of commands needs a complete redo.
+func joinUnambiguously(a []string) string {
+	var buf bytes.Buffer
+	for i, s := range a {
+		if i > 0 {
+			buf.WriteByte(' ')
+		}
+		q := strconv.Quote(s)
+		if s == "" || strings.Contains(s, " ") || len(q) > len(s)+2 {
+			buf.WriteString(q)
+		} else {
+			buf.WriteString(s)
+		}
+	}
+	return buf.String()
+}
+
+// Mkdir makes the named directory.
+func (b *Builder) Mkdir(dir string) error {
+	// Make Mkdir(a.Objdir) a no-op instead of an error when a.Objdir == "".
+	if dir == "" {
+		return nil
+	}
+
+	b.exec.Lock()
+	defer b.exec.Unlock()
+	// We can be a little aggressive about being
+	// sure directories exist. Skip repeated calls.
+	if b.mkdirCache[dir] {
+		return nil
+	}
+	b.mkdirCache[dir] = true
+
+	if cfg.BuildN || cfg.BuildX {
+		b.Showcmd("", "mkdir -p %s", dir)
+		if cfg.BuildN {
+			return nil
+		}
+	}
+
+	if err := os.MkdirAll(dir, 0777); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Symlink creates a symlink newname -> oldname.
+func (b *Builder) Symlink(oldname, newname string) error {
+	if cfg.BuildN || cfg.BuildX {
+		b.Showcmd("", "ln -s %s %s", oldname, newname)
+		if cfg.BuildN {
+			return nil
+		}
+	}
+	return os.Symlink(oldname, newname)
+}
+
+// mkAbs returns an absolute path corresponding to
+// evaluating f in the directory dir.
+// We always pass absolute paths of source files so that
+// the error messages will include the full path to a file
+// in need of attention.
+func mkAbs(dir, f string) string {
+	// Leave absolute paths alone.
+	// Also, during -n mode we use the pseudo-directory $WORK
+	// instead of creating an actual work directory that won't be used.
+	// Leave paths beginning with $WORK alone too.
+ if filepath.IsAbs(f) || strings.HasPrefix(f, "$WORK") { + return f + } + return filepath.Join(dir, f) +} + +type toolchain interface { + // gc runs the compiler in a specific directory on a set of files + // and returns the name of the generated output file. + gc(b *Builder, a *Action, archive string, importcfg []byte, asmhdr bool, gofiles []string) (ofile string, out []byte, err error) + // cc runs the toolchain's C compiler in a directory on a C file + // to produce an output file. + cc(b *Builder, a *Action, ofile, cfile string) error + // asm runs the assembler in a specific directory on specific files + // and returns a list of named output files. + asm(b *Builder, a *Action, sfiles []string) ([]string, error) + // pack runs the archive packer in a specific directory to create + // an archive from a set of object files. + // typically it is run in the object directory. + pack(b *Builder, a *Action, afile string, ofiles []string) error + // ld runs the linker to create an executable starting at mainpkg. 
+ ld(b *Builder, root *Action, out, importcfg, mainpkg string) error + // ldShared runs the linker to create a shared library containing the pkgs built by toplevelactions + ldShared(b *Builder, root *Action, toplevelactions []*Action, out, importcfg string, allactions []*Action) error + + compiler() string + linker() string +} + +type noToolchain struct{} + +func noCompiler() error { + log.Fatalf("unknown compiler %q", cfg.BuildContext.Compiler) + return nil +} + +func (noToolchain) compiler() string { + noCompiler() + return "" +} + +func (noToolchain) linker() string { + noCompiler() + return "" +} + +func (noToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, asmhdr bool, gofiles []string) (ofile string, out []byte, err error) { + return "", nil, noCompiler() +} + +func (noToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) { + return nil, noCompiler() +} + +func (noToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error { + return noCompiler() +} + +func (noToolchain) ld(b *Builder, root *Action, out, importcfg, mainpkg string) error { + return noCompiler() +} + +func (noToolchain) ldShared(b *Builder, root *Action, toplevelactions []*Action, out, importcfg string, allactions []*Action) error { + return noCompiler() +} + +func (noToolchain) cc(b *Builder, a *Action, ofile, cfile string) error { + return noCompiler() +} + +// gcc runs the gcc C compiler to create an object from a single C file. +func (b *Builder) gcc(a *Action, p *load.Package, workdir, out string, flags []string, cfile string) error { + return b.ccompile(a, p, out, flags, cfile, b.GccCmd(p.Dir, workdir)) +} + +// gxx runs the g++ C++ compiler to create an object from a single C++ file. 
+func (b *Builder) gxx(a *Action, p *load.Package, workdir, out string, flags []string, cxxfile string) error { + return b.ccompile(a, p, out, flags, cxxfile, b.GxxCmd(p.Dir, workdir)) +} + +// gfortran runs the gfortran Fortran compiler to create an object from a single Fortran file. +func (b *Builder) gfortran(a *Action, p *load.Package, workdir, out string, flags []string, ffile string) error { + return b.ccompile(a, p, out, flags, ffile, b.gfortranCmd(p.Dir, workdir)) +} + +// ccompile runs the given C or C++ compiler and creates an object from a single source file. +func (b *Builder) ccompile(a *Action, p *load.Package, outfile string, flags []string, file string, compiler []string) error { + file = mkAbs(p.Dir, file) + desc := p.ImportPath + if !filepath.IsAbs(outfile) { + outfile = filepath.Join(p.Dir, outfile) + } + output, err := b.runOut(filepath.Dir(file), desc, nil, compiler, flags, "-o", outfile, "-c", filepath.Base(file)) + if len(output) > 0 { + // On FreeBSD 11, when we pass -g to clang 3.8 it + // invokes its internal assembler with -dwarf-version=2. + // When it sees .section .note.GNU-stack, it warns + // "DWARF2 only supports one section per compilation unit". + // This warning makes no sense, since the section is empty, + // but it confuses people. + // We work around the problem by detecting the warning + // and dropping -g and trying again. 
+		if bytes.Contains(output, []byte("DWARF2 only supports one section per compilation unit")) {
+			newFlags := make([]string, 0, len(flags))
+			for _, f := range flags {
+				if !strings.HasPrefix(f, "-g") {
+					newFlags = append(newFlags, f)
+				}
+			}
+			if len(newFlags) < len(flags) {
+				return b.ccompile(a, p, outfile, newFlags, file, compiler)
+			}
+		}
+
+		b.showOutput(a, p.Dir, desc, b.processOutput(output))
+		if err != nil {
+			err = errPrintedOutput
+		} else if os.Getenv("GO_BUILDER_NAME") != "" {
+			return errors.New("C compiler warning promoted to error on Go builders")
+		}
+	}
+	return err
+}
+
+// gccld runs the gcc linker to create an executable from a set of object files.
+func (b *Builder) gccld(p *load.Package, objdir, out string, flags []string, objs []string) error {
+	var cmd []string
+	if len(p.CXXFiles) > 0 || len(p.SwigCXXFiles) > 0 {
+		cmd = b.GxxCmd(p.Dir, objdir)
+	} else {
+		cmd = b.GccCmd(p.Dir, objdir)
+	}
+	return b.run(nil, p.Dir, p.ImportPath, nil, cmd, "-o", out, objs, flags)
+}
+
+// Grab these before main helpfully overwrites them.
+var (
+	origCC  = os.Getenv("CC")
+	origCXX = os.Getenv("CXX")
+)
+
+// GccCmd returns a gcc command line prefix.
+// defaultCC is defined in zdefaultcc.go, written by cmd/dist.
+func (b *Builder) GccCmd(incdir, workdir string) []string {
+	return b.compilerCmd(b.ccExe(), incdir, workdir)
+}
+
+// GxxCmd returns a g++ command line prefix.
+// defaultCXX is defined in zdefaultcc.go, written by cmd/dist.
+func (b *Builder) GxxCmd(incdir, workdir string) []string {
+	return b.compilerCmd(b.cxxExe(), incdir, workdir)
+}
+
+// gfortranCmd returns a gfortran command line prefix.
+func (b *Builder) gfortranCmd(incdir, workdir string) []string {
+	return b.compilerCmd(b.fcExe(), incdir, workdir)
+}
+
+// ccExe returns the CC compiler setting without all the extra flags we add implicitly.
+func (b *Builder) ccExe() []string { + return b.compilerExe(origCC, cfg.DefaultCC(cfg.Goos, cfg.Goarch)) +} + +// cxxExe returns the CXX compiler setting without all the extra flags we add implicitly. +func (b *Builder) cxxExe() []string { + return b.compilerExe(origCXX, cfg.DefaultCXX(cfg.Goos, cfg.Goarch)) +} + +// fcExe returns the FC compiler setting without all the extra flags we add implicitly. +func (b *Builder) fcExe() []string { + return b.compilerExe(os.Getenv("FC"), "gfortran") +} + +// compilerExe returns the compiler to use given an +// environment variable setting (the value not the name) +// and a default. The resulting slice is usually just the name +// of the compiler but can have additional arguments if they +// were present in the environment value. +// For example if CC="gcc -DGOPHER" then the result is ["gcc", "-DGOPHER"]. +func (b *Builder) compilerExe(envValue string, def string) []string { + compiler := strings.Fields(envValue) + if len(compiler) == 0 { + compiler = []string{def} + } + return compiler +} + +// compilerCmd returns a command line prefix for the given environment +// variable and using the default command when the variable is empty. +func (b *Builder) compilerCmd(compiler []string, incdir, workdir string) []string { + // NOTE: env.go's mkEnv knows that the first three + // strings returned are "gcc", "-I", incdir (and cuts them off). + a := []string{compiler[0], "-I", incdir} + a = append(a, compiler[1:]...) + + // Definitely want -fPIC but on Windows gcc complains + // "-fPIC ignored for target (all code is position independent)" + if cfg.Goos != "windows" { + a = append(a, "-fPIC") + } + a = append(a, b.gccArchArgs()...) + // gcc-4.5 and beyond require explicit "-pthread" flag + // for multithreading with pthread library. 
+ if cfg.BuildContext.CgoEnabled { + switch cfg.Goos { + case "windows": + a = append(a, "-mthreads") + default: + a = append(a, "-pthread") + } + } + + // disable ASCII art in clang errors, if possible + if b.gccSupportsFlag(compiler, "-fno-caret-diagnostics") { + a = append(a, "-fno-caret-diagnostics") + } + // clang is too smart about command-line arguments + if b.gccSupportsFlag(compiler, "-Qunused-arguments") { + a = append(a, "-Qunused-arguments") + } + + // disable word wrapping in error messages + a = append(a, "-fmessage-length=0") + + // Tell gcc not to include the work directory in object files. + if b.gccSupportsFlag(compiler, "-fdebug-prefix-map=a=b") { + if workdir == "" { + workdir = b.WorkDir + } + workdir = strings.TrimSuffix(workdir, string(filepath.Separator)) + a = append(a, "-fdebug-prefix-map="+workdir+"=/tmp/go-build") + } + + // Tell gcc not to include flags in object files, which defeats the + // point of -fdebug-prefix-map above. + if b.gccSupportsFlag(compiler, "-gno-record-gcc-switches") { + a = append(a, "-gno-record-gcc-switches") + } + + // On OS X, some of the compilers behave as if -fno-common + // is always set, and the Mach-O linker in 6l/8l assumes this. + // See https://golang.org/issue/3253. + if cfg.Goos == "darwin" { + a = append(a, "-fno-common") + } + + return a +} + +// gccNoPie returns the flag to use to request non-PIE. On systems +// with PIE (position independent executables) enabled by default, +// -no-pie must be passed when doing a partial link with -Wl,-r. +// But -no-pie is not supported by all compilers, and clang spells it -nopie. +func (b *Builder) gccNoPie(linker []string) string { + if b.gccSupportsFlag(linker, "-no-pie") { + return "-no-pie" + } + if b.gccSupportsFlag(linker, "-nopie") { + return "-nopie" + } + return "" +} + +// gccSupportsFlag checks to see if the compiler supports a flag. 
+func (b *Builder) gccSupportsFlag(compiler []string, flag string) bool {
+	key := [2]string{compiler[0], flag}
+
+	b.exec.Lock()
+	defer b.exec.Unlock()
+	if cached, ok := b.flagCache[key]; ok { // don't shadow the receiver b with the cached value
+		return cached
+	}
+	if b.flagCache == nil {
+		b.flagCache = make(map[[2]string]bool)
+	}
+	// We used to write an empty C file, but that gets complicated with
+	// go build -n. We tried using a file that does not exist, but that
+	// fails on systems with GCC version 4.2.1; that is the last GPLv2
+	// version of GCC, so some systems have frozen on it.
+	// Now we pass an empty file on stdin, which should work at least for
+	// GCC and clang.
+	cmdArgs := str.StringList(compiler, flag, "-c", "-x", "c", "-")
+	if cfg.BuildN || cfg.BuildX {
+		b.Showcmd(b.WorkDir, "%s", joinUnambiguously(cmdArgs))
+		if cfg.BuildN {
+			return false
+		}
+	}
+	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
+	cmd.Dir = b.WorkDir
+	cmd.Env = base.MergeEnvLists([]string{"LC_ALL=C"}, base.EnvForDir(cmd.Dir, os.Environ()))
+	out, _ := cmd.CombinedOutput()
+	// GCC says "unrecognized command line option".
+	// clang says "unknown argument".
+	// Older versions of GCC say "unrecognised debug output level".
+	supported := !bytes.Contains(out, []byte("unrecognized")) &&
+		!bytes.Contains(out, []byte("unknown")) &&
+		!bytes.Contains(out, []byte("unrecognised"))
+	b.flagCache[key] = supported
+	return supported
+}
+
+// gccArchArgs returns arguments to pass to gcc based on the architecture.
+func (b *Builder) gccArchArgs() []string { + switch cfg.Goarch { + case "386": + return []string{"-m32"} + case "amd64", "amd64p32": + return []string{"-m64"} + case "arm": + return []string{"-marm"} // not thumb + case "s390x": + return []string{"-m64", "-march=z196"} + case "mips64", "mips64le": + return []string{"-mabi=64"} + case "mips", "mipsle": + return []string{"-mabi=32", "-march=mips32"} + } + return nil +} + +// envList returns the value of the given environment variable broken +// into fields, using the default value when the variable is empty. +func envList(key, def string) []string { + v := os.Getenv(key) + if v == "" { + v = def + } + return strings.Fields(v) +} + +// CFlags returns the flags to use when invoking the C, C++ or Fortran compilers, or cgo. +func (b *Builder) CFlags(p *load.Package) (cppflags, cflags, cxxflags, fflags, ldflags []string) { + defaults := "-g -O2" + + cppflags = str.StringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS) + cflags = str.StringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS) + cxxflags = str.StringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS) + fflags = str.StringList(envList("CGO_FFLAGS", defaults), p.CgoFFLAGS) + ldflags = str.StringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS) + return +} + +var cgoRe = regexp.MustCompile(`[/\\:]`) + +func (b *Builder) cgo(a *Action, cgoExe, objdir string, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, gxxfiles, mfiles, ffiles []string) (outGo, outObj []string, err error) { + p := a.Package + cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, cgoFFLAGS, cgoLDFLAGS := b.CFlags(p) + cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...) + cgoLDFLAGS = append(cgoLDFLAGS, pcLDFLAGS...) + // If we are compiling Objective-C code, then we need to link against libobjc + if len(mfiles) > 0 { + cgoLDFLAGS = append(cgoLDFLAGS, "-lobjc") + } + + // Likewise for Fortran, except there are many Fortran compilers. 
+ // Support gfortran out of the box and let others pass the correct link options + // via CGO_LDFLAGS + if len(ffiles) > 0 { + fc := os.Getenv("FC") + if fc == "" { + fc = "gfortran" + } + if strings.Contains(fc, "gfortran") { + cgoLDFLAGS = append(cgoLDFLAGS, "-lgfortran") + } + } + + if cfg.BuildMSan { + cgoCFLAGS = append([]string{"-fsanitize=memory"}, cgoCFLAGS...) + cgoLDFLAGS = append([]string{"-fsanitize=memory"}, cgoLDFLAGS...) + } + + // Allows including _cgo_export.h from .[ch] files in the package. + cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", objdir) + + // cgo + // TODO: CGO_FLAGS? + gofiles := []string{objdir + "_cgo_gotypes.go"} + cfiles := []string{"_cgo_export.c"} + for _, fn := range cgofiles { + f := strings.TrimSuffix(filepath.Base(fn), ".go") + gofiles = append(gofiles, objdir+f+".cgo1.go") + cfiles = append(cfiles, f+".cgo2.c") + } + + // TODO: make cgo not depend on $GOARCH? + + cgoflags := []string{} + if p.Standard && p.ImportPath == "runtime/cgo" { + cgoflags = append(cgoflags, "-import_runtime_cgo=false") + } + if p.Standard && (p.ImportPath == "runtime/race" || p.ImportPath == "runtime/msan" || p.ImportPath == "runtime/cgo") { + cgoflags = append(cgoflags, "-import_syscall=false") + } + + // Update $CGO_LDFLAGS with p.CgoLDFLAGS. + var cgoenv []string + if len(cgoLDFLAGS) > 0 { + flags := make([]string, len(cgoLDFLAGS)) + for i, f := range cgoLDFLAGS { + flags[i] = strconv.Quote(f) + } + cgoenv = []string{"CGO_LDFLAGS=" + strings.Join(flags, " ")} + } + + if cfg.BuildToolchainName == "gccgo" { + switch cfg.Goarch { + case "386", "amd64": + cgoCFLAGS = append(cgoCFLAGS, "-fsplit-stack") + } + cgoflags = append(cgoflags, "-gccgo") + if pkgpath := gccgoPkgpath(p); pkgpath != "" { + cgoflags = append(cgoflags, "-gccgopkgpath="+pkgpath) + } + } + + switch cfg.BuildBuildmode { + case "c-archive", "c-shared": + // Tell cgo that if there are any exported functions + // it should generate a header file that C code can + // #include. 
+ cgoflags = append(cgoflags, "-exportheader="+objdir+"_cgo_install.h") + } + + if err := b.run(a, p.Dir, p.ImportPath, cgoenv, cfg.BuildToolexec, cgoExe, "-objdir", objdir, "-importpath", p.ImportPath, cgoflags, "--", cgoCPPFLAGS, cgoCFLAGS, cgofiles); err != nil { + return nil, nil, err + } + outGo = append(outGo, gofiles...) + + // Use sequential object file names to keep them distinct + // and short enough to fit in the .a header file name slots. + // We no longer collect them all into _all.o, and we'd like + // tools to see both the .o suffix and unique names, so + // we need to make them short enough not to be truncated + // in the final archive. + oseq := 0 + nextOfile := func() string { + oseq++ + return objdir + fmt.Sprintf("_x%03d.o", oseq) + } + + // gcc + cflags := str.StringList(cgoCPPFLAGS, cgoCFLAGS) + for _, cfile := range cfiles { + ofile := nextOfile() + if err := b.gcc(a, p, a.Objdir, ofile, cflags, objdir+cfile); err != nil { + return nil, nil, err + } + outObj = append(outObj, ofile) + } + + for _, file := range gccfiles { + ofile := nextOfile() + if err := b.gcc(a, p, a.Objdir, ofile, cflags, file); err != nil { + return nil, nil, err + } + outObj = append(outObj, ofile) + } + + cxxflags := str.StringList(cgoCPPFLAGS, cgoCXXFLAGS) + for _, file := range gxxfiles { + ofile := nextOfile() + if err := b.gxx(a, p, a.Objdir, ofile, cxxflags, file); err != nil { + return nil, nil, err + } + outObj = append(outObj, ofile) + } + + for _, file := range mfiles { + ofile := nextOfile() + if err := b.gcc(a, p, a.Objdir, ofile, cflags, file); err != nil { + return nil, nil, err + } + outObj = append(outObj, ofile) + } + + fflags := str.StringList(cgoCPPFLAGS, cgoFFLAGS) + for _, file := range ffiles { + ofile := nextOfile() + if err := b.gfortran(a, p, a.Objdir, ofile, fflags, file); err != nil { + return nil, nil, err + } + outObj = append(outObj, ofile) + } + + switch cfg.BuildToolchainName { + case "gc": + importGo := objdir + "_cgo_import.go" + if err 
:= b.dynimport(a, p, objdir, importGo, cgoExe, cflags, cgoLDFLAGS, outObj); err != nil { + return nil, nil, err + } + outGo = append(outGo, importGo) + + case "gccgo": + defunC := objdir + "_cgo_defun.c" + defunObj := objdir + "_cgo_defun.o" + if err := BuildToolchain.cc(b, a, defunObj, defunC); err != nil { + return nil, nil, err + } + outObj = append(outObj, defunObj) + + default: + noCompiler() + } + + return outGo, outObj, nil +} + +// dynimport creates a Go source file named importGo containing +// //go:cgo_import_dynamic directives for each symbol or library +// dynamically imported by the object files outObj. +func (b *Builder) dynimport(a *Action, p *load.Package, objdir, importGo, cgoExe string, cflags, cgoLDFLAGS, outObj []string) error { + cfile := objdir + "_cgo_main.c" + ofile := objdir + "_cgo_main.o" + if err := b.gcc(a, p, objdir, ofile, cflags, cfile); err != nil { + return err + } + + linkobj := str.StringList(ofile, outObj, p.SysoFiles) + dynobj := objdir + "_cgo_.o" + + // we need to use -pie for Linux/ARM to get accurate imported sym + ldflags := cgoLDFLAGS + if (cfg.Goarch == "arm" && cfg.Goos == "linux") || cfg.Goos == "android" { + ldflags = append(ldflags, "-pie") + } + if err := b.gccld(p, objdir, dynobj, ldflags, linkobj); err != nil { + return err + } + + // cgo -dynimport + var cgoflags []string + if p.Standard && p.ImportPath == "runtime/cgo" { + cgoflags = []string{"-dynlinker"} // record path to dynamic linker + } + return b.run(a, p.Dir, p.ImportPath, nil, cfg.BuildToolexec, cgoExe, "-dynpackage", p.Name, "-dynimport", dynobj, "-dynout", importGo, cgoflags) +} + +// Run SWIG on all SWIG input files. +// TODO: Don't build a shared library, once SWIG emits the necessary +// pragmas for external linking. 
+func (b *Builder) swig(a *Action, p *load.Package, objdir string, pcCFLAGS []string) (outGo, outC, outCXX []string, err error) { + if err := b.swigVersionCheck(); err != nil { + return nil, nil, nil, err + } + + intgosize, err := b.swigIntSize(objdir) + if err != nil { + return nil, nil, nil, err + } + + for _, f := range p.SwigFiles { + goFile, cFile, err := b.swigOne(a, p, f, objdir, pcCFLAGS, false, intgosize) + if err != nil { + return nil, nil, nil, err + } + if goFile != "" { + outGo = append(outGo, goFile) + } + if cFile != "" { + outC = append(outC, cFile) + } + } + for _, f := range p.SwigCXXFiles { + goFile, cxxFile, err := b.swigOne(a, p, f, objdir, pcCFLAGS, true, intgosize) + if err != nil { + return nil, nil, nil, err + } + if goFile != "" { + outGo = append(outGo, goFile) + } + if cxxFile != "" { + outCXX = append(outCXX, cxxFile) + } + } + return outGo, outC, outCXX, nil +} + +// Make sure SWIG is new enough. +var ( + swigCheckOnce sync.Once + swigCheck error +) + +func (b *Builder) swigDoVersionCheck() error { + out, err := b.runOut("", "", nil, "swig", "-version") + if err != nil { + return err + } + re := regexp.MustCompile(`[vV]ersion +([\d]+)([.][\d]+)?([.][\d]+)?`) + matches := re.FindSubmatch(out) + if matches == nil { + // Can't find version number; hope for the best. + return nil + } + + major, err := strconv.Atoi(string(matches[1])) + if err != nil { + // Can't find version number; hope for the best. + return nil + } + const errmsg = "must have SWIG version >= 3.0.6" + if major < 3 { + return errors.New(errmsg) + } + if major > 3 { + // 4.0 or later + return nil + } + + // We have SWIG version 3.x. + if len(matches[2]) > 0 { + minor, err := strconv.Atoi(string(matches[2][1:])) + if err != nil { + return nil + } + if minor > 0 { + // 3.1 or later + return nil + } + } + + // We have SWIG version 3.0.x. 
+ if len(matches[3]) > 0 { + patch, err := strconv.Atoi(string(matches[3][1:])) + if err != nil { + return nil + } + if patch < 6 { + // Before 3.0.6. + return errors.New(errmsg) + } + } + + return nil +} + +func (b *Builder) swigVersionCheck() error { + swigCheckOnce.Do(func() { + swigCheck = b.swigDoVersionCheck() + }) + return swigCheck +} + +// Find the value to pass for the -intgosize option to swig. +var ( + swigIntSizeOnce sync.Once + swigIntSize string + swigIntSizeError error +) + +// This code fails to build if sizeof(int) <= 32 +const swigIntSizeCode = ` +package main +const i int = 1 << 32 +` + +// Determine the size of int on the target system for the -intgosize option +// of swig >= 2.0.9. Run only once. +func (b *Builder) swigDoIntSize(objdir string) (intsize string, err error) { + if cfg.BuildN { + return "$INTBITS", nil + } + src := filepath.Join(b.WorkDir, "swig_intsize.go") + if err = ioutil.WriteFile(src, []byte(swigIntSizeCode), 0666); err != nil { + return + } + srcs := []string{src} + + p := load.GoFilesPackage(srcs) + + if _, _, e := BuildToolchain.gc(b, &Action{Mode: "swigDoIntSize", Package: p, Objdir: objdir}, "", nil, false, srcs); e != nil { + return "32", nil + } + return "64", nil +} + +// Determine the size of int on the target system for the -intgosize option +// of swig >= 2.0.9. +func (b *Builder) swigIntSize(objdir string) (intsize string, err error) { + swigIntSizeOnce.Do(func() { + swigIntSize, swigIntSizeError = b.swigDoIntSize(objdir) + }) + return swigIntSize, swigIntSizeError +} + +// Run SWIG on one SWIG input file. 
+func (b *Builder) swigOne(a *Action, p *load.Package, file, objdir string, pcCFLAGS []string, cxx bool, intgosize string) (outGo, outC string, err error) { + cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _, _ := b.CFlags(p) + var cflags []string + if cxx { + cflags = str.StringList(cgoCPPFLAGS, pcCFLAGS, cgoCXXFLAGS) + } else { + cflags = str.StringList(cgoCPPFLAGS, pcCFLAGS, cgoCFLAGS) + } + + n := 5 // length of ".swig" + if cxx { + n = 8 // length of ".swigcxx" + } + base := file[:len(file)-n] + goFile := base + ".go" + gccBase := base + "_wrap." + gccExt := "c" + if cxx { + gccExt = "cxx" + } + + gccgo := cfg.BuildToolchainName == "gccgo" + + // swig + args := []string{ + "-go", + "-cgo", + "-intgosize", intgosize, + "-module", base, + "-o", objdir + gccBase + gccExt, + "-outdir", objdir, + } + + for _, f := range cflags { + if len(f) > 3 && f[:2] == "-I" { + args = append(args, f) + } + } + + if gccgo { + args = append(args, "-gccgo") + if pkgpath := gccgoPkgpath(p); pkgpath != "" { + args = append(args, "-go-pkgpath", pkgpath) + } + } + if cxx { + args = append(args, "-c++") + } + + out, err := b.runOut(p.Dir, p.ImportPath, nil, "swig", args, file) + if err != nil { + if len(out) > 0 { + if bytes.Contains(out, []byte("-intgosize")) || bytes.Contains(out, []byte("-cgo")) { + return "", "", errors.New("must have SWIG version >= 3.0.6") + } + b.showOutput(a, p.Dir, p.ImportPath, b.processOutput(out)) // swig error + return "", "", errPrintedOutput + } + return "", "", err + } + if len(out) > 0 { + b.showOutput(a, p.Dir, p.ImportPath, b.processOutput(out)) // swig warning + } + + // If the input was x.swig, the output is x.go in the objdir. + // But there might be an x.go in the original dir too, and if it + // uses cgo as well, cgo will be processing both and will + // translate both into x.cgo1.go in the objdir, overwriting one. + // Rename x.go to _x_swig.go to avoid this problem. 
+ // We ignore files in the original dir that begin with underscore + // so _x_swig.go cannot conflict with an original file we were + // going to compile. + goFile = objdir + goFile + newGoFile := objdir + "_" + base + "_swig.go" + if err := os.Rename(goFile, newGoFile); err != nil { + return "", "", err + } + return newGoFile, objdir + gccBase + gccExt, nil +} + +// disableBuildID adjusts a linker command line to avoid creating a +// build ID when creating an object file rather than an executable or +// shared library. Some systems, such as Ubuntu, always add +// --build-id to every link, but we don't want a build ID when we are +// producing an object file. On some of those system a plain -r (not +// -Wl,-r) will turn off --build-id, but clang 3.0 doesn't support a +// plain -r. I don't know how to turn off --build-id when using clang +// other than passing a trailing --build-id=none. So that is what we +// do, but only on systems likely to support it, which is to say, +// systems that normally use gold or the GNU linker. +func (b *Builder) disableBuildID(ldflags []string) []string { + switch cfg.Goos { + case "android", "dragonfly", "linux", "netbsd": + ldflags = append(ldflags, "-Wl,--build-id=none") + } + return ldflags +} + +// mkAbsFiles converts files into a list of absolute files, +// assuming they were originally relative to dir, +// and returns that new list. +func mkAbsFiles(dir string, files []string) []string { + abs := make([]string, len(files)) + for i, f := range files { + if !filepath.IsAbs(f) { + f = filepath.Join(dir, f) + } + abs[i] = f + } + return abs +} diff --git a/src/cmd/go/internal/work/gc.go b/src/cmd/go/internal/work/gc.go new file mode 100644 index 00000000000..49258b30fd0 --- /dev/null +++ b/src/cmd/go/internal/work/gc.go @@ -0,0 +1,500 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package work + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "strings" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/load" + "cmd/go/internal/str" + "cmd/internal/objabi" + "crypto/sha1" +) + +// The Go toolchain. + +type gcToolchain struct{} + +func (gcToolchain) compiler() string { + return base.Tool("compile") +} + +func (gcToolchain) linker() string { + return base.Tool("link") +} + +func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, asmhdr bool, gofiles []string) (ofile string, output []byte, err error) { + p := a.Package + objdir := a.Objdir + if archive != "" { + ofile = archive + } else { + out := "_go_.o" + ofile = objdir + out + } + + pkgpath := p.ImportPath + if cfg.BuildBuildmode == "plugin" { + pkgpath = pluginPath(a) + } else if p.Name == "main" { + pkgpath = "main" + } + gcargs := []string{"-p", pkgpath} + if p.Standard { + gcargs = append(gcargs, "-std") + } + compilingRuntime := p.Standard && (p.ImportPath == "runtime" || strings.HasPrefix(p.ImportPath, "runtime/internal")) + if compilingRuntime { + // runtime compiles with a special gc flag to emit + // additional reflect type data. + gcargs = append(gcargs, "-+") + } + + // If we're giving the compiler the entire package (no C etc files), tell it that, + // so that it can give good error messages about forward declarations. + // Exceptions: a few standard packages have forward declarations for + // pieces supplied behind-the-scenes by package runtime. 
+ extFiles := len(p.CgoFiles) + len(p.CFiles) + len(p.CXXFiles) + len(p.MFiles) + len(p.FFiles) + len(p.SFiles) + len(p.SysoFiles) + len(p.SwigFiles) + len(p.SwigCXXFiles) + if p.Standard { + switch p.ImportPath { + case "bytes", "internal/poll", "net", "os", "runtime/pprof", "sync", "syscall", "time": + extFiles++ + } + } + if extFiles == 0 { + gcargs = append(gcargs, "-complete") + } + if cfg.BuildContext.InstallSuffix != "" { + gcargs = append(gcargs, "-installsuffix", cfg.BuildContext.InstallSuffix) + } + if a.buildID != "" { + gcargs = append(gcargs, "-buildid", a.buildID) + } + platform := cfg.Goos + "/" + cfg.Goarch + if p.Internal.OmitDebug || platform == "nacl/amd64p32" || platform == "darwin/arm" || platform == "darwin/arm64" || cfg.Goos == "plan9" { + gcargs = append(gcargs, "-dwarf=false") + } + if strings.HasPrefix(runtimeVersion, "go1") && !strings.Contains(os.Args[0], "go_bootstrap") { + gcargs = append(gcargs, "-goversion", runtimeVersion) + } + + gcflags := str.StringList(forcedGcflags, p.Internal.Gcflags) + if compilingRuntime { + // Remove -N, if present. + // It is not possible to build the runtime with no optimizations, + // because the compiler cannot eliminate enough write barriers. + for i := 0; i < len(gcflags); i++ { + if gcflags[i] == "-N" { + copy(gcflags[i:], gcflags[i+1:]) + gcflags = gcflags[:len(gcflags)-1] + i-- + } + } + } + + args := []interface{}{cfg.BuildToolexec, base.Tool("compile"), "-o", ofile, "-trimpath", trimDir(a.Objdir), gcflags, gcargs, "-D", p.Internal.LocalPrefix} + if importcfg != nil { + if err := b.writeFile(objdir+"importcfg", importcfg); err != nil { + return "", nil, err + } + args = append(args, "-importcfg", objdir+"importcfg") + } + if ofile == archive { + args = append(args, "-pack") + } + if asmhdr { + args = append(args, "-asmhdr", objdir+"go_asm.h") + } + + // Add -c=N to use concurrent backend compilation, if possible. 
+ if c := gcBackendConcurrency(gcflags); c > 1 { + args = append(args, fmt.Sprintf("-c=%d", c)) + } + + for _, f := range gofiles { + args = append(args, mkAbs(p.Dir, f)) + } + + output, err = b.runOut(p.Dir, p.ImportPath, nil, args...) + return ofile, output, err +} + +// gcBackendConcurrency returns the backend compiler concurrency level for a package compilation. +func gcBackendConcurrency(gcflags []string) int { + // First, check whether we can use -c at all for this compilation. + canDashC := concurrentGCBackendCompilationEnabledByDefault + + switch e := os.Getenv("GO19CONCURRENTCOMPILATION"); e { + case "0": + canDashC = false + case "1": + canDashC = true + case "": + // Not set. Use default. + default: + log.Fatalf("GO19CONCURRENTCOMPILATION must be 0, 1, or unset, got %q", e) + } + +CheckFlags: + for _, flag := range gcflags { + // Concurrent compilation is presumed incompatible with any gcflags, + // except for a small whitelist of commonly used flags. + // If the user knows better, they can manually add their own -c to the gcflags. + switch flag { + case "-N", "-l", "-S", "-B", "-C", "-I": + // OK + default: + canDashC = false + break CheckFlags + } + } + + // TODO: Test and delete these conditions. + if objabi.Fieldtrack_enabled != 0 || objabi.Preemptibleloops_enabled != 0 || objabi.Clobberdead_enabled != 0 { + canDashC = false + } + + if !canDashC { + return 1 + } + + // Decide how many concurrent backend compilations to allow. + // + // If we allow too many, in theory we might end up with p concurrent processes, + // each with c concurrent backend compiles, all fighting over the same resources. + // However, in practice, that seems not to happen too much. + // Most build graphs are surprisingly serial, so p==1 for much of the build. + // Furthermore, concurrent backend compilation is only enabled for a part + // of the overall compiler execution, so c==1 for much of the build. + // So don't worry too much about that interaction for now. 
+ // + // However, in practice, setting c above 4 tends not to help very much. + // See the analysis in CL 41192. + // + // TODO(josharian): attempt to detect whether this particular compilation + // is likely to be a bottleneck, e.g. when: + // - it has no successor packages to compile (usually package main) + // - all paths through the build graph pass through it + // - critical path scheduling says it is high priority + // and in such a case, set c to runtime.NumCPU. + // We do this now when p==1. + if cfg.BuildP == 1 { + // No process parallelism. Max out c. + return runtime.NumCPU() + } + // Some process parallelism. Set c to min(4, numcpu). + c := 4 + if ncpu := runtime.NumCPU(); ncpu < c { + c = ncpu + } + return c +} + +func trimDir(dir string) string { + if len(dir) > 1 && dir[len(dir)-1] == filepath.Separator { + dir = dir[:len(dir)-1] + } + return dir +} + +func (gcToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) { + p := a.Package + // Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files. + inc := filepath.Join(cfg.GOROOT, "pkg", "include") + args := []interface{}{cfg.BuildToolexec, base.Tool("asm"), "-trimpath", trimDir(a.Objdir), "-I", a.Objdir, "-I", inc, "-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch, forcedAsmflags, p.Internal.Asmflags} + if p.ImportPath == "runtime" && cfg.Goarch == "386" { + for _, arg := range forcedAsmflags { + if arg == "-dynlink" { + args = append(args, "-D=GOBUILDMODE_shared=1") + } + } + } + + if cfg.Goarch == "mips" || cfg.Goarch == "mipsle" { + // Define GOMIPS_value from cfg.GOMIPS. 
+ args = append(args, "-D", "GOMIPS_"+cfg.GOMIPS) + } + + var ofiles []string + for _, sfile := range sfiles { + ofile := a.Objdir + sfile[:len(sfile)-len(".s")] + ".o" + ofiles = append(ofiles, ofile) + args1 := append(args, "-o", ofile, mkAbs(p.Dir, sfile)) + if err := b.run(a, p.Dir, p.ImportPath, nil, args1...); err != nil { + return nil, err + } + } + return ofiles, nil +} + +// toolVerify checks that the command line args writes the same output file +// if run using newTool instead. +// Unused now but kept around for future use. +func toolVerify(a *Action, b *Builder, p *load.Package, newTool string, ofile string, args []interface{}) error { + newArgs := make([]interface{}, len(args)) + copy(newArgs, args) + newArgs[1] = base.Tool(newTool) + newArgs[3] = ofile + ".new" // x.6 becomes x.6.new + if err := b.run(a, p.Dir, p.ImportPath, nil, newArgs...); err != nil { + return err + } + data1, err := ioutil.ReadFile(ofile) + if err != nil { + return err + } + data2, err := ioutil.ReadFile(ofile + ".new") + if err != nil { + return err + } + if !bytes.Equal(data1, data2) { + return fmt.Errorf("%s and %s produced different output files:\n%s\n%s", filepath.Base(args[1].(string)), newTool, strings.Join(str.StringList(args...), " "), strings.Join(str.StringList(newArgs...), " ")) + } + os.Remove(ofile + ".new") + return nil +} + +func (gcToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error { + var absOfiles []string + for _, f := range ofiles { + absOfiles = append(absOfiles, mkAbs(a.Objdir, f)) + } + absAfile := mkAbs(a.Objdir, afile) + + // The archive file should have been created by the compiler. + // Since it used to not work that way, verify. 
+ if !cfg.BuildN { + if _, err := os.Stat(absAfile); err != nil { + base.Fatalf("os.Stat of archive file failed: %v", err) + } + } + + p := a.Package + if cfg.BuildN || cfg.BuildX { + cmdline := str.StringList(base.Tool("pack"), "r", absAfile, absOfiles) + b.Showcmd(p.Dir, "%s # internal", joinUnambiguously(cmdline)) + } + if cfg.BuildN { + return nil + } + if err := packInternal(b, absAfile, absOfiles); err != nil { + b.showOutput(a, p.Dir, p.ImportPath, err.Error()+"\n") + return errPrintedOutput + } + return nil +} + +func packInternal(b *Builder, afile string, ofiles []string) error { + dst, err := os.OpenFile(afile, os.O_WRONLY|os.O_APPEND, 0) + if err != nil { + return err + } + defer dst.Close() // only for error returns or panics + w := bufio.NewWriter(dst) + + for _, ofile := range ofiles { + src, err := os.Open(ofile) + if err != nil { + return err + } + fi, err := src.Stat() + if err != nil { + src.Close() + return err + } + // Note: Not using %-16.16s format because we care + // about bytes, not runes. + name := fi.Name() + if len(name) > 16 { + name = name[:16] + } else { + name += strings.Repeat(" ", 16-len(name)) + } + size := fi.Size() + fmt.Fprintf(w, "%s%-12d%-6d%-6d%-8o%-10d`\n", + name, 0, 0, 0, 0644, size) + n, err := io.Copy(w, src) + src.Close() + if err == nil && n < size { + err = io.ErrUnexpectedEOF + } else if err == nil && n > size { + err = fmt.Errorf("file larger than size reported by stat") + } + if err != nil { + return fmt.Errorf("copying %s to %s: %v", ofile, afile, err) + } + if size&1 != 0 { + w.WriteByte(0) + } + } + + if err := w.Flush(); err != nil { + return err + } + return dst.Close() +} + +// setextld sets the appropriate linker flags for the specified compiler. 
+func setextld(ldflags []string, compiler []string) []string { + for _, f := range ldflags { + if f == "-extld" || strings.HasPrefix(f, "-extld=") { + // don't override -extld if supplied + return ldflags + } + } + ldflags = append(ldflags, "-extld="+compiler[0]) + if len(compiler) > 1 { + extldflags := false + add := strings.Join(compiler[1:], " ") + for i, f := range ldflags { + if f == "-extldflags" && i+1 < len(ldflags) { + ldflags[i+1] = add + " " + ldflags[i+1] + extldflags = true + break + } else if strings.HasPrefix(f, "-extldflags=") { + ldflags[i] = "-extldflags=" + add + " " + ldflags[i][len("-extldflags="):] + extldflags = true + break + } + } + if !extldflags { + ldflags = append(ldflags, "-extldflags="+add) + } + } + return ldflags +} + +// pluginPath computes the package path for a plugin main package. +// +// This is typically the import path of the main package p, unless the +// plugin is being built directly from source files. In that case we +// combine the package build ID with the contents of the main package +// source files. This allows us to identify two different plugins +// built from two source files with the same name. 
+func pluginPath(a *Action) string { + p := a.Package + if p.ImportPath != "command-line-arguments" { + return p.ImportPath + } + h := sha1.New() + fmt.Fprintf(h, "build ID: %s\n", a.buildID) + for _, file := range str.StringList(p.GoFiles, p.CgoFiles, p.SFiles) { + data, err := ioutil.ReadFile(filepath.Join(p.Dir, file)) + if err != nil { + base.Fatalf("go: %s", err) + } + h.Write(data) + } + return fmt.Sprintf("plugin/unnamed-%x", h.Sum(nil)) +} + +func (gcToolchain) ld(b *Builder, root *Action, out, importcfg, mainpkg string) error { + cxx := len(root.Package.CXXFiles) > 0 || len(root.Package.SwigCXXFiles) > 0 + for _, a := range root.Deps { + if a.Package != nil && (len(a.Package.CXXFiles) > 0 || len(a.Package.SwigCXXFiles) > 0) { + cxx = true + } + } + var ldflags []string + if cfg.BuildContext.InstallSuffix != "" { + ldflags = append(ldflags, "-installsuffix", cfg.BuildContext.InstallSuffix) + } + if root.Package.Internal.OmitDebug { + ldflags = append(ldflags, "-s", "-w") + } + if cfg.BuildBuildmode == "plugin" { + ldflags = append(ldflags, "-pluginpath", pluginPath(root)) + } + + // TODO(rsc): This is probably wrong - see golang.org/issue/22155. + if cfg.GOROOT != runtime.GOROOT() { + ldflags = append(ldflags, "-X=runtime/internal/sys.DefaultGoroot="+cfg.GOROOT) + } + + // Store BuildID inside toolchain binaries as a unique identifier of the + // tool being run, for use by content-based staleness determination. + if root.Package.Goroot && strings.HasPrefix(root.Package.ImportPath, "cmd/") { + ldflags = append(ldflags, "-X=cmd/internal/objabi.buildID="+root.buildID) + } + + // If the user has not specified the -extld option, then specify the + // appropriate linker. In case of C++ code, use the compiler named + // by the CXX environment variable or defaultCXX if CXX is not set. + // Else, use the CC environment variable and defaultCC as fallback. 
+ var compiler []string + if cxx { + compiler = envList("CXX", cfg.DefaultCXX(cfg.Goos, cfg.Goarch)) + } else { + compiler = envList("CC", cfg.DefaultCC(cfg.Goos, cfg.Goarch)) + } + ldflags = append(ldflags, "-buildmode="+ldBuildmode) + if root.buildID != "" { + ldflags = append(ldflags, "-buildid="+root.buildID) + } + ldflags = append(ldflags, forcedLdflags...) + ldflags = append(ldflags, root.Package.Internal.Ldflags...) + ldflags = setextld(ldflags, compiler) + + // On OS X when using external linking to build a shared library, + // the argument passed here to -o ends up recorded in the final + // shared library in the LC_ID_DYLIB load command. + // To avoid putting the temporary output directory name there + // (and making the resulting shared library useless), + // run the link in the output directory so that -o can name + // just the final path element. + // On Windows, DLL file name is recorded in PE file + // export section, so do like on OS X. + dir := "." + if (cfg.Goos == "darwin" || cfg.Goos == "windows") && cfg.BuildBuildmode == "c-shared" { + dir, out = filepath.Split(out) + } + + return b.run(root, dir, root.Package.ImportPath, nil, cfg.BuildToolexec, base.Tool("link"), "-o", out, "-importcfg", importcfg, ldflags, mainpkg) +} + +func (gcToolchain) ldShared(b *Builder, root *Action, toplevelactions []*Action, out, importcfg string, allactions []*Action) error { + ldflags := []string{"-installsuffix", cfg.BuildContext.InstallSuffix} + ldflags = append(ldflags, "-buildmode=shared") + ldflags = append(ldflags, forcedLdflags...) + ldflags = append(ldflags, root.Package.Internal.Ldflags...) + cxx := false + for _, a := range allactions { + if a.Package != nil && (len(a.Package.CXXFiles) > 0 || len(a.Package.SwigCXXFiles) > 0) { + cxx = true + } + } + // If the user has not specified the -extld option, then specify the + // appropriate linker. In case of C++ code, use the compiler named + // by the CXX environment variable or defaultCXX if CXX is not set. 
+ // Else, use the CC environment variable and defaultCC as fallback. + var compiler []string + if cxx { + compiler = envList("CXX", cfg.DefaultCXX(cfg.Goos, cfg.Goarch)) + } else { + compiler = envList("CC", cfg.DefaultCC(cfg.Goos, cfg.Goarch)) + } + ldflags = setextld(ldflags, compiler) + for _, d := range toplevelactions { + if !strings.HasSuffix(d.Target, ".a") { // omit unsafe etc and actions for other shared libraries + continue + } + ldflags = append(ldflags, d.Package.ImportPath+"="+d.Target) + } + return b.run(root, ".", out, nil, cfg.BuildToolexec, base.Tool("link"), "-o", out, "-importcfg", importcfg, ldflags) +} + +func (gcToolchain) cc(b *Builder, a *Action, ofile, cfile string) error { + return fmt.Errorf("%s: C source files not supported without cgo", mkAbs(a.Package.Dir, cfile)) +} diff --git a/src/cmd/go/internal/work/gccgo.go b/src/cmd/go/internal/work/gccgo.go new file mode 100644 index 00000000000..37a828f5929 --- /dev/null +++ b/src/cmd/go/internal/work/gccgo.go @@ -0,0 +1,497 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package work + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/load" + "cmd/go/internal/str" +) + +// The Gccgo toolchain. 
+ +type gccgoToolchain struct{} + +var GccgoName, GccgoBin string +var gccgoErr error + +func init() { + GccgoName = os.Getenv("GCCGO") + if GccgoName == "" { + GccgoName = "gccgo" + } + GccgoBin, gccgoErr = exec.LookPath(GccgoName) +} + +func (gccgoToolchain) compiler() string { + checkGccgoBin() + return GccgoBin +} + +func (gccgoToolchain) linker() string { + checkGccgoBin() + return GccgoBin +} + +func checkGccgoBin() { + if gccgoErr == nil { + return + } + fmt.Fprintf(os.Stderr, "cmd/go: gccgo: %s\n", gccgoErr) + os.Exit(2) +} + +func (tools gccgoToolchain) gc(b *Builder, a *Action, archive string, importcfg []byte, asmhdr bool, gofiles []string) (ofile string, output []byte, err error) { + p := a.Package + objdir := a.Objdir + out := "_go_.o" + ofile = objdir + out + gcargs := []string{"-g"} + gcargs = append(gcargs, b.gccArchArgs()...) + if pkgpath := gccgoPkgpath(p); pkgpath != "" { + gcargs = append(gcargs, "-fgo-pkgpath="+pkgpath) + } + if p.Internal.LocalPrefix != "" { + gcargs = append(gcargs, "-fgo-relative-import-path="+p.Internal.LocalPrefix) + } + + args := str.StringList(tools.compiler(), "-c", gcargs, "-o", ofile, forcedGccgoflags) + if importcfg != nil { + if b.gccSupportsFlag(args[:1], "-fgo-importcfg=/dev/null") { + if err := b.writeFile(objdir+"importcfg", importcfg); err != nil { + return "", nil, err + } + args = append(args, "-fgo-importcfg="+objdir+"importcfg") + } else { + root := objdir + "_importcfgroot_" + if err := buildImportcfgSymlinks(b, root, importcfg); err != nil { + return "", nil, err + } + args = append(args, "-I", root) + } + } + args = append(args, a.Package.Internal.Gccgoflags...) + for _, f := range gofiles { + args = append(args, mkAbs(p.Dir, f)) + } + + output, err = b.runOut(p.Dir, p.ImportPath, nil, args) + return ofile, output, err +} + +// buildImportcfgSymlinks builds in root a tree of symlinks +// implementing the directives from importcfg. 
+// This serves as a temporary transition mechanism until +// we can depend on gccgo reading an importcfg directly. +// (The Go 1.9 and later gc compilers already do.) +func buildImportcfgSymlinks(b *Builder, root string, importcfg []byte) error { + for lineNum, line := range strings.Split(string(importcfg), "\n") { + lineNum++ // 1-based + line = strings.TrimSpace(line) + if line == "" { + continue + } + if line == "" || strings.HasPrefix(line, "#") { + continue + } + var verb, args string + if i := strings.Index(line, " "); i < 0 { + verb = line + } else { + verb, args = line[:i], strings.TrimSpace(line[i+1:]) + } + var before, after string + if i := strings.Index(args, "="); i >= 0 { + before, after = args[:i], args[i+1:] + } + switch verb { + default: + base.Fatalf("importcfg:%d: unknown directive %q", lineNum, verb) + case "packagefile": + if before == "" || after == "" { + return fmt.Errorf(`importcfg:%d: invalid packagefile: syntax is "packagefile path=filename": %s`, lineNum, line) + } + archive := gccgoArchive(root, before) + if err := b.Mkdir(filepath.Dir(archive)); err != nil { + return err + } + if err := b.Symlink(after, archive); err != nil { + return err + } + case "importmap": + if before == "" || after == "" { + return fmt.Errorf(`importcfg:%d: invalid importmap: syntax is "importmap old=new": %s`, lineNum, line) + } + beforeA := gccgoArchive(root, before) + afterA := gccgoArchive(root, after) + if err := b.Mkdir(filepath.Dir(beforeA)); err != nil { + return err + } + if err := b.Mkdir(filepath.Dir(afterA)); err != nil { + return err + } + if err := b.Symlink(afterA, beforeA); err != nil { + return err + } + case "packageshlib": + return fmt.Errorf("gccgo -importcfg does not support shared libraries") + } + } + return nil +} + +func (tools gccgoToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) { + p := a.Package + var ofiles []string + for _, sfile := range sfiles { + ofile := a.Objdir + sfile[:len(sfile)-len(".s")] + ".o" + 
ofiles = append(ofiles, ofile) + sfile = mkAbs(p.Dir, sfile) + defs := []string{"-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch} + if pkgpath := gccgoCleanPkgpath(p); pkgpath != "" { + defs = append(defs, `-D`, `GOPKGPATH=`+pkgpath) + } + defs = tools.maybePIC(defs) + defs = append(defs, b.gccArchArgs()...) + err := b.run(a, p.Dir, p.ImportPath, nil, tools.compiler(), "-xassembler-with-cpp", "-I", a.Objdir, "-c", "-o", ofile, defs, sfile) + if err != nil { + return nil, err + } + } + return ofiles, nil +} + +func gccgoArchive(basedir, imp string) string { + end := filepath.FromSlash(imp + ".a") + afile := filepath.Join(basedir, end) + // add "lib" to the final element + return filepath.Join(filepath.Dir(afile), "lib"+filepath.Base(afile)) +} + +func (gccgoToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error { + p := a.Package + objdir := a.Objdir + var absOfiles []string + for _, f := range ofiles { + absOfiles = append(absOfiles, mkAbs(objdir, f)) + } + return b.run(a, p.Dir, p.ImportPath, nil, "ar", "rc", mkAbs(objdir, afile), absOfiles) +} + +func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string, allactions []*Action, buildmode, desc string) error { + // gccgo needs explicit linking with all package dependencies, + // and all LDFLAGS from cgo dependencies. 
+ apackagePathsSeen := make(map[string]bool) + afiles := []string{} + shlibs := []string{} + ldflags := b.gccArchArgs() + cgoldflags := []string{} + usesCgo := false + cxx := false + objc := false + fortran := false + if root.Package != nil { + cxx = len(root.Package.CXXFiles) > 0 || len(root.Package.SwigCXXFiles) > 0 + objc = len(root.Package.MFiles) > 0 + fortran = len(root.Package.FFiles) > 0 + } + + readCgoFlags := func(flagsFile string) error { + flags, err := ioutil.ReadFile(flagsFile) + if err != nil { + return err + } + const ldflagsPrefix = "_CGO_LDFLAGS=" + for _, line := range strings.Split(string(flags), "\n") { + if strings.HasPrefix(line, ldflagsPrefix) { + newFlags := strings.Fields(line[len(ldflagsPrefix):]) + for _, flag := range newFlags { + // Every _cgo_flags file has -g and -O2 in _CGO_LDFLAGS + // but they don't mean anything to the linker so filter + // them out. + if flag != "-g" && !strings.HasPrefix(flag, "-O") { + cgoldflags = append(cgoldflags, flag) + } + } + } + } + return nil + } + + newID := 0 + readAndRemoveCgoFlags := func(archive string) (string, error) { + newID++ + newArchive := root.Objdir + fmt.Sprintf("_pkg%d_.a", newID) + if err := b.copyFile(root, newArchive, archive, 0666, false); err != nil { + return "", err + } + if cfg.BuildN || cfg.BuildX { + b.Showcmd("", "ar d %s _cgo_flags", newArchive) + if cfg.BuildN { + // TODO(rsc): We could do better about showing the right _cgo_flags even in -n mode. + // Either the archive is already built and we can read them out, + // or we're printing commands to build the archive and can + // forward the _cgo_flags directly to this step. 
+ return "", nil + } + } + err := b.run(root, root.Objdir, desc, nil, "ar", "x", newArchive, "_cgo_flags") + if err != nil { + return "", err + } + err = b.run(root, ".", desc, nil, "ar", "d", newArchive, "_cgo_flags") + if err != nil { + return "", err + } + err = readCgoFlags(filepath.Join(root.Objdir, "_cgo_flags")) + if err != nil { + return "", err + } + return newArchive, nil + } + + actionsSeen := make(map[*Action]bool) + // Make a pre-order depth-first traversal of the action graph, taking note of + // whether a shared library action has been seen on the way to an action (the + // construction of the graph means that if any path to a node passes through + // a shared library action, they all do). + var walk func(a *Action, seenShlib bool) + var err error + walk = func(a *Action, seenShlib bool) { + if actionsSeen[a] { + return + } + actionsSeen[a] = true + if a.Package != nil && !seenShlib { + if a.Package.Standard { + return + } + // We record the target of the first time we see a .a file + // for a package to make sure that we prefer the 'install' + // rather than the 'build' location (which may not exist any + // more). We still need to traverse the dependencies of the + // build action though so saying + // if apackagePathsSeen[a.Package.ImportPath] { return } + // doesn't work. + if !apackagePathsSeen[a.Package.ImportPath] { + apackagePathsSeen[a.Package.ImportPath] = true + target := a.Target + if len(a.Package.CgoFiles) > 0 || a.Package.UsesSwig() { + target, err = readAndRemoveCgoFlags(target) + if err != nil { + return + } + } + afiles = append(afiles, target) + } + } + if strings.HasSuffix(a.Target, ".so") { + shlibs = append(shlibs, a.Target) + seenShlib = true + } + for _, a1 := range a.Deps { + walk(a1, seenShlib) + if err != nil { + return + } + } + } + for _, a1 := range root.Deps { + walk(a1, false) + if err != nil { + return err + } + } + + for _, a := range allactions { + // Gather CgoLDFLAGS, but not from standard packages. 
+ // The go tool can dig up runtime/cgo from GOROOT and + // think that it should use its CgoLDFLAGS, but gccgo + // doesn't use runtime/cgo. + if a.Package == nil { + continue + } + if !a.Package.Standard { + cgoldflags = append(cgoldflags, a.Package.CgoLDFLAGS...) + } + if len(a.Package.CgoFiles) > 0 { + usesCgo = true + } + if a.Package.UsesSwig() { + usesCgo = true + } + if len(a.Package.CXXFiles) > 0 || len(a.Package.SwigCXXFiles) > 0 { + cxx = true + } + if len(a.Package.MFiles) > 0 { + objc = true + } + if len(a.Package.FFiles) > 0 { + fortran = true + } + } + + ldflags = append(ldflags, "-Wl,--whole-archive") + ldflags = append(ldflags, afiles...) + ldflags = append(ldflags, "-Wl,--no-whole-archive") + + ldflags = append(ldflags, cgoldflags...) + ldflags = append(ldflags, envList("CGO_LDFLAGS", "")...) + if root.Package != nil { + ldflags = append(ldflags, root.Package.CgoLDFLAGS...) + } + + ldflags = str.StringList("-Wl,-(", ldflags, "-Wl,-)") + + for _, shlib := range shlibs { + ldflags = append( + ldflags, + "-L"+filepath.Dir(shlib), + "-Wl,-rpath="+filepath.Dir(shlib), + "-l"+strings.TrimSuffix( + strings.TrimPrefix(filepath.Base(shlib), "lib"), + ".so")) + } + + var realOut string + switch buildmode { + case "exe": + if usesCgo && cfg.Goos == "linux" { + ldflags = append(ldflags, "-Wl,-E") + } + + case "c-archive": + // Link the Go files into a single .o, and also link + // in -lgolibbegin. + // + // We need to use --whole-archive with -lgolibbegin + // because it doesn't define any symbols that will + // cause the contents to be pulled in; it's just + // initialization code. + // + // The user remains responsible for linking against + // -lgo -lpthread -lm in the final link. We can't use + // -r to pick them up because we can't combine + // split-stack and non-split-stack code in a single -r + // link, and libgo picks up non-split-stack code from + // libffi. 
+ ldflags = append(ldflags, "-Wl,-r", "-nostdlib", "-Wl,--whole-archive", "-lgolibbegin", "-Wl,--no-whole-archive") + + if nopie := b.gccNoPie([]string{tools.linker()}); nopie != "" { + ldflags = append(ldflags, nopie) + } + + // We are creating an object file, so we don't want a build ID. + ldflags = b.disableBuildID(ldflags) + + realOut = out + out = out + ".o" + + case "c-shared": + ldflags = append(ldflags, "-shared", "-nostdlib", "-Wl,--whole-archive", "-lgolibbegin", "-Wl,--no-whole-archive", "-lgo", "-lgcc_s", "-lgcc", "-lc", "-lgcc") + case "shared": + ldflags = append(ldflags, "-zdefs", "-shared", "-nostdlib", "-lgo", "-lgcc_s", "-lgcc", "-lc") + + default: + base.Fatalf("-buildmode=%s not supported for gccgo", buildmode) + } + + switch buildmode { + case "exe", "c-shared": + if cxx { + ldflags = append(ldflags, "-lstdc++") + } + if objc { + ldflags = append(ldflags, "-lobjc") + } + if fortran { + fc := os.Getenv("FC") + if fc == "" { + fc = "gfortran" + } + // support gfortran out of the box and let others pass the correct link options + // via CGO_LDFLAGS + if strings.Contains(fc, "gfortran") { + ldflags = append(ldflags, "-lgfortran") + } + } + } + + if err := b.run(root, ".", desc, nil, tools.linker(), "-o", out, ldflags, forcedGccgoflags, root.Package.Internal.Gccgoflags); err != nil { + return err + } + + switch buildmode { + case "c-archive": + if err := b.run(root, ".", desc, nil, "ar", "rc", realOut, out); err != nil { + return err + } + } + return nil +} + +func (tools gccgoToolchain) ld(b *Builder, root *Action, out, importcfg, mainpkg string) error { + return tools.link(b, root, out, importcfg, root.Deps, ldBuildmode, root.Package.ImportPath) +} + +func (tools gccgoToolchain) ldShared(b *Builder, root *Action, toplevelactions []*Action, out, importcfg string, allactions []*Action) error { + fakeRoot := *root + fakeRoot.Deps = toplevelactions + return tools.link(b, &fakeRoot, out, importcfg, allactions, "shared", out) +} + +func (tools 
gccgoToolchain) cc(b *Builder, a *Action, ofile, cfile string) error { + p := a.Package + inc := filepath.Join(cfg.GOROOT, "pkg", "include") + cfile = mkAbs(p.Dir, cfile) + defs := []string{"-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch} + defs = append(defs, b.gccArchArgs()...) + if pkgpath := gccgoCleanPkgpath(p); pkgpath != "" { + defs = append(defs, `-D`, `GOPKGPATH="`+pkgpath+`"`) + } + switch cfg.Goarch { + case "386", "amd64": + defs = append(defs, "-fsplit-stack") + } + defs = tools.maybePIC(defs) + return b.run(a, p.Dir, p.ImportPath, nil, envList("CC", cfg.DefaultCC(cfg.Goos, cfg.Goarch)), "-Wall", "-g", + "-I", a.Objdir, "-I", inc, "-o", ofile, defs, "-c", cfile) +} + +// maybePIC adds -fPIC to the list of arguments if needed. +func (tools gccgoToolchain) maybePIC(args []string) []string { + switch cfg.BuildBuildmode { + case "c-shared", "shared", "plugin": + args = append(args, "-fPIC") + } + return args +} + +func gccgoPkgpath(p *load.Package) string { + if p.Internal.Build.IsCommand() && !p.Internal.ForceLibrary { + return "" + } + return p.ImportPath +} + +func gccgoCleanPkgpath(p *load.Package) string { + clean := func(r rune) rune { + switch { + case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z', + '0' <= r && r <= '9': + return r + } + return '_' + } + return strings.Map(clean, gccgoPkgpath(p)) +} diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go new file mode 100644 index 00000000000..7f894f5c6dc --- /dev/null +++ b/src/cmd/go/internal/work/init.go @@ -0,0 +1,217 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Build initialization (after flag parsing). 
+ +package work + +import ( + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "flag" + "fmt" + "os" + "path/filepath" +) + +func BuildInit() { + instrumentInit() + buildModeInit() + + // Make sure -pkgdir is absolute, because we run commands + // in different directories. + if cfg.BuildPkgdir != "" && !filepath.IsAbs(cfg.BuildPkgdir) { + p, err := filepath.Abs(cfg.BuildPkgdir) + if err != nil { + fmt.Fprintf(os.Stderr, "go %s: evaluating -pkgdir: %v\n", flag.Args()[0], err) + os.Exit(2) + } + cfg.BuildPkgdir = p + } +} + +func instrumentInit() { + if !cfg.BuildRace && !cfg.BuildMSan { + return + } + if cfg.BuildRace && cfg.BuildMSan { + fmt.Fprintf(os.Stderr, "go %s: may not use -race and -msan simultaneously\n", flag.Args()[0]) + os.Exit(2) + } + if cfg.BuildMSan && (cfg.Goos != "linux" || cfg.Goarch != "amd64") { + fmt.Fprintf(os.Stderr, "-msan is not supported on %s/%s\n", cfg.Goos, cfg.Goarch) + os.Exit(2) + } + if cfg.Goarch != "amd64" || cfg.Goos != "linux" && cfg.Goos != "freebsd" && cfg.Goos != "darwin" && cfg.Goos != "windows" { + fmt.Fprintf(os.Stderr, "go %s: -race and -msan are only supported on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64\n", flag.Args()[0]) + os.Exit(2) + } + + mode := "race" + if cfg.BuildMSan { + mode = "msan" + } + modeFlag := "-" + mode + + if !cfg.BuildContext.CgoEnabled { + fmt.Fprintf(os.Stderr, "go %s: %s requires cgo; enable cgo by setting CGO_ENABLED=1\n", flag.Args()[0], modeFlag) + os.Exit(2) + } + forcedGcflags = append(forcedGcflags, modeFlag) + forcedLdflags = append(forcedLdflags, modeFlag) + + if cfg.BuildContext.InstallSuffix != "" { + cfg.BuildContext.InstallSuffix += "_" + } + cfg.BuildContext.InstallSuffix += mode + cfg.BuildContext.BuildTags = append(cfg.BuildContext.BuildTags, mode) +} + +func buildModeInit() { + gccgo := cfg.BuildToolchainName == "gccgo" + var codegenArg string + platform := cfg.Goos + "/" + cfg.Goarch + switch cfg.BuildBuildmode { + case "archive": + pkgsFilter = pkgsNotMain + 
case "c-archive": + pkgsFilter = oneMainPkg + switch platform { + case "darwin/arm", "darwin/arm64": + codegenArg = "-shared" + default: + switch cfg.Goos { + case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": + // Use -shared so that the result is + // suitable for inclusion in a PIE or + // shared library. + codegenArg = "-shared" + } + } + cfg.ExeSuffix = ".a" + ldBuildmode = "c-archive" + case "c-shared": + pkgsFilter = oneMainPkg + if gccgo { + codegenArg = "-fPIC" + } else { + switch platform { + case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/ppc64le", "linux/s390x", + "android/amd64", "android/arm", "android/arm64", "android/386": + codegenArg = "-shared" + case "darwin/amd64", "darwin/386": + case "windows/amd64", "windows/386": + // Do not add usual .exe suffix to the .dll file. + cfg.ExeSuffix = "" + default: + base.Fatalf("-buildmode=c-shared not supported on %s\n", platform) + } + } + ldBuildmode = "c-shared" + case "default": + switch platform { + case "android/arm", "android/arm64", "android/amd64", "android/386": + codegenArg = "-shared" + ldBuildmode = "pie" + case "darwin/arm", "darwin/arm64": + codegenArg = "-shared" + fallthrough + default: + ldBuildmode = "exe" + } + case "exe": + pkgsFilter = pkgsMain + ldBuildmode = "exe" + // Set the pkgsFilter to oneMainPkg if the user passed a specific binary output + // and is using buildmode=exe for a better error message. + // See issue #20017. 
+ if cfg.BuildO != "" { + pkgsFilter = oneMainPkg + } + case "pie": + if cfg.BuildRace { + base.Fatalf("-buildmode=pie not supported when -race is enabled") + } + if gccgo { + base.Fatalf("-buildmode=pie not supported by gccgo") + } else { + switch platform { + case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x", + "android/amd64", "android/arm", "android/arm64", "android/386": + codegenArg = "-shared" + case "darwin/amd64": + codegenArg = "-shared" + default: + base.Fatalf("-buildmode=pie not supported on %s\n", platform) + } + } + ldBuildmode = "pie" + case "shared": + pkgsFilter = pkgsNotMain + if gccgo { + codegenArg = "-fPIC" + } else { + switch platform { + case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x": + default: + base.Fatalf("-buildmode=shared not supported on %s\n", platform) + } + codegenArg = "-dynlink" + } + if cfg.BuildO != "" { + base.Fatalf("-buildmode=shared and -o not supported together") + } + ldBuildmode = "shared" + case "plugin": + pkgsFilter = oneMainPkg + if gccgo { + codegenArg = "-fPIC" + } else { + switch platform { + case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/s390x", "linux/ppc64le", + "android/amd64", "android/arm", "android/arm64", "android/386": + case "darwin/amd64": + // Skip DWARF generation due to #21647 + forcedLdflags = append(forcedLdflags, "-w") + default: + base.Fatalf("-buildmode=plugin not supported on %s\n", platform) + } + codegenArg = "-dynlink" + } + cfg.ExeSuffix = ".so" + ldBuildmode = "plugin" + default: + base.Fatalf("buildmode=%s not supported", cfg.BuildBuildmode) + } + if cfg.BuildLinkshared { + if gccgo { + codegenArg = "-fPIC" + } else { + switch platform { + case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x": + forcedAsmflags = append(forcedAsmflags, "-D=GOBUILDMODE_shared=1") + default: + base.Fatalf("-linkshared not supported on %s\n", platform) + } + 
codegenArg = "-dynlink" + // TODO(mwhudson): remove -w when that gets fixed in linker. + forcedLdflags = append(forcedLdflags, "-linkshared", "-w") + } + } + if codegenArg != "" { + if gccgo { + forcedGccgoflags = append([]string{codegenArg}, forcedGccgoflags...) + } else { + forcedAsmflags = append([]string{codegenArg}, forcedAsmflags...) + forcedGcflags = append([]string{codegenArg}, forcedGcflags...) + } + // Don't alter InstallSuffix when modifying default codegen args. + if cfg.BuildBuildmode != "default" || cfg.BuildLinkshared { + if cfg.BuildContext.InstallSuffix != "" { + cfg.BuildContext.InstallSuffix += "_" + } + cfg.BuildContext.InstallSuffix += codegenArg[1:] + } + } +} diff --git a/src/cmd/go/main.go b/src/cmd/go/main.go index 75a46db98f2..b7e4034152a 100644 --- a/src/cmd/go/main.go +++ b/src/cmd/go/main.go @@ -77,6 +77,7 @@ func main() { base.Usage() } + cfg.CmdName = args[0] // for error messages if args[0] == "help" { help.Help(args[1:]) return @@ -89,6 +90,11 @@ func main() { fmt.Fprintf(os.Stderr, "warning: GOPATH set to GOROOT (%s) has no effect\n", gopath) } else { for _, p := range filepath.SplitList(gopath) { + // Some GOPATHs have empty directory elements - ignore them. + // See issue 21928 for details. + if p == "" { + continue + } // Note: using HasPrefix instead of Contains because a ~ can appear // in the middle of directory elements, such as /tmp/git-1.8.2~rc3 // or C:\PROGRA~1. Only ~ as a path prefix has meaning to the shell. 
diff --git a/src/cmd/go/note_test.go b/src/cmd/go/note_test.go index 289af9908a4..66c5d39bf77 100644 --- a/src/cmd/go/note_test.go +++ b/src/cmd/go/note_test.go @@ -9,33 +9,19 @@ import ( "runtime" "testing" - "cmd/go/internal/buildid" + "cmd/internal/buildid" ) func TestNoteReading(t *testing.T) { - testNoteReading(t) -} - -func TestNoteReading2K(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skipf("2kB is not enough on %s", runtime.GOOS) - } - // Set BuildIDReadSize to 2kB to exercise Mach-O parsing more strictly. - defer func(old int) { - buildid.BuildIDReadSize = old - }(buildid.BuildIDReadSize) - buildid.BuildIDReadSize = 2 * 1024 - - testNoteReading(t) -} - -func testNoteReading(t *testing.T) { + // cmd/internal/buildid already has tests that the basic reading works. + // This test is essentially checking that -ldflags=-buildid=XXX works, + // both in internal and external linking mode. tg := testgo(t) defer tg.cleanup() tg.tempFile("hello.go", `package main; func main() { print("hello, world\n") }`) const buildID = "TestNoteReading-Build-ID" tg.run("build", "-ldflags", "-buildid="+buildID, "-o", tg.path("hello.exe"), tg.path("hello.go")) - id, err := buildid.ReadBuildIDFromBinary(tg.path("hello.exe")) + id, err := buildid.ReadFile(tg.path("hello.exe")) if err != nil { t.Fatalf("reading build ID from hello binary: %v", err) } @@ -46,20 +32,41 @@ func testNoteReading(t *testing.T) { switch { case !build.Default.CgoEnabled: t.Skipf("skipping - no cgo, so assuming external linking not available") - case runtime.GOOS == "linux" && (runtime.GOARCH == "ppc64le" || runtime.GOARCH == "ppc64"): - t.Skipf("skipping - external linking not supported, golang.org/issue/11184") case runtime.GOOS == "openbsd" && runtime.GOARCH == "arm": t.Skipf("skipping - external linking not supported, golang.org/issue/10619") case runtime.GOOS == "plan9": t.Skipf("skipping - external linking not supported") } - tg.run("build", "-ldflags", "-buildid="+buildID+" -linkmode=external", 
"-o", tg.path("hello.exe"), tg.path("hello.go")) - id, err = buildid.ReadBuildIDFromBinary(tg.path("hello.exe")) + tg.run("build", "-ldflags", "-buildid="+buildID+" -linkmode=external", "-o", tg.path("hello2.exe"), tg.path("hello.go")) + id, err = buildid.ReadFile(tg.path("hello2.exe")) if err != nil { t.Fatalf("reading build ID from hello binary (linkmode=external): %v", err) } if id != buildID { t.Fatalf("buildID in hello binary = %q, want %q (linkmode=external)", id, buildID) } + + switch runtime.GOOS { + case "dragonfly", "freebsd", "linux", "netbsd", "openbsd": + // Test while forcing use of the gold linker, since in the past + // we've had trouble reading the notes generated by gold. + err := tg.doRun([]string{"build", "-ldflags", "-buildid=" + buildID + " -linkmode=external -extldflags=-fuse-ld=gold", "-o", tg.path("hello3.exe"), tg.path("hello.go")}) + if err != nil { + if tg.grepCountBoth("(invalid linker|gold|cannot find 'ld')") > 0 { + // It's not an error if gold isn't there. gcc claims it "cannot find 'ld'" if + // ld.gold is missing, see issue #22340. + t.Log("skipping gold test") + break + } + t.Fatalf("building hello binary: %v", err) + } + id, err = buildid.ReadFile(tg.path("hello3.exe")) + if err != nil { + t.Fatalf("reading build ID from hello binary (linkmode=external -extldflags=-fuse-ld=gold): %v", err) + } + if id != buildID { + t.Fatalf("buildID in hello binary = %q, want %q (linkmode=external -extldflags=-fuse-ld=gold)", id, buildID) + } + } } diff --git a/src/cmd/go/testdata/print_goroot.go b/src/cmd/go/testdata/print_goroot.go new file mode 100644 index 00000000000..54772910609 --- /dev/null +++ b/src/cmd/go/testdata/print_goroot.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import "runtime" + +func main() { + println(runtime.GOROOT()) +} diff --git a/src/cmd/go/testdata/src/complex/main.go b/src/cmd/go/testdata/src/complex/main.go new file mode 100644 index 00000000000..c38df019480 --- /dev/null +++ b/src/cmd/go/testdata/src/complex/main.go @@ -0,0 +1,12 @@ +package main + +import ( + _ "complex/nest/sub/test12" + _ "complex/nest/sub/test23" + "complex/w" + "v" +) + +func main() { + println(v.Hello + " " + w.World) +} diff --git a/src/cmd/go/testdata/src/complex/nest/sub/test12/p.go b/src/cmd/go/testdata/src/complex/nest/sub/test12/p.go new file mode 100644 index 00000000000..94943ec1bbe --- /dev/null +++ b/src/cmd/go/testdata/src/complex/nest/sub/test12/p.go @@ -0,0 +1,11 @@ +package test12 + +// Check that vendor/v1 is used but vendor/v2 is NOT used (sub/vendor/v2 wins). + +import ( + "v1" + "v2" +) + +const x = v1.ComplexNestVendorV1 +const y = v2.ComplexNestSubVendorV2 diff --git a/src/cmd/go/testdata/src/complex/nest/sub/test23/p.go b/src/cmd/go/testdata/src/complex/nest/sub/test23/p.go new file mode 100644 index 00000000000..8801a4812af --- /dev/null +++ b/src/cmd/go/testdata/src/complex/nest/sub/test23/p.go @@ -0,0 +1,11 @@ +package test23 + +// Check that vendor/v3 is used but vendor/v2 is NOT used (sub/vendor/v2 wins). 
+ +import ( + "v2" + "v3" +) + +const x = v3.ComplexNestVendorV3 +const y = v2.ComplexNestSubVendorV2 diff --git a/src/cmd/go/testdata/src/complex/nest/sub/vendor/v2/v2.go b/src/cmd/go/testdata/src/complex/nest/sub/vendor/v2/v2.go new file mode 100644 index 00000000000..2991871710e --- /dev/null +++ b/src/cmd/go/testdata/src/complex/nest/sub/vendor/v2/v2.go @@ -0,0 +1,3 @@ +package v2 + +const ComplexNestSubVendorV2 = true diff --git a/src/cmd/go/testdata/src/complex/nest/vendor/v1/v1.go b/src/cmd/go/testdata/src/complex/nest/vendor/v1/v1.go new file mode 100644 index 00000000000..a55f5290a9a --- /dev/null +++ b/src/cmd/go/testdata/src/complex/nest/vendor/v1/v1.go @@ -0,0 +1,3 @@ +package v1 + +const ComplexNestVendorV1 = true diff --git a/src/cmd/go/testdata/src/complex/nest/vendor/v2/v2.go b/src/cmd/go/testdata/src/complex/nest/vendor/v2/v2.go new file mode 100644 index 00000000000..ac94def4e3e --- /dev/null +++ b/src/cmd/go/testdata/src/complex/nest/vendor/v2/v2.go @@ -0,0 +1,3 @@ +package v2 + +const ComplexNestVendorV2 = true diff --git a/src/cmd/go/testdata/src/complex/nest/vendor/v3/v3.go b/src/cmd/go/testdata/src/complex/nest/vendor/v3/v3.go new file mode 100644 index 00000000000..abf99b95745 --- /dev/null +++ b/src/cmd/go/testdata/src/complex/nest/vendor/v3/v3.go @@ -0,0 +1,3 @@ +package v3 + +const ComplexNestVendorV3 = true diff --git a/src/cmd/go/testdata/src/complex/vendor/v/v.go b/src/cmd/go/testdata/src/complex/vendor/v/v.go new file mode 100644 index 00000000000..bb20d86f25a --- /dev/null +++ b/src/cmd/go/testdata/src/complex/vendor/v/v.go @@ -0,0 +1,3 @@ +package v + +const Hello = "hello" diff --git a/src/cmd/go/testdata/src/complex/w/w.go b/src/cmd/go/testdata/src/complex/w/w.go new file mode 100644 index 00000000000..a9c7fbb3094 --- /dev/null +++ b/src/cmd/go/testdata/src/complex/w/w.go @@ -0,0 +1,3 @@ +package w + +const World = "world" diff --git a/src/cmd/go/testdata/src/coverasm/p.go b/src/cmd/go/testdata/src/coverasm/p.go new file mode 
100644 index 00000000000..ab0c300d723 --- /dev/null +++ b/src/cmd/go/testdata/src/coverasm/p.go @@ -0,0 +1,7 @@ +package p + +func f() + +func g() { + println("g") +} diff --git a/src/cmd/go/testdata/src/coverasm/p.s b/src/cmd/go/testdata/src/coverasm/p.s new file mode 100644 index 00000000000..5e728f9946d --- /dev/null +++ b/src/cmd/go/testdata/src/coverasm/p.s @@ -0,0 +1,2 @@ +// empty asm file, +// so go test doesn't complain about declaration of f in p.go. diff --git a/src/cmd/go/testdata/src/coverasm/p_test.go b/src/cmd/go/testdata/src/coverasm/p_test.go new file mode 100644 index 00000000000..3cb3bd5664b --- /dev/null +++ b/src/cmd/go/testdata/src/coverasm/p_test.go @@ -0,0 +1,7 @@ +package p + +import "testing" + +func Test(t *testing.T) { + g() +} diff --git a/src/cmd/go/testdata/src/coverbad/p.go b/src/cmd/go/testdata/src/coverbad/p.go new file mode 100644 index 00000000000..16504a401eb --- /dev/null +++ b/src/cmd/go/testdata/src/coverbad/p.go @@ -0,0 +1,5 @@ +package p + +func f() { + g() +} diff --git a/src/cmd/go/testdata/src/coverbad/p1.go b/src/cmd/go/testdata/src/coverbad/p1.go new file mode 100644 index 00000000000..2d25c8e1908 --- /dev/null +++ b/src/cmd/go/testdata/src/coverbad/p1.go @@ -0,0 +1,7 @@ +package p + +import "C" + +func h() { + j() +} diff --git a/src/cmd/go/testdata/src/coverbad/p_test.go b/src/cmd/go/testdata/src/coverbad/p_test.go new file mode 100644 index 00000000000..3a876d6296c --- /dev/null +++ b/src/cmd/go/testdata/src/coverbad/p_test.go @@ -0,0 +1,5 @@ +package p + +import "testing" + +func Test(t *testing.T) {} diff --git a/src/cmd/go/testdata/src/coverdep/p.go b/src/cmd/go/testdata/src/coverdep/p.go new file mode 100644 index 00000000000..6baf6d5f0c7 --- /dev/null +++ b/src/cmd/go/testdata/src/coverdep/p.go @@ -0,0 +1,6 @@ +package p + +import _ "coverdep/p1" + +func F() { +} diff --git a/src/cmd/go/testdata/src/coverdep/p1/p1.go b/src/cmd/go/testdata/src/coverdep/p1/p1.go new file mode 100644 index 00000000000..8ae793d55d7 
--- /dev/null +++ b/src/cmd/go/testdata/src/coverdep/p1/p1.go @@ -0,0 +1,3 @@ +package p1 + +import _ "errors" diff --git a/src/cmd/go/testdata/src/coverdep/p_test.go b/src/cmd/go/testdata/src/coverdep/p_test.go new file mode 100644 index 00000000000..11a14343ea9 --- /dev/null +++ b/src/cmd/go/testdata/src/coverdep/p_test.go @@ -0,0 +1,7 @@ +package p + +import "testing" + +func Test(t *testing.T) { + F() +} diff --git a/src/cmd/go/testdata/src/failfast_test.go b/src/cmd/go/testdata/src/failfast_test.go new file mode 100644 index 00000000000..fef4d2a35e1 --- /dev/null +++ b/src/cmd/go/testdata/src/failfast_test.go @@ -0,0 +1,54 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package failfast + +import "testing" + +func TestA(t *testing.T) { + // Edge-case testing, mixing unparallel tests too + t.Logf("LOG: %s", t.Name()) +} + +func TestFailingA(t *testing.T) { + t.Errorf("FAIL - %s", t.Name()) +} + +func TestB(t *testing.T) { + // Edge-case testing, mixing unparallel tests too + t.Logf("LOG: %s", t.Name()) +} + +func TestParallelFailingA(t *testing.T) { + t.Parallel() + t.Errorf("FAIL - %s", t.Name()) +} + +func TestParallelFailingB(t *testing.T) { + t.Parallel() + t.Errorf("FAIL - %s", t.Name()) +} + +func TestParallelFailingSubtestsA(t *testing.T) { + t.Parallel() + t.Run("TestFailingSubtestsA1", func(t *testing.T) { + t.Errorf("FAIL - %s", t.Name()) + }) + t.Run("TestFailingSubtestsA2", func(t *testing.T) { + t.Errorf("FAIL - %s", t.Name()) + }) +} + +func TestFailingSubtestsA(t *testing.T) { + t.Run("TestFailingSubtestsA1", func(t *testing.T) { + t.Errorf("FAIL - %s", t.Name()) + }) + t.Run("TestFailingSubtestsA2", func(t *testing.T) { + t.Errorf("FAIL - %s", t.Name()) + }) +} + +func TestFailingB(t *testing.T) { + t.Errorf("FAIL - %s", t.Name()) +} diff --git a/src/cmd/go/testdata/src/not_main/not_main.go 
b/src/cmd/go/testdata/src/not_main/not_main.go new file mode 100644 index 00000000000..75a397c6cba --- /dev/null +++ b/src/cmd/go/testdata/src/not_main/not_main.go @@ -0,0 +1,3 @@ +package not_main + +func F() {} diff --git a/src/cmd/go/testdata/src/skipper/skip_test.go b/src/cmd/go/testdata/src/skipper/skip_test.go new file mode 100644 index 00000000000..58e6dc505b7 --- /dev/null +++ b/src/cmd/go/testdata/src/skipper/skip_test.go @@ -0,0 +1,7 @@ +package skipper + +import "testing" + +func Test(t *testing.T) { + t.Skip("skipping") +} diff --git a/src/cmd/go/testdata/src/sleepy1/p_test.go b/src/cmd/go/testdata/src/sleepy1/p_test.go new file mode 100644 index 00000000000..333be7d8e4c --- /dev/null +++ b/src/cmd/go/testdata/src/sleepy1/p_test.go @@ -0,0 +1,10 @@ +package p + +import ( + "testing" + "time" +) + +func Test1(t *testing.T) { + time.Sleep(200 * time.Millisecond) +} diff --git a/src/cmd/go/testdata/src/sleepy2/p_test.go b/src/cmd/go/testdata/src/sleepy2/p_test.go new file mode 100644 index 00000000000..333be7d8e4c --- /dev/null +++ b/src/cmd/go/testdata/src/sleepy2/p_test.go @@ -0,0 +1,10 @@ +package p + +import ( + "testing" + "time" +) + +func Test1(t *testing.T) { + time.Sleep(200 * time.Millisecond) +} diff --git a/src/cmd/go/testdata/src/sleepybad/p.go b/src/cmd/go/testdata/src/sleepybad/p.go new file mode 100644 index 00000000000..e05b403e392 --- /dev/null +++ b/src/cmd/go/testdata/src/sleepybad/p.go @@ -0,0 +1,5 @@ +package p + +// missing import + +var _ = io.DoesNotExist diff --git a/src/cmd/go/testdata/src/testrace/race_test.go b/src/cmd/go/testdata/src/testrace/race_test.go index 264dcf0d8a0..7ec0c6d17a3 100644 --- a/src/cmd/go/testdata/src/testrace/race_test.go +++ b/src/cmd/go/testdata/src/testrace/race_test.go @@ -12,6 +12,7 @@ func TestRace(t *testing.T) { }() x = 3 <-c + _ = x } } @@ -25,5 +26,6 @@ func BenchmarkRace(b *testing.B) { }() x = 3 <-c + _ = x } } diff --git a/src/cmd/go/testdata/src/vetcycle/p.go 
b/src/cmd/go/testdata/src/vetcycle/p.go new file mode 100644 index 00000000000..857c3a611fd --- /dev/null +++ b/src/cmd/go/testdata/src/vetcycle/p.go @@ -0,0 +1,12 @@ +package p + + +type ( + _ interface{ m(B1) } + A1 interface{ a(D1) } + B1 interface{ A1 } + C1 interface{ B1 /* ERROR issue #18395 */ } + D1 interface{ C1 } +) + +var _ A1 = C1 /* ERROR cannot use C1 */ (nil) diff --git a/src/cmd/go/testdata/standalone_main_normal_test.go b/src/cmd/go/testdata/standalone_main_normal_test.go new file mode 100644 index 00000000000..018ce75b2e3 --- /dev/null +++ b/src/cmd/go/testdata/standalone_main_normal_test.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package standalone_main_normal_test + +import "testing" + +func TestMain(t *testing.T) { +} diff --git a/src/cmd/go/testdata/standalone_main_wrong_test.go b/src/cmd/go/testdata/standalone_main_wrong_test.go new file mode 100644 index 00000000000..59998873f94 --- /dev/null +++ b/src/cmd/go/testdata/standalone_main_wrong_test.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package standalone_main_wrong_test + +import "testing" + +func TestMain(m *testing.Main) { +} diff --git a/src/cmd/go/internal/buildid/buildid.go b/src/cmd/internal/buildid/buildid.go similarity index 69% rename from src/cmd/go/internal/buildid/buildid.go rename to src/cmd/internal/buildid/buildid.go index 091c9090c86..1740c88292b 100644 --- a/src/cmd/go/internal/buildid/buildid.go +++ b/src/cmd/internal/buildid/buildid.go @@ -6,12 +6,10 @@ package buildid import ( "bytes" - "cmd/go/internal/cfg" "fmt" "io" "os" "strconv" - "strings" ) var ( @@ -27,23 +25,22 @@ var ( buildid = []byte("build id ") ) -// ReadBuildID reads the build ID from an archive or binary. 
-// It only supports the gc toolchain. -// Other toolchain maintainers should adjust this function. -func ReadBuildID(name, target string) (id string, err error) { - if cfg.BuildToolchainName != "gc" { - return "", errBuildIDToolchain +// ReadFile reads the build ID from an archive or executable file. +// It only supports archives from the gc toolchain. +// TODO(rsc): Figure out what gccgo and llvm are going to do for archives. +func ReadFile(name string) (id string, err error) { + f, err := os.Open(name) + if err != nil { + return "", err } + defer f.Close() - // For commands, read build ID directly from binary. - if name == "main" { - return ReadBuildIDFromBinary(target) + buf := make([]byte, 8) + if _, err := f.ReadAt(buf, 0); err != nil { + return "", err } - - // Otherwise, we expect to have an archive (.a) file, - // and we can read the build ID from the Go export data. - if !strings.HasSuffix(target, ".a") { - return "", &os.PathError{Op: "parse", Path: target, Err: errBuildIDUnknown} + if string(buf) != "!\n" { + return readBinary(name, f) } // Read just enough of the target to fetch the build ID. @@ -56,20 +53,14 @@ func ReadBuildID(name, target string) (id string, err error) { // // The variable-sized strings are GOOS, GOARCH, and the experiment list (X:none). // Reading the first 1024 bytes should be plenty. - f, err := os.Open(target) - if err != nil { - return "", err - } data := make([]byte, 1024) n, err := io.ReadFull(f, data) - f.Close() - if err != nil && n == 0 { return "", err } bad := func() (string, error) { - return "", &os.PathError{Op: "parse", Path: target, Err: errBuildIDMalformed} + return "", &os.PathError{Op: "parse", Path: name, Err: errBuildIDMalformed} } // Archive header. @@ -122,9 +113,9 @@ var ( } ) -var BuildIDReadSize = 32 * 1024 // changed for testing +var readSize = 32 * 1024 // changed for testing -// ReadBuildIDFromBinary reads the build ID from a binary. +// readBinary reads the build ID from a binary. 
// // ELF binaries store the build ID in a proper PT_NOTE section. // @@ -133,11 +124,7 @@ var BuildIDReadSize = 32 * 1024 // changed for testing // of the text segment, which should appear near the beginning // of the file. This is clumsy but fairly portable. Custom locations // can be added for other binary types as needed, like we did for ELF. -func ReadBuildIDFromBinary(filename string) (id string, err error) { - if filename == "" { - return "", &os.PathError{Op: "parse", Path: filename, Err: errBuildIDUnknown} - } - +func readBinary(name string, f *os.File) (id string, err error) { // Read the first 32 kB of the binary file. // That should be enough to find the build ID. // In ELF files, the build ID is in the leading headers, @@ -151,13 +138,7 @@ func ReadBuildIDFromBinary(filename string) (id string, err error) { // Plan 9: 0x20 // Windows: 0x600 // - f, err := os.Open(filename) - if err != nil { - return "", err - } - defer f.Close() - - data := make([]byte, BuildIDReadSize) + data := make([]byte, readSize) _, err = io.ReadFull(f, data) if err == io.ErrUnexpectedEOF { err = nil @@ -167,19 +148,18 @@ func ReadBuildIDFromBinary(filename string) (id string, err error) { } if bytes.HasPrefix(data, elfPrefix) { - return readELFGoBuildID(filename, f, data) + return readELF(name, f, data) } for _, m := range machoPrefixes { if bytes.HasPrefix(data, m) { - return readMachoGoBuildID(filename, f, data) + return readMacho(name, f, data) } } - - return readRawGoBuildID(filename, data) + return readRaw(name, data) } -// readRawGoBuildID finds the raw build ID stored in text segment data. -func readRawGoBuildID(filename string, data []byte) (id string, err error) { +// readRaw finds the raw build ID stored in text segment data. +func readRaw(name string, data []byte) (id string, err error) { i := bytes.Index(data, goBuildPrefix) if i < 0 { // Missing. Treat as successful but build ID empty. 
@@ -188,14 +168,13 @@ func readRawGoBuildID(filename string, data []byte) (id string, err error) { j := bytes.Index(data[i+len(goBuildPrefix):], goBuildEnd) if j < 0 { - return "", &os.PathError{Op: "parse", Path: filename, Err: errBuildIDMalformed} + return "", &os.PathError{Op: "parse", Path: name, Err: errBuildIDMalformed} } quoted := data[i+len(goBuildPrefix)-1 : i+len(goBuildPrefix)+j+1] id, err = strconv.Unquote(string(quoted)) if err != nil { - return "", &os.PathError{Op: "parse", Path: filename, Err: errBuildIDMalformed} + return "", &os.PathError{Op: "parse", Path: name, Err: errBuildIDMalformed} } - return id, nil } diff --git a/src/cmd/internal/buildid/buildid_test.go b/src/cmd/internal/buildid/buildid_test.go new file mode 100644 index 00000000000..15481dd7623 --- /dev/null +++ b/src/cmd/internal/buildid/buildid_test.go @@ -0,0 +1,137 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package buildid + +import ( + "bytes" + "crypto/sha256" + "io/ioutil" + "os" + "reflect" + "testing" +) + +const ( + expectedID = "abcdefghijklmnopqrstuvwxyz.1234567890123456789012345678901234567890123456789012345678901234" + newID = "bcdefghijklmnopqrstuvwxyza.2345678901234567890123456789012345678901234567890123456789012341" +) + +func TestReadFile(t *testing.T) { + var files = []string{ + "p.a", + "a.elf", + "a.macho", + "a.pe", + } + + f, err := ioutil.TempFile("", "buildid-test-") + if err != nil { + t.Fatal(err) + } + tmp := f.Name() + defer os.Remove(tmp) + f.Close() + + for _, f := range files { + id, err := ReadFile("testdata/" + f) + if id != expectedID || err != nil { + t.Errorf("ReadFile(testdata/%s) = %q, %v, want %q, nil", f, id, err, expectedID) + } + old := readSize + readSize = 2048 + id, err = ReadFile("testdata/" + f) + readSize = old + if id != expectedID || err != nil { + t.Errorf("ReadFile(testdata/%s) [readSize=2k] = %q, %v, want %q, nil", f, id, err, expectedID) + } + + data, err := ioutil.ReadFile("testdata/" + f) + if err != nil { + t.Fatal(err) + } + m, _, err := FindAndHash(bytes.NewReader(data), expectedID, 1024) + if err != nil { + t.Errorf("FindAndHash(testdata/%s): %v", f, err) + continue + } + if err := ioutil.WriteFile(tmp, data, 0666); err != nil { + t.Error(err) + continue + } + tf, err := os.OpenFile(tmp, os.O_WRONLY, 0) + if err != nil { + t.Error(err) + continue + } + err = Rewrite(tf, m, newID) + err2 := tf.Close() + if err != nil { + t.Errorf("Rewrite(testdata/%s): %v", f, err) + continue + } + if err2 != nil { + t.Fatal(err2) + } + + id, err = ReadFile(tmp) + if id != newID || err != nil { + t.Errorf("ReadFile(testdata/%s after Rewrite) = %q, %v, want %q, nil", f, id, err, newID) + } + } +} + +func TestFindAndHash(t *testing.T) { + buf := make([]byte, 64) + buf2 := make([]byte, 64) + id := make([]byte, 8) + zero := make([]byte, 8) + for i := range id { + id[i] = byte(i) + } + numError := 0 + errorf := func(msg string, 
args ...interface{}) { + t.Errorf(msg, args...) + if numError++; numError > 20 { + t.Logf("stopping after too many errors") + t.FailNow() + } + } + for bufSize := len(id); bufSize <= len(buf); bufSize++ { + for j := range buf { + for k := 0; k < 2*len(id) && j+k < len(buf); k++ { + for i := range buf { + buf[i] = 1 + } + copy(buf[j:], id) + copy(buf[j+k:], id) + var m []int64 + if j+len(id) <= j+k { + m = append(m, int64(j)) + } + if j+k+len(id) <= len(buf) { + m = append(m, int64(j+k)) + } + copy(buf2, buf) + for _, p := range m { + copy(buf2[p:], zero) + } + h := sha256.Sum256(buf2) + + matches, hash, err := FindAndHash(bytes.NewReader(buf), string(id), bufSize) + if err != nil { + errorf("bufSize=%d j=%d k=%d: findAndHash: %v", bufSize, j, k, err) + continue + } + if !reflect.DeepEqual(matches, m) { + errorf("bufSize=%d j=%d k=%d: findAndHash: matches=%v, want %v", bufSize, j, k, matches, m) + continue + } + if hash != h { + errorf("bufSize=%d j=%d k=%d: findAndHash: matches correct, but hash=%x, want %x", bufSize, j, k, hash, h) + } + } + } + } +} diff --git a/src/cmd/go/internal/buildid/note.go b/src/cmd/internal/buildid/note.go similarity index 88% rename from src/cmd/go/internal/buildid/note.go rename to src/cmd/internal/buildid/note.go index 68c91e27047..5156cbd88c4 100644 --- a/src/cmd/go/internal/buildid/note.go +++ b/src/cmd/internal/buildid/note.go @@ -73,7 +73,7 @@ var elfGoNote = []byte("Go\x00\x00") // The Go build ID is stored in a note described by an ELF PT_NOTE prog // header. The caller has already opened filename, to get f, and read // at least 4 kB out, in data. -func readELFGoBuildID(filename string, f *os.File, data []byte) (buildid string, err error) { +func readELF(name string, f *os.File, data []byte) (buildid string, err error) { // Assume the note content is in the data, already read. 
// Rewrite the ELF header to set shnum to 0, so that we can pass // the data to elf.NewFile and it will decode the Prog list but not @@ -93,7 +93,7 @@ func readELFGoBuildID(filename string, f *os.File, data []byte) (buildid string, ef, err := elf.NewFile(bytes.NewReader(data)) if err != nil { - return "", &os.PathError{Path: filename, Op: "parse", Err: err} + return "", &os.PathError{Path: name, Op: "parse", Err: err} } for _, p := range ef.Progs { if p.Type != elf.PT_NOTE || p.Filesz < 16 { @@ -151,23 +151,23 @@ func readELFGoBuildID(filename string, f *os.File, data []byte) (buildid string, // The caller has already opened filename, to get f, and read a few kB out, in data. // Sadly, that's not guaranteed to hold the note, because there is an arbitrary amount // of other junk placed in the file ahead of the main text. -func readMachoGoBuildID(filename string, f *os.File, data []byte) (buildid string, err error) { +func readMacho(name string, f *os.File, data []byte) (buildid string, err error) { // If the data we want has already been read, don't worry about Mach-O parsing. // This is both an optimization and a hedge against the Mach-O parsing failing // in the future due to, for example, the name of the __text section changing. - if b, err := readRawGoBuildID(filename, data); b != "" && err == nil { + if b, err := readRaw(name, data); b != "" && err == nil { return b, err } mf, err := macho.NewFile(f) if err != nil { - return "", &os.PathError{Path: filename, Op: "parse", Err: err} + return "", &os.PathError{Path: name, Op: "parse", Err: err} } sect := mf.Section("__text") if sect == nil { // Every binary has a __text section. Something is wrong. 
- return "", &os.PathError{Path: filename, Op: "parse", Err: fmt.Errorf("cannot find __text section")} + return "", &os.PathError{Path: name, Op: "parse", Err: fmt.Errorf("cannot find __text section")} } // It should be in the first few bytes, but read a lot just in case, @@ -175,13 +175,13 @@ func readMachoGoBuildID(filename string, f *os.File, data []byte) (buildid strin // There shouldn't be much difference between reading 4kB and 32kB: // the hard part is getting to the data, not transferring it. n := sect.Size - if n > uint64(BuildIDReadSize) { - n = uint64(BuildIDReadSize) + if n > uint64(readSize) { + n = uint64(readSize) } buf := make([]byte, n) if _, err := f.ReadAt(buf, int64(sect.Offset)); err != nil { return "", err } - return readRawGoBuildID(filename, buf) + return readRaw(name, buf) } diff --git a/src/cmd/internal/buildid/rewrite.go b/src/cmd/internal/buildid/rewrite.go new file mode 100644 index 00000000000..5be54552a6d --- /dev/null +++ b/src/cmd/internal/buildid/rewrite.go @@ -0,0 +1,91 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package buildid + +import ( + "bytes" + "crypto/sha256" + "fmt" + "io" +) + +// FindAndHash reads all of r and returns the offsets of occurrences of id. +// While reading, findAndHash also computes and returns +// a hash of the content of r, but with occurrences of id replaced by zeros. +// FindAndHash reads bufSize bytes from r at a time. +// If bufSize == 0, FindAndHash uses a reasonable default. 
+func FindAndHash(r io.Reader, id string, bufSize int) (matches []int64, hash [32]byte, err error) { + if bufSize == 0 { + bufSize = 31 * 1024 // bufSize+little will likely fit in 32 kB + } + if len(id) > bufSize { + return nil, [32]byte{}, fmt.Errorf("buildid.FindAndHash: buffer too small") + } + zeros := make([]byte, len(id)) + idBytes := []byte(id) + + // The strategy is to read the file through buf, looking for id, + // but we need to worry about what happens if id is broken up + // and returned in parts by two different reads. + // We allocate a tiny buffer (at least len(id)) and a big buffer (bufSize bytes) + // next to each other in memory and then copy the tail of + // one read into the tiny buffer before reading new data into the big buffer. + // The search for id is over the entire tiny+big buffer. + tiny := (len(id) + 127) &^ 127 // round up to 128-aligned + buf := make([]byte, tiny+bufSize) + h := sha256.New() + start := tiny + for offset := int64(0); ; { + // The file offset maintained by the loop corresponds to &buf[tiny]. + // buf[start:tiny] is left over from previous iteration. + // After reading n bytes into buf[tiny:], we process buf[start:tiny+n]. + n, err := io.ReadFull(r, buf[tiny:]) + if err != io.ErrUnexpectedEOF && err != io.EOF && err != nil { + return nil, [32]byte{}, err + } + + // Process any matches. + for { + i := bytes.Index(buf[start:tiny+n], idBytes) + if i < 0 { + break + } + matches = append(matches, offset+int64(start+i-tiny)) + h.Write(buf[start : start+i]) + h.Write(zeros) + start += i + len(id) + } + if n < bufSize { + // Did not fill buffer, must be at end of file. + h.Write(buf[start : tiny+n]) + break + } + + // Process all but final tiny bytes of buf (bufSize = len(buf)-tiny). + // Note that start > len(buf)-tiny is possible, if the search above + // found an id ending in the final tiny fringe. That's OK. 
+ if start < len(buf)-tiny { + h.Write(buf[start : len(buf)-tiny]) + start = len(buf) - tiny + } + + // Slide ending tiny-sized fringe to beginning of buffer. + copy(buf[0:], buf[bufSize:]) + start -= bufSize + offset += int64(bufSize) + } + h.Sum(hash[:0]) + return matches, hash, nil +} + +func Rewrite(w io.WriterAt, pos []int64, id string) error { + b := []byte(id) + for _, p := range pos { + if _, err := w.WriteAt(b, p); err != nil { + return err + } + } + return nil +} diff --git a/src/cmd/internal/buildid/testdata/a.elf b/src/cmd/internal/buildid/testdata/a.elf new file mode 100755 index 00000000000..f63128921aa Binary files /dev/null and b/src/cmd/internal/buildid/testdata/a.elf differ diff --git a/src/cmd/internal/buildid/testdata/a.macho b/src/cmd/internal/buildid/testdata/a.macho new file mode 100755 index 00000000000..fbbd57c1fe3 Binary files /dev/null and b/src/cmd/internal/buildid/testdata/a.macho differ diff --git a/src/cmd/internal/buildid/testdata/a.pe b/src/cmd/internal/buildid/testdata/a.pe new file mode 100755 index 00000000000..91202728c3f Binary files /dev/null and b/src/cmd/internal/buildid/testdata/a.pe differ diff --git a/src/cmd/internal/buildid/testdata/p.a b/src/cmd/internal/buildid/testdata/p.a new file mode 100644 index 00000000000..dcc3e76ef8b Binary files /dev/null and b/src/cmd/internal/buildid/testdata/p.a differ diff --git a/src/cmd/internal/dwarf/dwarf.go b/src/cmd/internal/dwarf/dwarf.go index a617c389f92..8997fbf41ed 100644 --- a/src/cmd/internal/dwarf/dwarf.go +++ b/src/cmd/internal/dwarf/dwarf.go @@ -10,26 +10,72 @@ package dwarf import ( "errors" "fmt" + "sort" + "strings" ) // InfoPrefix is the prefix for all the symbols containing DWARF info entries. const InfoPrefix = "go.info." +// RangePrefix is the prefix for all the symbols containing DWARF location lists. +const LocPrefix = "go.loc." + // RangePrefix is the prefix for all the symbols containing DWARF range lists. const RangePrefix = "go.range." 
+// ConstInfoPrefix is the prefix for all symbols containing DWARF info +// entries that contain constants. +const ConstInfoPrefix = "go.constinfo." + +// CUInfoPrefix is the prefix for symbols containing information to +// populate the DWARF compilation unit info entries. +const CUInfoPrefix = "go.cuinfo." + +// Used to form the symbol name assigned to the DWARF 'abstract subprogram" +// info entry for a function +const AbstractFuncSuffix = "$abstract" + +// Controls logging/debugging for selected aspects of DWARF subprogram +// generation (functions, scopes). +var logDwarf bool + // Sym represents a symbol. type Sym interface { Len() int64 } +// A Location represents a variable's location at a particular PC range. +// It becomes a location list entry in the DWARF. +type Location struct { + StartPC, EndPC int64 + Pieces []Piece +} + +// A Piece represents the location of a particular part of a variable. +// It becomes part of a location list entry (a DW_OP_piece) in the DWARF. +type Piece struct { + Length int64 + StackOffset int32 + RegNum int16 + Missing bool + OnStack bool // if true, RegNum is unset. +} + // A Var represents a local variable or a function parameter. type Var struct { - Name string - Abbrev int // Either DW_ABRV_AUTO or DW_ABRV_PARAM - Offset int32 - Scope int32 - Type Sym + Name string + Abbrev int // Either DW_ABRV_AUTO[_LOCLIST] or DW_ABRV_PARAM[_LOCLIST] + IsReturnValue bool + IsInlFormal bool + StackOffset int32 + LocationList []Location + Scope int32 + Type Sym + DeclFile string + DeclLine uint + DeclCol uint + InlIndex int32 // subtract 1 to form real index into InlTree + ChildIndex int32 // child DIE index in abstract function } // A Scope represents a lexical scope. All variables declared within a @@ -49,6 +95,27 @@ type Range struct { Start, End int64 } +// This container is used by the PutFunc* variants below when +// creating the DWARF subprogram DIE(s) for a function. 
+type FnState struct { + Name string + Importpath string + Info Sym + Filesym Sym + Loc Sym + Ranges Sym + Absfn Sym + StartPC Sym + Size int64 + External bool + Scopes []Scope + InlCalls InlCalls +} + +func EnableLogging(doit bool) { + logDwarf = doit +} + // UnifyRanges merges the list of ranges of c into the list of ranges of s func (s *Scope) UnifyRanges(c *Scope) { out := make([]Range, 0, len(s.Ranges)+len(c.Ranges)) @@ -84,15 +151,51 @@ func (s *Scope) UnifyRanges(c *Scope) { s.Ranges = out } +type InlCalls struct { + Calls []InlCall +} + +type InlCall struct { + // index into ctx.InlTree describing the call inlined here + InlIndex int + + // Symbol of file containing inlined call site (really *obj.LSym). + CallFile Sym + + // Line number of inlined call site. + CallLine uint32 + + // Dwarf abstract subroutine symbol (really *obj.LSym). + AbsFunSym Sym + + // Indices of child inlines within Calls array above. + Children []int + + // entries in this list are PAUTO's created by the inliner to + // capture the promoted formals and locals of the inlined callee. + InlVars []*Var + + // PC ranges for this inlined call. + Ranges []Range + + // Root call (not a child of some other call). + Root bool +} + // A Context specifies how to add data to a Sym. type Context interface { PtrSize() int AddInt(s Sym, size int, i int64) AddBytes(s Sym, b []byte) AddAddress(s Sym, t interface{}, ofs int64) + AddCURelativeAddress(s Sym, t interface{}, ofs int64) AddSectionOffset(s Sym, size int, t interface{}, ofs int64) + CurrentOffset(s Sym) int64 + RecordDclReference(from Sym, to Sym, dclIdx int, inlIndex int) + RecordChildDieOffsets(s Sym, vars []*Var, offsets []int32) AddString(s Sym, v string) - SymValue(s Sym) int64 + AddFileRef(s Sym, f interface{}) + Logf(format string, args ...interface{}) } // AppendUleb128 appends v to b using DWARF's unsigned LEB128 encoding. @@ -205,15 +308,28 @@ const ( ) // Index into the abbrevs table below. 
-// Keep in sync with ispubname() and ispubtype() below. +// Keep in sync with ispubname() and ispubtype() in ld/dwarf.go. // ispubtype considers >= NULLTYPE public const ( DW_ABRV_NULL = iota DW_ABRV_COMPUNIT DW_ABRV_FUNCTION + DW_ABRV_FUNCTION_ABSTRACT + DW_ABRV_FUNCTION_CONCRETE + DW_ABRV_INLINED_SUBROUTINE + DW_ABRV_INLINED_SUBROUTINE_RANGES DW_ABRV_VARIABLE + DW_ABRV_INT_CONSTANT DW_ABRV_AUTO + DW_ABRV_AUTO_LOCLIST + DW_ABRV_AUTO_ABSTRACT + DW_ABRV_AUTO_CONCRETE + DW_ABRV_AUTO_CONCRETE_LOCLIST DW_ABRV_PARAM + DW_ABRV_PARAM_LOCLIST + DW_ABRV_PARAM_ABSTRACT + DW_ABRV_PARAM_CONCRETE + DW_ABRV_PARAM_CONCRETE_LOCLIST DW_ABRV_LEXICAL_BLOCK_RANGES DW_ABRV_LEXICAL_BLOCK_SIMPLE DW_ABRV_STRUCTFIELD @@ -253,9 +369,9 @@ var abbrevs = [DW_NABRV]dwAbbrev{ []dwAttrForm{ {DW_AT_name, DW_FORM_string}, {DW_AT_language, DW_FORM_data1}, - {DW_AT_low_pc, DW_FORM_addr}, - {DW_AT_high_pc, DW_FORM_addr}, {DW_AT_stmt_list, DW_FORM_sec_offset}, + {DW_AT_low_pc, DW_FORM_addr}, + {DW_AT_ranges, DW_FORM_sec_offset}, {DW_AT_comp_dir, DW_FORM_string}, {DW_AT_producer, DW_FORM_string}, }, @@ -270,10 +386,59 @@ var abbrevs = [DW_NABRV]dwAbbrev{ {DW_AT_low_pc, DW_FORM_addr}, {DW_AT_high_pc, DW_FORM_addr}, {DW_AT_frame_base, DW_FORM_block1}, + {DW_AT_decl_file, DW_FORM_data4}, {DW_AT_external, DW_FORM_flag}, }, }, + /* FUNCTION_ABSTRACT */ + { + DW_TAG_subprogram, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_inline, DW_FORM_data1}, + {DW_AT_external, DW_FORM_flag}, + }, + }, + + /* FUNCTION_CONCRETE */ + { + DW_TAG_subprogram, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_low_pc, DW_FORM_addr}, + {DW_AT_high_pc, DW_FORM_addr}, + {DW_AT_frame_base, DW_FORM_block1}, + }, + }, + + /* INLINED_SUBROUTINE */ + { + DW_TAG_inlined_subroutine, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_low_pc, DW_FORM_addr}, + {DW_AT_high_pc, DW_FORM_addr}, + {DW_AT_call_file, DW_FORM_data4}, + 
{DW_AT_call_line, DW_FORM_udata}, + }, + }, + + /* INLINED_SUBROUTINE_RANGES */ + { + DW_TAG_inlined_subroutine, + DW_CHILDREN_yes, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_ranges, DW_FORM_sec_offset}, + {DW_AT_call_file, DW_FORM_data4}, + {DW_AT_call_line, DW_FORM_udata}, + }, + }, + /* VARIABLE */ { DW_TAG_variable, @@ -286,14 +451,69 @@ var abbrevs = [DW_NABRV]dwAbbrev{ }, }, + /* INT CONSTANT */ + { + DW_TAG_constant, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_const_value, DW_FORM_sdata}, + }, + }, + /* AUTO */ { DW_TAG_variable, DW_CHILDREN_no, []dwAttrForm{ {DW_AT_name, DW_FORM_string}, - {DW_AT_location, DW_FORM_block1}, + {DW_AT_decl_line, DW_FORM_udata}, {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_block1}, + }, + }, + + /* AUTO_LOCLIST */ + { + DW_TAG_variable, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_sec_offset}, + }, + }, + + /* AUTO_ABSTRACT */ + { + DW_TAG_variable, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + }, + }, + + /* AUTO_CONCRETE */ + { + DW_TAG_variable, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_block1}, + }, + }, + + /* AUTO_CONCRETE_LOCLIST */ + { + DW_TAG_variable, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_sec_offset}, }, }, @@ -303,10 +523,58 @@ var abbrevs = [DW_NABRV]dwAbbrev{ DW_CHILDREN_no, []dwAttrForm{ {DW_AT_name, DW_FORM_string}, + {DW_AT_variable_parameter, DW_FORM_flag}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, {DW_AT_location, DW_FORM_block1}, + }, + }, + + /* PARAM_LOCLIST */ + { + DW_TAG_formal_parameter, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, 
DW_FORM_string}, + {DW_AT_variable_parameter, DW_FORM_flag}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_sec_offset}, + }, + }, + + /* PARAM_ABSTRACT */ + { + DW_TAG_formal_parameter, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_variable_parameter, DW_FORM_flag}, + {DW_AT_decl_line, DW_FORM_udata}, {DW_AT_type, DW_FORM_ref_addr}, }, }, + + /* PARAM_CONCRETE */ + { + DW_TAG_formal_parameter, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_block1}, + }, + }, + + /* PARAM_CONCRETE_LOCLIST */ + { + DW_TAG_formal_parameter, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_sec_offset}, + }, + }, + /* LEXICAL_BLOCK_RANGES */ { DW_TAG_lexical_block, @@ -332,7 +600,7 @@ var abbrevs = [DW_NABRV]dwAbbrev{ DW_CHILDREN_no, []dwAttrForm{ {DW_AT_name, DW_FORM_string}, - {DW_AT_data_member_location, DW_FORM_block1}, + {DW_AT_data_member_location, DW_FORM_udata}, {DW_AT_type, DW_FORM_ref_addr}, {DW_AT_go_embedded_field, DW_FORM_flag}, }, @@ -421,6 +689,7 @@ var abbrevs = [DW_NABRV]dwAbbrev{ DW_CHILDREN_yes, []dwAttrForm{ {DW_AT_name, DW_FORM_string}, + {DW_AT_byte_size, DW_FORM_udata}, // {DW_AT_type, DW_FORM_ref_addr}, {DW_AT_go_kind, DW_FORM_data1}, }, @@ -682,33 +951,353 @@ func HasChildren(die *DWDie) bool { return abbrevs[die.Abbrev].children != 0 } -// PutFunc writes a DIE for a function to s. -// It also writes child DIEs for each variable in vars. 
-func PutFunc(ctxt Context, s, ranges Sym, name string, external bool, startPC Sym, size int64, scopes []Scope) error { - Uleb128put(ctxt, s, DW_ABRV_FUNCTION) - putattr(ctxt, s, DW_ABRV_FUNCTION, DW_FORM_string, DW_CLS_STRING, int64(len(name)), name) - putattr(ctxt, s, DW_ABRV_FUNCTION, DW_FORM_addr, DW_CLS_ADDRESS, 0, startPC) - putattr(ctxt, s, DW_ABRV_FUNCTION, DW_FORM_addr, DW_CLS_ADDRESS, size, startPC) - putattr(ctxt, s, DW_ABRV_FUNCTION, DW_FORM_block1, DW_CLS_BLOCK, 1, []byte{DW_OP_call_frame_cfa}) - var ev int64 - if external { - ev = 1 - } - putattr(ctxt, s, DW_ABRV_FUNCTION, DW_FORM_flag, DW_CLS_FLAG, ev, 0) - if len(scopes) > 0 { - var encbuf [20]byte - if putscope(ctxt, s, ranges, startPC, 0, scopes, encbuf[:0]) < int32(len(scopes)) { - return errors.New("multiple toplevel scopes") +// PutIntConst writes a DIE for an integer constant +func PutIntConst(ctxt Context, info, typ Sym, name string, val int64) { + Uleb128put(ctxt, info, DW_ABRV_INT_CONSTANT) + putattr(ctxt, info, DW_ABRV_INT_CONSTANT, DW_FORM_string, DW_CLS_STRING, int64(len(name)), name) + putattr(ctxt, info, DW_ABRV_INT_CONSTANT, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, typ) + putattr(ctxt, info, DW_ABRV_INT_CONSTANT, DW_FORM_sdata, DW_CLS_CONSTANT, val, nil) +} + +// PutRanges writes a range table to sym. All addresses in ranges are +// relative to some base address. If base is not nil, then they're +// relative to the start of base. If base is nil, then the caller must +// arrange a base address some other way (such as a DW_AT_low_pc +// attribute). +func PutRanges(ctxt Context, sym Sym, base Sym, ranges []Range) { + ps := ctxt.PtrSize() + // Write ranges. + // We do not emit base address entries here, even though they would reduce + // the number of relocations, because dsymutil (which is used on macOS when + // linking externally) does not support them. 
+ for _, r := range ranges { + if base == nil { + ctxt.AddInt(sym, ps, r.Start) + ctxt.AddInt(sym, ps, r.End) + } else { + ctxt.AddCURelativeAddress(sym, base, r.Start) + ctxt.AddCURelativeAddress(sym, base, r.End) } } + // Write trailer. + ctxt.AddInt(sym, ps, 0) + ctxt.AddInt(sym, ps, 0) +} - Uleb128put(ctxt, s, 0) +// Return TRUE if the inlined call in the specified slot is empty, +// meaning it has a zero-length range (no instructions), and all +// of its children are empty. +func isEmptyInlinedCall(slot int, calls *InlCalls) bool { + ic := &calls.Calls[slot] + if ic.InlIndex == -2 { + return true + } + live := false + for _, k := range ic.Children { + if !isEmptyInlinedCall(k, calls) { + live = true + } + } + if len(ic.Ranges) > 0 { + live = true + } + if !live { + ic.InlIndex = -2 + } + return !live +} + +// Slot -1: return top-level inlines +// Slot >= 0: return children of that slot +func inlChildren(slot int, calls *InlCalls) []int { + var kids []int + if slot != -1 { + for _, k := range calls.Calls[slot].Children { + if !isEmptyInlinedCall(k, calls) { + kids = append(kids, k) + } + } + } else { + for k := 0; k < len(calls.Calls); k += 1 { + if calls.Calls[k].Root && !isEmptyInlinedCall(k, calls) { + kids = append(kids, k) + } + } + } + return kids +} + +func inlinedVarTable(inlcalls *InlCalls) map[*Var]bool { + vars := make(map[*Var]bool) + for _, ic := range inlcalls.Calls { + for _, v := range ic.InlVars { + vars[v] = true + } + } + return vars +} + +// The s.Scopes slice contains variables were originally part of the +// function being emitted, as well as variables that were imported +// from various callee functions during the inlining process. This +// function prunes out any variables from the latter category (since +// they will be emitted as part of DWARF inlined_subroutine DIEs) and +// then generates scopes for vars in the former category. 
+func putPrunedScopes(ctxt Context, s *FnState, fnabbrev int) error { + if len(s.Scopes) == 0 { + return nil + } + scopes := make([]Scope, len(s.Scopes), len(s.Scopes)) + pvars := inlinedVarTable(&s.InlCalls) + for k, s := range s.Scopes { + var pruned Scope = Scope{Parent: s.Parent, Ranges: s.Ranges} + for i := 0; i < len(s.Vars); i++ { + _, found := pvars[s.Vars[i]] + if !found { + pruned.Vars = append(pruned.Vars, s.Vars[i]) + } + } + sort.Sort(byChildIndex(pruned.Vars)) + scopes[k] = pruned + } + var encbuf [20]byte + if putscope(ctxt, s, scopes, 0, fnabbrev, encbuf[:0]) < int32(len(scopes)) { + return errors.New("multiple toplevel scopes") + } return nil } -func putscope(ctxt Context, s, ranges Sym, startPC Sym, curscope int32, scopes []Scope, encbuf []byte) int32 { +// Emit DWARF attributes and child DIEs for an 'abstract' subprogram. +// The abstract subprogram DIE for a function contains its +// location-independent attributes (name, type, etc). Other instances +// of the function (any inlined copy of it, or the single out-of-line +// 'concrete' instance) will contain a pointer back to this abstract +// DIE (as a space-saving measure, so that name/type etc doesn't have +// to be repeated for each inlined copy). +func PutAbstractFunc(ctxt Context, s *FnState) error { + + if logDwarf { + ctxt.Logf("PutAbstractFunc(%v)\n", s.Absfn) + } + + abbrev := DW_ABRV_FUNCTION_ABSTRACT + Uleb128put(ctxt, s.Absfn, int64(abbrev)) + + fullname := s.Name + if strings.HasPrefix(s.Name, "\"\".") { + // Generate a fully qualified name for the function in the + // abstract case. This is so as to avoid the need for the + // linker to process the DIE with patchDWARFName(); we can't + // allow the name attribute of an abstract subprogram DIE to + // be rewritten, since it would change the offsets of the + // child DIEs (which we're relying on in order for abstract + // origin references to work). + fullname = s.Importpath + "." 
+ s.Name[3:] + } + putattr(ctxt, s.Absfn, abbrev, DW_FORM_string, DW_CLS_STRING, int64(len(fullname)), fullname) + + // DW_AT_inlined value + putattr(ctxt, s.Absfn, abbrev, DW_FORM_data1, DW_CLS_CONSTANT, int64(DW_INL_inlined), nil) + + var ev int64 + if s.External { + ev = 1 + } + putattr(ctxt, s.Absfn, abbrev, DW_FORM_flag, DW_CLS_FLAG, ev, 0) + + // Child variables (may be empty) + var flattened []*Var + + // This slice will hold the offset in bytes for each child var DIE + // with respect to the start of the parent subprogram DIE. + var offsets []int32 + + // Scopes/vars + if len(s.Scopes) > 0 { + // For abstract subprogram DIEs we want to flatten out scope info: + // lexical scope DIEs contain range and/or hi/lo PC attributes, + // which we explicitly don't want for the abstract subprogram DIE. + pvars := inlinedVarTable(&s.InlCalls) + for _, scope := range s.Scopes { + for i := 0; i < len(scope.Vars); i++ { + _, found := pvars[scope.Vars[i]] + if !found { + flattened = append(flattened, scope.Vars[i]) + } + } + } + if len(flattened) > 0 { + sort.Sort(byChildIndex(flattened)) + + // This slice will hold the offset in bytes for each child + // variable DIE with respect to the start of the parent + // subprogram DIE. + for _, v := range flattened { + offsets = append(offsets, int32(ctxt.CurrentOffset(s.Absfn))) + putAbstractVar(ctxt, s.Absfn, v) + } + } + } + ctxt.RecordChildDieOffsets(s.Absfn, flattened, offsets) + + Uleb128put(ctxt, s.Absfn, 0) + return nil +} + +// Emit DWARF attributes and child DIEs for an inlined subroutine. The +// first attribute of an inlined subroutine DIE is a reference back to +// its corresponding 'abstract' DIE (containing location-independent +// attributes such as name, type, etc). Inlined subroutine DIEs can +// have other inlined subroutine DIEs as children. 
+func PutInlinedFunc(ctxt Context, s *FnState, callersym Sym, callIdx int) error { + ic := s.InlCalls.Calls[callIdx] + callee := ic.AbsFunSym + + abbrev := DW_ABRV_INLINED_SUBROUTINE_RANGES + if len(ic.Ranges) == 1 { + abbrev = DW_ABRV_INLINED_SUBROUTINE + } + Uleb128put(ctxt, s.Info, int64(abbrev)) + + if logDwarf { + ctxt.Logf("PutInlinedFunc(caller=%v,callee=%v,abbrev=%d)\n", callersym, callee, abbrev) + } + + // Abstract origin. + putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, callee) + + if abbrev == DW_ABRV_INLINED_SUBROUTINE_RANGES { + putattr(ctxt, s.Info, abbrev, DW_FORM_sec_offset, DW_CLS_PTR, s.Ranges.Len(), s.Ranges) + PutRanges(ctxt, s.Ranges, s.StartPC, ic.Ranges) + } else { + st := ic.Ranges[0].Start + en := ic.Ranges[0].End + putattr(ctxt, s.Info, abbrev, DW_FORM_addr, DW_CLS_ADDRESS, st, s.StartPC) + putattr(ctxt, s.Info, abbrev, DW_FORM_addr, DW_CLS_ADDRESS, en, s.StartPC) + } + + // Emit call file, line attrs. + ctxt.AddFileRef(s.Info, ic.CallFile) + putattr(ctxt, s.Info, abbrev, DW_FORM_udata, DW_CLS_CONSTANT, int64(ic.CallLine), nil) + + // Variables associated with this inlined routine instance. + vars := ic.InlVars + sort.Sort(byChildIndex(vars)) + inlIndex := ic.InlIndex + var encbuf [20]byte + for _, v := range vars { + putvar(ctxt, s, v, callee, abbrev, inlIndex, encbuf[:0]) + } + + // Children of this inline. + for _, sib := range inlChildren(callIdx, &s.InlCalls) { + absfn := s.InlCalls.Calls[sib].AbsFunSym + err := PutInlinedFunc(ctxt, s, absfn, sib) + if err != nil { + return err + } + } + + Uleb128put(ctxt, s.Info, 0) + return nil +} + +// Emit DWARF attributes and child DIEs for a 'concrete' subprogram, +// meaning the out-of-line copy of a function that was inlined at some +// point during the compilation of its containing package. 
The first +// attribute for a concrete DIE is a reference to the 'abstract' DIE +// for the function (which holds location-independent attributes such +// as name, type), then the remainder of the attributes are specific +// to this instance (location, frame base, etc). +func PutConcreteFunc(ctxt Context, s *FnState) error { + if logDwarf { + ctxt.Logf("PutConcreteFunc(%v)\n", s.Info) + } + abbrev := DW_ABRV_FUNCTION_CONCRETE + Uleb128put(ctxt, s.Info, int64(abbrev)) + + // Abstract origin. + putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, s.Absfn) + + // Start/end PC. + putattr(ctxt, s.Info, abbrev, DW_FORM_addr, DW_CLS_ADDRESS, 0, s.StartPC) + putattr(ctxt, s.Info, abbrev, DW_FORM_addr, DW_CLS_ADDRESS, s.Size, s.StartPC) + + // cfa / frame base + putattr(ctxt, s.Info, abbrev, DW_FORM_block1, DW_CLS_BLOCK, 1, []byte{DW_OP_call_frame_cfa}) + + // Scopes + if err := putPrunedScopes(ctxt, s, abbrev); err != nil { + return err + } + + // Inlined subroutines. + for _, sib := range inlChildren(-1, &s.InlCalls) { + absfn := s.InlCalls.Calls[sib].AbsFunSym + err := PutInlinedFunc(ctxt, s, absfn, sib) + if err != nil { + return err + } + } + + Uleb128put(ctxt, s.Info, 0) + return nil +} + +// Emit DWARF attributes and child DIEs for a subprogram. Here +// 'default' implies that the function in question was not inlined +// when its containing package was compiled (hence there is no need to +// emit an abstract version for it to use as a base for inlined +// routine records). 
+func PutDefaultFunc(ctxt Context, s *FnState) error { + if logDwarf { + ctxt.Logf("PutDefaultFunc(%v)\n", s.Info) + } + abbrev := DW_ABRV_FUNCTION + Uleb128put(ctxt, s.Info, int64(abbrev)) + + putattr(ctxt, s.Info, DW_ABRV_FUNCTION, DW_FORM_string, DW_CLS_STRING, int64(len(s.Name)), s.Name) + putattr(ctxt, s.Info, abbrev, DW_FORM_addr, DW_CLS_ADDRESS, 0, s.StartPC) + putattr(ctxt, s.Info, abbrev, DW_FORM_addr, DW_CLS_ADDRESS, s.Size, s.StartPC) + putattr(ctxt, s.Info, abbrev, DW_FORM_block1, DW_CLS_BLOCK, 1, []byte{DW_OP_call_frame_cfa}) + ctxt.AddFileRef(s.Info, s.Filesym) + + var ev int64 + if s.External { + ev = 1 + } + putattr(ctxt, s.Info, abbrev, DW_FORM_flag, DW_CLS_FLAG, ev, 0) + + // Scopes + if err := putPrunedScopes(ctxt, s, abbrev); err != nil { + return err + } + + // Inlined subroutines. + for _, sib := range inlChildren(-1, &s.InlCalls) { + absfn := s.InlCalls.Calls[sib].AbsFunSym + err := PutInlinedFunc(ctxt, s, absfn, sib) + if err != nil { + return err + } + } + + Uleb128put(ctxt, s.Info, 0) + return nil +} + +func putscope(ctxt Context, s *FnState, scopes []Scope, curscope int32, fnabbrev int, encbuf []byte) int32 { + + if logDwarf { + ctxt.Logf("putscope(%v,%d): vars:", s.Info, curscope) + for i, v := range scopes[curscope].Vars { + ctxt.Logf(" %d:%d:%s", i, v.ChildIndex, v.Name) + } + ctxt.Logf("\n") + } + for _, v := range scopes[curscope].Vars { - putvar(ctxt, s, v, encbuf) + putvar(ctxt, s, v, s.Absfn, fnabbrev, -1, encbuf) } this := curscope curscope++ @@ -719,49 +1308,211 @@ func putscope(ctxt Context, s, ranges Sym, startPC Sym, curscope int32, scopes [ } if len(scope.Ranges) == 1 { - Uleb128put(ctxt, s, DW_ABRV_LEXICAL_BLOCK_SIMPLE) - putattr(ctxt, s, DW_ABRV_LEXICAL_BLOCK_SIMPLE, DW_FORM_addr, DW_CLS_ADDRESS, scope.Ranges[0].Start, startPC) - putattr(ctxt, s, DW_ABRV_LEXICAL_BLOCK_SIMPLE, DW_FORM_addr, DW_CLS_ADDRESS, scope.Ranges[0].End, startPC) + Uleb128put(ctxt, s.Info, DW_ABRV_LEXICAL_BLOCK_SIMPLE) + putattr(ctxt, s.Info, 
DW_ABRV_LEXICAL_BLOCK_SIMPLE, DW_FORM_addr, DW_CLS_ADDRESS, scope.Ranges[0].Start, s.StartPC) + putattr(ctxt, s.Info, DW_ABRV_LEXICAL_BLOCK_SIMPLE, DW_FORM_addr, DW_CLS_ADDRESS, scope.Ranges[0].End, s.StartPC) } else { - Uleb128put(ctxt, s, DW_ABRV_LEXICAL_BLOCK_RANGES) - putattr(ctxt, s, DW_ABRV_LEXICAL_BLOCK_RANGES, DW_FORM_sec_offset, DW_CLS_PTR, ranges.Len(), ranges) + Uleb128put(ctxt, s.Info, DW_ABRV_LEXICAL_BLOCK_RANGES) + putattr(ctxt, s.Info, DW_ABRV_LEXICAL_BLOCK_RANGES, DW_FORM_sec_offset, DW_CLS_PTR, s.Ranges.Len(), s.Ranges) - ctxt.AddAddress(ranges, nil, -1) - ctxt.AddAddress(ranges, startPC, 0) - for _, r := range scope.Ranges { - ctxt.AddAddress(ranges, nil, r.Start) - ctxt.AddAddress(ranges, nil, r.End) - } - ctxt.AddAddress(ranges, nil, 0) - ctxt.AddAddress(ranges, nil, 0) + PutRanges(ctxt, s.Ranges, s.StartPC, scope.Ranges) } - curscope = putscope(ctxt, s, ranges, startPC, curscope, scopes, encbuf) - - Uleb128put(ctxt, s, 0) + curscope = putscope(ctxt, s, scopes, curscope, fnabbrev, encbuf) + Uleb128put(ctxt, s.Info, 0) } return curscope } -func putvar(ctxt Context, s Sym, v *Var, encbuf []byte) { - n := v.Name +// Pick the correct abbrev code for variable or parameter DIE. +func determineVarAbbrev(v *Var, fnabbrev int) (int, bool, bool) { + abbrev := v.Abbrev - Uleb128put(ctxt, s, int64(v.Abbrev)) - putattr(ctxt, s, v.Abbrev, DW_FORM_string, DW_CLS_STRING, int64(len(n)), n) - loc := append(encbuf[:0], DW_OP_call_frame_cfa) - if v.Offset != 0 { - loc = append(loc, DW_OP_consts) - loc = AppendSleb128(loc, int64(v.Offset)) - loc = append(loc, DW_OP_plus) + // If the variable was entirely optimized out, don't emit a location list; + // convert to an inline abbreviation and emit an empty location. 
+ missing := false + switch { + case abbrev == DW_ABRV_AUTO_LOCLIST && len(v.LocationList) == 0: + missing = true + abbrev = DW_ABRV_AUTO + case abbrev == DW_ABRV_PARAM_LOCLIST && len(v.LocationList) == 0: + missing = true + abbrev = DW_ABRV_PARAM } - putattr(ctxt, s, v.Abbrev, DW_FORM_block1, DW_CLS_BLOCK, int64(len(loc)), loc) - putattr(ctxt, s, v.Abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, v.Type) + + concrete := true + switch fnabbrev { + case DW_ABRV_FUNCTION: + concrete = false + break + case DW_ABRV_FUNCTION_CONCRETE, DW_ABRV_INLINED_SUBROUTINE, DW_ABRV_INLINED_SUBROUTINE_RANGES: + switch abbrev { + case DW_ABRV_AUTO: + if v.IsInlFormal { + abbrev = DW_ABRV_PARAM_CONCRETE + } else { + abbrev = DW_ABRV_AUTO_CONCRETE + } + concrete = true + case DW_ABRV_AUTO_LOCLIST: + if v.IsInlFormal { + abbrev = DW_ABRV_PARAM_CONCRETE_LOCLIST + } else { + abbrev = DW_ABRV_AUTO_CONCRETE_LOCLIST + } + case DW_ABRV_PARAM: + abbrev = DW_ABRV_PARAM_CONCRETE + case DW_ABRV_PARAM_LOCLIST: + abbrev = DW_ABRV_PARAM_CONCRETE_LOCLIST + } + default: + panic("should never happen") + } + + return abbrev, missing, concrete +} + +func abbrevUsesLoclist(abbrev int) bool { + switch abbrev { + case DW_ABRV_AUTO_LOCLIST, DW_ABRV_AUTO_CONCRETE_LOCLIST, + DW_ABRV_PARAM_LOCLIST, DW_ABRV_PARAM_CONCRETE_LOCLIST: + return true + default: + return false + } +} + +// Emit DWARF attributes for a variable belonging to an 'abstract' subprogram. 
+func putAbstractVar(ctxt Context, info Sym, v *Var) { + // Remap abbrev + abbrev := v.Abbrev + switch abbrev { + case DW_ABRV_AUTO, DW_ABRV_AUTO_LOCLIST: + abbrev = DW_ABRV_AUTO_ABSTRACT + case DW_ABRV_PARAM, DW_ABRV_PARAM_LOCLIST: + abbrev = DW_ABRV_PARAM_ABSTRACT + } + + Uleb128put(ctxt, info, int64(abbrev)) + putattr(ctxt, info, abbrev, DW_FORM_string, DW_CLS_STRING, int64(len(v.Name)), v.Name) + + // Isreturn attribute if this is a param + if abbrev == DW_ABRV_PARAM_ABSTRACT { + var isReturn int64 + if v.IsReturnValue { + isReturn = 1 + } + putattr(ctxt, info, abbrev, DW_FORM_flag, DW_CLS_FLAG, isReturn, nil) + } + + // Line + putattr(ctxt, info, abbrev, DW_FORM_udata, DW_CLS_CONSTANT, int64(v.DeclLine), nil) + + // Type + putattr(ctxt, info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, v.Type) + + // Var has no children => no terminator +} + +func putvar(ctxt Context, s *FnState, v *Var, absfn Sym, fnabbrev, inlIndex int, encbuf []byte) { + // Remap abbrev according to parent DIE abbrev + abbrev, missing, concrete := determineVarAbbrev(v, fnabbrev) + + Uleb128put(ctxt, s.Info, int64(abbrev)) + + // Abstract origin for concrete / inlined case + if concrete { + // Here we are making a reference to a child DIE of an abstract + // function subprogram DIE. The child DIE has no LSym, so instead + // after the call to 'putattr' below we make a call to register + // the child DIE reference. 
+ putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, absfn) + ctxt.RecordDclReference(s.Info, absfn, int(v.ChildIndex), inlIndex) + } else { + // Var name, line for abstract and default cases + n := v.Name + putattr(ctxt, s.Info, abbrev, DW_FORM_string, DW_CLS_STRING, int64(len(n)), n) + if abbrev == DW_ABRV_PARAM || abbrev == DW_ABRV_PARAM_LOCLIST || abbrev == DW_ABRV_PARAM_ABSTRACT { + var isReturn int64 + if v.IsReturnValue { + isReturn = 1 + } + putattr(ctxt, s.Info, abbrev, DW_FORM_flag, DW_CLS_FLAG, isReturn, nil) + } + putattr(ctxt, s.Info, abbrev, DW_FORM_udata, DW_CLS_CONSTANT, int64(v.DeclLine), nil) + putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, v.Type) + } + + if abbrevUsesLoclist(abbrev) { + putattr(ctxt, s.Info, abbrev, DW_FORM_sec_offset, DW_CLS_PTR, int64(s.Loc.Len()), s.Loc) + addLocList(ctxt, s.Loc, s.StartPC, v, encbuf) + } else { + loc := encbuf[:0] + switch { + case missing: + break // no location + case v.StackOffset == 0: + loc = append(loc, DW_OP_call_frame_cfa) + default: + loc = append(loc, DW_OP_fbreg) + loc = AppendSleb128(loc, int64(v.StackOffset)) + } + putattr(ctxt, s.Info, abbrev, DW_FORM_block1, DW_CLS_BLOCK, int64(len(loc)), loc) + } + + // Var has no children => no terminator +} + +func addLocList(ctxt Context, listSym, startPC Sym, v *Var, encbuf []byte) { + // Base address entry: max ptr followed by the base address. 
+ ctxt.AddInt(listSym, ctxt.PtrSize(), ^0) + ctxt.AddAddress(listSym, startPC, 0) + for _, entry := range v.LocationList { + ctxt.AddInt(listSym, ctxt.PtrSize(), entry.StartPC) + ctxt.AddInt(listSym, ctxt.PtrSize(), entry.EndPC) + locBuf := encbuf[:0] + for _, piece := range entry.Pieces { + if !piece.Missing { + if piece.OnStack { + if piece.StackOffset == 0 { + locBuf = append(locBuf, DW_OP_call_frame_cfa) + } else { + locBuf = append(locBuf, DW_OP_fbreg) + locBuf = AppendSleb128(locBuf, int64(piece.StackOffset)) + } + } else { + if piece.RegNum < 32 { + locBuf = append(locBuf, DW_OP_reg0+byte(piece.RegNum)) + } else { + locBuf = append(locBuf, DW_OP_regx) + locBuf = AppendUleb128(locBuf, uint64(piece.RegNum)) + } + } + } + if len(entry.Pieces) > 1 { + locBuf = append(locBuf, DW_OP_piece) + locBuf = AppendUleb128(locBuf, uint64(piece.Length)) + } + } + ctxt.AddInt(listSym, 2, int64(len(locBuf))) + ctxt.AddBytes(listSym, locBuf) + } + // End list + ctxt.AddInt(listSym, ctxt.PtrSize(), 0) + ctxt.AddInt(listSym, ctxt.PtrSize(), 0) } // VarsByOffset attaches the methods of sort.Interface to []*Var, -// sorting in increasing Offset. +// sorting in increasing StackOffset. type VarsByOffset []*Var func (s VarsByOffset) Len() int { return len(s) } -func (s VarsByOffset) Less(i, j int) bool { return s[i].Offset < s[j].Offset } +func (s VarsByOffset) Less(i, j int) bool { return s[i].StackOffset < s[j].StackOffset } func (s VarsByOffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// byChildIndex implements sort.Interface for []*dwarf.Var by child index. 
+type byChildIndex []*Var + +func (s byChildIndex) Len() int { return len(s) } +func (s byChildIndex) Less(i, j int) bool { return s[i].ChildIndex < s[j].ChildIndex } +func (s byChildIndex) Swap(i, j int) { s[i], s[j] = s[j], s[i] } diff --git a/src/cmd/internal/edit/edit.go b/src/cmd/internal/edit/edit.go new file mode 100644 index 00000000000..2d470f4c8a9 --- /dev/null +++ b/src/cmd/internal/edit/edit.go @@ -0,0 +1,93 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package edit implements buffered position-based editing of byte slices. +package edit + +import ( + "fmt" + "sort" +) + +// A Buffer is a queue of edits to apply to a given byte slice. +type Buffer struct { + old []byte + q edits +} + +// An edit records a single text modification: change the bytes in [start,end) to new. +type edit struct { + start int + end int + new string +} + +// An edits is a list of edits that is sortable by start offset, breaking ties by end offset. +type edits []edit + +func (x edits) Len() int { return len(x) } +func (x edits) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x edits) Less(i, j int) bool { + if x[i].start != x[j].start { + return x[i].start < x[j].start + } + return x[i].end < x[j].end +} + +// NewBuffer returns a new buffer to accumulate changes to an initial data slice. +// The returned buffer maintains a reference to the data, so the caller must ensure +// the data is not modified until after the Buffer is done being used. 
+func NewBuffer(data []byte) *Buffer { + return &Buffer{old: data} +} + +func (b *Buffer) Insert(pos int, new string) { + if pos < 0 || pos > len(b.old) { + panic("invalid edit position") + } + b.q = append(b.q, edit{pos, pos, new}) +} + +func (b *Buffer) Delete(start, end int) { + if end < start || start < 0 || end > len(b.old) { + panic("invalid edit position") + } + b.q = append(b.q, edit{start, end, ""}) +} + +func (b *Buffer) Replace(start, end int, new string) { + if end < start || start < 0 || end > len(b.old) { + panic("invalid edit position") + } + b.q = append(b.q, edit{start, end, new}) +} + +// Bytes returns a new byte slice containing the original data +// with the queued edits applied. +func (b *Buffer) Bytes() []byte { + // Sort edits by starting position and then by ending position. + // Breaking ties by ending position allows insertions at point x + // to be applied before a replacement of the text at [x, y). + sort.Stable(b.q) + + var new []byte + offset := 0 + for i, e := range b.q { + if e.start < offset { + e0 := b.q[i-1] + panic(fmt.Sprintf("overlapping edits: [%d,%d)->%q, [%d,%d)->%q", e0.start, e0.end, e0.new, e.start, e.end, e.new)) + } + new = append(new, b.old[offset:e.start]...) + offset = e.end + new = append(new, e.new...) + } + new = append(new, b.old[offset:]...) + return new +} + +// String returns a string containing the original data +// with the queued edits applied. +func (b *Buffer) String() string { + return string(b.Bytes()) +} diff --git a/src/cmd/internal/edit/edit_test.go b/src/cmd/internal/edit/edit_test.go new file mode 100644 index 00000000000..0e0c564d987 --- /dev/null +++ b/src/cmd/internal/edit/edit_test.go @@ -0,0 +1,28 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package edit + +import "testing" + +func TestEdit(t *testing.T) { + b := NewBuffer([]byte("0123456789")) + b.Insert(8, ",7½,") + b.Replace(9, 10, "the-end") + b.Insert(10, "!") + b.Insert(4, "3.14,") + b.Insert(4, "π,") + b.Insert(4, "3.15,") + b.Replace(3, 4, "three,") + want := "012three,3.14,π,3.15,4567,7½,8the-end!" + + s := b.String() + if s != want { + t.Errorf("b.String() = %q, want %q", s, want) + } + sb := b.Bytes() + if string(sb) != want { + t.Errorf("b.Bytes() = %q, want %q", sb, want) + } +} diff --git a/src/cmd/internal/goobj/goobj_test.go b/src/cmd/internal/goobj/goobj_test.go new file mode 100644 index 00000000000..3b41589bbf1 --- /dev/null +++ b/src/cmd/internal/goobj/goobj_test.go @@ -0,0 +1,318 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package goobj + +import ( + "debug/elf" + "debug/macho" + "debug/pe" + "fmt" + "internal/testenv" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "testing" +) + +var ( + buildDir string + go1obj string + go2obj string + goarchive string + cgoarchive string +) + +func TestMain(m *testing.M) { + if !testenv.HasGoBuild() { + return + } + + if err := buildGoobj(); err != nil { + fmt.Println(err) + os.RemoveAll(buildDir) + os.Exit(1) + } + + exit := m.Run() + + os.RemoveAll(buildDir) + os.Exit(exit) +} + +func copyDir(dst, src string) error { + err := os.MkdirAll(dst, 0777) + if err != nil { + return err + } + fis, err := ioutil.ReadDir(src) + if err != nil { + return err + } + for _, fi := range fis { + err = copyFile(filepath.Join(dst, fi.Name()), filepath.Join(src, fi.Name())) + if err != nil { + return err + } + } + return nil +} + +func copyFile(dst, src string) (err error) { + var s, d *os.File + s, err = os.Open(src) + if err != nil { + return err + } + defer s.Close() + d, err = os.Create(dst) + if err != nil { + return err + } + defer func() { + e := d.Close() 
+ if err == nil { + err = e + } + }() + _, err = io.Copy(d, s) + if err != nil { + return err + } + return nil +} + +func buildGoobj() error { + var err error + + buildDir, err = ioutil.TempDir("", "TestGoobj") + if err != nil { + return err + } + + go1obj = filepath.Join(buildDir, "go1.o") + go2obj = filepath.Join(buildDir, "go2.o") + goarchive = filepath.Join(buildDir, "go.a") + + gotool, err := testenv.GoTool() + if err != nil { + return err + } + + go1src := filepath.Join("testdata", "go1.go") + go2src := filepath.Join("testdata", "go2.go") + + out, err := exec.Command(gotool, "tool", "compile", "-o", go1obj, go1src).CombinedOutput() + if err != nil { + return fmt.Errorf("go tool compile -o %s %s: %v\n%s", go1obj, go1src, err, out) + } + out, err = exec.Command(gotool, "tool", "compile", "-o", go2obj, go2src).CombinedOutput() + if err != nil { + return fmt.Errorf("go tool compile -o %s %s: %v\n%s", go2obj, go2src, err, out) + } + out, err = exec.Command(gotool, "tool", "pack", "c", goarchive, go1obj, go2obj).CombinedOutput() + if err != nil { + return fmt.Errorf("go tool pack c %s %s %s: %v\n%s", goarchive, go1obj, go2obj, err, out) + } + + if testenv.HasCGO() { + gopath := filepath.Join(buildDir, "gopath") + err = copyDir(filepath.Join(gopath, "src", "mycgo"), filepath.Join("testdata", "mycgo")) + if err != nil { + return err + } + cmd := exec.Command(gotool, "install", "-gcflags=all="+os.Getenv("GO_GCFLAGS"), "mycgo") + cmd.Env = append(os.Environ(), "GOPATH="+gopath) + out, err = cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("go install mycgo: %v\n%s", err, out) + } + pat := filepath.Join(gopath, "pkg", "*", "mycgo.a") + ms, err := filepath.Glob(pat) + if err != nil { + return err + } + if len(ms) == 0 { + return fmt.Errorf("cannot found paths for pattern %s", pat) + } + cgoarchive = ms[0] + } + + return nil +} + +func TestParseGoobj(t *testing.T) { + path := go1obj + + f, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + defer 
f.Close() + + p, err := Parse(f, "mypkg") + if err != nil { + t.Fatal(err) + } + if p.Arch != runtime.GOARCH { + t.Errorf("%s: got %v, want %v", path, p.Arch, runtime.GOARCH) + } + var found bool + for _, s := range p.Syms { + if s.Name == "mypkg.go1" { + found = true + break + } + } + if !found { + t.Errorf(`%s: symbol "mypkg.go1" not found`, path) + } +} + +func TestParseArchive(t *testing.T) { + path := goarchive + + f, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + p, err := Parse(f, "mypkg") + if err != nil { + t.Fatal(err) + } + if p.Arch != runtime.GOARCH { + t.Errorf("%s: got %v, want %v", path, p.Arch, runtime.GOARCH) + } + var found1 bool + var found2 bool + for _, s := range p.Syms { + if s.Name == "mypkg.go1" { + found1 = true + } + if s.Name == "mypkg.go2" { + found2 = true + } + } + if !found1 { + t.Errorf(`%s: symbol "mypkg.go1" not found`, path) + } + if !found2 { + t.Errorf(`%s: symbol "mypkg.go2" not found`, path) + } +} + +func TestParseCGOArchive(t *testing.T) { + testenv.MustHaveCGO(t) + + path := cgoarchive + + f, err := os.Open(path) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + p, err := Parse(f, "mycgo") + if err != nil { + t.Fatal(err) + } + if p.Arch != runtime.GOARCH { + t.Errorf("%s: got %v, want %v", path, p.Arch, runtime.GOARCH) + } + var found1 bool + var found2 bool + for _, s := range p.Syms { + if s.Name == "mycgo.go1" { + found1 = true + } + if s.Name == "mycgo.go2" { + found2 = true + } + } + if !found1 { + t.Errorf(`%s: symbol "mycgo.go1" not found`, path) + } + if !found2 { + t.Errorf(`%s: symbol "mycgo.go2" not found`, path) + } + + c1 := "c1" + c2 := "c2" + + found1 = false + found2 = false + + switch runtime.GOOS { + case "darwin": + c1 = "_" + c1 + c2 = "_" + c2 + for _, obj := range p.Native { + mf, err := macho.NewFile(obj) + if err != nil { + t.Fatal(err) + } + if mf.Symtab == nil { + continue + } + for _, s := range mf.Symtab.Syms { + switch s.Name { + case c1: + found1 = 
true + case c2: + found2 = true + } + } + } + case "windows": + if runtime.GOARCH == "386" { + c1 = "_" + c1 + c2 = "_" + c2 + } + for _, obj := range p.Native { + pf, err := pe.NewFile(obj) + if err != nil { + t.Fatal(err) + } + for _, s := range pf.Symbols { + switch s.Name { + case c1: + found1 = true + case c2: + found2 = true + } + } + } + default: + for _, obj := range p.Native { + ef, err := elf.NewFile(obj) + if err != nil { + t.Fatal(err) + } + syms, err := ef.Symbols() + if err != nil { + t.Fatal(err) + } + for _, s := range syms { + switch s.Name { + case c1: + found1 = true + case c2: + found2 = true + } + } + } + } + + if !found1 { + t.Errorf(`%s: symbol %q not found`, path, c1) + } + if !found2 { + t.Errorf(`%s: symbol %q not found`, path, c2) + } +} diff --git a/src/cmd/internal/goobj/read.go b/src/cmd/internal/goobj/read.go index b6c90d3bd7b..ebdc37575f2 100644 --- a/src/cmd/internal/goobj/read.go +++ b/src/cmd/internal/goobj/read.go @@ -6,7 +6,6 @@ // // TODO(rsc): Decide where this package should live. (golang.org/issue/6932) // TODO(rsc): Decide the appropriate integer types for various fields. -// TODO(rsc): Write tests. (File format still up in the air a little.) package goobj import ( @@ -16,6 +15,7 @@ import ( "errors" "fmt" "io" + "os" "strconv" "strings" ) @@ -25,7 +25,7 @@ type Sym struct { SymID // symbol identifier (name and version) Kind objabi.SymKind // kind of symbol DupOK bool // are duplicate definitions okay? 
- Size int // size of corresponding data + Size int64 // size of corresponding data Type SymID // symbol for Go type information Data Data // memory image of symbol Reloc []Reloc // relocations to apply to Data @@ -43,7 +43,7 @@ type SymID struct { // declarations in C) have a non-zero version distinguishing // a symbol in one file from a symbol of the same name // in another file - Version int + Version int64 } func (s SymID) String() string { @@ -67,10 +67,10 @@ type Reloc struct { // The bytes at [Offset, Offset+Size) within the containing Sym // should be updated to refer to the address Add bytes after the start // of the symbol Sym. - Offset int - Size int + Offset int64 + Size int64 Sym SymID - Add int + Add int64 // The Type records the form of address expected in the bytes // described by the previous fields: absolute, PC-relative, and so on. @@ -85,16 +85,16 @@ type Var struct { // identifies a variable in a function stack frame. // Using fewer of these - in particular, using only Name - does not. Name string // Name of variable. - Kind int // TODO(rsc): Define meaning. - Offset int // Frame offset. TODO(rsc): Define meaning. + Kind int64 // TODO(rsc): Define meaning. + Offset int64 // Frame offset. TODO(rsc): Define meaning. Type SymID // Go type for variable. } // Func contains additional per-symbol information specific to functions. type Func struct { - Args int // size in bytes of argument frame: inputs and outputs - Frame int // size in bytes of local variable frame + Args int64 // size in bytes of argument frame: inputs and outputs + Frame int64 // size in bytes of local variable frame Leaf bool // function omits save of link register (ARM) NoSplit bool // function omits stack split prologue Var []Var // detail about local variables @@ -119,20 +119,26 @@ type FuncData struct { // An InlinedCall is a node in an InlTree. // See cmd/internal/obj.InlTree for details. 
type InlinedCall struct { - Parent int + Parent int64 File string - Line int + Line int64 Func SymID } // A Package is a parsed Go object file or archive defining a Go package. type Package struct { - ImportPath string // import path denoting this package - Imports []string // packages imported by this package - SymRefs []SymID // list of symbol names and versions referred to by this pack - Syms []*Sym // symbols defined by this package - MaxVersion int // maximum Version in any SymID in Syms - Arch string // architecture + ImportPath string // import path denoting this package + Imports []string // packages imported by this package + SymRefs []SymID // list of symbol names and versions referred to by this pack + Syms []*Sym // symbols defined by this package + MaxVersion int64 // maximum Version in any SymID in Syms + Arch string // architecture + Native []*NativeReader // native object data (e.g. ELF) +} + +type NativeReader struct { + Name string + io.ReaderAt } var ( @@ -150,7 +156,7 @@ var ( type objReader struct { p *Package b *bufio.Reader - f io.ReadSeeker + f *os.File err error offset int64 dataOffset int64 @@ -160,7 +166,7 @@ type objReader struct { } // init initializes r to read package p from f. -func (r *objReader) init(f io.ReadSeeker, p *Package) { +func (r *objReader) init(f *os.File, p *Package) { r.f = f r.p = p r.offset, _ = f.Seek(0, io.SeekCurrent) @@ -185,6 +191,24 @@ func (r *objReader) error(err error) error { return r.err } +// peek returns the next n bytes without advancing the reader. +func (r *objReader) peek(n int) ([]byte, error) { + if r.err != nil { + return nil, r.err + } + if r.offset >= r.limit { + r.error(io.ErrUnexpectedEOF) + return nil, r.err + } + b, err := r.b.Peek(n) + if err != nil { + if err != bufio.ErrBufferFull { + r.error(err) + } + } + return b, err +} + // readByte reads and returns a byte from the input file. // On I/O error or EOF, it records the error but returns byte 0. 
// A sequence of 0 bytes will eventually terminate any @@ -231,7 +255,7 @@ func (r *objReader) readFull(b []byte) error { } // readInt reads a zigzag varint from the input file. -func (r *objReader) readInt() int { +func (r *objReader) readInt() int64 { var u uint64 for shift := uint(0); ; shift += 7 { @@ -246,12 +270,7 @@ func (r *objReader) readInt() int { } } - v := int64(u>>1) ^ (int64(u) << 63 >> 63) - if int64(int(v)) != v { - r.error(errCorruptObject) // TODO - return 0 - } - return int(v) + return int64(u>>1) ^ (int64(u) << 63 >> 63) } // readString reads a length-delimited string from the input file. @@ -289,8 +308,8 @@ func (r *objReader) readRef() { // readData reads a data reference from the input file. func (r *objReader) readData() Data { n := r.readInt() - d := Data{Offset: r.dataOffset, Size: int64(n)} - r.dataOffset += int64(n) + d := Data{Offset: r.dataOffset, Size: n} + r.dataOffset += n return d } @@ -322,9 +341,9 @@ func (r *objReader) skip(n int64) { } } -// Parse parses an object file or archive from r, +// Parse parses an object file or archive from f, // assuming that its import path is pkgpath. -func Parse(r io.ReadSeeker, pkgpath string) (*Package, error) { +func Parse(f *os.File, pkgpath string) (*Package, error) { if pkgpath == "" { pkgpath = `""` } @@ -332,7 +351,7 @@ func Parse(r io.ReadSeeker, pkgpath string) (*Package, error) { p.ImportPath = pkgpath var rd objReader - rd.init(r, p) + rd.init(f, p) err := rd.readFull(rd.tmp[:8]) if err != nil { if err == io.EOF { @@ -365,9 +384,6 @@ func trimSpace(b []byte) string { } // parseArchive parses a Unix archive of Go object files. -// TODO(rsc): Need to skip non-Go object files. -// TODO(rsc): Maybe record table of contents in r.p so that -// linker can avoid having code to parse archives too. 
func (r *objReader) parseArchive() error { for r.offset < r.limit { if err := r.readFull(r.tmp[:60]); err != nil { @@ -413,9 +429,22 @@ func (r *objReader) parseArchive() error { default: oldLimit := r.limit r.limit = r.offset + size - if err := r.parseObject(nil); err != nil { - return fmt.Errorf("parsing archive member %q: %v", name, err) + + p, err := r.peek(8) + if err != nil { + return err } + if bytes.Equal(p, goobjHeader) { + if err := r.parseObject(nil); err != nil { + return fmt.Errorf("parsing archive member %q: %v", name, err) + } + } else { + r.p.Native = append(r.p.Native, &NativeReader{ + Name: name, + ReaderAt: io.NewSectionReader(r.f, r.offset, size), + }) + } + r.skip(r.limit - r.offset) r.limit = oldLimit } @@ -496,7 +525,7 @@ func (r *objReader) parseObject(prefix []byte) error { r.readInt() // n files - ignore r.dataOffset = r.offset - r.skip(int64(dataLength)) + r.skip(dataLength) // Symbols. for { @@ -581,7 +610,7 @@ func (r *objReader) parseObject(prefix []byte) error { } func (r *Reloc) String(insnOffset uint64) string { - delta := r.Offset - int(insnOffset) + delta := r.Offset - int64(insnOffset) s := fmt.Sprintf("[%d:%d]%s", delta, delta+r.Size, r.Type) if r.Sym.Name != "" { if r.Add != 0 { diff --git a/src/cmd/internal/goobj/testdata/go1.go b/src/cmd/internal/goobj/testdata/go1.go new file mode 100644 index 00000000000..37d1ec19bbc --- /dev/null +++ b/src/cmd/internal/goobj/testdata/go1.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mypkg + +import "fmt" + +func go1() { + fmt.Println("go1") +} diff --git a/src/cmd/internal/goobj/testdata/go2.go b/src/cmd/internal/goobj/testdata/go2.go new file mode 100644 index 00000000000..0e9c0d7338c --- /dev/null +++ b/src/cmd/internal/goobj/testdata/go2.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mypkg + +import "fmt" + +func go2() { + fmt.Println("go2") +} diff --git a/src/cmd/internal/goobj/testdata/mycgo/c1.c b/src/cmd/internal/goobj/testdata/mycgo/c1.c new file mode 100644 index 00000000000..869a324a8bc --- /dev/null +++ b/src/cmd/internal/goobj/testdata/mycgo/c1.c @@ -0,0 +1,9 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include + +void c1(void) { + puts("c1"); +} diff --git a/src/cmd/internal/goobj/testdata/mycgo/c2.c b/src/cmd/internal/goobj/testdata/mycgo/c2.c new file mode 100644 index 00000000000..1cf904fb6f5 --- /dev/null +++ b/src/cmd/internal/goobj/testdata/mycgo/c2.c @@ -0,0 +1,9 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include + +void c2(void) { + puts("c2"); +} diff --git a/src/cmd/internal/goobj/testdata/mycgo/go.go b/src/cmd/internal/goobj/testdata/mycgo/go.go new file mode 100644 index 00000000000..7b74f9138a6 --- /dev/null +++ b/src/cmd/internal/goobj/testdata/mycgo/go.go @@ -0,0 +1,5 @@ +package mycgo + +// void c1(void); +// void c2(void); +import "C" diff --git a/src/cmd/internal/goobj/testdata/mycgo/go1.go b/src/cmd/internal/goobj/testdata/mycgo/go1.go new file mode 100644 index 00000000000..eb3924cc4c8 --- /dev/null +++ b/src/cmd/internal/goobj/testdata/mycgo/go1.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mycgo + +import "fmt" + +func go1() { + fmt.Println("go1") +} diff --git a/src/cmd/internal/goobj/testdata/mycgo/go2.go b/src/cmd/internal/goobj/testdata/mycgo/go2.go new file mode 100644 index 00000000000..ea3e26fa913 --- /dev/null +++ b/src/cmd/internal/goobj/testdata/mycgo/go2.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mycgo + +import "fmt" + +func go2() { + fmt.Println("go2") +} diff --git a/src/cmd/internal/obj/arm/a.out.go b/src/cmd/internal/obj/arm/a.out.go index 6ea7d4be3b8..d4d95102300 100644 --- a/src/cmd/internal/obj/arm/a.out.go +++ b/src/cmd/internal/obj/arm/a.out.go @@ -116,7 +116,8 @@ const ( C_REGREG C_REGREG2 C_REGLIST - C_SHIFT + C_SHIFT /* register shift R>>x */ + C_SHIFTADDR /* memory address with shifted offset R>>x(R) */ C_FREG C_PSR C_FCR @@ -230,6 +231,24 @@ const ( ASUBD AMULF AMULD + ANMULF + ANMULD + AMULAF + AMULAD + ANMULAF + ANMULAD + AMULSF + AMULSD + ANMULSF + ANMULSD + AFMULAF + AFMULAD + AFNMULAF + AFNMULAD + AFMULSF + AFMULSD + AFNMULSF + AFNMULSD ADIVF ADIVD ASQRTF @@ -294,6 +313,16 @@ const ( AREVSH ARBIT + AXTAB + AXTAH + AXTABU + AXTAHU + + ABFX + ABFXU + ABFC + ABFI + AMULWT AMULWB AMULBB diff --git a/src/cmd/internal/obj/arm/anames.go b/src/cmd/internal/obj/arm/anames.go index 63cc5da393a..cb60eba8245 100644 --- a/src/cmd/internal/obj/arm/anames.go +++ b/src/cmd/internal/obj/arm/anames.go @@ -53,6 +53,24 @@ var Anames = []string{ "SUBD", "MULF", "MULD", + "NMULF", + "NMULD", + "MULAF", + "MULAD", + "NMULAF", + "NMULAD", + "MULSF", + "MULSD", + "NMULSF", + "NMULSD", + "FMULAF", + "FMULAD", + "FNMULAF", + "FNMULAD", + "FMULSF", + "FMULSD", + "FNMULSF", + "FNMULSD", "DIVF", "DIVD", "SQRTF", @@ -107,6 +125,14 @@ var Anames = []string{ "REV16", "REVSH", "RBIT", + "XTAB", + "XTAH", + "XTABU", + "XTAHU", + "BFX", + "BFXU", + "BFC", + "BFI", "MULWT", "MULWB", "MULBB", diff 
--git a/src/cmd/internal/obj/arm/anames5.go b/src/cmd/internal/obj/arm/anames5.go index bb98d3b0814..f2743b91d60 100644 --- a/src/cmd/internal/obj/arm/anames5.go +++ b/src/cmd/internal/obj/arm/anames5.go @@ -11,6 +11,7 @@ var cnames5 = []string{ "REGREG2", "REGLIST", "SHIFT", + "SHIFTADDR", "FREG", "PSR", "FCR", diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go index 8abf732b2ca..0439954fe9e 100644 --- a/src/cmd/internal/obj/arm/asm5.go +++ b/src/cmd/internal/obj/arm/asm5.go @@ -69,6 +69,7 @@ type Optab struct { param int16 flag int8 pcrelsiz uint8 + scond uint8 // optional flags accepted by the instruction } type Opcross [32][2][32]uint8 @@ -82,236 +83,252 @@ const ( var optab = []Optab{ /* struct Optab: - OPCODE, from, prog->reg, to, type,size,param,flag */ - {obj.ATEXT, C_ADDR, C_NONE, C_TEXTSIZE, 0, 0, 0, 0, 0}, - {AADD, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0}, - {AADD, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, - {AAND, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0}, - {AAND, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, - {AORR, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0}, - {AORR, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, - {AMOVW, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, - {AMVN, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, - {ACMP, C_REG, C_REG, C_NONE, 1, 4, 0, 0, 0}, - {AADD, C_RCON, C_REG, C_REG, 2, 4, 0, 0, 0}, - {AADD, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0}, - {AAND, C_RCON, C_REG, C_REG, 2, 4, 0, 0, 0}, - {AAND, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0}, - {AORR, C_RCON, C_REG, C_REG, 2, 4, 0, 0, 0}, - {AORR, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0}, - {AMOVW, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0}, - {AMVN, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0}, - {ACMP, C_RCON, C_REG, C_NONE, 2, 4, 0, 0, 0}, - {AADD, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0}, - {AADD, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0}, - {AAND, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0}, - {AAND, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0}, - {AORR, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0}, - {AORR, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 
0}, - {AMVN, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0}, - {ACMP, C_SHIFT, C_REG, C_NONE, 3, 4, 0, 0, 0}, - {AMOVW, C_RACON, C_NONE, C_REG, 4, 4, REGSP, 0, 0}, - {AB, C_NONE, C_NONE, C_SBRA, 5, 4, 0, LPOOL, 0}, - {ABL, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, - {ABX, C_NONE, C_NONE, C_SBRA, 74, 20, 0, 0, 0}, - {ABEQ, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, - {ABEQ, C_RCON, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // prediction hinted form, hint ignored - - {AB, C_NONE, C_NONE, C_ROREG, 6, 4, 0, LPOOL, 0}, - {ABL, C_NONE, C_NONE, C_ROREG, 7, 4, 0, 0, 0}, - {ABL, C_REG, C_NONE, C_ROREG, 7, 4, 0, 0, 0}, - {ABX, C_NONE, C_NONE, C_ROREG, 75, 12, 0, 0, 0}, - {ABXRET, C_NONE, C_NONE, C_ROREG, 76, 4, 0, 0, 0}, - {ASLL, C_RCON, C_REG, C_REG, 8, 4, 0, 0, 0}, - {ASLL, C_RCON, C_NONE, C_REG, 8, 4, 0, 0, 0}, - {ASLL, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0}, - {ASLL, C_REG, C_REG, C_REG, 9, 4, 0, 0, 0}, - {ASWI, C_NONE, C_NONE, C_NONE, 10, 4, 0, 0, 0}, - {ASWI, C_NONE, C_NONE, C_LCON, 10, 4, 0, 0, 0}, - {AWORD, C_NONE, C_NONE, C_LCON, 11, 4, 0, 0, 0}, - {AWORD, C_NONE, C_NONE, C_LCONADDR, 11, 4, 0, 0, 0}, - {AWORD, C_NONE, C_NONE, C_ADDR, 11, 4, 0, 0, 0}, - {AWORD, C_NONE, C_NONE, C_TLS_LE, 103, 4, 0, 0, 0}, - {AWORD, C_NONE, C_NONE, C_TLS_IE, 104, 4, 0, 0, 0}, - {AMOVW, C_NCON, C_NONE, C_REG, 12, 4, 0, 0, 0}, - {AMOVW, C_SCON, C_NONE, C_REG, 12, 4, 0, 0, 0}, - {AMOVW, C_LCON, C_NONE, C_REG, 12, 4, 0, LFROM, 0}, - {AMOVW, C_LCONADDR, C_NONE, C_REG, 12, 4, 0, LFROM | LPCREL, 4}, - {AADD, C_NCON, C_REG, C_REG, 13, 8, 0, 0, 0}, - {AADD, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0}, - {AAND, C_NCON, C_REG, C_REG, 13, 8, 0, 0, 0}, - {AAND, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0}, - {AORR, C_NCON, C_REG, C_REG, 13, 8, 0, 0, 0}, - {AORR, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0}, - {AMVN, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0}, - {ACMP, C_NCON, C_REG, C_NONE, 13, 8, 0, 0, 0}, - {AADD, C_SCON, C_REG, C_REG, 13, 8, 0, 0, 0}, - {AADD, C_SCON, C_NONE, C_REG, 13, 8, 0, 0, 0}, - {AAND, C_SCON, C_REG, C_REG, 13, 8, 0, 
0, 0}, - {AAND, C_SCON, C_NONE, C_REG, 13, 8, 0, 0, 0}, - {AORR, C_SCON, C_REG, C_REG, 13, 8, 0, 0, 0}, - {AORR, C_SCON, C_NONE, C_REG, 13, 8, 0, 0, 0}, - {AMVN, C_SCON, C_NONE, C_REG, 13, 8, 0, 0, 0}, - {ACMP, C_SCON, C_REG, C_NONE, 13, 8, 0, 0, 0}, - {AADD, C_RCON2A, C_REG, C_REG, 106, 8, 0, 0, 0}, - {AADD, C_RCON2A, C_NONE, C_REG, 106, 8, 0, 0, 0}, - {AORR, C_RCON2A, C_REG, C_REG, 106, 8, 0, 0, 0}, - {AORR, C_RCON2A, C_NONE, C_REG, 106, 8, 0, 0, 0}, - {AADD, C_RCON2S, C_REG, C_REG, 107, 8, 0, 0, 0}, - {AADD, C_RCON2S, C_NONE, C_REG, 107, 8, 0, 0, 0}, - {AADD, C_LCON, C_REG, C_REG, 13, 8, 0, LFROM, 0}, - {AADD, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0}, - {AAND, C_LCON, C_REG, C_REG, 13, 8, 0, LFROM, 0}, - {AAND, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0}, - {AORR, C_LCON, C_REG, C_REG, 13, 8, 0, LFROM, 0}, - {AORR, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0}, - {AMVN, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0}, - {ACMP, C_LCON, C_REG, C_NONE, 13, 8, 0, LFROM, 0}, - {AMOVB, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, - {AMOVBS, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0}, - {AMOVBU, C_REG, C_NONE, C_REG, 58, 4, 0, 0, 0}, - {AMOVH, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0}, - {AMOVHS, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0}, - {AMOVHU, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0}, - {AMUL, C_REG, C_REG, C_REG, 15, 4, 0, 0, 0}, - {AMUL, C_REG, C_NONE, C_REG, 15, 4, 0, 0, 0}, - {ADIV, C_REG, C_REG, C_REG, 16, 4, 0, 0, 0}, - {ADIV, C_REG, C_NONE, C_REG, 16, 4, 0, 0, 0}, - {ADIVHW, C_REG, C_REG, C_REG, 105, 4, 0, 0, 0}, - {ADIVHW, C_REG, C_NONE, C_REG, 105, 4, 0, 0, 0}, - {AMULL, C_REG, C_REG, C_REGREG, 17, 4, 0, 0, 0}, - {AMOVW, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0}, - {AMOVW, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0}, - {AMOVB, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0}, - {AMOVB, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0}, - {AMOVBS, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0}, - {AMOVBS, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0}, - {AMOVBU, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0}, - 
{AMOVBU, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0}, - {AMOVW, C_SAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, - {AMOVW, C_SOREG, C_NONE, C_REG, 21, 4, 0, 0, 0}, - {AMOVBU, C_SAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, - {AMOVBU, C_SOREG, C_NONE, C_REG, 21, 4, 0, 0, 0}, - {AMOVW, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0}, - {AMOVW, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0}, - {AMOVW, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4}, - {AMOVB, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0}, - {AMOVB, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0}, - {AMOVB, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4}, - {AMOVBS, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0}, - {AMOVBS, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0}, - {AMOVBS, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4}, - {AMOVBU, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0}, - {AMOVBU, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0}, - {AMOVBU, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4}, - {AMOVW, C_TLS_LE, C_NONE, C_REG, 101, 4, 0, LFROM, 0}, - {AMOVW, C_TLS_IE, C_NONE, C_REG, 102, 8, 0, LFROM, 0}, - {AMOVW, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0}, - {AMOVW, C_LOREG, C_NONE, C_REG, 31, 8, 0, LFROM, 0}, - {AMOVW, C_ADDR, C_NONE, C_REG, 65, 8, 0, LFROM | LPCREL, 4}, - {AMOVBU, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0}, - {AMOVBU, C_LOREG, C_NONE, C_REG, 31, 8, 0, LFROM, 0}, - {AMOVBU, C_ADDR, C_NONE, C_REG, 65, 8, 0, LFROM | LPCREL, 4}, - {AMOVW, C_LACON, C_NONE, C_REG, 34, 8, REGSP, LFROM, 0}, - {AMOVW, C_PSR, C_NONE, C_REG, 35, 4, 0, 0, 0}, - {AMOVW, C_REG, C_NONE, C_PSR, 36, 4, 0, 0, 0}, - {AMOVW, C_RCON, C_NONE, C_PSR, 37, 4, 0, 0, 0}, - {AMOVM, C_REGLIST, C_NONE, C_SOREG, 38, 4, 0, 0, 0}, - {AMOVM, C_SOREG, C_NONE, C_REGLIST, 39, 4, 0, 0, 0}, - {ASWPW, C_SOREG, C_REG, C_REG, 40, 4, 0, 0, 0}, - {ARFE, C_NONE, C_NONE, C_NONE, 41, 4, 0, 0, 0}, - {AMOVF, C_FREG, C_NONE, C_FAUTO, 50, 4, REGSP, 0, 0}, - {AMOVF, C_FREG, C_NONE, C_FOREG, 50, 4, 0, 0, 0}, - {AMOVF, C_FAUTO, C_NONE, C_FREG, 51, 4, REGSP, 
0, 0}, - {AMOVF, C_FOREG, C_NONE, C_FREG, 51, 4, 0, 0, 0}, - {AMOVF, C_FREG, C_NONE, C_LAUTO, 52, 12, REGSP, LTO, 0}, - {AMOVF, C_FREG, C_NONE, C_LOREG, 52, 12, 0, LTO, 0}, - {AMOVF, C_LAUTO, C_NONE, C_FREG, 53, 12, REGSP, LFROM, 0}, - {AMOVF, C_LOREG, C_NONE, C_FREG, 53, 12, 0, LFROM, 0}, - {AMOVF, C_FREG, C_NONE, C_ADDR, 68, 8, 0, LTO | LPCREL, 4}, - {AMOVF, C_ADDR, C_NONE, C_FREG, 69, 8, 0, LFROM | LPCREL, 4}, - {AADDF, C_FREG, C_NONE, C_FREG, 54, 4, 0, 0, 0}, - {AADDF, C_FREG, C_FREG, C_FREG, 54, 4, 0, 0, 0}, - {AMOVF, C_FREG, C_NONE, C_FREG, 55, 4, 0, 0, 0}, - {ANEGF, C_FREG, C_NONE, C_FREG, 55, 4, 0, 0, 0}, - {AMOVW, C_REG, C_NONE, C_FCR, 56, 4, 0, 0, 0}, - {AMOVW, C_FCR, C_NONE, C_REG, 57, 4, 0, 0, 0}, - {AMOVW, C_SHIFT, C_NONE, C_REG, 59, 4, 0, 0, 0}, - {AMOVBU, C_SHIFT, C_NONE, C_REG, 59, 4, 0, 0, 0}, - {AMOVB, C_SHIFT, C_NONE, C_REG, 60, 4, 0, 0, 0}, - {AMOVBS, C_SHIFT, C_NONE, C_REG, 60, 4, 0, 0, 0}, - {AMOVW, C_REG, C_NONE, C_SHIFT, 61, 4, 0, 0, 0}, - {AMOVB, C_REG, C_NONE, C_SHIFT, 61, 4, 0, 0, 0}, - {AMOVBS, C_REG, C_NONE, C_SHIFT, 61, 4, 0, 0, 0}, - {AMOVBU, C_REG, C_NONE, C_SHIFT, 61, 4, 0, 0, 0}, - {AMOVH, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0}, - {AMOVH, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0}, - {AMOVHS, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0}, - {AMOVHS, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0}, - {AMOVHU, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0}, - {AMOVHU, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0}, - {AMOVB, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0}, - {AMOVB, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0}, - {AMOVBS, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0}, - {AMOVBS, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0}, - {AMOVH, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0}, - {AMOVH, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0}, - {AMOVHS, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0}, - {AMOVHS, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0}, - {AMOVHU, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0}, - {AMOVHU, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0}, - {AMOVH, 
C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0}, - {AMOVH, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0}, - {AMOVH, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4}, - {AMOVHS, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0}, - {AMOVHS, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0}, - {AMOVHS, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4}, - {AMOVHU, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0}, - {AMOVHU, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0}, - {AMOVHU, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4}, - {AMOVB, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0}, - {AMOVB, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0}, - {AMOVB, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4}, - {AMOVBS, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0}, - {AMOVBS, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0}, - {AMOVBS, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4}, - {AMOVH, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0}, - {AMOVH, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0}, - {AMOVH, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4}, - {AMOVHS, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0}, - {AMOVHS, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0}, - {AMOVHS, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4}, - {AMOVHU, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0}, - {AMOVHU, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0}, - {AMOVHU, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4}, - {ALDREX, C_SOREG, C_NONE, C_REG, 77, 4, 0, 0, 0}, - {ASTREX, C_SOREG, C_REG, C_REG, 78, 4, 0, 0, 0}, - {AMOVF, C_ZFCON, C_NONE, C_FREG, 80, 8, 0, 0, 0}, - {AMOVF, C_SFCON, C_NONE, C_FREG, 81, 4, 0, 0, 0}, - {ACMPF, C_FREG, C_FREG, C_NONE, 82, 8, 0, 0, 0}, - {ACMPF, C_FREG, C_NONE, C_NONE, 83, 8, 0, 0, 0}, - {AMOVFW, C_FREG, C_NONE, C_FREG, 84, 4, 0, 0, 0}, - {AMOVWF, C_FREG, C_NONE, C_FREG, 85, 4, 0, 0, 0}, - {AMOVFW, C_FREG, C_NONE, C_REG, 86, 8, 0, 0, 0}, - {AMOVWF, C_REG, C_NONE, C_FREG, 87, 8, 0, 0, 0}, - {AMOVW, C_REG, C_NONE, C_FREG, 88, 4, 0, 0, 0}, - {AMOVW, C_FREG, C_NONE, 
C_REG, 89, 4, 0, 0, 0}, - {ALDREXD, C_SOREG, C_NONE, C_REG, 91, 4, 0, 0, 0}, - {ASTREXD, C_SOREG, C_REG, C_REG, 92, 4, 0, 0, 0}, - {APLD, C_SOREG, C_NONE, C_NONE, 95, 4, 0, 0, 0}, - {obj.AUNDEF, C_NONE, C_NONE, C_NONE, 96, 4, 0, 0, 0}, - {ACLZ, C_REG, C_NONE, C_REG, 97, 4, 0, 0, 0}, - {AMULWT, C_REG, C_REG, C_REG, 98, 4, 0, 0, 0}, - {AMULAWT, C_REG, C_REG, C_REGREG2, 99, 4, 0, 0, 0}, - {obj.APCDATA, C_LCON, C_NONE, C_LCON, 0, 0, 0, 0, 0}, - {obj.AFUNCDATA, C_LCON, C_NONE, C_ADDR, 0, 0, 0, 0, 0}, - {obj.ANOP, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0}, - {obj.ADUFFZERO, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // same as ABL - {obj.ADUFFCOPY, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // same as ABL - - {ADATABUNDLE, C_NONE, C_NONE, C_NONE, 100, 4, 0, 0, 0}, - {ADATABUNDLEEND, C_NONE, C_NONE, C_NONE, 100, 0, 0, 0, 0}, - {obj.AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0, 0}, + OPCODE, from, prog->reg, to, type, size, param, flag, extra data size, optional suffix */ + {obj.ATEXT, C_ADDR, C_NONE, C_TEXTSIZE, 0, 0, 0, 0, 0, 0}, + {AADD, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0, C_SBIT}, + {AADD, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, C_SBIT}, + {AAND, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0, C_SBIT}, + {AAND, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, C_SBIT}, + {AORR, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0, C_SBIT}, + {AORR, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, C_SBIT}, + {AMOVW, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, C_SBIT}, + {AMVN, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, C_SBIT}, + {ACMP, C_REG, C_REG, C_NONE, 1, 4, 0, 0, 0, 0}, + {AADD, C_RCON, C_REG, C_REG, 2, 4, 0, 0, 0, C_SBIT}, + {AADD, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0, C_SBIT}, + {AAND, C_RCON, C_REG, C_REG, 2, 4, 0, 0, 0, C_SBIT}, + {AAND, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0, C_SBIT}, + {AORR, C_RCON, C_REG, C_REG, 2, 4, 0, 0, 0, C_SBIT}, + {AORR, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0, C_SBIT}, + {AMOVW, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0, 0}, + {AMVN, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0, 0}, + {ACMP, C_RCON, C_REG, C_NONE, 2, 4, 0, 0, 0, 
0}, + {AADD, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0, C_SBIT}, + {AADD, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0, C_SBIT}, + {AAND, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0, C_SBIT}, + {AAND, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0, C_SBIT}, + {AORR, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0, C_SBIT}, + {AORR, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0, C_SBIT}, + {AMVN, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0, C_SBIT}, + {ACMP, C_SHIFT, C_REG, C_NONE, 3, 4, 0, 0, 0, 0}, + {AMOVW, C_RACON, C_NONE, C_REG, 4, 4, REGSP, 0, 0, C_SBIT}, + {AB, C_NONE, C_NONE, C_SBRA, 5, 4, 0, LPOOL, 0, 0}, + {ABL, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0, 0}, + {ABX, C_NONE, C_NONE, C_SBRA, 74, 20, 0, 0, 0, 0}, + {ABEQ, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0, 0}, + {ABEQ, C_RCON, C_NONE, C_SBRA, 5, 4, 0, 0, 0, 0}, // prediction hinted form, hint ignored + {AB, C_NONE, C_NONE, C_ROREG, 6, 4, 0, LPOOL, 0, 0}, + {ABL, C_NONE, C_NONE, C_ROREG, 7, 4, 0, 0, 0, 0}, + {ABL, C_REG, C_NONE, C_ROREG, 7, 4, 0, 0, 0, 0}, + {ABX, C_NONE, C_NONE, C_ROREG, 75, 12, 0, 0, 0, 0}, + {ABXRET, C_NONE, C_NONE, C_ROREG, 76, 4, 0, 0, 0, 0}, + {ASLL, C_RCON, C_REG, C_REG, 8, 4, 0, 0, 0, C_SBIT}, + {ASLL, C_RCON, C_NONE, C_REG, 8, 4, 0, 0, 0, C_SBIT}, + {ASLL, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0, C_SBIT}, + {ASLL, C_REG, C_REG, C_REG, 9, 4, 0, 0, 0, C_SBIT}, + {ASWI, C_NONE, C_NONE, C_NONE, 10, 4, 0, 0, 0, 0}, + {ASWI, C_NONE, C_NONE, C_LCON, 10, 4, 0, 0, 0, 0}, + {AWORD, C_NONE, C_NONE, C_LCON, 11, 4, 0, 0, 0, 0}, + {AWORD, C_NONE, C_NONE, C_LCONADDR, 11, 4, 0, 0, 0, 0}, + {AWORD, C_NONE, C_NONE, C_ADDR, 11, 4, 0, 0, 0, 0}, + {AWORD, C_NONE, C_NONE, C_TLS_LE, 103, 4, 0, 0, 0, 0}, + {AWORD, C_NONE, C_NONE, C_TLS_IE, 104, 4, 0, 0, 0, 0}, + {AMOVW, C_NCON, C_NONE, C_REG, 12, 4, 0, 0, 0, 0}, + {AMOVW, C_SCON, C_NONE, C_REG, 12, 4, 0, 0, 0, 0}, + {AMOVW, C_LCON, C_NONE, C_REG, 12, 4, 0, LFROM, 0, 0}, + {AMOVW, C_LCONADDR, C_NONE, C_REG, 12, 4, 0, LFROM | LPCREL, 4, 0}, + {AMVN, C_NCON, C_NONE, C_REG, 12, 4, 0, 0, 0, 0}, + {AADD, C_NCON, C_REG, 
C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AADD, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AAND, C_NCON, C_REG, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AAND, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AORR, C_NCON, C_REG, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AORR, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {ACMP, C_NCON, C_REG, C_NONE, 13, 8, 0, 0, 0, 0}, + {AADD, C_SCON, C_REG, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AADD, C_SCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AAND, C_SCON, C_REG, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AAND, C_SCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AORR, C_SCON, C_REG, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AORR, C_SCON, C_NONE, C_REG, 13, 8, 0, 0, 0, C_SBIT}, + {AMVN, C_SCON, C_NONE, C_REG, 13, 8, 0, 0, 0, 0}, + {ACMP, C_SCON, C_REG, C_NONE, 13, 8, 0, 0, 0, 0}, + {AADD, C_RCON2A, C_REG, C_REG, 106, 8, 0, 0, 0, 0}, + {AADD, C_RCON2A, C_NONE, C_REG, 106, 8, 0, 0, 0, 0}, + {AORR, C_RCON2A, C_REG, C_REG, 106, 8, 0, 0, 0, 0}, + {AORR, C_RCON2A, C_NONE, C_REG, 106, 8, 0, 0, 0, 0}, + {AADD, C_RCON2S, C_REG, C_REG, 107, 8, 0, 0, 0, 0}, + {AADD, C_RCON2S, C_NONE, C_REG, 107, 8, 0, 0, 0, 0}, + {AADD, C_LCON, C_REG, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, + {AADD, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, + {AAND, C_LCON, C_REG, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, + {AAND, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, + {AORR, C_LCON, C_REG, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, + {AORR, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0, C_SBIT}, + {AMVN, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0, 0}, + {ACMP, C_LCON, C_REG, C_NONE, 13, 8, 0, LFROM, 0, 0}, + {AMOVB, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, 0}, + {AMOVBS, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0, 0}, + {AMOVBU, C_REG, C_NONE, C_REG, 58, 4, 0, 0, 0, 0}, + {AMOVH, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0, 0}, + {AMOVHS, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0, 0}, + {AMOVHU, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0, 0}, + {AMUL, C_REG, C_REG, C_REG, 15, 4, 0, 0, 0, C_SBIT}, + {AMUL, C_REG, 
C_NONE, C_REG, 15, 4, 0, 0, 0, C_SBIT}, + {ADIV, C_REG, C_REG, C_REG, 16, 4, 0, 0, 0, 0}, + {ADIV, C_REG, C_NONE, C_REG, 16, 4, 0, 0, 0, 0}, + {ADIVHW, C_REG, C_REG, C_REG, 105, 4, 0, 0, 0, 0}, + {ADIVHW, C_REG, C_NONE, C_REG, 105, 4, 0, 0, 0, 0}, + {AMULL, C_REG, C_REG, C_REGREG, 17, 4, 0, 0, 0, C_SBIT}, + {ABFX, C_LCON, C_REG, C_REG, 18, 4, 0, 0, 0, 0}, // width in From, LSB in From3 + {ABFX, C_LCON, C_NONE, C_REG, 18, 4, 0, 0, 0, 0}, // width in From, LSB in From3 + {AMOVW, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_SAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_SOREG, C_NONE, C_REG, 21, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_SAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_SOREG, C_NONE, C_REG, 21, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AXTAB, C_SHIFT, C_REG, C_REG, 22, 4, 0, 0, 0, 0}, + {AXTAB, C_SHIFT, C_NONE, C_REG, 22, 4, 0, 0, 0, 0}, + {AMOVW, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, C_SBIT}, + {AMOVB, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, + {AMOVBS, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, + {AMOVBU, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, + {AMOVH, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, + {AMOVHS, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, + {AMOVHU, C_SHIFT, C_NONE, C_REG, 23, 4, 0, 0, 0, 0}, + {AMOVW, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | 
C_UBIT}, + {AMOVW, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_TLS_LE, C_NONE, C_REG, 101, 4, 0, LFROM, 0, 0}, + {AMOVW, C_TLS_IE, C_NONE, C_REG, 102, 8, 0, LFROM, 0, 0}, + {AMOVW, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_LOREG, C_NONE, C_REG, 31, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_ADDR, C_NONE, C_REG, 65, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_LOREG, C_NONE, C_REG, 31, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_ADDR, C_NONE, C_REG, 65, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_LACON, C_NONE, C_REG, 34, 8, REGSP, LFROM, 0, C_SBIT}, + {AMOVW, C_PSR, C_NONE, C_REG, 35, 4, 0, 0, 0, 0}, + {AMOVW, C_REG, C_NONE, C_PSR, 36, 4, 0, 0, 0, 0}, + {AMOVW, C_RCON, C_NONE, C_PSR, 37, 4, 0, 0, 0, 0}, + {AMOVM, C_REGLIST, C_NONE, C_SOREG, 38, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVM, C_SOREG, C_NONE, C_REGLIST, 39, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {ASWPW, C_SOREG, C_REG, C_REG, 40, 4, 0, 0, 0, 0}, 
+ {ARFE, C_NONE, C_NONE, C_NONE, 41, 4, 0, 0, 0, 0}, + {AMOVF, C_FREG, C_NONE, C_FAUTO, 50, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_FREG, C_NONE, C_FOREG, 50, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_FAUTO, C_NONE, C_FREG, 51, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_FOREG, C_NONE, C_FREG, 51, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_FREG, C_NONE, C_LAUTO, 52, 12, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_FREG, C_NONE, C_LOREG, 52, 12, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_LAUTO, C_NONE, C_FREG, 53, 12, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_LOREG, C_NONE, C_FREG, 53, 12, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_FREG, C_NONE, C_ADDR, 68, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVF, C_ADDR, C_NONE, C_FREG, 69, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AADDF, C_FREG, C_NONE, C_FREG, 54, 4, 0, 0, 0, 0}, + {AADDF, C_FREG, C_FREG, C_FREG, 54, 4, 0, 0, 0, 0}, + {AMOVF, C_FREG, C_NONE, C_FREG, 55, 4, 0, 0, 0, 0}, + {ANEGF, C_FREG, C_NONE, C_FREG, 55, 4, 0, 0, 0, 0}, + {AMOVW, C_REG, C_NONE, C_FCR, 56, 4, 0, 0, 0, 0}, + {AMOVW, C_FCR, C_NONE, C_REG, 57, 4, 0, 0, 0, 0}, + {AMOVW, C_SHIFTADDR, C_NONE, C_REG, 59, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_SHIFTADDR, C_NONE, C_REG, 59, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_SHIFTADDR, C_NONE, C_REG, 60, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_SHIFTADDR, C_NONE, C_REG, 60, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_SHIFTADDR, C_NONE, C_REG, 60, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_SHIFTADDR, C_NONE, C_REG, 60, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_SHIFTADDR, C_NONE, C_REG, 60, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVW, C_REG, C_NONE, C_SHIFTADDR, 61, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_REG, C_NONE, C_SHIFTADDR, 61, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_REG, C_NONE, C_SHIFTADDR, 61, 4, 0, 0, 0, 
C_PBIT | C_WBIT | C_UBIT}, + {AMOVBU, C_REG, C_NONE, C_SHIFTADDR, 61, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_REG, C_NONE, C_SHIFTADDR, 62, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_REG, C_NONE, C_SHIFTADDR, 62, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_REG, C_NONE, C_SHIFTADDR, 62, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_REG, C_NONE, C_LOREG, 72, 
8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVB, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVBS, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVH, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHS, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0, C_PBIT | C_WBIT | C_UBIT}, + {AMOVHU, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4, C_PBIT | C_WBIT | C_UBIT}, + {ALDREX, C_SOREG, C_NONE, C_REG, 77, 4, 0, 0, 0, 0}, + {ASTREX, C_SOREG, C_REG, C_REG, 78, 4, 0, 0, 0, 0}, + {AMOVF, C_ZFCON, C_NONE, C_FREG, 80, 8, 0, 0, 0, 0}, + {AMOVF, C_SFCON, C_NONE, C_FREG, 81, 4, 0, 0, 0, 0}, + {ACMPF, C_FREG, C_FREG, C_NONE, 82, 8, 0, 0, 0, 0}, + {ACMPF, C_FREG, C_NONE, C_NONE, 83, 8, 0, 0, 0, 0}, + 
{AMOVFW, C_FREG, C_NONE, C_FREG, 84, 4, 0, 0, 0, C_UBIT}, + {AMOVWF, C_FREG, C_NONE, C_FREG, 85, 4, 0, 0, 0, C_UBIT}, + {AMOVFW, C_FREG, C_NONE, C_REG, 86, 8, 0, 0, 0, C_UBIT}, + {AMOVWF, C_REG, C_NONE, C_FREG, 87, 8, 0, 0, 0, C_UBIT}, + {AMOVW, C_REG, C_NONE, C_FREG, 88, 4, 0, 0, 0, 0}, + {AMOVW, C_FREG, C_NONE, C_REG, 89, 4, 0, 0, 0, 0}, + {ALDREXD, C_SOREG, C_NONE, C_REG, 91, 4, 0, 0, 0, 0}, + {ASTREXD, C_SOREG, C_REG, C_REG, 92, 4, 0, 0, 0, 0}, + {APLD, C_SOREG, C_NONE, C_NONE, 95, 4, 0, 0, 0, 0}, + {obj.AUNDEF, C_NONE, C_NONE, C_NONE, 96, 4, 0, 0, 0, 0}, + {ACLZ, C_REG, C_NONE, C_REG, 97, 4, 0, 0, 0, 0}, + {AMULWT, C_REG, C_REG, C_REG, 98, 4, 0, 0, 0, 0}, + {AMULA, C_REG, C_REG, C_REGREG2, 99, 4, 0, 0, 0, C_SBIT}, + {AMULAWT, C_REG, C_REG, C_REGREG2, 99, 4, 0, 0, 0, 0}, + {obj.APCDATA, C_LCON, C_NONE, C_LCON, 0, 0, 0, 0, 0, 0}, + {obj.AFUNCDATA, C_LCON, C_NONE, C_ADDR, 0, 0, 0, 0, 0, 0}, + {obj.ANOP, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0, 0}, + {obj.ADUFFZERO, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0, 0}, // same as ABL + {obj.ADUFFCOPY, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0, 0}, // same as ABL + {ADATABUNDLE, C_NONE, C_NONE, C_NONE, 100, 4, 0, 0, 0, 0}, + {ADATABUNDLEEND, C_NONE, C_NONE, C_NONE, 100, 0, 0, 0, 0, 0}, + {obj.AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0, 0, 0}, } var oprange [ALAST & obj.AMask][]Optab @@ -601,61 +618,18 @@ func (c *ctxt5) asmoutnacl(origPC int32, p *obj.Prog, o *Optab, out []uint32) in return size } -const ( - T_SBIT = 1 << 0 - T_PBIT = 1 << 1 - T_WBIT = 1 << 2 -) - -var mayHaveSuffix = map[obj.As]uint8{ - // bit logic - AAND: T_SBIT, - AEOR: T_SBIT, - AORR: T_SBIT, - ABIC: T_SBIT, - // arithmatic - ASUB: T_SBIT, - AADD: T_SBIT, - ASBC: T_SBIT, - AADC: T_SBIT, - ARSB: T_SBIT, - ARSC: T_SBIT, - // mov - AMVN: T_SBIT, - AMOVW: T_SBIT | T_PBIT | T_WBIT, - AMOVB: T_SBIT | T_PBIT | T_WBIT, - AMOVBS: T_SBIT | T_PBIT | T_WBIT, - AMOVBU: T_SBIT | T_PBIT | T_WBIT, - AMOVH: T_SBIT | T_PBIT | T_WBIT, - AMOVHS: T_SBIT | T_PBIT | T_WBIT, - AMOVHU: 
T_SBIT | T_PBIT | T_WBIT, - AMOVM: T_PBIT | T_WBIT, - // shift - ASRL: T_SBIT, - ASRA: T_SBIT, - ASLL: T_SBIT, - // mul - AMUL: T_SBIT, - AMULU: T_SBIT, - AMULL: T_SBIT, - AMULLU: T_SBIT, - // mula - AMULA: T_SBIT, - AMULAL: T_SBIT, - AMULALU: T_SBIT, - // MRC/MCR - AMRC: T_SBIT, -} - -func checkBits(ctxt *obj.Link, p *obj.Prog) { - if p.Scond&C_SBIT != 0 && mayHaveSuffix[p.As]&T_SBIT == 0 { - ctxt.Diag("invalid .S suffix: %v", p) +func checkSuffix(c *ctxt5, p *obj.Prog, o *Optab) { + if p.Scond&C_SBIT != 0 && o.scond&C_SBIT == 0 { + c.ctxt.Diag("invalid .S suffix: %v", p) } - if p.Scond&C_PBIT != 0 && mayHaveSuffix[p.As]&T_PBIT == 0 { - ctxt.Diag("invalid .P suffix: %v", p) + if p.Scond&C_PBIT != 0 && o.scond&C_PBIT == 0 { + c.ctxt.Diag("invalid .P suffix: %v", p) } - if p.Scond&C_WBIT != 0 && mayHaveSuffix[p.As]&T_WBIT == 0 { - ctxt.Diag("invalid .W suffix: %v", p) + if p.Scond&C_WBIT != 0 && o.scond&C_WBIT == 0 { + c.ctxt.Diag("invalid .W suffix: %v", p) + } + if p.Scond&C_UBIT != 0 && o.scond&C_UBIT == 0 { + c.ctxt.Diag("invalid .U suffix: %v", p) } } @@ -738,8 +712,6 @@ func span5(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { c.flushpool(p, 0, 0) } - checkBits(ctxt, p) - pc += int32(m) } @@ -1143,7 +1115,13 @@ func (c *ctxt5) aclass(a *obj.Addr) int { return C_REGLIST case obj.TYPE_SHIFT: - return C_SHIFT + if a.Reg == 0 { + // register shift R>>i + return C_SHIFT + } else { + // memory address with shifted offset R>>i(R) + return C_SHIFTADDR + } case obj.TYPE_MEM: switch a.Name { @@ -1365,19 +1343,14 @@ func (c *ctxt5) oplook(p *obj.Prog) *Optab { // check illegal base register switch a1 { - case C_SHIFT: - if p.From.Reg == 0 { // no base register - break - } - fallthrough - case C_SOREG, C_LOREG, C_HOREG, C_FOREG, C_ROREG, C_HFOREG, C_SROREG: + case C_SOREG, C_LOREG, C_HOREG, C_FOREG, C_ROREG, C_HFOREG, C_SROREG, C_SHIFTADDR: if p.From.Reg < REG_R0 || REG_R15 < p.From.Reg { c.ctxt.Diag("illegal base register: %v", p) } default: } switch a3 { - 
case C_SOREG, C_LOREG, C_HOREG, C_FOREG, C_ROREG, C_HFOREG, C_SROREG, C_SHIFT: + case C_SOREG, C_LOREG, C_HOREG, C_FOREG, C_ROREG, C_HFOREG, C_SROREG, C_SHIFTADDR: if p.To.Reg < REG_R0 || REG_R15 < p.To.Reg { c.ctxt.Diag("illegal base register: %v", p) } @@ -1405,6 +1378,7 @@ func (c *ctxt5) oplook(p *obj.Prog) *Optab { op := &ops[i] if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] { p.Optab = uint16(cap(optab) - cap(ops) + i + 1) + checkSuffix(c, p, op) return op } } @@ -1559,6 +1533,7 @@ func buildop(ctxt *obj.Link) { switch r { default: ctxt.Diag("unknown op in build: %v", r) + ctxt.DiagFlush() log.Fatalf("bad code") case AADD: @@ -1643,6 +1618,24 @@ func buildop(ctxt *obj.Link) { opset(ASUBD, r0) opset(AMULF, r0) opset(AMULD, r0) + opset(ANMULF, r0) + opset(ANMULD, r0) + opset(AMULAF, r0) + opset(AMULAD, r0) + opset(AMULSF, r0) + opset(AMULSD, r0) + opset(ANMULAF, r0) + opset(ANMULAD, r0) + opset(ANMULSF, r0) + opset(ANMULSD, r0) + opset(AFMULAF, r0) + opset(AFMULAD, r0) + opset(AFMULSF, r0) + opset(AFMULSD, r0) + opset(AFNMULAF, r0) + opset(AFNMULAD, r0) + opset(AFNMULSF, r0) + opset(AFNMULSD, r0) opset(ADIVF, r0) opset(ADIVD, r0) @@ -1680,23 +1673,33 @@ func buildop(ctxt *obj.Link) { case AMULAWT: opset(AMULAWB, r0) opset(AMULABB, r0) - opset(AMULA, r0) opset(AMULS, r0) opset(AMMULA, r0) opset(AMMULS, r0) + case ABFX: + opset(ABFXU, r0) + opset(ABFC, r0) + opset(ABFI, r0) + case ACLZ: opset(AREV, r0) opset(AREV16, r0) opset(AREVSH, r0) opset(ARBIT, r0) + case AXTAB: + opset(AXTAH, r0) + opset(AXTABU, r0) + opset(AXTAHU, r0) + case ALDREX, ASTREX, ALDREXD, ASTREXD, APLD, AAND, + AMULA, obj.AUNDEF, obj.AFUNCDATA, obj.APCDATA, @@ -1944,6 +1947,8 @@ func (c *ctxt5) asmout(p *obj.Prog, o *Optab, out []uint32) { case 12: /* movw $lcon, reg */ if o.a1 == C_SCON { o1 = c.omvs(p, &p.From, int(p.To.Reg)) + } else if p.As == AMVN { + o1 = c.omvr(p, &p.From, int(p.To.Reg)) } else { o1 = c.omvl(p, &p.From, int(p.To.Reg)) } @@ -2030,6 +2035,33 @@ func (c *ctxt5) asmout(p 
*obj.Prog, o *Optab, out []uint32) { r := int(p.Reg) o1 |= (uint32(rf)&15)<<8 | (uint32(r)&15)<<0 | (uint32(rt)&15)<<16 | (uint32(rt2)&15)<<12 + case 18: /* BFX/BFXU/BFC/BFI */ + o1 = c.oprrr(p, p.As, int(p.Scond)) + rt := int(p.To.Reg) + r := int(p.Reg) + if r == 0 { + r = rt + } else if p.As == ABFC { // only "BFC $width, $lsb, Reg" is accepted, p.Reg must be 0 + c.ctxt.Diag("illegal combination: %v", p) + } + if p.GetFrom3() == nil || p.GetFrom3().Type != obj.TYPE_CONST { + c.ctxt.Diag("%v: missing or wrong LSB", p) + break + } + lsb := p.GetFrom3().Offset + width := p.From.Offset + if lsb < 0 || lsb > 31 || width <= 0 || (lsb+width) > 32 { + c.ctxt.Diag("%v: wrong width or LSB", p) + } + switch p.As { + case ABFX, ABFXU: // (width-1) is encoded + o1 |= (uint32(r)&15)<<0 | (uint32(rt)&15)<<12 | uint32(lsb)<<7 | uint32(width-1)<<16 + case ABFC, ABFI: // MSB is encoded + o1 |= (uint32(r)&15)<<0 | (uint32(rt)&15)<<12 | uint32(lsb)<<7 | uint32(lsb+width-1)<<16 + default: + c.ctxt.Diag("illegal combination: %v", p) + } + case 20: /* mov/movb/movbu R,O(R) */ c.aclass(&p.To) @@ -2051,6 +2083,32 @@ func (c *ctxt5) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 |= 1 << 22 } + case 22: /* XTAB R@>i, [R], R */ + o1 = c.oprrr(p, p.As, int(p.Scond)) + switch p.From.Offset &^ 0xf { + // only 0/8/16/24 bits rotation is accepted + case SHIFT_RR, SHIFT_RR | 8<<7, SHIFT_RR | 16<<7, SHIFT_RR | 24<<7: + o1 |= uint32(p.From.Offset) & 0xc0f + default: + c.ctxt.Diag("illegal shift: %v", p) + } + rt := p.To.Reg + r := p.Reg + if r == 0 { + r = rt + } + o1 |= (uint32(rt)&15)<<12 | (uint32(r)&15)<<16 + + case 23: /* MOVW/MOVB/MOVH R@>i, R */ + switch p.As { + case AMOVW: + o1 = c.mov(p) + case AMOVBU, AMOVBS, AMOVB, AMOVHU, AMOVHS, AMOVH: + o1 = c.movxt(p) + default: + c.ctxt.Diag("illegal combination: %v", p) + } + case 30: /* mov/movb/movbu R,L(R) */ o1 = c.omvl(p, &p.To, REGTMP) @@ -2228,7 +2286,13 @@ func (c *ctxt5) asmout(p *obj.Prog, o *Optab, out []uint32) { rt := int(p.To.Reg) 
r := int(p.Reg) if r == 0 { - r = rt + switch p.As { + case AMULAD, AMULAF, AMULSF, AMULSD, ANMULAF, ANMULAD, ANMULSF, ANMULSD, + AFMULAD, AFMULAF, AFMULSF, AFMULSD, AFNMULAF, AFNMULAD, AFNMULSF, AFNMULSD: + c.ctxt.Diag("illegal combination: %v", p) + default: + r = rt + } } o1 |= (uint32(rf)&15)<<0 | (uint32(r)&15)<<16 | (uint32(rt)&15)<<12 @@ -2267,15 +2331,12 @@ func (c *ctxt5) asmout(p *obj.Prog, o *Optab, out []uint32) { case 59: /* movw/bu R< ldr indexed */ if p.From.Reg == 0 { - if p.As != AMOVW { - c.ctxt.Diag("byte MOV from shifter operand") - } - o1 = c.mov(p) + c.ctxt.Diag("source operand is not a memory address: %v", p) break } - if p.From.Offset&(1<<4) != 0 { c.ctxt.Diag("bad shift in LDR") + break } o1 = c.olrr(int(p.From.Offset), int(p.From.Reg), int(p.To.Reg), int(p.Scond)) if p.As == AMOVBU { @@ -2284,16 +2345,21 @@ func (c *ctxt5) asmout(p *obj.Prog, o *Optab, out []uint32) { case 60: /* movb R(R),R -> ldrsb indexed */ if p.From.Reg == 0 { - c.ctxt.Diag("byte MOV from shifter operand") - o1 = c.mov(p) + c.ctxt.Diag("source operand is not a memory address: %v", p) break } - if p.From.Offset&(^0xf) != 0 { c.ctxt.Diag("bad shift: %v", p) + break } o1 = c.olhrr(int(p.From.Offset), int(p.From.Reg), int(p.To.Reg), int(p.Scond)) - o1 ^= 1<<5 | 1<<6 + switch p.As { + case AMOVB, AMOVBS: + o1 ^= 1<<5 | 1<<6 + case AMOVH, AMOVHS: + o1 ^= 1 << 6 + default: + } if p.Scond&C_UBIT != 0 { o1 &^= 1 << 23 } @@ -2307,6 +2373,19 @@ func (c *ctxt5) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 |= 1 << 22 } + case 62: /* MOVH/MOVHS/MOVHU Reg, Reg<<0(Reg) -> strh */ + if p.To.Reg == 0 { + c.ctxt.Diag("MOV to shifter operand") + } + if p.To.Offset&(^0xf) != 0 { + c.ctxt.Diag("bad shift: %v", p) + } + o1 = c.olhrr(int(p.To.Offset), int(p.To.Reg), int(p.From.Reg), int(p.Scond)) + o1 ^= 1 << 20 + if p.Scond&C_UBIT != 0 { + o1 &^= 1 << 23 + } + /* reloc ops */ case 64: /* mov/movb/movbu R,addr */ o1 = c.omvl(p, &p.To, REGTMP) @@ -2336,15 +2415,9 @@ func (c *ctxt5) 
asmout(p *obj.Prog, o *Optab, out []uint32) { } case 101: /* movw tlsvar,R, local exec*/ - if p.Scond&C_SCOND != C_SCOND_NONE { - c.ctxt.Diag("conditional tls") - } o1 = c.omvl(p, &p.From, int(p.To.Reg)) case 102: /* movw tlsvar,R, initial exec*/ - if p.Scond&C_SCOND != C_SCOND_NONE { - c.ctxt.Diag("conditional tls") - } o1 = c.omvl(p, &p.From, int(p.To.Reg)) o2 = c.olrr(int(p.To.Reg)&15, (REGPC & 15), int(p.To.Reg), int(p.Scond)) @@ -2499,6 +2572,9 @@ func (c *ctxt5) asmout(p *obj.Prog, o *Optab, out []uint32) { if c.instoffset != 0 { c.ctxt.Diag("offset must be zero in STREX") } + if p.To.Reg == p.From.Reg || p.To.Reg == p.Reg { + c.ctxt.Diag("cannot use same register as both source and destination: %v", p) + } o1 = 0x18<<20 | 0xf90 o1 |= (uint32(p.From.Reg) & 15) << 16 o1 |= (uint32(p.Reg) & 15) << 0 @@ -2613,6 +2689,12 @@ func (c *ctxt5) asmout(p *obj.Prog, o *Optab, out []uint32) { if c.instoffset != 0 { c.ctxt.Diag("offset must be zero in STREX") } + if p.Reg&1 != 0 { + c.ctxt.Diag("source register must be even in STREXD: %v", p) + } + if p.To.Reg == p.From.Reg || p.To.Reg == p.Reg || p.To.Reg == p.Reg+1 { + c.ctxt.Diag("cannot use same register as both source and destination: %v", p) + } o1 = 0x1a<<20 | 0xf90 o1 |= (uint32(p.From.Reg) & 15) << 16 o1 |= (uint32(p.Reg) & 15) << 0 @@ -2715,6 +2797,31 @@ func (c *ctxt5) asmout(p *obj.Prog, o *Optab, out []uint32) { return } +func (c *ctxt5) movxt(p *obj.Prog) uint32 { + o1 := ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28 + switch p.As { + case AMOVB, AMOVBS: + o1 |= 0x6af<<16 | 0x7<<4 + case AMOVH, AMOVHS: + o1 |= 0x6bf<<16 | 0x7<<4 + case AMOVBU: + o1 |= 0x6ef<<16 | 0x7<<4 + case AMOVHU: + o1 |= 0x6ff<<16 | 0x7<<4 + default: + c.ctxt.Diag("illegal combination: %v", p) + } + switch p.From.Offset &^ 0xf { + // only 0/8/16/24 bits rotation is accepted + case SHIFT_RR, SHIFT_RR | 8<<7, SHIFT_RR | 16<<7, SHIFT_RR | 24<<7: + o1 |= uint32(p.From.Offset) & 0xc0f + default: + c.ctxt.Diag("illegal shift: %v", p) + } 
+ o1 |= (uint32(p.To.Reg) & 15) << 12 + return o1 +} + func (c *ctxt5) mov(p *obj.Prog) uint32 { c.aclass(&p.From) o1 := c.oprrr(p, p.As, int(p.Scond)) @@ -2820,6 +2927,42 @@ func (c *ctxt5) oprrr(p *obj.Prog, a obj.As, sc int) uint32 { return o | 0xe<<24 | 0x2<<20 | 0xb<<8 | 0<<4 case AMULF: return o | 0xe<<24 | 0x2<<20 | 0xa<<8 | 0<<4 + case ANMULD: + return o | 0xe<<24 | 0x2<<20 | 0xb<<8 | 0x4<<4 + case ANMULF: + return o | 0xe<<24 | 0x2<<20 | 0xa<<8 | 0x4<<4 + case AMULAD: + return o | 0xe<<24 | 0xb<<8 + case AMULAF: + return o | 0xe<<24 | 0xa<<8 + case AMULSD: + return o | 0xe<<24 | 0xb<<8 | 0x4<<4 + case AMULSF: + return o | 0xe<<24 | 0xa<<8 | 0x4<<4 + case ANMULAD: + return o | 0xe<<24 | 0x1<<20 | 0xb<<8 + case ANMULAF: + return o | 0xe<<24 | 0x1<<20 | 0xa<<8 + case ANMULSD: + return o | 0xe<<24 | 0x1<<20 | 0xb<<8 | 0x4<<4 + case ANMULSF: + return o | 0xe<<24 | 0x1<<20 | 0xa<<8 | 0x4<<4 + case AFMULAD: + return o | 0xe<<24 | 0xa<<20 | 0xb<<8 + case AFMULAF: + return o | 0xe<<24 | 0xa<<20 | 0xa<<8 + case AFMULSD: + return o | 0xe<<24 | 0xa<<20 | 0xb<<8 | 0x4<<4 + case AFMULSF: + return o | 0xe<<24 | 0xa<<20 | 0xa<<8 | 0x4<<4 + case AFNMULAD: + return o | 0xe<<24 | 0x9<<20 | 0xb<<8 | 0x4<<4 + case AFNMULAF: + return o | 0xe<<24 | 0x9<<20 | 0xa<<8 | 0x4<<4 + case AFNMULSD: + return o | 0xe<<24 | 0x9<<20 | 0xb<<8 + case AFNMULSF: + return o | 0xe<<24 | 0x9<<20 | 0xa<<8 case ADIVD: return o | 0xe<<24 | 0x8<<20 | 0xb<<8 | 0<<4 case ADIVF: @@ -2884,6 +3027,30 @@ func (c *ctxt5) oprrr(p *obj.Prog, a obj.As, sc int) uint32 { case -ACMP: // cmp imm return o | 0x3<<24 | 0x5<<20 + case ABFX: + return o | 0x3d<<21 | 0x5<<4 + + case ABFXU: + return o | 0x3f<<21 | 0x5<<4 + + case ABFC: + return o | 0x3e<<21 | 0x1f + + case ABFI: + return o | 0x3e<<21 | 0x1<<4 + + case AXTAB: + return o | 0x6a<<20 | 0x7<<4 + + case AXTAH: + return o | 0x6b<<20 | 0x7<<4 + + case AXTABU: + return o | 0x6e<<20 | 0x7<<4 + + case AXTAHU: + return o | 0x6f<<20 | 0x7<<4 + // CLZ doesn't support 
.nil case ACLZ: return o&(0xf<<28) | 0x16f<<16 | 0xf1<<4 @@ -2977,9 +3144,6 @@ func (c *ctxt5) opbra(p *obj.Prog, a obj.As, sc int) uint32 { } func (c *ctxt5) olr(v int32, b int, r int, sc int) uint32 { - if sc&C_SBIT != 0 { - c.ctxt.Diag(".nil on LDR/STR instruction") - } o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28 if sc&C_PBIT == 0 { o |= 1 << 24 @@ -3009,9 +3173,6 @@ func (c *ctxt5) olr(v int32, b int, r int, sc int) uint32 { } func (c *ctxt5) olhr(v int32, b int, r int, sc int) uint32 { - if sc&C_SBIT != 0 { - c.ctxt.Diag(".nil on LDRH/STRH instruction") - } o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28 if sc&C_PBIT == 0 { o |= 1 << 24 @@ -3113,6 +3274,19 @@ func (c *ctxt5) omvs(p *obj.Prog, a *obj.Addr, dr int) uint32 { return o1 } +// MVN $C_NCON, Reg -> MOVW $C_RCON, Reg +func (c *ctxt5) omvr(p *obj.Prog, a *obj.Addr, dr int) uint32 { + o1 := c.oprrr(p, AMOVW, int(p.Scond)) + o1 |= (uint32(dr) & 15) << 12 + v := immrot(^uint32(a.Offset)) + if v == 0 { + c.ctxt.Diag("%v: missing literal", p) + return 0 + } + o1 |= uint32(v) + return o1 +} + func (c *ctxt5) omvl(p *obj.Prog, a *obj.Addr, dr int) uint32 { var o1 uint32 if p.Pcond == nil { @@ -3136,7 +3310,7 @@ func (c *ctxt5) omvl(p *obj.Prog, a *obj.Addr, dr int) uint32 { func (c *ctxt5) chipzero5(e float64) int { // We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions. 
- if objabi.GOARM < 7 || e != 0 { + if objabi.GOARM < 7 || math.Float64bits(e) != 0 { return -1 } return 0 @@ -3181,8 +3355,8 @@ func (c *ctxt5) chipfloat5(e float64) int { func nocache(p *obj.Prog) { p.Optab = 0 p.From.Class = 0 - if p.From3 != nil { - p.From3.Class = 0 + if p.GetFrom3() != nil { + p.GetFrom3().Class = 0 } p.To.Class = 0 } diff --git a/src/cmd/internal/obj/arm/list5.go b/src/cmd/internal/obj/arm/list5.go index 169a7f5ad9d..6522f9aff86 100644 --- a/src/cmd/internal/obj/arm/list5.go +++ b/src/cmd/internal/obj/arm/list5.go @@ -38,6 +38,7 @@ import ( func init() { obj.RegisterRegister(obj.RBaseARM, MAXREG, rconv) obj.RegisterOpcode(obj.ABaseARM, Anames) + obj.RegisterRegisterList(obj.RegListARMLo, obj.RegListARMHi, rlconv) } func rconv(r int) string { @@ -81,3 +82,25 @@ func DRconv(a int) string { fp += s return fp } + +func rlconv(list int64) string { + str := "" + for i := 0; i < 16; i++ { + if list&(1< +const ( + REG_ARNG = obj.RBaseARM64 + 1<<10 + iota<<9 // Vn. + REG_ELEM // Vn.[index] + REG_ELEM_END +) + // Not registers, but flags that can be combined with regular register // constants to indicate extended register conversion. When checking, // you should subtract obj.RBaseARM64 first. From this difference, bit 11 @@ -264,9 +272,12 @@ const ( C_VREG // V0..V31 C_PAIR // (Rn, Rm) C_SHIFT // Rn<<2 - C_EXTREG // Rn.UXTB<<3 + C_EXTREG // Rn.UXTB[<<3] C_SPR // REG_NZCV C_COND // EQ, NE, etc + C_ARNG // Vn. 
+ C_ELEM // Vn.[index] + C_LIST // [V1, V2, V3] C_ZCON // $0 or ZR C_ABCON0 // could be C_ADDCON0 or C_BITCON @@ -291,6 +302,7 @@ const ( C_NPAUTO // -512 <= x < 0, 0 mod 8 C_NSAUTO // -256 <= x < 0 + C_PSAUTO_8 // 0 to 255, 0 mod 8 C_PSAUTO // 0 to 255 C_PPAUTO // 0 to 504, 0 mod 8 C_UAUTO4K_8 // 0 to 4095, 0 mod 8 @@ -315,6 +327,7 @@ const ( C_ZOREG // 0(R) C_NPOREG // must mirror NPAUTO, etc C_NSOREG + C_PSOREG_8 C_PSOREG C_PPOREG C_UOREG4K_8 @@ -718,6 +731,22 @@ const ( ASHA256H2 ASHA256SU0 ASHA256SU1 + AVADD + AVADDP + AVAND + AVCMEQ + AVEOR + AVMOV + AVLD1 + AVORR + AVREV32 + AVST1 + AVDUP + AVMOVS + AVADDV + AVMOVI + AVUADDLV + AVSUB ALAST AB = obj.AJMP ABL = obj.ACALL @@ -729,3 +758,20 @@ const ( SHIFT_LR = 1 << 22 SHIFT_AR = 2 << 22 ) + +// Arrangement for ARM64 SIMD instructions +const ( + // arrangement types + ARNG_8B = iota + ARNG_16B + ARNG_1D + ARNG_4H + ARNG_8H + ARNG_2S + ARNG_4S + ARNG_2D + ARNG_B + ARNG_H + ARNG_S + ARNG_D +) diff --git a/src/cmd/internal/obj/arm64/anames.go b/src/cmd/internal/obj/arm64/anames.go index 4ee4043af75..4070a436412 100644 --- a/src/cmd/internal/obj/arm64/anames.go +++ b/src/cmd/internal/obj/arm64/anames.go @@ -366,5 +366,21 @@ var Anames = []string{ "SHA256H2", "SHA256SU0", "SHA256SU1", + "VADD", + "VADDP", + "VAND", + "VCMEQ", + "VEOR", + "VMOV", + "VLD1", + "VORR", + "VREV32", + "VST1", + "VDUP", + "VMOVS", + "VADDV", + "VMOVI", + "VUADDLV", + "VSUB", "LAST", } diff --git a/src/cmd/internal/obj/arm64/anames7.go b/src/cmd/internal/obj/arm64/anames7.go index 24911f657d5..cb4b13934d3 100644 --- a/src/cmd/internal/obj/arm64/anames7.go +++ b/src/cmd/internal/obj/arm64/anames7.go @@ -16,6 +16,9 @@ var cnames7 = []string{ "EXTREG", "SPR", "COND", + "ARNG", + "ELEM", + "LIST", "ZCON", "ABCON0", "ADDCON0", @@ -35,6 +38,7 @@ var cnames7 = []string{ "LBRA", "NPAUTO", "NSAUTO", + "PSAUTO_8", "PSAUTO", "PPAUTO", "UAUTO4K_8", @@ -57,6 +61,7 @@ var cnames7 = []string{ "ZOREG", "NPOREG", "NSOREG", + "PSOREG_8", "PSOREG", "PPOREG", 
"UOREG4K_8", diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index 4419909f691..824fece5505 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -271,7 +271,7 @@ var optab = []Optab{ {ALSL, C_VCON, C_NONE, C_REG, 8, 4, 0, 0, 0}, {ALSL, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0}, {ALSL, C_REG, C_REG, C_REG, 9, 4, 0, 0, 0}, - {ASVC, C_NONE, C_NONE, C_VCON, 10, 4, 0, 0, 0}, + {ASVC, C_VCON, C_NONE, C_NONE, 10, 4, 0, 0, 0}, {ASVC, C_NONE, C_NONE, C_NONE, 10, 4, 0, 0, 0}, {ADWORD, C_NONE, C_NONE, C_VCON, 11, 8, 0, 0, 0}, {ADWORD, C_NONE, C_NONE, C_LEXT, 11, 8, 0, 0, 0}, @@ -320,6 +320,10 @@ var optab = []Optab{ {AMOVW, C_REG, C_NONE, C_ZOREG, 20, 4, 0, 0, 0}, {AMOVW, C_REG, C_NONE, C_UOREG16K, 20, 4, 0, 0, 0}, + {AVMOVS, C_VREG, C_NONE, C_UAUTO16K, 20, 4, REGSP, 0, 0}, + {AVMOVS, C_VREG, C_NONE, C_ZOREG, 20, 4, 0, 0, 0}, + {AVMOVS, C_VREG, C_NONE, C_UOREG16K, 20, 4, 0, 0, 0}, + /* unscaled 9-bit signed displacement store */ {AMOVB, C_REG, C_NONE, C_NSAUTO, 20, 4, REGSP, 0, 0}, {AMOVB, C_REG, C_NONE, C_NSOREG, 20, 4, 0, 0, 0}, @@ -368,6 +372,10 @@ var optab = []Optab{ {AMOVD, C_UOREG32K, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, {AMOVD, C_NSOREG, C_NONE, C_REG, 21, 4, REGSP, 0, 0}, + {AVMOVS, C_UAUTO16K, C_NONE, C_VREG, 21, 4, REGSP, 0, 0}, + {AVMOVS, C_ZOREG, C_NONE, C_VREG, 21, 4, 0, 0, 0}, + {AVMOVS, C_UOREG16K, C_NONE, C_VREG, 21, 4, 0, 0, 0}, + /* long displacement store */ {AMOVB, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0}, {AMOVB, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0}, @@ -403,6 +411,7 @@ var optab = []Optab{ {AMOVBU, C_LOREG, C_NONE, C_REG, 22, 4, 0, 0, C_XPOST}, {AFMOVS, C_LOREG, C_NONE, C_FREG, 22, 4, 0, 0, C_XPOST}, {AFMOVD, C_LOREG, C_NONE, C_FREG, 22, 4, 0, 0, C_XPOST}, + {AVMOVS, C_LOREG, C_NONE, C_VREG, 22, 4, 0, 0, C_XPOST}, {AMOVD, C_LOREG, C_NONE, C_REG, 22, 4, 0, 0, C_XPRE}, {AMOVW, C_LOREG, C_NONE, C_REG, 22, 4, 0, 0, C_XPRE}, {AMOVH, C_LOREG, C_NONE, C_REG, 22, 4, 0, 0, C_XPRE}, 
@@ -410,6 +419,7 @@ var optab = []Optab{ {AMOVBU, C_LOREG, C_NONE, C_REG, 22, 4, 0, 0, C_XPRE}, {AFMOVS, C_LOREG, C_NONE, C_FREG, 22, 4, 0, 0, C_XPRE}, {AFMOVD, C_LOREG, C_NONE, C_FREG, 22, 4, 0, 0, C_XPRE}, + {AVMOVS, C_LOREG, C_NONE, C_VREG, 22, 4, 0, 0, C_XPRE}, /* pre/post-indexed store (unscaled, signed 9-bit offset) */ {AMOVD, C_REG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST}, @@ -419,6 +429,7 @@ var optab = []Optab{ {AMOVBU, C_REG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST}, {AFMOVS, C_FREG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST}, {AFMOVD, C_FREG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST}, + {AVMOVS, C_VREG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPOST}, {AMOVD, C_REG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE}, {AMOVW, C_REG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE}, {AMOVH, C_REG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE}, @@ -426,13 +437,61 @@ var optab = []Optab{ {AMOVBU, C_REG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE}, {AFMOVS, C_FREG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE}, {AFMOVD, C_FREG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE}, + {AVMOVS, C_VREG, C_NONE, C_LOREG, 23, 4, 0, 0, C_XPRE}, - /* pre/post-indexed load/store register pair - (unscaled, signed 10-bit quad-aligned offset) */ - {ALDP, C_LOREG, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE}, - {ALDP, C_LOREG, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST}, - {ASTP, C_PAIR, C_NONE, C_LOREG, 67, 4, 0, 0, C_XPRE}, - {ASTP, C_PAIR, C_NONE, C_LOREG, 67, 4, 0, 0, C_XPOST}, + /* pre/post-indexed/signed-offset load/store register pair + (unscaled, signed 10-bit quad-aligned and long offset) */ + {ALDP, C_NPAUTO, C_NONE, C_PAIR, 66, 4, REGSP, 0, 0}, + {ALDP, C_NPAUTO, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPRE}, + {ALDP, C_NPAUTO, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPOST}, + {ALDP, C_PPAUTO, C_NONE, C_PAIR, 66, 4, REGSP, 0, 0}, + {ALDP, C_PPAUTO, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPRE}, + {ALDP, C_PPAUTO, C_NONE, C_PAIR, 66, 4, REGSP, 0, C_XPOST}, + {ALDP, C_UAUTO4K, C_NONE, C_PAIR, 74, 8, REGSP, 0, 0}, + {ALDP, C_UAUTO4K, C_NONE, C_PAIR, 74, 8, REGSP, 0, 
C_XPRE}, + {ALDP, C_UAUTO4K, C_NONE, C_PAIR, 74, 8, REGSP, 0, C_XPOST}, + {ALDP, C_LAUTO, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, 0}, + {ALDP, C_LAUTO, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, C_XPRE}, + {ALDP, C_LAUTO, C_NONE, C_PAIR, 75, 12, REGSP, LFROM, C_XPOST}, + {ALDP, C_NPOREG, C_NONE, C_PAIR, 66, 4, 0, 0, 0}, + {ALDP, C_NPOREG, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE}, + {ALDP, C_NPOREG, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST}, + {ALDP, C_PPOREG, C_NONE, C_PAIR, 66, 4, 0, 0, 0}, + {ALDP, C_PPOREG, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPRE}, + {ALDP, C_PPOREG, C_NONE, C_PAIR, 66, 4, 0, 0, C_XPOST}, + {ALDP, C_UOREG4K, C_NONE, C_PAIR, 74, 8, 0, 0, 0}, + {ALDP, C_UOREG4K, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPRE}, + {ALDP, C_UOREG4K, C_NONE, C_PAIR, 74, 8, 0, 0, C_XPOST}, + {ALDP, C_LOREG, C_NONE, C_PAIR, 75, 12, 0, LFROM, 0}, + {ALDP, C_LOREG, C_NONE, C_PAIR, 75, 12, 0, LFROM, C_XPRE}, + {ALDP, C_LOREG, C_NONE, C_PAIR, 75, 12, 0, LFROM, C_XPOST}, + {ALDP, C_ADDR, C_NONE, C_PAIR, 88, 12, 0, 0, 0}, + + {ASTP, C_PAIR, C_NONE, C_NPAUTO, 67, 4, REGSP, 0, 0}, + {ASTP, C_PAIR, C_NONE, C_NPAUTO, 67, 4, REGSP, 0, C_XPRE}, + {ASTP, C_PAIR, C_NONE, C_NPAUTO, 67, 4, REGSP, 0, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_PPAUTO, 67, 4, REGSP, 0, 0}, + {ASTP, C_PAIR, C_NONE, C_PPAUTO, 67, 4, REGSP, 0, C_XPRE}, + {ASTP, C_PAIR, C_NONE, C_PPAUTO, 67, 4, REGSP, 0, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, 0}, + {ASTP, C_PAIR, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, C_XPRE}, + {ASTP, C_PAIR, C_NONE, C_UAUTO4K, 76, 8, REGSP, 0, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, 0}, + {ASTP, C_PAIR, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, C_XPRE}, + {ASTP, C_PAIR, C_NONE, C_LAUTO, 77, 12, REGSP, LTO, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_NPOREG, 67, 4, 0, 0, 0}, + {ASTP, C_PAIR, C_NONE, C_NPOREG, 67, 4, 0, 0, C_XPRE}, + {ASTP, C_PAIR, C_NONE, C_NPOREG, 67, 4, 0, 0, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_PPOREG, 67, 4, 0, 0, 0}, + {ASTP, C_PAIR, C_NONE, C_PPOREG, 67, 4, 0, 0, C_XPRE}, 
+ {ASTP, C_PAIR, C_NONE, C_PPOREG, 67, 4, 0, 0, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_UOREG4K, 76, 8, 0, 0, 0}, + {ASTP, C_PAIR, C_NONE, C_UOREG4K, 76, 8, 0, 0, C_XPRE}, + {ASTP, C_PAIR, C_NONE, C_UOREG4K, 76, 8, 0, 0, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_LOREG, 77, 12, 0, LTO, 0}, + {ASTP, C_PAIR, C_NONE, C_LOREG, 77, 12, 0, LTO, C_XPRE}, + {ASTP, C_PAIR, C_NONE, C_LOREG, 77, 12, 0, LTO, C_XPOST}, + {ASTP, C_PAIR, C_NONE, C_ADDR, 87, 12, 0, 0, 0}, /* special */ {AMOVD, C_SPR, C_NONE, C_REG, 35, 4, 0, 0, 0}, @@ -491,7 +550,7 @@ var optab = []Optab{ {AFCMPS, C_FREG, C_FREG, C_NONE, 56, 4, 0, 0, 0}, {AFCMPS, C_FCON, C_FREG, C_NONE, 56, 4, 0, 0, 0}, {AFCCMPS, C_COND, C_FREG, C_VCON, 57, 4, 0, 0, 0}, - {AFCSELD, C_COND, C_REG, C_FREG, 18, 4, 0, 0, 0}, + {AFCSELD, C_COND, C_FREG, C_FREG, 18, 4, 0, 0, 0}, {AFCVTSD, C_FREG, C_NONE, C_FREG, 29, 4, 0, 0, 0}, {ACLREX, C_NONE, C_NONE, C_VCON, 38, 4, 0, 0, 0}, {ACLREX, C_NONE, C_NONE, C_NONE, 38, 4, 0, 0, 0}, @@ -512,8 +571,30 @@ var optab = []Optab{ // { ASTXP, C_REG, C_NONE, C_ZOREG, 59, 4, 0 , 0}, // TODO(aram): - {AAESD, C_VREG, C_NONE, C_VREG, 29, 4, 0, 0, 0}, + {AAESD, C_VREG, C_NONE, C_VREG, 29, 4, 0, 0, 0}, // for compatibility with old code + {AAESD, C_ARNG, C_NONE, C_ARNG, 29, 4, 0, 0, 0}, // recommend using the new one for better readability {ASHA1C, C_VREG, C_REG, C_VREG, 1, 4, 0, 0, 0}, + {ASHA1C, C_ARNG, C_VREG, C_VREG, 1, 4, 0, 0, 0}, + {ASHA1H, C_VREG, C_NONE, C_VREG, 29, 4, 0, 0, 0}, + {ASHA1SU0, C_ARNG, C_ARNG, C_ARNG, 1, 4, 0, 0, 0}, + {ASHA256H, C_ARNG, C_VREG, C_VREG, 1, 4, 0, 0, 0}, + {AVADDP, C_ARNG, C_ARNG, C_ARNG, 72, 4, 0, 0, 0}, + {AVADD, C_ARNG, C_ARNG, C_ARNG, 72, 4, 0, 0, 0}, + {AVADD, C_VREG, C_VREG, C_VREG, 89, 4, 0, 0, 0}, + {AVADD, C_VREG, C_NONE, C_VREG, 89, 4, 0, 0, 0}, + {AVLD1, C_ZOREG, C_NONE, C_LIST, 81, 4, 0, 0, 0}, + {AVLD1, C_LOREG, C_NONE, C_LIST, 81, 4, 0, 0, C_XPOST}, + {AVMOV, C_ELEM, C_NONE, C_REG, 73, 4, 0, 0, 0}, + {AVMOV, C_REG, C_NONE, C_ARNG, 82, 4, 0, 0, 0}, + {AVMOV, 
C_ARNG, C_NONE, C_ARNG, 83, 4, 0, 0, 0}, + {AVMOV, C_REG, C_NONE, C_ELEM, 78, 4, 0, 0, 0}, + {AVMOV, C_ELEM, C_NONE, C_VREG, 80, 4, 0, 0, 0}, + {AVREV32, C_ARNG, C_NONE, C_ARNG, 83, 4, 0, 0, 0}, + {AVST1, C_LIST, C_NONE, C_ZOREG, 84, 4, 0, 0, 0}, + {AVST1, C_LIST, C_NONE, C_LOREG, 84, 4, 0, 0, C_XPOST}, + {AVDUP, C_ELEM, C_NONE, C_ARNG, 79, 4, 0, 0, 0}, + {AVADDV, C_ARNG, C_NONE, C_VREG, 85, 4, 0, 0, 0}, + {AVMOVI, C_ADDCON, C_NONE, C_ARNG, 86, 4, 0, 0, 0}, {obj.AUNDEF, C_NONE, C_NONE, C_NONE, 90, 4, 0, 0, 0}, {obj.APCDATA, C_VCON, C_NONE, C_VCON, 0, 0, 0, 0, 0}, @@ -529,14 +610,22 @@ var optab = []Optab{ * valid pstate field values, and value to use in instruction */ var pstatefield = []struct { - a uint32 - b uint32 + reg int16 + enc uint32 }{ {REG_SPSel, 0<<16 | 4<<12 | 5<<5}, {REG_DAIFSet, 3<<16 | 4<<12 | 6<<5}, {REG_DAIFClr, 3<<16 | 4<<12 | 7<<5}, } +// the System register values, and value to use in instruction +var systemreg = []struct { + reg int16 + enc uint32 +}{ + {REG_ELR_EL1, 8<<16 | 4<<12 | 1<<5}, +} + func span7(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p := cursym.Func.Text if p == nil || p.Link == nil { // handle external functions and ELF section symbols @@ -761,6 +850,7 @@ func (c *ctxt7) addpool(p *obj.Prog, a *obj.Addr) { fallthrough case C_PSAUTO, + C_PSAUTO_8, C_PPAUTO, C_UAUTO4K_8, C_UAUTO4K_4, @@ -777,6 +867,7 @@ func (c *ctxt7) addpool(p *obj.Prog, a *obj.Addr) { C_LAUTO, C_PPOREG, C_PSOREG, + C_PSOREG_8, C_UOREG4K_8, C_UOREG4K_4, C_UOREG4K_2, @@ -997,9 +1088,12 @@ func autoclass(l int64) int { } if l <= 255 { + if (l & 7) == 0 { + return C_PSAUTO_8 + } return C_PSAUTO } - if l <= 504 && (l&7) == 0 { + if l <= 504 && l&7 == 0 { return C_PPAUTO } if l <= 4095 { @@ -1104,7 +1198,11 @@ func rclass(r int16) int { return C_COND case r == REGSP: return C_RSP - case r®_EXT != 0: + case r >= REG_ARNG && r < REG_ELEM: + return C_ARNG + case r >= REG_ELEM && r < REG_ELEM_END: + return C_ELEM + case r >= REG_UXTB && r < REG_SPECIAL: 
return C_EXTREG case r >= REG_SPECIAL: return C_SPR @@ -1126,6 +1224,9 @@ func (c *ctxt7) aclass(a *obj.Addr) int { case obj.TYPE_SHIFT: return C_SHIFT + case obj.TYPE_REGLIST: + return C_LIST + case obj.TYPE_MEM: switch a.Name { case obj.NAME_EXTERN, obj.NAME_STATIC: @@ -1396,32 +1497,37 @@ func cmp(a int, b int) bool { return true } + case C_PSAUTO: + if b == C_PSAUTO_8 { + return true + } + case C_PPAUTO: - if b == C_PSAUTO { + if b == C_PSAUTO_8 { return true } case C_UAUTO4K: switch b { - case C_PSAUTO, C_PPAUTO, C_UAUTO4K_2, C_UAUTO4K_4, C_UAUTO4K_8: + case C_PSAUTO, C_PSAUTO_8, C_PPAUTO, C_UAUTO4K_2, C_UAUTO4K_4, C_UAUTO4K_8: return true } case C_UAUTO8K: switch b { - case C_PSAUTO, C_PPAUTO, C_UAUTO4K_2, C_UAUTO4K_4, C_UAUTO4K_8, C_UAUTO8K_4, C_UAUTO8K_8: + case C_PSAUTO, C_PSAUTO_8, C_PPAUTO, C_UAUTO4K_2, C_UAUTO4K_4, C_UAUTO4K_8, C_UAUTO8K_4, C_UAUTO8K_8: return true } case C_UAUTO16K: switch b { - case C_PSAUTO, C_PPAUTO, C_UAUTO4K_4, C_UAUTO4K_8, C_UAUTO8K_4, C_UAUTO8K_8, C_UAUTO16K_8: + case C_PSAUTO, C_PSAUTO_8, C_PPAUTO, C_UAUTO4K_4, C_UAUTO4K_8, C_UAUTO8K_4, C_UAUTO8K_8, C_UAUTO16K_8: return true } case C_UAUTO32K: switch b { - case C_PSAUTO, C_PPAUTO, C_UAUTO4K_8, C_UAUTO8K_8, C_UAUTO16K_8: + case C_PSAUTO, C_PSAUTO_8, C_PPAUTO, C_UAUTO4K_8, C_UAUTO8K_8, C_UAUTO16K_8: return true } @@ -1430,7 +1536,7 @@ func cmp(a int, b int) bool { case C_LAUTO: switch b { - case C_PSAUTO, C_PPAUTO, + case C_PSAUTO, C_PSAUTO_8, C_PPAUTO, C_UAUTO4K, C_UAUTO4K_2, C_UAUTO4K_4, C_UAUTO4K_8, C_UAUTO8K, C_UAUTO8K_4, C_UAUTO8K_8, C_UAUTO16K, C_UAUTO16K_8, @@ -1440,36 +1546,37 @@ func cmp(a int, b int) bool { return cmp(C_NPAUTO, b) case C_PSOREG: - if b == C_ZOREG { + if b == C_ZOREG || b == C_PSOREG_8 { return true } case C_PPOREG: - if b == C_ZOREG || b == C_PSOREG { + switch b { + case C_ZOREG, C_PSOREG_8: return true } case C_UOREG4K: switch b { - case C_ZOREG, C_PSOREG, C_PPOREG, C_UOREG4K_2, C_UOREG4K_4, C_UOREG4K_8: + case C_ZOREG, C_PSOREG_8, C_PSOREG, C_PPOREG, 
C_UOREG4K_2, C_UOREG4K_4, C_UOREG4K_8: return true } case C_UOREG8K: switch b { - case C_ZOREG, C_PSOREG, C_PPOREG, C_UOREG4K_2, C_UOREG4K_4, C_UOREG4K_8, C_UOREG8K_4, C_UOREG8K_8: + case C_ZOREG, C_PSOREG_8, C_PSOREG, C_PPOREG, C_UOREG4K_2, C_UOREG4K_4, C_UOREG4K_8, C_UOREG8K_4, C_UOREG8K_8: return true } case C_UOREG16K: switch b { - case C_ZOREG, C_PSOREG, C_PPOREG, C_UOREG4K_4, C_UOREG4K_8, C_UOREG8K_4, C_UOREG8K_8, C_UOREG16K_8: + case C_ZOREG, C_PSOREG_8, C_PSOREG, C_PPOREG, C_UOREG4K_4, C_UOREG4K_8, C_UOREG8K_4, C_UOREG8K_8, C_UOREG16K_8: return true } case C_UOREG32K: switch b { - case C_ZOREG, C_PSOREG, C_PPOREG, C_UOREG4K_8, C_UOREG8K_8, C_UOREG16K_8: + case C_ZOREG, C_PSOREG_8, C_PSOREG, C_PPOREG, C_UOREG4K_8, C_UOREG8K_8, C_UOREG16K_8: return true } @@ -1478,7 +1585,7 @@ func cmp(a int, b int) bool { case C_LOREG: switch b { - case C_ZOREG, C_PSOREG, C_PPOREG, + case C_ZOREG, C_PSOREG_8, C_PSOREG, C_PPOREG, C_UOREG4K, C_UOREG4K_2, C_UOREG4K_4, C_UOREG4K_8, C_UOREG8K, C_UOREG8K_4, C_UOREG8K_8, C_UOREG16K, C_UOREG16K_8, @@ -1562,6 +1669,7 @@ func buildop(ctxt *obj.Link) { switch r { default: ctxt.Diag("unknown op in build: %v", r) + ctxt.DiagFlush() log.Fatalf("bad code") case AADD: @@ -1817,8 +1925,8 @@ func buildop(ctxt *obj.Link) { break case ASVC: - oprangeset(AHLT, t) oprangeset(AHVC, t) + oprangeset(AHLT, t) oprangeset(ASMC, t) oprangeset(ABRK, t) oprangeset(ADCPS1, t) @@ -1954,22 +2062,45 @@ func buildop(ctxt *obj.Link) { case ASTXP: oprangeset(ASTXPW, t) + case AVADDP: + oprangeset(AVAND, t) + oprangeset(AVCMEQ, t) + oprangeset(AVORR, t) + oprangeset(AVEOR, t) + + case AVADD: + oprangeset(AVSUB, t) + case AAESD: oprangeset(AAESE, t) oprangeset(AAESMC, t) oprangeset(AAESIMC, t) - oprangeset(ASHA1H, t) oprangeset(ASHA1SU1, t) oprangeset(ASHA256SU0, t) case ASHA1C: oprangeset(ASHA1P, t) oprangeset(ASHA1M, t) - oprangeset(ASHA1SU0, t) - oprangeset(ASHA256H, t) + + case ASHA256H: oprangeset(ASHA256H2, t) + + case ASHA1SU0: oprangeset(ASHA256SU1, t) + 
case AVADDV: + oprangeset(AVUADDLV, t) + + case ASHA1H, + AVMOV, + AVLD1, + AVREV32, + AVST1, + AVDUP, + AVMOVS, + AVMOVI: + break + case obj.ANOP, obj.AUNDEF, obj.AFUNCDATA, @@ -2185,8 +2316,8 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { case 10: /* brk/hvc/.../svc [$con] */ o1 = c.opimm(p, p.As) - if p.To.Type != obj.TYPE_NONE { - o1 |= uint32((p.To.Offset & 0xffff) << 5) + if p.From.Type != obj.TYPE_NONE { + o1 |= uint32((p.From.Offset & 0xffff) << 5) } case 11: /* dword */ @@ -2260,7 +2391,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { var r int var ra int if p.From3Type() == obj.TYPE_REG { - r = int(p.From3.Reg) + r = int(p.GetFrom3().Reg) ra = int(p.Reg) if ra == 0 { ra = REGZERO @@ -2322,7 +2453,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { cond ^= 1 } else { - rf = int(p.From3.Reg) /* CSEL */ + rf = int(p.GetFrom3().Reg) /* CSEL */ } } else { /* CSET */ @@ -2347,12 +2478,12 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { cond -= COND_EQ } var rf int - if p.From3.Type == obj.TYPE_REG { + if p.GetFrom3().Type == obj.TYPE_REG { o1 = c.oprrr(p, p.As) - rf = int(p.From3.Reg) /* Rm */ + rf = int(p.GetFrom3().Reg) /* Rm */ } else { o1 = c.opirr(p, p.As) - rf = int(p.From3.Offset & 0x1F) + rf = int(p.GetFrom3().Offset & 0x1F) } o1 |= (uint32(rf&31) << 16) | (uint32(cond&15) << 12) | (uint32(p.Reg&31) << 5) | uint32(nzcv) @@ -2455,8 +2586,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 = c.opxrrr(p, p.As) if (p.From.Reg-obj.RBaseARM64)®_EXT != 0 { - c.ctxt.Diag("extended register not implemented\n%v", p) - // o1 |= uint32(p.From.Offset) /* includes reg, op, etc */ + o1 |= uint32(p.From.Offset) /* includes reg, op, etc */ } else { o1 |= uint32(p.From.Reg&31) << 16 } @@ -2597,22 +2727,19 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 = c.opirr(p, p.As) d := p.From.Offset - if (d >> 16) != 0 { + s := movcon(d) + if s < 0 || s >= 4 { + 
c.ctxt.Diag("bad constant for MOVK: %#x\n%v", uint64(d), p) + } + if (o1&S64) == 0 && s >= 2 { + c.ctxt.Diag("illegal bit position\n%v", p) + } + if ((d >> uint(s*16)) >> 16) != 0 { c.ctxt.Diag("requires uimm16\n%v", p) } - s := 0 - if p.From3Type() != obj.TYPE_NONE { - if p.From3.Type != obj.TYPE_CONST { - c.ctxt.Diag("missing bit position\n%v", p) - } - s = int(p.From3.Offset / 16) - if (s*16&0xF) != 0 || s >= 4 || (o1&S64) == 0 && s >= 2 { - c.ctxt.Diag("illegal bit position\n%v", p) - } - } - rt := int(p.To.Reg) - o1 |= uint32(((d & 0xFFFF) << 5) | int64((uint32(s)&3)<<21) | int64(rt&31)) + + o1 |= uint32((((d >> uint(s*16)) & 0xFFFF) << 5) | int64((uint32(s)&3)<<21) | int64(rt&31)) case 34: /* mov $lacon,R */ o1 = c.omovlit(AMOVD, p, &p.From, REGTMP) @@ -2633,21 +2760,41 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { case 35: /* mov SPR,R -> mrs */ o1 = c.oprrr(p, AMRS) - v := int32(p.From.Offset) + v := uint32(0) + for i := 0; i < len(systemreg); i++ { + if systemreg[i].reg == p.From.Reg { + v = systemreg[i].enc + break + } + } + if v == 0 { + c.ctxt.Diag("illegal system register:\n%v", p) + } if (o1 & uint32(v&^(3<<19))) != 0 { c.ctxt.Diag("MRS register value overlap\n%v", p) } - o1 |= uint32(v) + + o1 |= v o1 |= uint32(p.To.Reg & 31) case 36: /* mov R,SPR */ o1 = c.oprrr(p, AMSR) - v := int32(p.To.Offset) + v := uint32(0) + for i := 0; i < len(systemreg); i++ { + if systemreg[i].reg == p.To.Reg { + v = systemreg[i].enc + break + } + } + if v == 0 { + c.ctxt.Diag("illegal system register:\n%v", p) + } if (o1 & uint32(v&^(3<<19))) != 0 { c.ctxt.Diag("MSR register value overlap\n%v", p) } - o1 |= uint32(v) + + o1 |= v o1 |= uint32(p.From.Reg & 31) case 37: /* mov $con,PSTATEfield -> MSR [immediate] */ @@ -2656,10 +2803,10 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { } o1 = c.opirr(p, AMSR) o1 |= uint32((p.From.Offset & 0xF) << 8) /* Crm */ - v := int32(0) + v := uint32(0) for i := 0; i < len(pstatefield); i++ { - if 
int64(pstatefield[i].a) == p.To.Offset { - v = int32(pstatefield[i].b) + if pstatefield[i].reg == p.To.Reg { + v = pstatefield[i].enc break } } @@ -2667,7 +2814,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { if v == 0 { c.ctxt.Diag("illegal PSTATE field for immediate move\n%v", p) } - o1 |= uint32(v) + o1 |= v case 38: /* clrex [$imm] */ o1 = c.opimm(p, p.As) @@ -2699,12 +2846,12 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 = c.op0(p, p.As) case 42: /* bfm R,r,s,R */ - o1 = c.opbfm(p, p.As, int(p.From.Offset), int(p.From3.Offset), int(p.Reg), int(p.To.Reg)) + o1 = c.opbfm(p, p.As, int(p.From.Offset), int(p.GetFrom3().Offset), int(p.Reg), int(p.To.Reg)) case 43: /* bfm aliases */ r := int(p.From.Offset) - s := int(p.From3.Offset) + s := int(p.GetFrom3().Offset) rf := int(p.Reg) rt := int(p.To.Reg) if rf == 0 { @@ -2753,7 +2900,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { } case 44: /* extr $b, Rn, Rm, Rd */ - o1 = c.opextr(p, p.As, int32(p.From.Offset), int(p.From3.Reg), int(p.Reg), int(p.To.Reg)) + o1 = c.opextr(p, p.As, int32(p.From.Offset), int(p.GetFrom3().Reg), int(p.Reg), int(p.To.Reg)) case 45: /* sxt/uxt[bhw] R,R; movT R,R -> sxtT R,R */ rf := int(p.From.Reg) @@ -2885,7 +3032,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 = c.oprrr(p, p.As) var rf int - if p.From.Type == obj.TYPE_CONST { + if p.From.Type == obj.TYPE_FCONST { o1 |= 8 /* zero */ rf = 0 } else { @@ -2909,11 +3056,11 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { c.ctxt.Diag("implausible condition\n%v", p) } rf := int(p.Reg) - if p.From3 == nil || p.From3.Reg < REG_F0 || p.From3.Reg > REG_F31 { + if p.GetFrom3() == nil || p.GetFrom3().Reg < REG_F0 || p.GetFrom3().Reg > REG_F31 { c.ctxt.Diag("illegal FCCMP\n%v", p) break } - rt := int(p.From3.Reg) + rt := int(p.GetFrom3().Reg) o1 |= uint32(rf&31)<<16 | uint32(cond&15)<<12 | uint32(rt&31)<<5 | uint32(nzcv) case 58: /* ldar/ldxr/ldaxr */ @@ 
-3001,31 +3148,50 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { o3 = c.olsr12u(p, int32(c.opldr12(p, p.As)), 0, REGTMP, int(p.To.Reg)) case 66: /* ldp O(R)!, (r1, r2); ldp (R)O!, (r1, r2) */ - v := int32(p.From.Offset) + v := int32(c.regoff(&p.From)) + r := int(p.From.Reg) + if r == obj.REG_NONE { + r = int(o.param) + } + if r == obj.REG_NONE { + c.ctxt.Diag("invalid ldp source: %v\n", p) + } - if v < -512 || v > 504 { - c.ctxt.Diag("offset out of range\n%v", p) + if v < -512 || v > 504 || v%8 != 0 { + c.ctxt.Diag("invalid offset %v\n", p) } if o.scond == C_XPOST { o1 |= 1 << 23 - } else { + } else if o.scond == C_XPRE { o1 |= 3 << 23 + } else { + o1 |= 2 << 23 } o1 |= 1 << 22 - o1 |= uint32(int64(2<<30|5<<27|((uint32(v)/8)&0x7f)<<15) | p.To.Offset<<10 | int64(uint32(p.From.Reg&31)<<5) | int64(p.To.Reg&31)) + o1 |= uint32(int64(2<<30|5<<27|((uint32(v)/8)&0x7f)<<15) | (p.To.Offset&31)<<10 | int64(uint32(r&31)<<5) | int64(p.To.Reg&31)) case 67: /* stp (r1, r2), O(R)!; stp (r1, r2), (R)O! 
*/ - v := int32(p.To.Offset) - - if v < -512 || v > 504 { - c.ctxt.Diag("offset out of range\n%v", p) + r := int(p.To.Reg) + if r == obj.REG_NONE { + r = int(o.param) } + if r == obj.REG_NONE { + c.ctxt.Diag("invalid stp destination: %v\n", p) + } + + v := int32(c.regoff(&p.To)) + if v < -512 || v > 504 || v%8 != 0 { + c.ctxt.Diag("invalid offset %v\n", p) + } + if o.scond == C_XPOST { o1 |= 1 << 23 - } else { + } else if o.scond == C_XPRE { o1 |= 3 << 23 + } else { + o1 |= 2 << 23 } - o1 |= uint32(int64(2<<30|5<<27|((uint32(v)/8)&0x7f)<<15) | p.From.Offset<<10 | int64(uint32(p.To.Reg&31)<<5) | int64(p.From.Reg&31)) + o1 |= uint32(int64(2<<30|5<<27|((uint32(v)/8)&0x7f)<<15) | (p.From.Offset&31)<<10 | int64(uint32(r&31)<<5) | int64(p.From.Reg&31)) case 68: /* movT $vconaddr(SB), reg -> adrp + add + reloc */ if p.As == AMOVW { @@ -3075,6 +3241,474 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { rel.Add = 0 rel.Type = objabi.R_ARM64_GOTPCREL + case 72: /* vaddp/vand/vcmeq/vorr/vadd/veor Vm., Vn., Vd. 
*/ + af := int((p.From.Reg >> 5) & 15) + af3 := int((p.Reg >> 5) & 15) + at := int((p.To.Reg >> 5) & 15) + if af != af3 || af != at { + c.ctxt.Diag("invalid arrangement: %v\n", p) + } + o1 = c.oprrr(p, p.As) + rf := int((p.From.Reg) & 31) + rt := int((p.To.Reg) & 31) + r := int((p.Reg) & 31) + + Q := 0 + size := 0 + switch af { + case ARNG_16B: + Q = 1 + size = 0 + case ARNG_2D: + Q = 1 + size = 3 + case ARNG_2S: + Q = 0 + size = 2 + case ARNG_4H: + Q = 0 + size = 1 + case ARNG_4S: + Q = 1 + size = 2 + case ARNG_8B: + Q = 0 + size = 0 + case ARNG_8H: + Q = 1 + size = 1 + default: + c.ctxt.Diag("invalid arrangement: %v\n", p) + } + + if (p.As == AVORR || p.As == AVAND || p.As == AVEOR) && + (af != ARNG_16B && af != ARNG_8B) { + c.ctxt.Diag("invalid arrangement on op %v", p.As) + } else if p.As == AVORR { + size = 2 + } else if p.As == AVAND || p.As == AVEOR { + size = 0 + } + + o1 |= (uint32(Q&1) << 30) | (uint32(size&3) << 22) | (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31) + + case 73: /* vmov V.[index], R */ + rf := int(p.From.Reg) + rt := int(p.To.Reg) + imm5 := 0 + o1 = 7<<25 | 0xf<<10 + switch (p.From.Reg >> 5) & 15 { + case ARNG_B: + imm5 |= 1 + imm5 |= int(p.From.Index) << 1 + case ARNG_H: + imm5 |= 2 + imm5 |= int(p.From.Index) << 2 + case ARNG_S: + imm5 |= 4 + imm5 |= int(p.From.Index) << 3 + case ARNG_D: + imm5 |= 8 + imm5 |= int(p.From.Index) << 4 + o1 |= 1 << 30 + default: + c.ctxt.Diag("invalid arrangement on op V.[index], R: %v\n", p) + } + o1 |= (uint32(imm5&0x1f) << 16) | (uint32(rf&31) << 5) | uint32(rt&31) + + case 74: + // add $O, R, Rtmp + // ldp (Rtmp), (R1, R2) + r := int(p.From.Reg) + if r == obj.REG_NONE { + r = int(o.param) + } + if r == obj.REG_NONE { + c.ctxt.Diag("invalid ldp source: %v\n", p) + } + + v := int32(c.regoff(&p.From)) + if v < 0 || v > 4095 { + c.ctxt.Diag("offset out of range%v\n", p) + } + + if o.scond == C_XPOST { + o2 |= 1 << 23 + } else if o.scond == C_XPRE { + o2 |= 3 << 23 + } else { + o2 |= 2 << 23 + } 
+ + o1 = c.oaddi(p, int32(c.opirr(p, AADD)), v, r, REGTMP) + o2 |= 1 << 22 + o2 |= uint32(int64(2<<30|5<<27) | (p.To.Offset&31)<<10 | int64(uint32(REGTMP&31)<<5) | int64(p.To.Reg&31)) + + case 75: + // mov $L, Rtmp (from constant pool) + // add Rtmp, R, Rtmp + // ldp (Rtmp), (R1, R2) + r := int(p.From.Reg) + if r == obj.REG_NONE { + r = int(o.param) + } + if r == obj.REG_NONE { + c.ctxt.Diag("invalid ldp source: %v\n", p) + } + + if o.scond == C_XPOST { + o3 |= 1 << 23 + } else if o.scond == C_XPRE { + o3 |= 3 << 23 + } else { + o3 |= 2 << 23 + } + + o1 = c.omovlit(AMOVD, p, &p.From, REGTMP) + o2 = c.opxrrr(p, AADD) + o2 |= (REGTMP & 31) << 16 + o2 |= uint32(r&31) << 5 + o2 |= uint32(REGTMP & 31) + o3 |= 1 << 22 + o3 |= uint32(int64(2<<30|5<<27) | (p.To.Offset&31)<<10 | int64(uint32(REGTMP&31)<<5) | int64(p.To.Reg&31)) + + case 76: + // add $O, R, Rtmp + // stp (R1, R2), (Rtmp) + r := int(p.To.Reg) + if r == obj.REG_NONE { + r = int(o.param) + } + if r == obj.REG_NONE { + c.ctxt.Diag("invalid stp destination: %v\n", p) + } + + v := int32(c.regoff(&p.To)) + if v < 0 || v > 4095 { + c.ctxt.Diag("offset out of range%v\n", p) + } + if o.scond == C_XPOST { + o2 |= 1 << 23 + } else if o.scond == C_XPRE { + o2 |= 3 << 23 + } else { + o2 |= 2 << 23 + } + + o1 = c.oaddi(p, int32(c.opirr(p, AADD)), v, r, REGTMP) + o2 |= uint32(int64(2<<30|5<<27) | (p.From.Offset&31)<<10 | int64(uint32(REGTMP&31)<<5) | int64(p.From.Reg&31)) + + case 77: + // mov $L, Rtmp (from constant pool) + // add Rtmp, R, Rtmp + // stp (R1, R2), (Rtmp) + r := int(p.To.Reg) + if r == obj.REG_NONE { + r = int(o.param) + } + if r == obj.REG_NONE { + c.ctxt.Diag("invalid stp destination: %v\n", p) + } + + if o.scond == C_XPOST { + o3 |= 1 << 23 + } else if o.scond == C_XPRE { + o3 |= 3 << 23 + } else { + o3 |= 2 << 23 + } + o1 = c.omovlit(AMOVD, p, &p.To, REGTMP) + o2 = c.opxrrr(p, AADD) + o2 |= REGTMP & 31 << 16 + o2 |= uint32(r&31) << 5 + o2 |= uint32(REGTMP & 31) + o3 |= uint32(int64(2<<30|5<<27) | 
(p.From.Offset&31)<<10 | int64(uint32(REGTMP&31)<<5) | int64(p.From.Reg&31)) + + case 78: /* vmov R, V.[index] */ + rf := int(p.From.Reg) + rt := int(p.To.Reg) + imm5 := 0 + o1 = 1<<30 | 7<<25 | 7<<10 + switch (p.To.Reg >> 5) & 15 { + case ARNG_B: + imm5 |= 1 + imm5 |= int(p.From.Index) << 1 + case ARNG_H: + imm5 |= 2 + imm5 |= int(p.From.Index) << 2 + case ARNG_S: + imm5 |= 4 + imm5 |= int(p.From.Index) << 3 + case ARNG_D: + imm5 |= 8 + imm5 |= int(p.From.Index) << 4 + default: + c.ctxt.Diag("invalid arrangement on op R, V.[index]: %v\n", p) + } + o1 |= (uint32(imm5&0x1f) << 16) | (uint32(rf&31) << 5) | uint32(rt&31) + + case 79: /* vdup Vn.[index], Vd. */ + rf := int(p.From.Reg) + rt := int(p.To.Reg) + o1 = 7<<25 | 1<<10 + var imm5, Q uint32 + switch (p.To.Reg >> 5) & 15 { + case ARNG_16B: + Q = 1 + imm5 = 1 + imm5 |= uint32(p.From.Index) << 1 + case ARNG_2D: + Q = 1 + imm5 = 8 + imm5 |= uint32(p.From.Index) << 4 + case ARNG_2S: + Q = 0 + imm5 = 4 + imm5 |= uint32(p.From.Index) << 3 + case ARNG_4H: + Q = 0 + imm5 = 2 + imm5 |= uint32(p.From.Index) << 2 + case ARNG_4S: + Q = 1 + imm5 = 4 + imm5 |= uint32(p.From.Index) << 3 + case ARNG_8B: + Q = 0 + imm5 = 1 + imm5 |= uint32(p.From.Index) << 1 + case ARNG_8H: + Q = 1 + imm5 = 2 + imm5 |= uint32(p.From.Index) << 2 + default: + c.ctxt.Diag("invalid arrangement on VDUP Vn.[index], Vd.: %v\n", p) + } + o1 |= (uint32(Q&1) << 30) | (uint32(imm5&0x1f) << 16) + o1 |= (uint32(rf&31) << 5) | uint32(rt&31) + + case 80: /* vmov V.[index], Vn */ + rf := int(p.From.Reg) + rt := int(p.To.Reg) + imm5 := 0 + switch p.As { + case AVMOV: + o1 = 1<<30 | 15<<25 | 1<<10 + switch (p.From.Reg >> 5) & 15 { + case ARNG_B: + imm5 |= 1 + imm5 |= int(p.From.Index) << 1 + case ARNG_H: + imm5 |= 2 + imm5 |= int(p.From.Index) << 2 + case ARNG_S: + imm5 |= 4 + imm5 |= int(p.From.Index) << 3 + case ARNG_D: + imm5 |= 8 + imm5 |= int(p.From.Index) << 4 + default: + c.ctxt.Diag("invalid arrangement on op V.[index], Vn: %v\n", p) + } + default: + 
c.ctxt.Diag("unsupported op %v", p.As) + } + o1 |= (uint32(imm5&0x1f) << 16) | (uint32(rf&31) << 5) | uint32(rt&31) + + case 81: /* vld1 (Rn), [Vt1., Vt2., ...] */ + r := int(p.From.Reg) + o1 = 3<<26 | 1<<22 + if o.scond == C_XPOST { + o1 |= 1 << 23 + if p.From.Index == 0 { + // immediate offset variant + o1 |= 0x1f << 16 + } else { + // register offset variant + o1 |= uint32(p.From.Index&31) << 16 + } + } + o1 |= uint32(p.To.Offset) + o1 |= uint32(r&31) << 5 + + case 82: /* vmov Rn, Vd. */ + rf := int(p.From.Reg) + rt := int(p.To.Reg) + o1 = 7<<25 | 3<<10 + var imm5, Q uint32 + switch (p.To.Reg >> 5) & 15 { + case ARNG_16B: + Q = 1 + imm5 = 1 + case ARNG_2D: + Q = 1 + imm5 = 8 + case ARNG_2S: + Q = 0 + imm5 = 4 + case ARNG_4H: + Q = 0 + imm5 = 2 + case ARNG_4S: + Q = 1 + imm5 = 4 + case ARNG_8B: + Q = 0 + imm5 = 1 + case ARNG_8H: + Q = 1 + imm5 = 2 + default: + c.ctxt.Diag("invalid arrangement on VMOV Rn, Vd.: %v\n", p) + } + o1 |= (uint32(Q&1) << 30) | (uint32(imm5&0x1f) << 16) + o1 |= (uint32(rf&31) << 5) | uint32(rt&31) + + case 83: /* vmov Vn., Vd. 
*/ + af := int((p.From.Reg >> 5) & 15) + at := int((p.To.Reg >> 5) & 15) + if af != at { + c.ctxt.Diag("invalid arrangement: %v\n", p) + } + o1 = c.oprrr(p, p.As) + rf := int((p.From.Reg) & 31) + rt := int((p.To.Reg) & 31) + + Q := 0 + size := 0 + switch af { + case ARNG_8B: + Q = 0 + size = 0 + case ARNG_16B: + Q = 1 + size = 0 + case ARNG_4H: + Q = 0 + size = 1 + case ARNG_8H: + Q = 1 + size = 1 + default: + c.ctxt.Diag("invalid arrangement: %v\n", p) + } + + if (p.As == AVMOV) && (af != ARNG_16B && af != ARNG_8B) { + c.ctxt.Diag("invalid arrangement on op %v", p.As) + } + + if p.As == AVMOV { + o1 |= uint32(rf&31) << 16 + } + + o1 |= (uint32(Q&1) << 30) | (uint32(size&3) << 22) | (uint32(rf&31) << 5) | uint32(rt&31) + + case 84: /* vst1 [Vt1., Vt2., ...], (Rn) */ + r := int(p.To.Reg) + o1 = 3 << 26 + if o.scond == C_XPOST { + o1 |= 1 << 23 + if p.To.Index == 0 { + // immediate offset variant + o1 |= 0x1f << 16 + } else { + // register offset variant + o1 |= uint32(p.To.Index&31) << 16 + } + } + o1 |= uint32(p.From.Offset) + o1 |= uint32(r&31) << 5 + + case 85: /* vaddv/vuaddlv Vn., Vd*/ + af := int((p.From.Reg >> 5) & 15) + o1 = c.oprrr(p, p.As) + rf := int((p.From.Reg) & 31) + rt := int((p.To.Reg) & 31) + Q := 0 + size := 0 + switch af { + case ARNG_8B: + Q = 0 + size = 0 + case ARNG_16B: + Q = 1 + size = 0 + case ARNG_4H: + Q = 0 + size = 1 + case ARNG_8H: + Q = 1 + size = 1 + case ARNG_4S: + Q = 1 + size = 2 + default: + c.ctxt.Diag("invalid arrangement: %v\n", p) + } + o1 |= (uint32(Q&1) << 30) | (uint32(size&3) << 22) | (uint32(rf&31) << 5) | uint32(rt&31) + + case 86: /* vmovi $imm8, Vd.*/ + at := int((p.To.Reg >> 5) & 15) + r := int(p.From.Offset) + if r > 255 || r < 0 { + c.ctxt.Diag("immediate constant out of range: %v\n", p) + } + rt := int((p.To.Reg) & 31) + Q := 0 + switch at { + case ARNG_8B: + Q = 0 + case ARNG_16B: + Q = 1 + default: + c.ctxt.Diag("invalid arrangement: %v\n", p) + } + o1 = 0xf<<24 | 0xe<<12 | 1<<10 + o1 |= (uint32(Q&1) << 30) | 
(uint32((r>>5)&7) << 16) | (uint32(r&0x1f) << 5) | uint32(rt&31) + + case 87: /* stp (r,r), addr(SB) -> adrp + add + stp */ + o1 = ADR(1, 0, REGTMP) + o2 = c.opirr(p, AADD) | REGTMP&31<<5 | REGTMP&31 + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 8 + rel.Sym = p.To.Sym + rel.Add = p.To.Offset + rel.Type = objabi.R_ADDRARM64 + o3 |= 2<<30 | 5<<27 | 2<<23 | uint32(p.From.Offset&31)<<10 | (REGTMP&31)<<5 | uint32(p.From.Reg&31) + + case 88: /* ldp addr(SB), (r,r) -> adrp + add + ldp */ + o1 = ADR(1, 0, REGTMP) + o2 = c.opirr(p, AADD) | REGTMP&31<<5 | REGTMP&31 + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 8 + rel.Sym = p.From.Sym + rel.Add = p.From.Offset + rel.Type = objabi.R_ADDRARM64 + o3 |= 2<<30 | 5<<27 | 2<<23 | 1<<22 | uint32(p.To.Offset&31)<<10 | (REGTMP&31)<<5 | uint32(p.To.Reg&31) + + case 89: /* vadd/vsub Vm, Vn, Vd */ + switch p.As { + case AVADD: + o1 = 5<<28 | 7<<25 | 7<<21 | 1<<15 | 1<<10 + + case AVSUB: + o1 = 7<<28 | 7<<25 | 7<<21 | 1<<15 | 1<<10 + + default: + c.ctxt.Diag("bad opcode: %v\n", p) + break + } + + rf := int(p.From.Reg) + rt := int(p.To.Reg) + r := int(p.Reg) + if r == 0 { + r = rt + } + o1 |= (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31) + // This is supposed to be something that stops execution. // It's not supposed to be reached, ever, but if it is, we'd // like to be able to tell how we got there. 
Assemble as @@ -3098,6 +3732,7 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) { * basic Rm op Rn -> Rd (using shifted register with 0) * also op Rn -> Rt * also Rm*Rn op Ra -> Rd + * also Vm op Vn -> Vd */ func (c *ctxt7) oprrr(p *obj.Prog, a obj.As) uint32 { switch a { @@ -3611,6 +4246,36 @@ func (c *ctxt7) oprrr(p *obj.Prog, a obj.As) uint32 { case AFCVTHD: return FPOP1S(0, 0, 3, 5) + + case AVADD: + return 7<<25 | 1<<21 | 1<<15 | 1<<10 + + case AVADDP: + return 7<<25 | 1<<21 | 1<<15 | 15<<10 + + case AVAND: + return 7<<25 | 1<<21 | 7<<10 + + case AVCMEQ: + return 1<<29 | 0x71<<21 | 0x23<<10 + + case AVEOR: + return 1<<29 | 0x71<<21 | 7<<10 + + case AVORR: + return 7<<25 | 5<<21 | 7<<10 + + case AVREV32: + return 11<<26 | 2<<24 | 1<<21 | 1<<11 + + case AVMOV: + return 7<<25 | 5<<21 | 7<<10 + + case AVADDV: + return 7<<25 | 3<<20 | 3<<15 | 7<<11 + + case AVUADDLV: + return 1<<29 | 7<<25 | 3<<20 | 7<<11 } c.ctxt.Diag("%v: bad rrr %d %v", p, a, a) @@ -4215,6 +4880,9 @@ func (c *ctxt7) opldr12(p *obj.Prog, a obj.As) uint32 { case AFMOVD: return LDSTR12U(3, 1, 1) + + case AVMOVS: + return LDSTR12U(2, 1, 1) } c.ctxt.Diag("bad opldr12 %v\n%v", a, p) @@ -4298,6 +4966,9 @@ func (c *ctxt7) opldrpp(p *obj.Prog, a obj.As) uint32 { case AMOVBU: return 0<<30 | 7<<27 | 0<<26 | 0<<24 | 1<<22 + + case AVMOVS: + return 2<<30 | 7<<27 | 1<<26 | 0<<24 | 1<<22 } c.ctxt.Diag("bad opldr %v\n%v", a, p) @@ -4517,7 +5188,7 @@ func movesize(a obj.As) int { case AMOVD: return 3 - case AMOVW, AMOVWU: + case AMOVW, AMOVWU, AVMOVS: return 2 case AMOVH, AMOVHU: diff --git a/src/cmd/internal/obj/arm64/doc.go b/src/cmd/internal/obj/arm64/doc.go new file mode 100644 index 00000000000..f75f49fb9c4 --- /dev/null +++ b/src/cmd/internal/obj/arm64/doc.go @@ -0,0 +1,157 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package arm64 + +/* + +Go Assembly for ARM64 Reference Manual + +1. Alphabetical list of basic instructions + // TODO + +2. Alphabetical list of float-point instructions + // TODO + +3. Alphabetical list of SIMD instructions + VADD: Add (scalar) + VADD , , + Add corresponding low 64-bit elements in and , + place the result into low 64-bit element of . + + VADD: Add (vector). + VADD .T, ., . + Is an arrangement specifier and can have the following values: + 8B, 16B, H4, H8, S2, S4, D2 + + VADDP: Add Pairwise (vector) + VADDP ., ., . + Is an arrangement specifier and can have the following values: + B8, B16, H4, H8, S2, S4, D2 + + VADDV: Add across Vector. + VADDV ., Vd + Is an arrangement specifier and can have the following values: + 8B, 16B, H4, H8, S4 + + VAND: Bitwise AND (vector) + VAND ., ., . + Is an arrangement specifier and can have the following values: + B8, B16 + + VCMEQ: Compare bitwise Equal (vector) + VCMEQ ., ., . + Is an arrangement specifier and can have the following values: + B8, B16, H4, H8, S2, S4, D2 + + VDUP: Duplicate vector element to vector or scalar. + VDUP .[index], . + Is an arrangement specifier and can have the following values: + 8B, 16B, H4, H8, S2, S4, D2 + Is an element size specifier and can have the following values: + B, H, S, D + + VEOR: Bitwise exclusive OR (vector, register) + VEOR ., ., . + Is an arrangement specifier and can have the following values: + B8, B16 + + VLD1: Load multiple single-element structures + VLD1 (Rn), [., . ...] // no offset + VLD1.P imm(Rn), [., . ...] // immediate offset variant + VLD1.P (Rn)(Rm), [., . ...] // register offset variant + Is an arrangement specifier and can have the following values: + B8, B16, H4, H8, S2, S4, D1, D2 + + VMOV: move + VMOV .[index], Rd // Move vector element to general-purpose register. + Is a source width specifier and can have the following values: + B, H, S (Wd) + D (Xd) + + VMOV Rn, . // Duplicate general-purpose register to vector. 
+ Is an arrangement specifier and can have the following values: + B8, B16, H4, H8, S2, S4 (Wn) + D2 (Xn) + + VMOV ., . // Move vector. + Is an arrangement specifier and can have the following values: + B8, B16 + + VMOV Rn, .[index] // Move general-purpose register to a vector element. + Is a source width specifier and can have the following values: + B, H, S (Wd) + D (Xd) + + VMOV .[index], Vn // Move vector element to scalar. + Is an element size specifier and can have the following values: + B, H, S, D + + VMOVI: Move Immediate (vector). + VMOVI $imm8, . + is an arrangement specifier and can have the following values: + 8B, 16B + + VMOVS: Load SIMD&FP Register (immediate offset). ARMv8: LDR (immediate, SIMD&FP) + Store SIMD&FP register (immediate offset). ARMv8: STR (immediate, SIMD&FP) + VMOVS (Rn), Vn + VMOVS.W imm(Rn), Vn + VMOVS.P imm(Rn), Vn + VMOVS Vn, (Rn) + VMOVS.W Vn, imm(Rn) + VMOVS.P Vn, imm(Rn) + + VORR: Bitwise inclusive OR (vector, register) + VORR ., ., . + Is an arrangement specifier and can have the following values: + B8, B16 + + VREV32: Reverse elements in 32-bit words (vector). + REV32 ., . + Is an arrangement specifier and can have the following values: + B8, B16, H4, H8 + + VST1: Store multiple single-element structures + VST1 [., . ...], (Rn) // no offset + VST1.P [., . ...], imm(Rn) // immediate offset variant + VST1.P [., . ...], (Rn)(Rm) // register offset variant + Is an arrangement specifier and can have the following values: + B8, B16, H4, H8, S2, S4, D1, D2 + + VSUB: Sub (scalar) + VSUB , , + Subtract low 64-bit element in from the correponding element in , + place the result into low 64-bit element of . + + VUADDLV: Unsigned sum Long across Vector. + VUADDLV ., Vd + Is an arrangement specifier and can have the following values: + 8B, 16B, H4, H8, S4 + +4. Alphabetical list of cryptographic extension instructions + + SHA1C, SHA1M, SHA1P: SHA1 hash update. 
+ SHA1C .S4, Vn, Vd + SHA1M .S4, Vn, Vd + SHA1P .S4, Vn, Vd + + SHA1H: SHA1 fixed rotate. + SHA1H Vn, Vd + + SHA1SU0: SHA1 schedule update 0. + SHA256SU1: SHA256 schedule update 1. + SHA1SU0 .S4, .S4, .S4 + SHA256SU1 .S4, .S4, .S4 + + SHA1SU1: SHA1 schedule update 1. + SHA256SU0: SHA256 schedule update 0. + SHA1SU1 .S4, .S4 + SHA256SU0 .S4, .S4 + + SHA256H, SHA256H2: SHA256 hash update. + SHA256H .S4, Vn, Vd + SHA256H2 .S4, Vn, Vd + + +*/ diff --git a/src/cmd/internal/obj/arm64/list7.go b/src/cmd/internal/obj/arm64/list7.go index 65be486cee6..9a9f4b45b77 100644 --- a/src/cmd/internal/obj/arm64/list7.go +++ b/src/cmd/internal/obj/arm64/list7.go @@ -57,6 +57,38 @@ var strcond = [16]string{ func init() { obj.RegisterRegister(obj.RBaseARM64, REG_SPECIAL+1024, rconv) obj.RegisterOpcode(obj.ABaseARM64, Anames) + obj.RegisterRegisterList(obj.RegListARM64Lo, obj.RegListARM64Hi, rlconv) +} + +func arrange(a int) string { + switch a { + case ARNG_8B: + return "B8" + case ARNG_16B: + return "B16" + case ARNG_4H: + return "H4" + case ARNG_8H: + return "H8" + case ARNG_2S: + return "S2" + case ARNG_4S: + return "S4" + case ARNG_1D: + return "D1" + case ARNG_2D: + return "D2" + case ARNG_B: + return "B" + case ARNG_H: + return "H" + case ARNG_S: + return "S" + case ARNG_D: + return "D" + default: + return "" + } } func rconv(r int) string { @@ -102,6 +134,58 @@ func rconv(r int) string { return "DAIFSet" case r == REG_DAIFClr: return "DAIFClr" + case REG_UXTB <= r && r < REG_UXTH: + if (r>>5)&7 != 0 { + return fmt.Sprintf("R%d.UXTB<<%d", r&31, (r>>5)&7) + } else { + return fmt.Sprintf("R%d.UXTB", r&31) + } + case REG_UXTH <= r && r < REG_UXTW: + if (r>>5)&7 != 0 { + return fmt.Sprintf("R%d.UXTH<<%d", r&31, (r>>5)&7) + } else { + return fmt.Sprintf("R%d.UXTH", r&31) + } + case REG_UXTW <= r && r < REG_UXTX: + if (r>>5)&7 != 0 { + return fmt.Sprintf("R%d.UXTW<<%d", r&31, (r>>5)&7) + } else { + return fmt.Sprintf("R%d.UXTW", r&31) + } + case REG_UXTX <= r && r < REG_SXTB: + if 
(r>>5)&7 != 0 { + return fmt.Sprintf("R%d.UXTX<<%d", r&31, (r>>5)&7) + } else { + return fmt.Sprintf("R%d.UXTX", r&31) + } + case REG_SXTB <= r && r < REG_SXTH: + if (r>>5)&7 != 0 { + return fmt.Sprintf("R%d.SXTB<<%d", r&31, (r>>5)&7) + } else { + return fmt.Sprintf("R%d.SXTB", r&31) + } + case REG_SXTH <= r && r < REG_SXTW: + if (r>>5)&7 != 0 { + return fmt.Sprintf("R%d.SXTH<<%d", r&31, (r>>5)&7) + } else { + return fmt.Sprintf("R%d.SXTH", r&31) + } + case REG_SXTW <= r && r < REG_SXTX: + if (r>>5)&7 != 0 { + return fmt.Sprintf("R%d.SXTW<<%d", r&31, (r>>5)&7) + } else { + return fmt.Sprintf("R%d.SXTW", r&31) + } + case REG_SXTX <= r && r < REG_SPECIAL: + if (r>>5)&7 != 0 { + return fmt.Sprintf("R%d.SXTX<<%d", r&31, (r>>5)&7) + } else { + return fmt.Sprintf("R%d.SXTX", r&31) + } + case REG_ARNG <= r && r < REG_ELEM: + return fmt.Sprintf("V%d.%s", r&31, arrange((r>>5)&15)) + case REG_ELEM <= r && r < REG_ELEM_END: + return fmt.Sprintf("V%d.%s", r&31, arrange((r>>5)&15)) } return fmt.Sprintf("badreg(%d)", r) } @@ -112,3 +196,60 @@ func DRconv(a int) string { } return "C_??" } + +func rlconv(list int64) string { + str := "" + + // ARM64 register list follows ARM64 instruction decode schema + // | 31 | 30 | ... | 15 - 12 | 11 - 10 | ... | + // +----+----+-----+---------+---------+-----+ + // | | Q | ... | opcode | size | ... 
| + + firstReg := int(list & 31) + opcode := (list >> 12) & 15 + var regCnt int + var t string + switch opcode { + case 0x7: + regCnt = 1 + case 0xa: + regCnt = 2 + case 0x6: + regCnt = 3 + case 0x2: + regCnt = 4 + default: + regCnt = -1 + } + // Q:size + arng := ((list>>30)&1)<<2 | (list>>10)&3 + switch arng { + case 0: + t = "B8" + case 4: + t = "B16" + case 1: + t = "H4" + case 5: + t = "H8" + case 2: + t = "S2" + case 6: + t = "S4" + case 3: + t = "D1" + case 7: + t = "D2" + } + for i := 0; i < regCnt; i++ { + if str == "" { + str += "[" + } else { + str += "," + } + str += fmt.Sprintf("V%d.", (firstReg+i)&31) + str += t + } + str += "]" + return str +} diff --git a/src/cmd/internal/obj/arm64/obj7.go b/src/cmd/internal/obj/arm64/obj7.go index 7aa0c8df223..709223223f3 100644 --- a/src/cmd/internal/obj/arm64/obj7.go +++ b/src/cmd/internal/obj/arm64/obj7.go @@ -382,7 +382,7 @@ func (c *ctxt7) rewriteToUseGot(p *obj.Prog) { p.From.Offset = 0 } } - if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN { + if p.GetFrom3() != nil && p.GetFrom3().Name == obj.NAME_EXTERN { c.ctxt.Diag("don't know how to handle %v with -dynlink", p) } var source *obj.Addr @@ -515,7 +515,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q = p } - var q2 *obj.Prog var retjmp *obj.LSym for p := c.cursym.Func.Text; p != nil; p = p.Link { o := p.As @@ -618,22 +617,25 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { if c.cursym.Func.Text.From.Sym.Wrapper() { // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame // - // MOV g_panic(g), R1 - // CMP ZR, R1 - // BEQ end - // MOV panic_argp(R1), R2 - // ADD $(autosize+8), RSP, R3 - // CMP R2, R3 - // BNE end - // ADD $8, RSP, R4 - // MOVD R4, panic_argp(R1) + // MOV g_panic(g), R1 + // CBNZ checkargp // end: // NOP + // ... function body ... 
+ // checkargp: + // MOV panic_argp(R1), R2 + // ADD $(autosize+8), RSP, R3 + // CMP R2, R3 + // BNE end + // ADD $8, RSP, R4 + // MOVD R4, panic_argp(R1) + // B end // // The NOP is needed to give the jumps somewhere to land. - // It is a liblink NOP, not a ARM64 NOP: it encodes to 0 instruction bytes. + // It is a liblink NOP, not an ARM64 NOP: it encodes to 0 instruction bytes. q = q1 + // MOV g_panic(g), R1 q = obj.Appendp(q, c.newprog) q.As = AMOVD q.From.Type = obj.TYPE_MEM @@ -642,26 +644,36 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q.To.Type = obj.TYPE_REG q.To.Reg = REG_R1 - q = obj.Appendp(q, c.newprog) - q.As = ACMP - q.From.Type = obj.TYPE_REG - q.From.Reg = REGZERO - q.Reg = REG_R1 + // CBNZ R1, checkargp + cbnz := obj.Appendp(q, c.newprog) + cbnz.As = ACBNZ + cbnz.From.Type = obj.TYPE_REG + cbnz.From.Reg = REG_R1 + cbnz.To.Type = obj.TYPE_BRANCH - q = obj.Appendp(q, c.newprog) - q.As = ABEQ - q.To.Type = obj.TYPE_BRANCH - q1 = q + // Empty branch target at the top of the function body + end := obj.Appendp(cbnz, c.newprog) + end.As = obj.ANOP - q = obj.Appendp(q, c.newprog) - q.As = AMOVD - q.From.Type = obj.TYPE_MEM - q.From.Reg = REG_R1 - q.From.Offset = 0 // Panic.argp - q.To.Type = obj.TYPE_REG - q.To.Reg = REG_R2 + // find the end of the function + var last *obj.Prog + for last = end; last.Link != nil; last = last.Link { + } - q = obj.Appendp(q, c.newprog) + // MOV panic_argp(R1), R2 + mov := obj.Appendp(last, c.newprog) + mov.As = AMOVD + mov.From.Type = obj.TYPE_MEM + mov.From.Reg = REG_R1 + mov.From.Offset = 0 // Panic.argp + mov.To.Type = obj.TYPE_REG + mov.To.Reg = REG_R2 + + // CBNZ branches to the MOV above + cbnz.Pcond = mov + + // ADD $(autosize+8), SP, R3 + q = obj.Appendp(mov, c.newprog) q.As = AADD q.From.Type = obj.TYPE_CONST q.From.Offset = int64(c.autosize) + 8 @@ -669,17 +681,20 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q.To.Type = obj.TYPE_REG q.To.Reg = 
REG_R3 + // CMP R2, R3 q = obj.Appendp(q, c.newprog) q.As = ACMP q.From.Type = obj.TYPE_REG q.From.Reg = REG_R2 q.Reg = REG_R3 + // BNE end q = obj.Appendp(q, c.newprog) q.As = ABNE q.To.Type = obj.TYPE_BRANCH - q2 = q + q.Pcond = end + // ADD $8, SP, R4 q = obj.Appendp(q, c.newprog) q.As = AADD q.From.Type = obj.TYPE_CONST @@ -688,6 +703,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q.To.Type = obj.TYPE_REG q.To.Reg = REG_R4 + // MOV R4, panic_argp(R1) q = obj.Appendp(q, c.newprog) q.As = AMOVD q.From.Type = obj.TYPE_REG @@ -696,11 +712,11 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q.To.Reg = REG_R1 q.To.Offset = 0 // Panic.argp + // B end q = obj.Appendp(q, c.newprog) - - q.As = obj.ANOP - q1.Pcond = q - q2.Pcond = q + q.As = AB + q.To.Type = obj.TYPE_BRANCH + q.Pcond = end } case obj.ARET: @@ -797,7 +813,7 @@ var unaryDst = map[obj.As]bool{ ADWORD: true, ABL: true, AB: true, - ASVC: true, + ACLREX: true, } var Linkarm64 = obj.LinkArch{ diff --git a/src/cmd/internal/obj/data.go b/src/cmd/internal/obj/data.go index 23d1809e0c1..ce0dd09a4f0 100644 --- a/src/cmd/internal/obj/data.go +++ b/src/cmd/internal/obj/data.go @@ -117,9 +117,7 @@ func (s *LSym) WriteInt(ctxt *Link, off int64, siz int, i int64) { } } -// WriteAddr writes an address of size siz into s at offset off. -// rsym and roff specify the relocation for the address. -func (s *LSym) WriteAddr(ctxt *Link, off int64, siz int, rsym *LSym, roff int64) { +func (s *LSym) writeAddr(ctxt *Link, off int64, siz int, rsym *LSym, roff int64, rtype objabi.RelocType) { // Allow 4-byte addresses for DWARF. 
if siz != ctxt.Arch.PtrSize && siz != 4 { ctxt.Diag("WriteAddr: bad address size %d in %s", siz, s.Name) @@ -132,10 +130,24 @@ func (s *LSym) WriteAddr(ctxt *Link, off int64, siz int, rsym *LSym, roff int64) } r.Siz = uint8(siz) r.Sym = rsym - r.Type = objabi.R_ADDR + r.Type = rtype r.Add = roff } +// WriteAddr writes an address of size siz into s at offset off. +// rsym and roff specify the relocation for the address. +func (s *LSym) WriteAddr(ctxt *Link, off int64, siz int, rsym *LSym, roff int64) { + s.writeAddr(ctxt, off, siz, rsym, roff, objabi.R_ADDR) +} + +// WriteCURelativeAddr writes a pointer-sized address into s at offset off. +// rsym and roff specify the relocation for the address which will be +// resolved by the linker to an offset from the DW_AT_low_pc attribute of +// the DWARF Compile Unit of rsym. +func (s *LSym) WriteCURelativeAddr(ctxt *Link, off int64, rsym *LSym, roff int64) { + s.writeAddr(ctxt, off, ctxt.Arch.PtrSize, rsym, roff, objabi.R_ADDRCUOFF) +} + // WriteOff writes a 4 byte offset to rsym+roff into s at offset off. // After linking the 4 bytes stored at s+off will be // rsym+roff-(start of section that s is in). diff --git a/src/cmd/internal/obj/go.go b/src/cmd/internal/obj/go.go index f0b2c46e4f1..dbe9b406dda 100644 --- a/src/cmd/internal/obj/go.go +++ b/src/cmd/internal/obj/go.go @@ -10,7 +10,7 @@ func Nopout(p *Prog) { p.As = ANOP p.Scond = 0 p.From = Addr{} - p.From3 = nil + p.RestArgs = nil p.Reg = 0 p.To = Addr{} } diff --git a/src/cmd/internal/obj/inl.go b/src/cmd/internal/obj/inl.go index 116921995a9..671239444c7 100644 --- a/src/cmd/internal/obj/inl.go +++ b/src/cmd/internal/obj/inl.go @@ -6,7 +6,7 @@ package obj import "cmd/internal/src" -// InlTree s a collection of inlined calls. The Parent field of an +// InlTree is a collection of inlined calls. The Parent field of an // InlinedCall is the index of another InlinedCall in InlTree. 
// // The compiler maintains a global inlining tree and adds a node to it @@ -23,6 +23,9 @@ import "cmd/internal/src" // 8 h() // 9 h() // 10 } +// 11 func h() { +// 12 println("H") +// 13 } // // Assuming the global tree starts empty, inlining will produce the // following tree: @@ -61,12 +64,24 @@ func (tree *InlTree) Add(parent int, pos src.XPos, func_ *LSym) int { return r } +func (tree *InlTree) Parent(inlIndex int) int { + return tree.nodes[inlIndex].Parent +} + +func (tree *InlTree) InlinedFunction(inlIndex int) *LSym { + return tree.nodes[inlIndex].Func +} + +func (tree *InlTree) CallPos(inlIndex int) src.XPos { + return tree.nodes[inlIndex].Pos +} + // OutermostPos returns the outermost position corresponding to xpos, // which is where xpos was ultimately inlined to. In the example for // InlTree, main() contains inlined AST nodes from h(), but the // outermost position for those nodes is line 2. func (ctxt *Link) OutermostPos(xpos src.XPos) src.Pos { - pos := ctxt.PosTable.Pos(xpos) + pos := ctxt.InnermostPos(xpos) outerxpos := xpos for ix := pos.Base().InliningIndex(); ix >= 0; { @@ -77,6 +92,17 @@ func (ctxt *Link) OutermostPos(xpos src.XPos) src.Pos { return ctxt.PosTable.Pos(outerxpos) } +// InnermostPos returns the innermost position corresponding to xpos, +// that is, the code that is inlined and that inlines nothing else. +// In the example for InlTree above, the code for println within h +// would have an innermost position with line number 12, whether +// h was not inlined, inlined into g, g-then-f, or g-then-f-then-main. +// This corresponds to what someone debugging main, f, g, or h might +// expect to see while single-stepping. 
+func (ctxt *Link) InnermostPos(xpos src.XPos) src.Pos { + return ctxt.PosTable.Pos(xpos) +} + func dumpInlTree(ctxt *Link, tree InlTree) { for i, call := range tree.nodes { pos := ctxt.PosTable.Pos(call.Pos) diff --git a/src/cmd/internal/obj/line_test.go b/src/cmd/internal/obj/line_test.go index 6b21abecd25..f159a65e2b9 100644 --- a/src/cmd/internal/obj/line_test.go +++ b/src/cmd/internal/obj/line_test.go @@ -17,7 +17,7 @@ func TestLinkgetlineFromPos(t *testing.T) { afile := src.NewFileBase("a.go", "a.go") bfile := src.NewFileBase("b.go", "/foo/bar/b.go") - lfile := src.NewLinePragmaBase(src.MakePos(afile, 7, 0), "linedir", 100) + lfile := src.NewLinePragmaBase(src.MakePos(afile, 7, 0), "linedir", "linedir", 100) var tests = []struct { pos src.Pos diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index d49bc8c5644..27c74f6a776 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -138,10 +138,13 @@ import ( // offset = second register // // [reg, reg, reg-reg] -// Register list for ARM. +// Register list for ARM and ARM64. // Encoding: // type = TYPE_REGLIST +// On ARM: // offset = bit mask of registers in list; R0 is low bit. +// On ARM64: +// offset = register count (Q:size) | arrangement (opcode) | first register // // reg, reg // Register pair for ARM. @@ -155,6 +158,27 @@ import ( // index = second register // scale = 1 // +// reg.[US]XT[BHWX] +// Register extension for ARM64 +// Encoding: +// type = TYPE_REG +// reg = REG_[US]XT[BHWX] + register + shift amount +// offset = ((reg&31) << 16) | (exttype << 13) | (amount<<10) +// +// reg. 
+// Register arrangement for ARM64 SIMD register +// e.g.: V1.S4, V2.S2, V7.D2, V2.H4, V6.B16 +// Encoding: +// type = TYPE_REG +// reg = REG_ARNG + register + arrangement +// +// reg.[index] +// Register element for ARM64 +// Encoding: +// type = TYPE_REG +// reg = REG_ELEM + register + arrangement +// index = element index + type Addr struct { Reg int16 Index int16 @@ -184,6 +208,9 @@ const ( // A reference to name@GOT(SB) is a reference to the entry in the global offset // table for 'name'. NAME_GOTREF + // Indicates auto that was optimized away, but whose type + // we want to preserve in the DWARF debug info. + NAME_DELETED_AUTO ) type AddrType uint8 @@ -209,14 +236,19 @@ const ( // // The general instruction form is: // -// As.Scond From, Reg, From3, To, RegTo2 +// (1) As.Scond From [, ...RestArgs], To +// (2) As.Scond From, Reg [, ...RestArgs], To, RegTo2 // // where As is an opcode and the others are arguments: -// From, Reg, From3 are sources, and To, RegTo2 are destinations. +// From, Reg are sources, and To, RegTo2 are destinations. +// RestArgs can hold additional sources and destinations. // Usually, not all arguments are present. // For example, MOVL R1, R2 encodes using only As=MOVL, From=R1, To=R2. // The Scond field holds additional condition bits for systems (like arm) // that have generalized conditional execution. +// (2) form is present for compatibility with older code, +// to avoid too much changes in a single swing. +// (1) scheme is enough to express any kind of operand combination. // // Jump instructions use the Pcond field to point to the target instruction, // which must be in the same linked list as the jump instruction. @@ -232,35 +264,62 @@ const ( // The other fields not yet mentioned are for use by the back ends and should // be left zeroed by creators of Prog lists. 
type Prog struct { - Ctxt *Link // linker context - Link *Prog // next Prog in linked list - From Addr // first source operand - From3 *Addr // third source operand (second is Reg below) - To Addr // destination operand (second is RegTo2 below) - Pcond *Prog // target of conditional jump - Forwd *Prog // for x86 back end - Rel *Prog // for x86, arm back ends - Pc int64 // for back ends or assembler: virtual or actual program counter, depending on phase - Pos src.XPos // source position of this instruction - Spadj int32 // effect of instruction on stack pointer (increment or decrement amount) - As As // assembler opcode - Reg int16 // 2nd source operand - RegTo2 int16 // 2nd destination operand - Mark uint16 // bitmask of arch-specific items - Optab uint16 // arch-specific opcode index - Scond uint8 // condition bits for conditional instruction (e.g., on ARM) - Back uint8 // for x86 back end: backwards branch state - Ft uint8 // for x86 back end: type index of Prog.From - Tt uint8 // for x86 back end: type index of Prog.To - Isize uint8 // for x86 back end: size of the instruction in bytes + Ctxt *Link // linker context + Link *Prog // next Prog in linked list + From Addr // first source operand + RestArgs []Addr // can pack any operands that not fit into {Prog.From, Prog.To} + To Addr // destination operand (second is RegTo2 below) + Pcond *Prog // target of conditional jump + Forwd *Prog // for x86 back end + Rel *Prog // for x86, arm back ends + Pc int64 // for back ends or assembler: virtual or actual program counter, depending on phase + Pos src.XPos // source position of this instruction + Spadj int32 // effect of instruction on stack pointer (increment or decrement amount) + As As // assembler opcode + Reg int16 // 2nd source operand + RegTo2 int16 // 2nd destination operand + Mark uint16 // bitmask of arch-specific items + Optab uint16 // arch-specific opcode index + Scond uint8 // condition bits for conditional instruction (e.g., on ARM) + Back uint8 // for 
x86 back end: backwards branch state + Ft uint8 // for x86 back end: type index of Prog.From + Tt uint8 // for x86 back end: type index of Prog.To + Isize uint8 // for x86 back end: size of the instruction in bytes } -// From3Type returns From3.Type, or TYPE_NONE when From3 is nil. +// From3Type returns p.GetFrom3().Type, or TYPE_NONE when +// p.GetFrom3() returns nil. +// +// Deprecated: for the same reasons as Prog.GetFrom3. func (p *Prog) From3Type() AddrType { - if p.From3 == nil { + if p.RestArgs == nil { return TYPE_NONE } - return p.From3.Type + return p.RestArgs[0].Type +} + +// GetFrom3 returns second source operand (the first is Prog.From). +// In combination with Prog.From and Prog.To it makes common 3 operand +// case easier to use. +// +// Should be used only when RestArgs is set with SetFrom3. +// +// Deprecated: better use RestArgs directly or define backend-specific getters. +// Introduced to simplify transition to []Addr. +// Usage of this is discouraged due to fragility and lack of guarantees. +func (p *Prog) GetFrom3() *Addr { + if p.RestArgs == nil { + return nil + } + return &p.RestArgs[0] +} + +// SetFrom3 assigns []Addr{a} to p.RestArgs. +// In pair with Prog.GetFrom3 it can help in emulation of Prog.From3. +// +// Deprecated: for the same reasons as Prog.GetFrom3. +func (p *Prog) SetFrom3(a Addr) { + p.RestArgs = []Addr{a} } // An As denotes an assembler opcode. @@ -295,7 +354,7 @@ const ( // Subspaces are aligned to a power of two so opcodes can be masked // with AMask and used as compact array indices. const ( - ABase386 = (1 + iota) << 10 + ABase386 = (1 + iota) << 11 ABaseARM ABaseAMD64 ABasePPC64 @@ -303,7 +362,7 @@ const ( ABaseMIPS ABaseS390X - AllowedOpCodes = 1 << 10 // The number of opcodes available for any given architecture. + AllowedOpCodes = 1 << 11 // The number of opcodes available for any given architecture. AMask = AllowedOpCodes - 1 // AND with this to use the opcode as an array index. 
) @@ -330,8 +389,10 @@ type FuncInfo struct { Autom []*Auto Pcln Pcln - dwarfSym *LSym + dwarfInfoSym *LSym + dwarfLocSym *LSym dwarfRangesSym *LSym + dwarfAbsFnSym *LSym GCArgs LSym GCLocals LSym @@ -370,6 +431,10 @@ const ( // definition. (When not compiling to support Go shared libraries, all symbols are // local in this sense unless there is a cgo_export_* directive). AttrLocal + + // For function symbols; indicates that the specified function was the + // target of an inline during compilation + AttrWasInlined ) func (a Attribute) DuplicateOK() bool { return a&AttrDuplicateOK != 0 } @@ -385,6 +450,7 @@ func (a Attribute) Wrapper() bool { return a&AttrWrapper != 0 } func (a Attribute) NeedCtxt() bool { return a&AttrNeedCtxt != 0 } func (a Attribute) NoFrame() bool { return a&AttrNoFrame != 0 } func (a Attribute) Static() bool { return a&AttrStatic != 0 } +func (a Attribute) WasInlined() bool { return a&AttrWasInlined != 0 } func (a *Attribute) Set(flag Attribute, value bool) { if value { @@ -411,6 +477,7 @@ var textAttrStrings = [...]struct { {bit: AttrNeedCtxt, s: "NEEDCTXT"}, {bit: AttrNoFrame, s: "NOFRAME"}, {bit: AttrStatic, s: "STATIC"}, + {bit: AttrWasInlined, s: ""}, } // TextAttrString formats a for printing in as part of a TEXT prog. @@ -476,26 +543,31 @@ type Pcdata struct { // Link holds the context for writing object code from a compiler // to be linker input or for reading that input into the linker. 
type Link struct { - Headtype objabi.HeadType - Arch *LinkArch - Debugasm bool - Debugvlog bool - Debugpcln string - Flag_shared bool - Flag_dynlink bool - Flag_optimize bool - Bso *bufio.Writer - Pathname string - hashmu sync.Mutex // protects hash - hash map[string]*LSym // name -> sym mapping - statichash map[string]*LSym // name -> sym mapping for static syms - PosTable src.PosTable - InlTree InlTree // global inlining tree used by gc/inl.go - Imports []string - DiagFunc func(string, ...interface{}) - DebugInfo func(fn *LSym, curfn interface{}) []dwarf.Scope // if non-nil, curfn is a *gc.Node - Errors int + Headtype objabi.HeadType + Arch *LinkArch + Debugasm bool + Debugvlog bool + Debugpcln string + Flag_shared bool + Flag_dynlink bool + Flag_optimize bool + Flag_locationlists bool + Bso *bufio.Writer + Pathname string + hashmu sync.Mutex // protects hash + hash map[string]*LSym // name -> sym mapping + statichash map[string]*LSym // name -> sym mapping for static syms + PosTable src.PosTable + InlTree InlTree // global inlining tree used by gc/inl.go + DwFixups *DwarfFixupTable + Imports []string + DiagFunc func(string, ...interface{}) + DiagFlush func() + DebugInfo func(fn *LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) // if non-nil, curfn is a *gc.Node + GenAbstractFunc func(fn *LSym) + Errors int + InParallel bool // parallel backend phase in effect Framepointer_enabled bool // state for writing objects @@ -533,9 +605,10 @@ func (ctxt *Link) FixedFrameSize() int64 { // LinkArch is the definition of a single architecture. type LinkArch struct { *sys.Arch - Init func(*Link) - Preprocess func(*Link, *LSym, ProgAlloc) - Assemble func(*Link, *LSym, ProgAlloc) - Progedit func(*Link, *Prog, ProgAlloc) - UnaryDst map[As]bool // Instruction takes one operand, a destination. 
+ Init func(*Link) + Preprocess func(*Link, *LSym, ProgAlloc) + Assemble func(*Link, *LSym, ProgAlloc) + Progedit func(*Link, *Prog, ProgAlloc) + UnaryDst map[As]bool // Instruction takes one operand, a destination. + DWARFRegisters map[int16]int16 } diff --git a/src/cmd/internal/obj/mips/asm0.go b/src/cmd/internal/obj/mips/asm0.go index 6257e5b83d2..2dcfa97bf70 100644 --- a/src/cmd/internal/obj/mips/asm0.go +++ b/src/cmd/internal/obj/mips/asm0.go @@ -893,6 +893,7 @@ func buildop(ctxt *obj.Link) { switch r { default: ctxt.Diag("unknown op in build: %v", r) + ctxt.DiagFlush() log.Fatalf("bad code") case AABSF: diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go index e309c5f7e78..2501bba6638 100644 --- a/src/cmd/internal/obj/objfile.go +++ b/src/cmd/internal/obj/objfile.go @@ -15,6 +15,7 @@ import ( "log" "path/filepath" "sort" + "sync" ) // objWriter writes Go object files. @@ -350,6 +351,8 @@ func (w *objWriter) writeSym(s *LSym) { w.writeInt(objabi.A_AUTO) } else if a.Name == NAME_PARAM { w.writeInt(objabi.A_PARAM) + } else if a.Name == NAME_DELETED_AUTO { + w.writeInt(objabi.A_DELETED_AUTO) } else { log.Fatalf("%s: invalid local variable type %d", s.Name, a.Name) } @@ -443,9 +446,6 @@ func (c dwCtxt) AddString(s dwarf.Sym, v string) { ls.WriteString(c.Link, ls.Size, len(v), v) ls.WriteInt(c.Link, ls.Size, 1, 0) } -func (c dwCtxt) SymValue(s dwarf.Sym) int64 { - return 0 -} func (c dwCtxt) AddAddress(s dwarf.Sym, data interface{}, value int64) { ls := s.(*LSym) size := c.PtrSize() @@ -456,43 +456,422 @@ func (c dwCtxt) AddAddress(s dwarf.Sym, data interface{}, value int64) { ls.WriteInt(c.Link, ls.Size, size, value) } } +func (c dwCtxt) AddCURelativeAddress(s dwarf.Sym, data interface{}, value int64) { + ls := s.(*LSym) + rsym := data.(*LSym) + ls.WriteCURelativeAddr(c.Link, ls.Size, rsym, value) +} func (c dwCtxt) AddSectionOffset(s dwarf.Sym, size int, t interface{}, ofs int64) { ls := s.(*LSym) rsym := t.(*LSym) ls.WriteAddr(c.Link, 
ls.Size, size, rsym, ofs) r := &ls.R[len(ls.R)-1] - r.Type = objabi.R_DWARFREF + r.Type = objabi.R_DWARFSECREF +} +func (c dwCtxt) AddFileRef(s dwarf.Sym, f interface{}) { + ls := s.(*LSym) + rsym := f.(*LSym) + ls.WriteAddr(c.Link, ls.Size, 4, rsym, 0) + r := &ls.R[len(ls.R)-1] + r.Type = objabi.R_DWARFFILEREF } -// dwarfSym returns the DWARF symbols for TEXT symbol. -func (ctxt *Link) dwarfSym(s *LSym) (dwarfInfoSym, dwarfRangesSym *LSym) { +func (c dwCtxt) CurrentOffset(s dwarf.Sym) int64 { + ls := s.(*LSym) + return ls.Size +} + +// Here "from" is a symbol corresponding to an inlined or concrete +// function, "to" is the symbol for the corresponding abstract +// function, and "dclIdx" is the index of the symbol of interest with +// respect to the Dcl slice of the original pre-optimization version +// of the inlined function. +func (c dwCtxt) RecordDclReference(from dwarf.Sym, to dwarf.Sym, dclIdx int, inlIndex int) { + ls := from.(*LSym) + tls := to.(*LSym) + ridx := len(ls.R) - 1 + c.Link.DwFixups.ReferenceChildDIE(ls, ridx, tls, dclIdx, inlIndex) +} + +func (c dwCtxt) RecordChildDieOffsets(s dwarf.Sym, vars []*dwarf.Var, offsets []int32) { + ls := s.(*LSym) + c.Link.DwFixups.RegisterChildDIEOffsets(ls, vars, offsets) +} + +func (c dwCtxt) Logf(format string, args ...interface{}) { + c.Link.Logf(format, args...) 
+} + +func (ctxt *Link) dwarfSym(s *LSym) (dwarfInfoSym, dwarfLocSym, dwarfRangesSym, dwarfAbsFnSym *LSym) { if s.Type != objabi.STEXT { ctxt.Diag("dwarfSym of non-TEXT %v", s) } - if s.Func.dwarfSym == nil { - s.Func.dwarfSym = ctxt.LookupDerived(s, dwarf.InfoPrefix+s.Name) + if s.Func.dwarfInfoSym == nil { + s.Func.dwarfInfoSym = ctxt.LookupDerived(s, dwarf.InfoPrefix+s.Name) + if ctxt.Flag_locationlists { + s.Func.dwarfLocSym = ctxt.LookupDerived(s, dwarf.LocPrefix+s.Name) + } s.Func.dwarfRangesSym = ctxt.LookupDerived(s, dwarf.RangePrefix+s.Name) + if s.WasInlined() { + s.Func.dwarfAbsFnSym = ctxt.DwFixups.AbsFuncDwarfSym(s) + } + } - return s.Func.dwarfSym, s.Func.dwarfRangesSym + return s.Func.dwarfInfoSym, s.Func.dwarfLocSym, s.Func.dwarfRangesSym, s.Func.dwarfAbsFnSym } func (s *LSym) Len() int64 { return s.Size } -// populateDWARF fills in the DWARF Debugging Information Entries for TEXT symbol s. -// The DWARFs symbol must already have been initialized in InitTextSym. -func (ctxt *Link) populateDWARF(curfn interface{}, s *LSym) { - dsym, drsym := ctxt.dwarfSym(s) - if dsym.Size != 0 { +// fileSymbol returns a symbol corresponding to the source file of the +// first instruction (prog) of the specified function. This will +// presumably be the file in which the function is defined. +func (ctxt *Link) fileSymbol(fn *LSym) *LSym { + p := fn.Func.Text + if p != nil { + f, _ := linkgetlineFromPos(ctxt, p.Pos) + fsym := ctxt.Lookup(f) + return fsym + } + return nil +} + +// populateDWARF fills in the DWARF Debugging Information Entries for +// TEXT symbol 's'. The various DWARF symbols must already have been +// initialized in InitTextSym. 
+func (ctxt *Link) populateDWARF(curfn interface{}, s *LSym, myimportpath string) { + info, loc, ranges, absfunc := ctxt.dwarfSym(s) + if info.Size != 0 { ctxt.Diag("makeFuncDebugEntry double process %v", s) } var scopes []dwarf.Scope + var inlcalls dwarf.InlCalls if ctxt.DebugInfo != nil { - scopes = ctxt.DebugInfo(s, curfn) + scopes, inlcalls = ctxt.DebugInfo(s, curfn) + } + var err error + dwctxt := dwCtxt{ctxt} + filesym := ctxt.fileSymbol(s) + fnstate := &dwarf.FnState{ + Name: s.Name, + Importpath: myimportpath, + Info: info, + Filesym: filesym, + Loc: loc, + Ranges: ranges, + Absfn: absfunc, + StartPC: s, + Size: s.Size, + External: !s.Static(), + Scopes: scopes, + InlCalls: inlcalls, + } + if absfunc != nil { + err = dwarf.PutAbstractFunc(dwctxt, fnstate) + if err != nil { + ctxt.Diag("emitting DWARF for %s failed: %v", s.Name, err) + } + err = dwarf.PutConcreteFunc(dwctxt, fnstate) + } else { + err = dwarf.PutDefaultFunc(dwctxt, fnstate) } - err := dwarf.PutFunc(dwCtxt{ctxt}, dsym, drsym, s.Name, !s.Static(), s, s.Size, scopes) if err != nil { ctxt.Diag("emitting DWARF for %s failed: %v", s.Name, err) } } + +// DwarfIntConst creates a link symbol for an integer constant with the +// given name, type and value. 
+func (ctxt *Link) DwarfIntConst(myimportpath, name, typename string, val int64) { + if myimportpath == "" { + return + } + s := ctxt.LookupInit(dwarf.ConstInfoPrefix+myimportpath, func(s *LSym) { + s.Type = objabi.SDWARFINFO + ctxt.Data = append(ctxt.Data, s) + }) + dwarf.PutIntConst(dwCtxt{ctxt}, s, ctxt.Lookup(dwarf.InfoPrefix+typename), myimportpath+"."+name, val) +} + +func (ctxt *Link) DwarfAbstractFunc(curfn interface{}, s *LSym, myimportpath string) { + absfn := ctxt.DwFixups.AbsFuncDwarfSym(s) + if absfn.Size != 0 { + ctxt.Diag("internal error: DwarfAbstractFunc double process %v", s) + } + if s.Func == nil { + s.Func = new(FuncInfo) + } + scopes, _ := ctxt.DebugInfo(s, curfn) + dwctxt := dwCtxt{ctxt} + filesym := ctxt.fileSymbol(s) + fnstate := dwarf.FnState{ + Name: s.Name, + Importpath: myimportpath, + Info: absfn, + Filesym: filesym, + Absfn: absfn, + External: !s.Static(), + Scopes: scopes, + } + if err := dwarf.PutAbstractFunc(dwctxt, &fnstate); err != nil { + ctxt.Diag("emitting DWARF for %s failed: %v", s.Name, err) + } +} + +// This table is designed to aid in the creation of references betweeen +// DWARF subprogram DIEs. +// +// In most cases when one DWARF DIE has to refer to another DWARF DIE, +// the target of the reference has an LSym, which makes it easy to use +// the existing relocation mechanism. For DWARF inlined routine DIEs, +// however, the subprogram DIE has to refer to a child +// parameter/variable DIE of the abstract subprogram. This child DIE +// doesn't have an LSym, and also of interest is the fact that when +// DWARF generation is happening for inlined function F within caller +// G, it's possible that DWARF generation hasn't happened yet for F, +// so there is no way to know the offset of a child DIE within F's +// abstract function. Making matters more complex, each inlined +// instance of F may refer to a subset of the original F's variables +// (depending on what happens with optimization, some vars may be +// eliminated). 
+// +// The fixup table below helps overcome this hurdle. At the point +// where a parameter/variable reference is made (via a call to +// "ReferenceChildDIE"), a fixup record is generate that records +// the relocation that is targeting that child variable. At a later +// point when the abstract function DIE is emitted, there will be +// a call to "RegisterChildDIEOffsets", at which point the offsets +// needed to apply fixups are captured. Finally, once the parallel +// portion of the compilation is done, fixups can actually be applied +// during the "Finalize" method (this can't be done during the +// parallel portion of the compile due to the possibility of data +// races). +// +// This table is also used to record the "precursor" function node for +// each function that is the target of an inline -- child DIE references +// have to be made with respect to the original pre-optimization +// version of the function (to allow for the fact that each inlined +// body may be optimized differently). 
+type DwarfFixupTable struct { + ctxt *Link + mu sync.Mutex + symtab map[*LSym]int // maps abstract fn LSYM to index in svec + svec []symFixups + precursor map[*LSym]fnState // maps fn Lsym to precursor Node, absfn sym +} + +type symFixups struct { + fixups []relFixup + doffsets []declOffset + inlIndex int32 + defseen bool +} + +type declOffset struct { + // Index of variable within DCL list of pre-optimization function + dclIdx int32 + // Offset of var's child DIE with respect to containing subprogram DIE + offset int32 +} + +type relFixup struct { + refsym *LSym + relidx int32 + dclidx int32 +} + +type fnState struct { + // precursor function (really *gc.Node) + precursor interface{} + // abstract function symbol + absfn *LSym +} + +func NewDwarfFixupTable(ctxt *Link) *DwarfFixupTable { + return &DwarfFixupTable{ + ctxt: ctxt, + symtab: make(map[*LSym]int), + precursor: make(map[*LSym]fnState), + } +} + +func (ft *DwarfFixupTable) GetPrecursorFunc(s *LSym) interface{} { + if fnstate, found := ft.precursor[s]; found { + return fnstate.precursor + } + return nil +} + +func (ft *DwarfFixupTable) SetPrecursorFunc(s *LSym, fn interface{}) { + if _, found := ft.precursor[s]; found { + ft.ctxt.Diag("internal error: DwarfFixupTable.SetPrecursorFunc double call on %v", s) + } + + // initialize abstract function symbol now. This is done here so + // as to avoid data races later on during the parallel portion of + // the back end. + absfn := ft.ctxt.LookupDerived(s, dwarf.InfoPrefix+s.Name+dwarf.AbstractFuncSuffix) + absfn.Set(AttrDuplicateOK, true) + absfn.Type = objabi.SDWARFINFO + ft.ctxt.Data = append(ft.ctxt.Data, absfn) + + ft.precursor[s] = fnState{precursor: fn, absfn: absfn} +} + +// Make a note of a child DIE reference: relocation 'ridx' within symbol 's' +// is targeting child 'c' of DIE with symbol 'tgt'. 
+func (ft *DwarfFixupTable) ReferenceChildDIE(s *LSym, ridx int, tgt *LSym, dclidx int, inlIndex int) { + // Protect against concurrent access if multiple backend workers + ft.mu.Lock() + defer ft.mu.Unlock() + + // Create entry for symbol if not already present. + idx, found := ft.symtab[tgt] + if !found { + ft.svec = append(ft.svec, symFixups{inlIndex: int32(inlIndex)}) + idx = len(ft.svec) - 1 + ft.symtab[tgt] = idx + } + + // Do we have child DIE offsets available? If so, then apply them, + // otherwise create a fixup record. + sf := &ft.svec[idx] + if len(sf.doffsets) > 0 { + found := false + for _, do := range sf.doffsets { + if do.dclIdx == int32(dclidx) { + off := do.offset + s.R[ridx].Add += int64(off) + found = true + break + } + } + if !found { + ft.ctxt.Diag("internal error: DwarfFixupTable.ReferenceChildDIE unable to locate child DIE offset for dclIdx=%d src=%v tgt=%v", dclidx, s, tgt) + } + } else { + sf.fixups = append(sf.fixups, relFixup{s, int32(ridx), int32(dclidx)}) + } +} + +// Called once DWARF generation is complete for a given abstract function, +// whose children might have been referenced via a call above. Stores +// the offsets for any child DIEs (vars, params) so that they can be +// consumed later in on DwarfFixupTable.Finalize, which applies any +// outstanding fixups. +func (ft *DwarfFixupTable) RegisterChildDIEOffsets(s *LSym, vars []*dwarf.Var, coffsets []int32) { + // Length of these two slices should agree + if len(vars) != len(coffsets) { + ft.ctxt.Diag("internal error: RegisterChildDIEOffsets vars/offsets length mismatch") + return + } + + // Generate the slice of declOffset's based in vars/coffsets + doffsets := make([]declOffset, len(coffsets)) + for i := 0; i < len(coffsets); i++ { + doffsets[i].dclIdx = vars[i].ChildIndex + doffsets[i].offset = coffsets[i] + } + + ft.mu.Lock() + defer ft.mu.Unlock() + + // Store offsets for this symbol. 
+ idx, found := ft.symtab[s] + if !found { + sf := symFixups{inlIndex: -1, defseen: true, doffsets: doffsets} + ft.svec = append(ft.svec, sf) + ft.symtab[s] = len(ft.svec) - 1 + } else { + sf := &ft.svec[idx] + sf.doffsets = doffsets + sf.defseen = true + } +} + +func (ft *DwarfFixupTable) processFixups(slot int, s *LSym) { + sf := &ft.svec[slot] + for _, f := range sf.fixups { + dfound := false + for i := 0; i < len(sf.doffsets); i++ { + if sf.doffsets[i].dclIdx == f.dclidx { + f.refsym.R[f.relidx].Add += int64(sf.doffsets[i].offset) + dfound = true + break + } + } + if !dfound { + ft.ctxt.Diag("internal error: DwarfFixupTable has orphaned fixup on %v targeting %v relidx=%d dclidx=%d", f.refsym, s, f.relidx, f.dclidx) + } + } +} + +// return the LSym corresponding to the 'abstract subprogram' DWARF +// info entry for a function. +func (ft *DwarfFixupTable) AbsFuncDwarfSym(fnsym *LSym) *LSym { + // Protect against concurrent access if multiple backend workers + ft.mu.Lock() + defer ft.mu.Unlock() + + if fnstate, found := ft.precursor[fnsym]; found { + return fnstate.absfn + } + ft.ctxt.Diag("internal error: AbsFuncDwarfSym requested for %v, not seen during inlining", fnsym) + return nil +} + +// Called after all functions have been compiled; the main job of this +// function is to identify cases where there are outstanding fixups. +// This scenario crops up when we have references to variables of an +// inlined routine, but that routine is defined in some other package. +// This helper walks through and locate these fixups, then invokes a +// helper to create an abstract subprogram DIE for each one. +func (ft *DwarfFixupTable) Finalize(myimportpath string, trace bool) { + if trace { + ft.ctxt.Logf("DwarfFixupTable.Finalize invoked for %s\n", myimportpath) + } + + // Collect up the keys from the precursor map, then sort the + // resulting list (don't want to rely on map ordering here). 
+ fns := make([]*LSym, len(ft.precursor)) + idx := 0 + for fn, _ := range ft.precursor { + fns[idx] = fn + idx++ + } + sort.Sort(bySymName(fns)) + + // Should not be called during parallel portion of compilation. + if ft.ctxt.InParallel { + ft.ctxt.Diag("internal error: DwarfFixupTable.Finalize call during parallel backend") + } + + // Generate any missing abstract functions. + for i := 0; i < len(fns); i++ { + s := fns[i] + absfn := ft.AbsFuncDwarfSym(s) + slot, found := ft.symtab[absfn] + if !found || !ft.svec[slot].defseen { + ft.ctxt.GenAbstractFunc(s) + } + } + + // Apply fixups. + for i := 0; i < len(fns); i++ { + s := fns[i] + absfn := ft.AbsFuncDwarfSym(s) + slot, found := ft.symtab[absfn] + if !found { + ft.ctxt.Diag("internal error: DwarfFixupTable.Finalize orphan abstract function for %v", s) + } else { + ft.processFixups(slot, s) + } + } +} + +type bySymName []*LSym + +func (s bySymName) Len() int { return len(s) } +func (s bySymName) Less(i, j int) bool { return s[i].Name < s[j].Name } +func (s bySymName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } diff --git a/src/cmd/internal/obj/pass.go b/src/cmd/internal/obj/pass.go index 7cc187d01f8..edfc3acf794 100644 --- a/src/cmd/internal/obj/pass.go +++ b/src/cmd/internal/obj/pass.go @@ -124,8 +124,8 @@ func linkpatch(ctxt *Link, sym *LSym, newprog ProgAlloc) { for p := sym.Func.Text; p != nil; p = p.Link { checkaddr(ctxt, p, &p.From) - if p.From3 != nil { - checkaddr(ctxt, p, p.From3) + if p.GetFrom3() != nil { + checkaddr(ctxt, p, p.GetFrom3()) } checkaddr(ctxt, p, &p.To) diff --git a/src/cmd/internal/obj/pcln.go b/src/cmd/internal/obj/pcln.go index b85bb8aca21..d1d36cf685c 100644 --- a/src/cmd/internal/obj/pcln.go +++ b/src/cmd/internal/obj/pcln.go @@ -223,6 +223,7 @@ func pctospadj(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg in } if oldval+p.Spadj < -10000 || oldval+p.Spadj > 1100000000 { ctxt.Diag("overflow in spadj: %d + %d = %d", oldval, p.Spadj, oldval+p.Spadj) + ctxt.DiagFlush() 
log.Fatalf("bad code") } @@ -240,6 +241,7 @@ func pctopcdata(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg i } if int64(int32(p.To.Offset)) != p.To.Offset { ctxt.Diag("overflow in PCDATA instruction: %v", p) + ctxt.DiagFlush() log.Fatalf("bad code") } diff --git a/src/cmd/internal/obj/plist.go b/src/cmd/internal/obj/plist.go index 861da88703c..e2609da35da 100644 --- a/src/cmd/internal/obj/plist.go +++ b/src/cmd/internal/obj/plist.go @@ -19,7 +19,7 @@ type Plist struct { // It is used to provide access to cached/bulk-allocated Progs to the assemblers. type ProgAlloc func() *Prog -func Flushplist(ctxt *Link, plist *Plist, newprog ProgAlloc) { +func Flushplist(ctxt *Link, plist *Plist, newprog ProgAlloc, myimportpath string) { // Build list of symbols, and assign instructions to lists. var curtext *LSym var etext *Prog @@ -106,7 +106,7 @@ func Flushplist(ctxt *Link, plist *Plist, newprog ProgAlloc) { ctxt.Arch.Preprocess(ctxt, s, newprog) ctxt.Arch.Assemble(ctxt, s, newprog) linkpcln(ctxt, s) - ctxt.populateDWARF(plist.Curfn, s) + ctxt.populateDWARF(plist.Curfn, s, myimportpath) } } @@ -136,13 +136,17 @@ func (ctxt *Link) InitTextSym(s *LSym, flag int) { ctxt.Text = append(ctxt.Text, s) // Set up DWARF entries for s. - dsym, drsym := ctxt.dwarfSym(s) - dsym.Type = objabi.SDWARFINFO - dsym.Set(AttrDuplicateOK, s.DuplicateOK()) - drsym.Type = objabi.SDWARFRANGE - drsym.Set(AttrDuplicateOK, s.DuplicateOK()) - ctxt.Data = append(ctxt.Data, dsym) - ctxt.Data = append(ctxt.Data, drsym) + info, loc, ranges, _ := ctxt.dwarfSym(s) + info.Type = objabi.SDWARFINFO + info.Set(AttrDuplicateOK, s.DuplicateOK()) + if loc != nil { + loc.Type = objabi.SDWARFLOC + loc.Set(AttrDuplicateOK, s.DuplicateOK()) + ctxt.Data = append(ctxt.Data, loc) + } + ranges.Type = objabi.SDWARFRANGE + ranges.Set(AttrDuplicateOK, s.DuplicateOK()) + ctxt.Data = append(ctxt.Data, info, ranges) // Set up the function's gcargs and gclocals. // They will be filled in later if needed. 
diff --git a/src/cmd/internal/obj/ppc64/a.out.go b/src/cmd/internal/obj/ppc64/a.out.go index 90a204745b0..e684281774e 100644 --- a/src/cmd/internal/obj/ppc64/a.out.go +++ b/src/cmd/internal/obj/ppc64/a.out.go @@ -396,6 +396,7 @@ const ( AADDZECC AADDZEVCC AADDZEV + AADDEX AAND AANDCC AANDN @@ -412,6 +413,7 @@ const ( ABVS // Unordered-set ACMP ACMPU + ACMPEQB ACNTLZW ACNTLZWCC ACRAND @@ -602,6 +604,8 @@ const ( ARFCI + AFCPSGN + AFCPSGNCC /* optional on 32-bit */ AFRES AFRESCC @@ -648,6 +652,8 @@ const ( AFCFIDCC AFCFIDU AFCFIDUCC + AFCFIDS + AFCFIDSCC AFCTID AFCTIDCC AFCTIDZ @@ -712,6 +718,13 @@ const ( APOPCNTD APOPCNTW APOPCNTB + ACOPY + APASTECC + ADARN + ALDMX + AMADDHD + AMADDHDU + AMADDLD /* Vector */ ALV @@ -781,6 +794,7 @@ const ( AVPMSUMH AVPMSUMW AVPMSUMD + AVMSUMUDM AVR AVRLB AVRLH @@ -842,7 +856,11 @@ const ( AVCMPGTSWCC AVCMPGTSD AVCMPGTSDCC + AVCMPNEZB + AVCMPNEZBCC AVPERM + AVBPERMQ + AVBPERMD AVSEL AVSPLT AVSPLTB @@ -885,12 +903,15 @@ const ( AMFFPRD AMFVRD AMFVSRWZ + AMFVSRLD AMTVSR AMTVSRD AMTFPRD AMTVRD AMTVSRWA AMTVSRWZ + AMTVSRDD + AMTVSRWS AXXLAND AXXLANDQ AXXLANDC diff --git a/src/cmd/internal/obj/ppc64/anames.go b/src/cmd/internal/obj/ppc64/anames.go index 5ca29454a60..b7ca1330573 100644 --- a/src/cmd/internal/obj/ppc64/anames.go +++ b/src/cmd/internal/obj/ppc64/anames.go @@ -26,6 +26,7 @@ var Anames = []string{ "ADDZECC", "ADDZEVCC", "ADDZEV", + "ADDEX", "AND", "ANDCC", "ANDN", @@ -42,6 +43,7 @@ var Anames = []string{ "BVS", "CMP", "CMPU", + "CMPEQB", "CNTLZW", "CNTLZWCC", "CRAND", @@ -228,6 +230,8 @@ var Anames = []string{ "SYSCALL", "WORD", "RFCI", + "FCPSGN", + "FCPSGNCC", "FRES", "FRESCC", "FRIM", @@ -269,6 +273,8 @@ var Anames = []string{ "FCFIDCC", "FCFIDU", "FCFIDUCC", + "FCFIDS", + "FCFIDSCC", "FCTID", "FCTIDCC", "FCTIDZ", @@ -329,6 +335,13 @@ var Anames = []string{ "POPCNTD", "POPCNTW", "POPCNTB", + "COPY", + "PASTECC", + "DARN", + "LDMX", + "MADDHD", + "MADDHDU", + "MADDLD", "LV", "LVEBX", "LVEHX", @@ -396,6 +409,7 @@ var Anames 
= []string{ "VPMSUMH", "VPMSUMW", "VPMSUMD", + "VMSUMUDM", "VR", "VRLB", "VRLH", @@ -457,7 +471,11 @@ var Anames = []string{ "VCMPGTSWCC", "VCMPGTSD", "VCMPGTSDCC", + "VCMPNEZB", + "VCMPNEZBCC", "VPERM", + "VBPERMQ", + "VBPERMD", "VSEL", "VSPLT", "VSPLTB", @@ -498,12 +516,15 @@ var Anames = []string{ "MFFPRD", "MFVRD", "MFVSRWZ", + "MFVSRLD", "MTVSR", "MTVSRD", "MTFPRD", "MTVRD", "MTVSRWA", "MTVSRWZ", + "MTVSRDD", + "MTVSRWS", "XXLAND", "XXLANDQ", "XXLANDC", diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go index 4d787b1c352..3938cebef62 100644 --- a/src/cmd/internal/obj/ppc64/asm9.go +++ b/src/cmd/internal/obj/ppc64/asm9.go @@ -362,8 +362,14 @@ var optab = []Optab{ /* Other ISA 2.05+ instructions */ {APOPCNTD, C_REG, C_NONE, C_NONE, C_REG, 93, 4, 0}, /* population count, x-form */ {ACMPB, C_REG, C_REG, C_NONE, C_REG, 92, 4, 0}, /* compare byte, x-form */ + {ACMPEQB, C_REG, C_REG, C_NONE, C_CREG, 92, 4, 0}, /* compare equal byte, x-form */ {AFTDIV, C_FREG, C_FREG, C_NONE, C_SCON, 92, 4, 0}, /* floating test for sw divide, x-form */ {AFTSQRT, C_FREG, C_NONE, C_NONE, C_SCON, 93, 4, 0}, /* floating test for sw square root, x-form */ + {ACOPY, C_REG, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* copy/paste facility, x-form */ + {ADARN, C_SCON, C_NONE, C_NONE, C_REG, 92, 4, 0}, /* deliver random number, x-form */ + {ALDMX, C_SOREG, C_NONE, C_NONE, C_REG, 45, 4, 0}, /* load doubleword monitored, x-form */ + {AMADDHD, C_REG, C_REG, C_REG, C_REG, 83, 4, 0}, /* multiply-add high/low doubleword, va-form */ + {AADDEX, C_REG, C_REG, C_SCON, C_REG, 94, 4, 0}, /* add extended using alternate carry, z23-form */ /* Vector instructions */ @@ -392,7 +398,8 @@ var optab = []Optab{ {AVSUBE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector subtract extended, va-form */ /* Vector multiply */ - {AVPMSUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector polynomial multiply & sum, vx-form */ + {AVPMSUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector 
polynomial multiply & sum, vx-form */ + {AVMSUMUDM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector multiply-sum, va-form */ /* Vector rotate */ {AVR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector rotate, vx-form */ @@ -407,12 +414,16 @@ var optab = []Optab{ {AVPOPCNT, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector population count, vx-form */ /* Vector compare */ - {AVCMPEQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare equal, vc-form */ - {AVCMPGT, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare greater than, vc-form */ + {AVCMPEQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare equal, vc-form */ + {AVCMPGT, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare greater than, vc-form */ + {AVCMPNEZB, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare not equal, vx-form */ /* Vector permute */ {AVPERM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector permute, va-form */ + /* Vector bit permute */ + {AVBPERMQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector bit permute, vx-form */ + /* Vector select */ {AVSEL, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector select, va-form */ @@ -455,6 +466,7 @@ var optab = []Optab{ /* VSX move to VSR */ {AMTVSR, C_REG, C_NONE, C_NONE, C_VSREG, 88, 4, 0}, /* vsx move to vsr, xx1-form */ + {AMTVSR, C_REG, C_REG, C_NONE, C_VSREG, 88, 4, 0}, {AMTVSR, C_REG, C_NONE, C_NONE, C_FREG, 88, 4, 0}, {AMTVSR, C_REG, C_NONE, C_NONE, C_VREG, 88, 4, 0}, @@ -537,7 +549,9 @@ var optab = []Optab{ {ATW, C_LCON, C_REG, C_NONE, C_REG, 60, 4, 0}, {ATW, C_LCON, C_REG, C_NONE, C_ADDCON, 61, 4, 0}, {ADCBF, C_ZOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0}, - {ADCBF, C_ZOREG, C_REG, C_NONE, C_NONE, 43, 4, 0}, + {ADCBF, C_SOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0}, + {ADCBF, C_ZOREG, C_REG, C_NONE, C_SCON, 43, 4, 0}, + {ADCBF, C_SOREG, C_NONE, C_NONE, C_SCON, 43, 4, 0}, {AECOWX, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0}, {AECIWX, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0}, {AECOWX, 
C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0}, @@ -758,11 +772,6 @@ func (c *ctxt9) aclass(a *obj.Addr) int { return C_GOTADDR case obj.NAME_AUTO: - if a.Reg == REGSP { - // unset base register for better printing, since - // a.Offset is still relative to pseudo-SP. - a.Reg = obj.REG_NONE - } c.instoffset = int64(c.autosize) + a.Offset if c.instoffset >= -BIG && c.instoffset < BIG { return C_SAUTO @@ -770,11 +779,6 @@ func (c *ctxt9) aclass(a *obj.Addr) int { return C_LAUTO case obj.NAME_PARAM: - if a.Reg == REGSP { - // unset base register for better printing, since - // a.Offset is still relative to pseudo-FP. - a.Reg = obj.REG_NONE - } c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize() if c.instoffset >= -BIG && c.instoffset < BIG { return C_SAUTO @@ -827,11 +831,6 @@ func (c *ctxt9) aclass(a *obj.Addr) int { return C_LCON case obj.NAME_AUTO: - if a.Reg == REGSP { - // unset base register for better printing, since - // a.Offset is still relative to pseudo-SP. - a.Reg = obj.REG_NONE - } c.instoffset = int64(c.autosize) + a.Offset if c.instoffset >= -BIG && c.instoffset < BIG { return C_SACON @@ -839,11 +838,6 @@ func (c *ctxt9) aclass(a *obj.Addr) int { return C_LACON case obj.NAME_PARAM: - if a.Reg == REGSP { - // unset base register for better printing, since - // a.Offset is still relative to pseudo-FP. - a.Reg = obj.REG_NONE - } c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize() if c.instoffset >= -BIG && c.instoffset < BIG { return C_SACON @@ -911,11 +905,11 @@ func (c *ctxt9) oplook(p *obj.Prog) *Optab { a1-- a3 := C_NONE + 1 - if p.From3 != nil { - a3 = int(p.From3.Class) + if p.GetFrom3() != nil { + a3 = int(p.GetFrom3().Class) if a3 == 0 { - a3 = c.aclass(p.From3) + 1 - p.From3.Class = int8(a3) + a3 = c.aclass(p.GetFrom3()) + 1 + p.GetFrom3().Class = int8(a3) } } @@ -1208,9 +1202,15 @@ func buildop(ctxt *obj.Link) { opset(APOPCNTW, r0) opset(APOPCNTB, r0) + case ACOPY: /* copy, paste. 
*/ + opset(APASTECC, r0) + + case AMADDHD: /* maddhd, maddhdu, maddld */ + opset(AMADDHDU, r0) + opset(AMADDLD, r0) + case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */ opset(AMOVH, r0) - opset(AMOVHZ, r0) case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */ @@ -1375,9 +1375,15 @@ func buildop(ctxt *obj.Link) { opset(AVCMPGTSD, r0) opset(AVCMPGTSDCC, r0) + case AVCMPNEZB: /* vcmpnezb[.] */ + opset(AVCMPNEZBCC, r0) + case AVPERM: /* vperm */ opset(AVPERM, r0) + case AVBPERMQ: /* vbpermq, vbpermd */ + opset(AVBPERMD, r0) + case AVSEL: /* vsel */ opset(AVSEL, r0) @@ -1428,18 +1434,21 @@ func buildop(ctxt *obj.Link) { case ASTXSI: /* stxsiwx */ opset(ASTXSIWX, r0) - case AMFVSR: /* mfvsrd, mfvsrwz (and extended mnemonics) */ + case AMFVSR: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */ opset(AMFVSRD, r0) opset(AMFFPRD, r0) opset(AMFVRD, r0) opset(AMFVSRWZ, r0) + opset(AMFVSRLD, r0) - case AMTVSR: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics) */ + case AMTVSR: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */ opset(AMTVSRD, r0) opset(AMTFPRD, r0) opset(AMTVRD, r0) opset(AMTVSRWA, r0) opset(AMTVSRWZ, r0) + opset(AMTVSRDD, r0) + opset(AMTVSRWS, r0) case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */ opset(AXXLANDQ, r0) @@ -1595,6 +1604,8 @@ func buildop(ctxt *obj.Link) { opset(AFCFIDCC, r0) opset(AFCFIDU, r0) opset(AFCFIDUCC, r0) + opset(AFCFIDS, r0) + opset(AFCFIDSCC, r0) opset(AFRES, r0) opset(AFRESCC, r0) opset(AFRIM, r0) @@ -1614,6 +1625,8 @@ func buildop(ctxt *obj.Link) { opset(AFADDS, r0) opset(AFADDCC, r0) opset(AFADDSCC, r0) + opset(AFCPSGN, r0) + opset(AFCPSGNCC, r0) opset(AFDIV, r0) opset(AFDIVS, r0) opset(AFDIVCC, r0) @@ -1797,6 +1810,11 @@ func buildop(ctxt *obj.Link) { ASLBMTE, AWORD, ADWORD, + ADARN, + ALDMX, + AVMSUMUDM, + AADDEX, + ACMPEQB, obj.ANOP, obj.ATEXT, obj.AUNDEF, @@ -1924,6 +1942,11 @@ func AOP_XX4(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 { 
return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5 } +/* Z23-form, 3-register operands + CY field */ +func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 { + return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<7 +} + func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 { return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11 } @@ -2448,7 +2471,7 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { if r == 0 { r = int(p.To.Reg) } - d := c.vregoff(p.From3) + d := c.vregoff(p.GetFrom3()) var a int switch p.As { @@ -2701,7 +2724,7 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v)) case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */ - v := c.regoff(p.From3) + v := c.regoff(p.GetFrom3()) r := int(p.From.Reg) o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) @@ -2710,7 +2733,7 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { if p.To.Reg == REGTMP || p.From.Reg == REGTMP { c.ctxt.Diag("can't synthesize large constant\n%v", p) } - v := c.regoff(p.From3) + v := c.regoff(p.GetFrom3()) o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16) o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v)) o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP) @@ -2723,7 +2746,7 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { case 29: /* rldic[lr]? 
$sh,s,$mask,a -- left, right, plain give different masks */ v := c.regoff(&p.From) - d := c.vregoff(p.From3) + d := c.vregoff(p.GetFrom3()) var mask [2]uint8 c.maskgen64(p, mask[:], uint64(d)) var a int @@ -2763,7 +2786,7 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { case 30: /* rldimi $sh,s,$mask,a */ v := c.regoff(&p.From) - d := c.vregoff(p.From3) + d := c.vregoff(p.GetFrom3()) // Original opcodes had mask operands which had to be converted to a shift count as expected by // the ppc64 asm. @@ -2834,7 +2857,7 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r)) case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */ - o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.From3.Reg)&31)<<6 + o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */ v := c.regoff(&p.To) @@ -2876,13 +2899,28 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 = uint32(c.regoff(&p.From)) case 41: /* stswi */ - o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.From3))&0x7F)<<11 + o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11 case 42: /* lswi */ - o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.From3))&0x7F)<<11 + o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11 - case 43: /* unary indexed source: dcbf (b); dcbf (a+b) */ - o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg)) + case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */ + /* TH field for dcbt/dcbtst: */ + /* 0 = Block access - program will soon access EA. */ + /* 8-15 = Stream access - sequence of access (data stream). 
See section 4.3.2 of the ISA for details. */ + /* 16 = Block access - program will soon make a transient access to EA. */ + /* 17 = Block access - program will not access EA for a long time. */ + + /* L field for dcbf: */ + /* 0 = invalidates the block containing EA in all processors. */ + /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */ + /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */ + if p.To.Type == obj.TYPE_NONE { + o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg)) + } else { + th := c.regoff(&p.To) + o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg)) + } case 44: /* indexed store */ o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg)) @@ -3049,13 +3087,13 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { v := c.regoff(&p.From) var mask [2]uint8 - c.maskgen(p, mask[:], uint32(c.regoff(p.From3))) + c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3()))) o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v)) o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1 case 63: /* rlwmi b,s,$mask,a */ var mask [2]uint8 - c.maskgen(p, mask[:], uint32(c.regoff(p.From3))) + c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3()))) o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg)) o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1 @@ -3063,7 +3101,7 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { case 64: /* mtfsf fr[, $m] {,fpcsr} */ var v int32 if p.From3Type() != obj.TYPE_NONE { - v = c.regoff(p.From3) & 255 + v = c.regoff(p.GetFrom3()) & 255 } else { v = 255 } @@ -3118,7 +3156,7 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { if p.To.Reg != 0 { c.ctxt.Diag("can't use both mask and CR(n)\n%v", p) } - v = c.regoff(p.From3) & 0xff + v = 
c.regoff(p.GetFrom3()) & 0xff } else { if p.To.Reg == 0 { v = 0xff /* CR */ @@ -3257,7 +3295,7 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { /* imm imm reg reg */ /* operand order: SIX, VRA, ST, VRT */ six := int(c.regoff(&p.From)) - st := int(c.regoff(p.From3)) + st := int(c.regoff(p.GetFrom3())) o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six)) } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 { /* imm reg reg */ @@ -3275,19 +3313,19 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { if p.From.Type == obj.TYPE_REG { /* reg reg reg reg */ /* 4-register operand order: VRA, VRB, VRC, VRT */ - o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.From3.Reg)) + o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg)) } else if p.From.Type == obj.TYPE_CONST { /* imm reg reg reg */ /* operand order: SHB, VRA, VRB, VRT */ shb := int(c.regoff(&p.From)) - o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From3.Reg), uint32(shb)) + o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb)) } case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc bc := c.vregoff(&p.From) // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg - o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.From3.Reg), uint32(bc)) + o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc)) case 85: /* vector instructions, VX-form */ /* reg none reg */ @@ -3335,7 +3373,7 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { case 89: /* VSX instructions, XX2-form */ /* reg none reg OR reg imm reg */ /* 2-register operand order: XB, XT or XB, UIM, XT*/ - uim := int(c.regoff(p.From3)) + uim := int(c.regoff(p.GetFrom3())) o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg)) case 90: /* VSX instructions, XX3-form */ 
@@ -3346,25 +3384,55 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { } else if p.From3Type() == obj.TYPE_CONST { /* reg reg reg imm */ /* operand order: XA, XB, DM, XT */ - dm := int(c.regoff(p.From3)) + dm := int(c.regoff(p.GetFrom3())) o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm)) } case 91: /* VSX instructions, XX4-form */ /* reg reg reg reg */ /* 3-register operand order: XA, XB, XC, XT */ - o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.From3.Reg)) + o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg)) case 92: /* X-form instructions, 3-operands */ if p.To.Type == obj.TYPE_CONST { /* imm reg reg */ - /* operand order: FRA, FRB, BF */ - bf := int(c.regoff(&p.To)) << 2 - o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg)) + xf := int32(p.From.Reg) + if REG_F0 <= xf && xf <= REG_F31 { + /* operand order: FRA, FRB, BF */ + bf := int(c.regoff(&p.To)) << 2 + o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg)) + } else { + /* operand order: RA, RB, L */ + l := int(c.regoff(&p.To)) + o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg)) + } + } else if p.From3Type() == obj.TYPE_CONST { + /* reg reg imm */ + /* operand order: RB, L, RA */ + l := int(c.regoff(p.GetFrom3())) + o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg)) } else if p.To.Type == obj.TYPE_REG { - /* reg reg reg */ - /* operand order: RS, RB, RA */ - o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) + cr := int32(p.To.Reg) + if REG_CR0 <= cr && cr <= REG_CR7 { + /* cr reg reg */ + /* operand order: RA, RB, BF */ + bf := (int(p.To.Reg) & 7) << 2 + o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg)) + } else if p.From.Type == obj.TYPE_CONST { + /* reg imm */ + /* operand order: L, RT */ + l := 
int(c.regoff(&p.From)) + o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg)) + } else { + switch p.As { + case ACOPY, APASTECC: + o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg)) + default: + /* reg reg reg */ + /* operand order: RS, RB, RA */ + o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) + } + } } case 93: /* X-form instructions, 2-operands */ @@ -3379,6 +3447,11 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) } + case 94: /* Z23-form instructions, 4-operands */ + /* reg reg reg imm */ + /* operand order: RA, RB, CY, RT */ + cy := int(c.regoff(p.GetFrom3())) + o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy)) } out[0] = o1 @@ -3443,6 +3516,8 @@ func (c *ctxt9) oprrr(a obj.As) uint32 { return OPVCC(31, 202, 1, 0) case AADDZEVCC: return OPVCC(31, 202, 1, 1) + case AADDEX: + return OPVCC(31, 170, 0, 0) /* addex - v3.0b */ case AAND: return OPVCC(31, 28, 0, 0) @@ -3608,6 +3683,10 @@ func (c *ctxt9) oprrr(a obj.As) uint32 { return OPVCC(63, 974, 0, 0) case AFCFIDUCC: return OPVCC(63, 974, 0, 1) + case AFCFIDS: + return OPVCC(59, 846, 0, 0) + case AFCFIDSCC: + return OPVCC(59, 846, 0, 1) case AFCTIW: return OPVCC(63, 14, 0, 0) case AFCTIWCC: @@ -3685,6 +3764,10 @@ func (c *ctxt9) oprrr(a obj.As) uint32 { return OPVCC(59, 30, 0, 0) case AFNMSUBSCC: return OPVCC(59, 30, 0, 1) + case AFCPSGN: + return OPVCC(63, 8, 0, 0) + case AFCPSGNCC: + return OPVCC(63, 8, 0, 1) case AFRES: return OPVCC(59, 24, 0, 0) case AFRESCC: @@ -4008,6 +4091,9 @@ func (c *ctxt9) oprrr(a obj.As) uint32 { case AVPMSUMD: return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */ + case AVMSUMUDM: + return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */ + case AVSUBUBM: return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */ case AVSUBUHM: @@ -4086,6 +4172,11 @@ func (c *ctxt9) oprrr(a obj.As) uint32 
{ case AVSRAD: return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */ + case AVBPERMQ: + return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */ + case AVBPERMD: + return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */ + case AVCLZB: return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */ case AVCLZH: @@ -4154,6 +4245,11 @@ func (c *ctxt9) oprrr(a obj.As) uint32 { case AVCMPGTSDCC: return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */ + case AVCMPNEZB: + return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */ + case AVCMPNEZBCC: + return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */ + case AVPERM: return OPVX(4, 43, 0, 0) /* vperm - v2.03 */ @@ -4178,6 +4274,8 @@ func (c *ctxt9) oprrr(a obj.As) uint32 { return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */ case AMFVSRWZ: return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */ + case AMFVSRLD: + return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */ case AMTVSRD, AMTFPRD, AMTVRD: return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */ @@ -4185,6 +4283,10 @@ func (c *ctxt9) oprrr(a obj.As) uint32 { return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */ case AMTVSRWZ: return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */ + case AMTVSRDD: + return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */ + case AMTVSRWS: + return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */ case AXXLANDQ: return OPVXX3(60, 130, 0) /* xxland - v2.06 */ @@ -4288,6 +4390,13 @@ func (c *ctxt9) oprrr(a obj.As) uint32 { return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */ /* End of VSX instructions */ + case AMADDHD: + return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */ + case AMADDHDU: + return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */ + case AMADDLD: + return OPVX(4, 51, 0, 0) /* maddld - v3.00 */ + case AXOR: return OPVCC(31, 316, 0, 0) case AXORCC: @@ -4379,9 +4488,19 @@ func (c *ctxt9) opirr(a obj.As) uint32 { return OPVCC(11, 0, 0, 0) /* L=0 */ case ACMPWU: return OPVCC(10, 0, 0, 0) + case ACMPEQB: + return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */ + case ALSW: return OPVCC(31, 597, 0, 0) + case ACOPY: + return OPVCC(31, 774, 0, 0) /* copy - v3.00 */ + 
case APASTECC: + return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */ + case ADARN: + return OPVCC(31, 755, 0, 0) /* darn - v3.00 */ + case AMULLW: return OPVCC(7, 0, 0, 0) @@ -4579,6 +4698,8 @@ func (c *ctxt9) oploadx(a obj.As) uint32 { return OPVCC(31, 21, 0, 0) /* ldx */ case AMOVDU: return OPVCC(31, 53, 0, 0) /* ldux */ + case ALDMX: + return OPVCC(31, 309, 0, 0) /* ldmx */ /* Vector (VMX/Altivec) instructions */ /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go index b1509e3813b..c50cd3b06c8 100644 --- a/src/cmd/internal/obj/ppc64/obj9.go +++ b/src/cmd/internal/obj/ppc64/obj9.go @@ -176,7 +176,7 @@ func (c *ctxt9) rewriteToUseGot(p *obj.Prog) { p.From.Offset = 0 } } - if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN { + if p.GetFrom3() != nil && p.GetFrom3().Name == obj.NAME_EXTERN { c.ctxt.Diag("don't know how to handle %v with -dynlink", p) } var source *obj.Addr diff --git a/src/cmd/internal/obj/s390x/a.out.go b/src/cmd/internal/obj/s390x/a.out.go index 9d5b34a0b15..54c1edb4b0c 100644 --- a/src/cmd/internal/obj/s390x/a.out.go +++ b/src/cmd/internal/obj/s390x/a.out.go @@ -283,12 +283,15 @@ const ( AFNEGS ALEDBR ALDEBR + ALPDFR + ALNDFR AFSUB AFSUBS AFSQRT AFSQRTS AFIEBR AFIDBR + ACPSDR // move from GPR to FPR and vice versa ALDGR @@ -324,6 +327,12 @@ const ( ACMPW ACMPWU + // test under mask + ATMHH + ATMHL + ATMLH + ATMLL + // compare and swap ACS ACSG diff --git a/src/cmd/internal/obj/s390x/anames.go b/src/cmd/internal/obj/s390x/anames.go index 42a0222b57c..8488ccc46e3 100644 --- a/src/cmd/internal/obj/s390x/anames.go +++ b/src/cmd/internal/obj/s390x/anames.go @@ -81,12 +81,15 @@ var Anames = []string{ "FNEGS", "LEDBR", "LDEBR", + "LPDFR", + "LNDFR", "FSUB", "FSUBS", "FSQRT", "FSQRTS", "FIEBR", "FIDBR", + "CPSDR", "LDGR", "LGDR", "CEFBRA", @@ -109,6 +112,10 @@ var Anames = []string{ "CMPU", "CMPW", "CMPWU", + "TMHH", + "TMHL", + "TMLH", + "TMLL", "CS", 
"CSG", "SYNC", diff --git a/src/cmd/internal/obj/s390x/asmz.go b/src/cmd/internal/obj/s390x/asmz.go index 6d2b870f0ac..7304ab3cc45 100644 --- a/src/cmd/internal/obj/s390x/asmz.go +++ b/src/cmd/internal/obj/s390x/asmz.go @@ -212,6 +212,7 @@ var optab = []Optab{ Optab{ACEFBRA, C_REG, C_NONE, C_NONE, C_FREG, 82, 0}, Optab{ACFEBRA, C_FREG, C_NONE, C_NONE, C_REG, 83, 0}, Optab{AFIEBR, C_SCON, C_FREG, C_NONE, C_FREG, 48, 0}, + Optab{ACPSDR, C_FREG, C_FREG, C_NONE, C_FREG, 49, 0}, // load symbol address (plus offset) Optab{AMOVD, C_SYMADDR, C_NONE, C_NONE, C_REG, 19, 0}, @@ -251,6 +252,9 @@ var optab = []Optab{ Optab{AFCMPO, C_FREG, C_NONE, C_NONE, C_FREG, 70, 0}, Optab{AFCMPO, C_FREG, C_REG, C_NONE, C_FREG, 70, 0}, + // test under mask + Optab{ATMHH, C_REG, C_NONE, C_NONE, C_ANDCON, 91, 0}, + // 32-bit access registers Optab{AMOVW, C_AREG, C_NONE, C_NONE, C_REG, 68, 0}, Optab{AMOVWZ, C_AREG, C_NONE, C_NONE, C_REG, 68, 0}, @@ -654,11 +658,11 @@ func (c *ctxtz) oplook(p *obj.Prog) *Optab { a1-- a3 := C_NONE + 1 - if p.From3 != nil { - a3 = int(p.From3.Class) + if p.GetFrom3() != nil { + a3 = int(p.GetFrom3().Class) if a3 == 0 { - a3 = c.aclass(p.From3) + 1 - p.From3.Class = int8(a3) + a3 = c.aclass(p.GetFrom3()) + 1 + p.GetFrom3().Class = int8(a3) } } @@ -897,6 +901,8 @@ func buildop(ctxt *obj.Link) { opset(ABCL, r) case AFABS: opset(AFNABS, r) + opset(ALPDFR, r) + opset(ALNDFR, r) opset(AFNEG, r) opset(AFNEGS, r) opset(ALEDBR, r) @@ -948,6 +954,10 @@ func buildop(ctxt *obj.Link) { opset(ACMPW, r) case ACMPU: opset(ACMPWU, r) + case ATMHH: + opset(ATMHL, r) + opset(ATMLH, r) + opset(ATMLL, r) case ACEFBRA: opset(ACDFBRA, r) opset(ACEGBRA, r) @@ -3182,6 +3192,10 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { opcode = op_LPDBR case AFNABS: opcode = op_LNDBR + case ALPDFR: + opcode = op_LPDFR + case ALNDFR: + opcode = op_LNDFR case AFNEG: opcode = op_LCDFR case AFNEGS: @@ -3281,6 +3295,9 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { } zRRF(opcode, uint32(m3), 
0, uint32(p.To.Reg), uint32(p.Reg), asm) + case 49: // copysign + zRRF(op_CPSDR, uint32(p.From.Reg), 0, uint32(p.To.Reg), uint32(p.Reg), asm) + case 67: // fmov $0 freg var opcode uint32 switch p.As { @@ -3534,11 +3551,11 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { if l < 1 || l > 256 { c.ctxt.Diag("number of bytes (%v) not in range [1,256]", l) } - if p.From3.Index != 0 || p.To.Index != 0 { + if p.GetFrom3().Index != 0 || p.To.Index != 0 { c.ctxt.Diag("cannot use index reg") } b1 := p.To.Reg - b2 := p.From3.Reg + b2 := p.GetFrom3().Reg if b1 == 0 { b1 = o.param } @@ -3546,7 +3563,7 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { b2 = o.param } d1 := c.regoff(&p.To) - d2 := c.regoff(p.From3) + d2 := c.regoff(p.GetFrom3()) if d1 < 0 || d1 >= DISP12 { if b2 == REGTMP { c.ctxt.Diag("REGTMP conflict") @@ -3688,12 +3705,26 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { } mask := c.branchMask(p) if int32(int16(v)) != v { - zRIL(_a, opcode2, uint32(p.From.Reg), uint32(c.regoff(p.From3)), asm) + zRIL(_a, opcode2, uint32(p.From.Reg), uint32(c.regoff(p.GetFrom3())), asm) zRIL(_c, op_BRCL, mask, uint32(v-sizeRIL/2), asm) } else { - zRIE(_c, opcode, uint32(p.From.Reg), mask, uint32(v), 0, 0, 0, uint32(c.regoff(p.From3)), asm) + zRIE(_c, opcode, uint32(p.From.Reg), mask, uint32(v), 0, 0, 0, uint32(c.regoff(p.GetFrom3())), asm) } + case 91: // test under mask (immediate) + var opcode uint32 + switch p.As { + case ATMHH: + opcode = op_TMHH + case ATMHL: + opcode = op_TMHL + case ATMLH: + opcode = op_TMLH + case ATMLL: + opcode = op_TMLL + } + zRI(opcode, uint32(p.From.Reg), uint32(c.vregoff(&p.To)), asm) + case 93: // GOT lookup v := c.vregoff(&p.To) if v != 0 { @@ -3893,9 +3924,9 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 101: // VRX LOAD op, m3, _ := vop(p.As) src := &p.From - if p.From3 != nil { + if p.GetFrom3() != nil { m3 = uint32(c.vregoff(&p.From)) - src = p.From3 + src = p.GetFrom3() } b2 := src.Reg if b2 == 0 { @@ -3917,12 +3948,12 
@@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 103: // VRV GATHER op, _, _ := vop(p.As) m3 := uint32(c.vregoff(&p.From)) - b2 := p.From3.Reg + b2 := p.GetFrom3().Reg if b2 == 0 { b2 = o.param } - d2 := uint32(c.vregoff(p.From3)) - zVRV(op, uint32(p.To.Reg), uint32(p.From3.Index), uint32(b2), d2, m3, asm) + d2 := uint32(c.vregoff(p.GetFrom3())) + zVRV(op, uint32(p.To.Reg), uint32(p.GetFrom3().Index), uint32(b2), d2, m3, asm) case 104: // VRS SHIFT/ROTATE and LOAD GR FROM VR ELEMENT op, m4, _ := vop(p.As) @@ -3962,8 +3993,8 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 108: // VRS LOAD WITH LENGTH op, _, _ := vop(p.As) - offset := uint32(c.vregoff(p.From3)) - reg := p.From3.Reg + offset := uint32(c.vregoff(p.GetFrom3())) + reg := p.GetFrom3().Reg if reg == 0 { reg = o.param } @@ -3972,9 +4003,9 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 109: // VRI-a op, m3, _ := vop(p.As) i2 := uint32(c.vregoff(&p.From)) - if p.From3 != nil { + if p.GetFrom3() != nil { m3 = uint32(c.vregoff(&p.From)) - i2 = uint32(c.vregoff(p.From3)) + i2 = uint32(c.vregoff(p.GetFrom3())) } switch p.As { case AVZERO: @@ -3987,7 +4018,7 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 110: op, m4, _ := vop(p.As) i2 := uint32(c.vregoff(&p.From)) - i3 := uint32(c.vregoff(p.From3)) + i3 := uint32(c.vregoff(p.GetFrom3())) zVRIb(op, uint32(p.To.Reg), i2, i3, m4, asm) case 111: @@ -3998,7 +4029,7 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 112: op, m5, _ := vop(p.As) i4 := uint32(c.vregoff(&p.From)) - zVRId(op, uint32(p.To.Reg), uint32(p.Reg), uint32(p.From3.Reg), i4, m5, asm) + zVRId(op, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), i4, m5, asm) case 113: op, m4, _ := vop(p.As) @@ -4044,7 +4075,7 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { v1 := uint32(p.To.Reg) v2 := uint32(p.From.Reg) v3 := uint32(p.Reg) - v4 := uint32(p.From3.Reg) + v4 := uint32(p.GetFrom3().Reg) zVRRd(op, v1, v2, v3, m6, m5, v4, asm) case 121: // 
VRR-e @@ -4053,7 +4084,7 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { v1 := uint32(p.To.Reg) v2 := uint32(p.From.Reg) v3 := uint32(p.Reg) - v4 := uint32(p.From3.Reg) + v4 := uint32(p.GetFrom3().Reg) zVRRe(op, v1, v2, v3, m6, m5, v4, asm) case 122: // VRR-f LOAD VRS FROM GRS DISJOINT @@ -4063,7 +4094,7 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { case 123: // VPDI $m4, V2, V3, V1 op, _, _ := vop(p.As) m4 := c.regoff(&p.From) - zVRRc(op, uint32(p.To.Reg), uint32(p.Reg), uint32(p.From3.Reg), 0, 0, uint32(m4), asm) + zVRRc(op, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), 0, 0, uint32(m4), asm) } } diff --git a/src/cmd/internal/obj/s390x/objz.go b/src/cmd/internal/obj/s390x/objz.go index 3d3571a4611..45ce68bebf2 100644 --- a/src/cmd/internal/obj/s390x/objz.go +++ b/src/cmd/internal/obj/s390x/objz.go @@ -122,9 +122,11 @@ func (c *ctxtz) rewriteToUseGot(p *obj.Prog) { // We only care about global data: NAME_EXTERN means a global // symbol in the Go sense, and p.Sym.Local is true for a few // internally defined symbols. + // Rewrites must not clobber flags and therefore cannot use the + // ADD instruction. if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() { // MOVD $sym, Rx becomes MOVD sym@GOT, Rx - // MOVD $sym+, Rx becomes MOVD sym@GOT, Rx; ADD , Rx + // MOVD $sym+, Rx becomes MOVD sym@GOT, Rx or REGTMP2; MOVD $(Rx or REGTMP2), Rx if p.To.Type != obj.TYPE_REG || p.As != AMOVD { c.ctxt.Diag("do not know how to handle LEA-type insn to non-register in %v with -dynlink", p) } @@ -132,20 +134,28 @@ func (c *ctxtz) rewriteToUseGot(p *obj.Prog) { p.From.Name = obj.NAME_GOTREF q := p if p.From.Offset != 0 { - q = obj.Appendp(p, c.newprog) - q.As = AADD - q.From.Type = obj.TYPE_CONST + target := p.To.Reg + if target == REG_R0 { + // Cannot use R0 as input to address calculation. + // REGTMP might be used by the assembler. 
+ p.To.Reg = REGTMP2 + } + q = obj.Appendp(q, c.newprog) + q.As = AMOVD + q.From.Type = obj.TYPE_ADDR q.From.Offset = p.From.Offset - q.To = p.To + q.From.Reg = p.To.Reg + q.To.Type = obj.TYPE_REG + q.To.Reg = target p.From.Offset = 0 } } - if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN { + if p.GetFrom3() != nil && p.GetFrom3().Name == obj.NAME_EXTERN { c.ctxt.Diag("don't know how to handle %v with -dynlink", p) } var source *obj.Addr - // MOVD sym, Ry becomes MOVD sym@GOT, REGTMP; MOVD (REGTMP), Ry - // MOVD Ry, sym becomes MOVD sym@GOT, REGTMP; MOVD Ry, (REGTMP) + // MOVD sym, Ry becomes MOVD sym@GOT, REGTMP2; MOVD (REGTMP2), Ry + // MOVD Ry, sym becomes MOVD sym@GOT, REGTMP2; MOVD Ry, (REGTMP2) // An addition may be inserted between the two MOVs if there is an offset. if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() { if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() { @@ -174,17 +184,17 @@ func (c *ctxtz) rewriteToUseGot(p *obj.Prog) { p1.From.Sym = source.Sym p1.From.Name = obj.NAME_GOTREF p1.To.Type = obj.TYPE_REG - p1.To.Reg = REGTMP + p1.To.Reg = REGTMP2 p2.As = p.As p2.From = p.From p2.To = p.To if p.From.Name == obj.NAME_EXTERN { - p2.From.Reg = REGTMP + p2.From.Reg = REGTMP2 p2.From.Name = obj.NAME_NONE p2.From.Sym = nil } else if p.To.Name == obj.NAME_EXTERN { - p2.To.Reg = REGTMP + p2.To.Reg = REGTMP2 p2.To.Name = obj.NAME_NONE p2.To.Sym = nil } else { diff --git a/src/cmd/internal/obj/sizeof_test.go b/src/cmd/internal/obj/sizeof_test.go index 601e9e2ea64..e70d174637d 100644 --- a/src/cmd/internal/obj/sizeof_test.go +++ b/src/cmd/internal/obj/sizeof_test.go @@ -24,7 +24,7 @@ func TestSizeof(t *testing.T) { }{ {Addr{}, 32, 48}, {LSym{}, 56, 104}, - {Prog{}, 124, 184}, + {Prog{}, 132, 200}, } for _, tt := range tests { diff --git a/src/cmd/internal/obj/sym.go b/src/cmd/internal/obj/sym.go index 3fb2df169a1..3fc17fa8507 100644 --- a/src/cmd/internal/obj/sym.go +++ b/src/cmd/internal/obj/sym.go @@ -45,8 +45,7 @@ func Linknew(arch 
*LinkArch) *Link { ctxt.Arch = arch ctxt.Pathname = objabi.WorkingDir() - ctxt.Headtype.Set(objabi.GOOS) - if ctxt.Headtype < 0 { + if err := ctxt.Headtype.Set(objabi.GOOS); err != nil { log.Fatalf("unknown goos %s", objabi.GOOS) } diff --git a/src/cmd/internal/obj/util.go b/src/cmd/internal/obj/util.go index 9bcdbbd127e..245e9e9e9b7 100644 --- a/src/cmd/internal/obj/util.go +++ b/src/cmd/internal/obj/util.go @@ -13,8 +13,31 @@ import ( const REG_NONE = 0 +// Line returns a string containing the filename and line number for p func (p *Prog) Line() string { - return p.Ctxt.OutermostPos(p.Pos).Format(false) + return p.Ctxt.OutermostPos(p.Pos).Format(false, true) +} + +// InnermostLineNumber returns a string containing the line number for the +// innermost inlined function (if any inlining) at p's position +func (p *Prog) InnermostLineNumber() string { + pos := p.Ctxt.InnermostPos(p.Pos) + if !pos.IsKnown() { + return "?" + } + return fmt.Sprintf("%d", pos.Line()) +} + +// InnermostFilename returns a string containing the innermost +// (in inlining) filename at p's position +func (p *Prog) InnermostFilename() string { + // TODO For now, this is only used for debugging output, and if we need more/better information, it might change. + // An example of what we might want to see is the full stack of positions for inlined code, so we get some visibility into what is recorded there. + pos := p.Ctxt.InnermostPos(p.Pos) + if !pos.IsKnown() { + return "" + } + return pos.Filename() } var armCondCode = []string{ @@ -72,6 +95,18 @@ func (p *Prog) String() string { if p == nil { return "" } + if p.Ctxt == nil { + return "" + } + return fmt.Sprintf("%.5d (%v)\t%s", p.Pc, p.Line(), p.InstructionString()) +} + +// InstructionString returns a string representation of the instruction without preceding +// program counter or file and line number. 
+func (p *Prog) InstructionString() string { + if p == nil { + return "" + } if p.Ctxt == nil { return "" @@ -81,13 +116,9 @@ func (p *Prog) String() string { var buf bytes.Buffer - fmt.Fprintf(&buf, "%.5d (%v)\t%v%s", p.Pc, p.Line(), p.As, sc) + fmt.Fprintf(&buf, "%v%s", p.As, sc) sep := "\t" - quadOpAmd64 := p.RegTo2 == -1 - if quadOpAmd64 { - fmt.Fprintf(&buf, "%s$%d", sep, p.From3.Offset) - sep = ", " - } + if p.From.Type != TYPE_NONE { fmt.Fprintf(&buf, "%s%v", sep, Dconv(p, &p.From)) sep = ", " @@ -97,14 +128,11 @@ func (p *Prog) String() string { fmt.Fprintf(&buf, "%s%v", sep, Rconv(int(p.Reg))) sep = ", " } - if p.From3Type() != TYPE_NONE { - if quadOpAmd64 { - fmt.Fprintf(&buf, "%s%v", sep, Rconv(int(p.From3.Reg))) - } else { - fmt.Fprintf(&buf, "%s%v", sep, Dconv(p, p.From3)) - } + for i := range p.RestArgs { + fmt.Fprintf(&buf, "%s%v", sep, Dconv(p, &p.RestArgs[i])) sep = ", " } + if p.As == ATEXT { // If there are attributes, print them. Otherwise, skip the comma. // In short, print one of these two: @@ -119,7 +147,7 @@ func (p *Prog) String() string { if p.To.Type != TYPE_NONE { fmt.Fprintf(&buf, "%s%v", sep, Dconv(p, &p.To)) } - if p.RegTo2 != REG_NONE && !quadOpAmd64 { + if p.RegTo2 != REG_NONE { fmt.Fprintf(&buf, "%s%v", sep, Rconv(int(p.RegTo2))) } return buf.String() @@ -157,7 +185,7 @@ func Dconv(p *Prog, a *Addr) string { // PINSRQ CX,$1,X6 // where the $1 is included in the p->to Addr. // Move into a new field. 
- if a.Offset != 0 { + if a.Offset != 0 && (a.Reg < RBaseARM64 || a.Reg >= RBaseMIPS) { str = fmt.Sprintf("$%d,%v", a.Offset, Rconv(int(a.Reg))) break } @@ -166,6 +194,10 @@ func Dconv(p *Prog, a *Addr) string { if a.Name != NAME_NONE || a.Sym != nil { str = fmt.Sprintf("%v(%v)(REG)", Mconv(a), Rconv(int(a.Reg))) } + if (RBaseARM64+1<<10+1<<9) /* arm64.REG_ELEM */ <= a.Reg && + a.Reg < (RBaseARM64+1<<11) /* arm64.REG_ELEM_END */ { + str += fmt.Sprintf("[%d]", a.Index) + } case TYPE_BRANCH: if a.Sym != nil { @@ -243,7 +275,7 @@ func Dconv(p *Prog, a *Addr) string { str = fmt.Sprintf("%v, %v", Rconv(int(a.Offset)), Rconv(int(a.Reg))) case TYPE_REGLIST: - str = regListConv(int(a.Offset)) + str = RLconv(a.Offset) } return str @@ -380,27 +412,40 @@ func Rconv(reg int) string { return fmt.Sprintf("R???%d", reg) } -func regListConv(list int) string { - str := "" +type regListSet struct { + lo int64 + hi int64 + RLconv func(int64) string +} - for i := 0; i < 16; i++ { // TODO: 16 is ARM-specific. 
- if list&(1<= unsigned - AJCS // < unsigned - AJCXZL - AJEQ // == (zero) - AJGE // >= signed - AJGT // > signed - AJHI // > unsigned - AJLE // <= signed - AJLS // <= unsigned - AJLT // < signed - AJMI // sign bit set (negative) - AJNE // != (nonzero) - AJOC // overflow clear - AJOS // overflow set - AJPC // parity clear - AJPL // sign bit clear (positive) - AJPS // parity set - ALAHF - ALARL - ALARW - ALEAL - ALEAW - ALEAVEL - ALEAVEW - ALOCK - ALODSB - ALODSL - ALODSW - ALONG - ALOOP - ALOOPEQ - ALOOPNE - ALSLL - ALSLW - AMOVB - AMOVL - AMOVW - AMOVBLSX - AMOVBLZX - AMOVBQSX - AMOVBQZX - AMOVBWSX - AMOVBWZX - AMOVWLSX - AMOVWLZX - AMOVWQSX - AMOVWQZX - AMOVSB - AMOVSL - AMOVSW - AMULB - AMULL - AMULW - ANEGB - ANEGL - ANEGW - ANOTB - ANOTL - ANOTW - AORB - AORL - AORW - AOUTB - AOUTL - AOUTW - AOUTSB - AOUTSL - AOUTSW - APAUSE - APOPAL - APOPAW - APOPCNTW - APOPCNTL - APOPCNTQ - APOPFL - APOPFW - APOPL - APOPW - APUSHAL - APUSHAW - APUSHFL - APUSHFW - APUSHL - APUSHW - ARCLB - ARCLL - ARCLW - ARCRB - ARCRL - ARCRW - AREP - AREPN - AROLB - AROLL - AROLW - ARORB - ARORL - ARORW - ASAHF - ASALB - ASALL - ASALW - ASARB - ASARL - ASARW - ASBBB - ASBBL - ASBBW - ASCASB - ASCASL - ASCASW - ASETCC - ASETCS - ASETEQ - ASETGE - ASETGT - ASETHI - ASETLE - ASETLS - ASETLT - ASETMI - ASETNE - ASETOC - ASETOS - ASETPC - ASETPL - ASETPS - ACDQ - ACWD - ASHLB - ASHLL - ASHLW - ASHRB - ASHRL - ASHRW - ASTC - ASTD - ASTI - ASTOSB - ASTOSL - ASTOSW - ASUBB - ASUBL - ASUBW - ASYSCALL - ATESTB - ATESTL - ATESTW - AVERR - AVERW - AWAIT - AWORD - AXCHGB - AXCHGL - AXCHGW - AXLAT - AXORB - AXORL - AXORW - - AFMOVB - AFMOVBP - AFMOVD - AFMOVDP - AFMOVF - AFMOVFP - AFMOVL - AFMOVLP - AFMOVV - AFMOVVP - AFMOVW - AFMOVWP - AFMOVX - AFMOVXP - - AFCOMD - AFCOMDP - AFCOMDPP - AFCOMF - AFCOMFP - AFCOML - AFCOMLP - AFCOMW - AFCOMWP - AFUCOM - AFUCOMP - AFUCOMPP - - AFADDDP - AFADDW - AFADDL - AFADDF - AFADDD - - AFMULDP - AFMULW - AFMULL - AFMULF - AFMULD - - AFSUBDP - AFSUBW - AFSUBL - AFSUBF - 
AFSUBD - - AFSUBRDP - AFSUBRW - AFSUBRL - AFSUBRF - AFSUBRD - - AFDIVDP - AFDIVW - AFDIVL - AFDIVF - AFDIVD - - AFDIVRDP - AFDIVRW - AFDIVRL - AFDIVRF - AFDIVRD - - AFXCHD - AFFREE - - AFLDCW - AFLDENV - AFRSTOR - AFSAVE - AFSTCW - AFSTENV - AFSTSW - - AF2XM1 - AFABS - AFCHS - AFCLEX - AFCOS - AFDECSTP - AFINCSTP - AFINIT - AFLD1 - AFLDL2E - AFLDL2T - AFLDLG2 - AFLDLN2 - AFLDPI - AFLDZ - AFNOP - AFPATAN - AFPREM - AFPREM1 - AFPTAN - AFRNDINT - AFSCALE - AFSIN - AFSINCOS - AFSQRT - AFTST - AFXAM - AFXTRACT - AFYL2X - AFYL2XP1 - - // extra 32-bit operations - ACMPXCHGB - ACMPXCHGL - ACMPXCHGW - ACMPXCHG8B - ACPUID - AINVD - AINVLPG - ALFENCE - AMFENCE - AMOVNTIL - ARDMSR - ARDPMC - ARDTSC - ARSM - ASFENCE - ASYSRET - AWBINVD - AWRMSR - AXADDB - AXADDL - AXADDW - - // conditional move - ACMOVLCC - ACMOVLCS - ACMOVLEQ - ACMOVLGE - ACMOVLGT - ACMOVLHI - ACMOVLLE - ACMOVLLS - ACMOVLLT - ACMOVLMI - ACMOVLNE - ACMOVLOC - ACMOVLOS - ACMOVLPC - ACMOVLPL - ACMOVLPS - ACMOVQCC - ACMOVQCS - ACMOVQEQ - ACMOVQGE - ACMOVQGT - ACMOVQHI - ACMOVQLE - ACMOVQLS - ACMOVQLT - ACMOVQMI - ACMOVQNE - ACMOVQOC - ACMOVQOS - ACMOVQPC - ACMOVQPL - ACMOVQPS - ACMOVWCC - ACMOVWCS - ACMOVWEQ - ACMOVWGE - ACMOVWGT - ACMOVWHI - ACMOVWLE - ACMOVWLS - ACMOVWLT - ACMOVWMI - ACMOVWNE - ACMOVWOC - ACMOVWOS - ACMOVWPC - ACMOVWPL - ACMOVWPS - - // 64-bit - AADCQ - AADDQ - AANDQ - ABSFQ - ABSRQ - ABTCQ - ABTQ - ABTRQ - ABTSQ - ACMPQ - ACMPSQ - ACMPXCHGQ - ACQO - ADIVQ - AIDIVQ - AIMULQ - AIRETQ - AJCXZQ - ALEAQ - ALEAVEQ - ALODSQ - AMOVQ - AMOVLQSX - AMOVLQZX - AMOVNTIQ - AMOVSQ - AMULQ - ANEGQ - ANOTQ - AORQ - APOPFQ - APOPQ - APUSHFQ - APUSHQ - ARCLQ - ARCRQ - AROLQ - ARORQ - AQUAD - ASALQ - ASARQ - ASBBQ - ASCASQ - ASHLQ - ASHRQ - ASTOSQ - ASUBQ - ATESTQ - AXADDQ - AXCHGQ - AXORQ - AXGETBV - - // media - AADDPD - AADDPS - AADDSD - AADDSS - AANDNL - AANDNQ - AANDNPD - AANDNPS - AANDPD - AANDPS - ABEXTRL - ABEXTRQ - ABLSIL - ABLSIQ - ABLSMSKL - ABLSMSKQ - ABLSRL - ABLSRQ - ABZHIL - ABZHIQ - ACMPPD - ACMPPS 
- ACMPSD - ACMPSS - ACOMISD - ACOMISS - ACVTPD2PL - ACVTPD2PS - ACVTPL2PD - ACVTPL2PS - ACVTPS2PD - ACVTPS2PL - ACVTSD2SL - ACVTSD2SQ - ACVTSD2SS - ACVTSL2SD - ACVTSL2SS - ACVTSQ2SD - ACVTSQ2SS - ACVTSS2SD - ACVTSS2SL - ACVTSS2SQ - ACVTTPD2PL - ACVTTPS2PL - ACVTTSD2SL - ACVTTSD2SQ - ACVTTSS2SL - ACVTTSS2SQ - ADIVPD - ADIVPS - ADIVSD - ADIVSS - AEMMS - AFXRSTOR - AFXRSTOR64 - AFXSAVE - AFXSAVE64 - ALDDQU - ALDMXCSR - AMASKMOVOU - AMASKMOVQ - AMAXPD - AMAXPS - AMAXSD - AMAXSS - AMINPD - AMINPS - AMINSD - AMINSS - AMOVAPD - AMOVAPS - AMOVOU - AMOVHLPS - AMOVHPD - AMOVHPS - AMOVLHPS - AMOVLPD - AMOVLPS - AMOVMSKPD - AMOVMSKPS - AMOVNTO - AMOVNTPD - AMOVNTPS - AMOVNTQ - AMOVO - AMOVQOZX - AMOVSD - AMOVSS - AMOVUPD - AMOVUPS - AMULPD - AMULPS - AMULSD - AMULSS - AMULXL - AMULXQ - AORPD - AORPS - APACKSSLW - APACKSSWB - APACKUSWB - APADDB - APADDL - APADDQ - APADDSB - APADDSW - APADDUSB - APADDUSW - APADDW - APAND - APANDN - APAVGB - APAVGW - APCMPEQB - APCMPEQL - APCMPEQW - APCMPGTB - APCMPGTL - APCMPGTW - APDEPL - APDEPQ - APEXTL - APEXTQ - APEXTRB - APEXTRD - APEXTRQ - APEXTRW - APHADDD - APHADDSW - APHADDW - APHMINPOSUW - APHSUBD - APHSUBSW - APHSUBW - APINSRB - APINSRD - APINSRQ - APINSRW - APMADDWL - APMAXSW - APMAXUB - APMINSW - APMINUB - APMOVMSKB - APMOVSXBD - APMOVSXBQ - APMOVSXBW - APMOVSXDQ - APMOVSXWD - APMOVSXWQ - APMOVZXBD - APMOVZXBQ - APMOVZXBW - APMOVZXDQ - APMOVZXWD - APMOVZXWQ - APMULDQ - APMULHUW - APMULHW - APMULLD - APMULLW - APMULULQ - APOR - APSADBW - APSHUFB - APSHUFHW - APSHUFL - APSHUFLW - APSHUFW - APSLLL - APSLLO - APSLLQ - APSLLW - APSRAL - APSRAW - APSRLL - APSRLO - APSRLQ - APSRLW - APSUBB - APSUBL - APSUBQ - APSUBSB - APSUBSW - APSUBUSB - APSUBUSW - APSUBW - APUNPCKHBW - APUNPCKHLQ - APUNPCKHQDQ - APUNPCKHWL - APUNPCKLBW - APUNPCKLLQ - APUNPCKLQDQ - APUNPCKLWL - APXOR - ARCPPS - ARCPSS - ARSQRTPS - ARSQRTSS - ASARXL - ASARXQ - ASHLXL - ASHLXQ - ASHRXL - ASHRXQ - ASHUFPD - ASHUFPS - ASQRTPD - ASQRTPS - ASQRTSD - ASQRTSS - ASTMXCSR - ASUBPD 
- ASUBPS - ASUBSD - ASUBSS - AUCOMISD - AUCOMISS - AUNPCKHPD - AUNPCKHPS - AUNPCKLPD - AUNPCKLPS - AXORPD - AXORPS - APCMPESTRI - - ARETFW - ARETFL - ARETFQ - ASWAPGS - - ACRC32B - ACRC32Q - AIMUL3Q - - APREFETCHT0 - APREFETCHT1 - APREFETCHT2 - APREFETCHNTA - - AMOVQL - ABSWAPL - ABSWAPQ - - AAESENC - AAESENCLAST - AAESDEC - AAESDECLAST - AAESIMC - AAESKEYGENASSIST - - AROUNDPS - AROUNDSS - AROUNDPD - AROUNDSD - AMOVDDUP - AMOVSHDUP - AMOVSLDUP - - APSHUFD - APCLMULQDQ - - AVZEROUPPER - AVMOVDQU - AVMOVNTDQ - AVMOVDQA - AVPCMPEQB - AVPXOR - AVPMOVMSKB - AVPAND - AVPTEST - AVPBROADCASTB - AVPSHUFB - AVPSHUFD - AVPERM2F128 - AVPALIGNR - AVPADDQ - AVPADDD - AVPSRLDQ - AVPSLLDQ - AVPSRLQ - AVPSLLQ - AVPSRLD - AVPSLLD - AVPOR - AVPBLENDD - AVINSERTI128 - AVPERM2I128 - ARORXL - ARORXQ - AVBROADCASTSS - AVBROADCASTSD - AVMOVDDUP - AVMOVSHDUP - AVMOVSLDUP - - // from 386 - AJCXZW - AFCMOVCC - AFCMOVCS - AFCMOVEQ - AFCMOVHI - AFCMOVLS - AFCMOVNE - AFCMOVNU - AFCMOVUN - AFCOMI - AFCOMIP - AFUCOMI - AFUCOMIP - - // TSX - AXACQUIRE - AXRELEASE - AXBEGIN - AXEND - AXABORT - AXTEST - - ALAST + DONE = 1 << iota ) const ( @@ -1006,3 +206,120 @@ const ( T_64 = 1 << 6 T_GOTYPE = 1 << 7 ) + +// https://www.uclibc.org/docs/psABI-x86_64.pdf, figure 3.36 +var AMD64DWARFRegisters = map[int16]int16{ + REG_AX: 0, + REG_DX: 1, + REG_CX: 2, + REG_BX: 3, + REG_SI: 4, + REG_DI: 5, + REG_BP: 6, + REG_SP: 7, + REG_R8: 8, + REG_R9: 9, + REG_R10: 10, + REG_R11: 11, + REG_R12: 12, + REG_R13: 13, + REG_R14: 14, + REG_R15: 15, + // 16 is "Return Address RA", whatever that is. + // XMM registers. %xmmN => XN. + REG_X0: 17, + REG_X1: 18, + REG_X2: 19, + REG_X3: 20, + REG_X4: 21, + REG_X5: 22, + REG_X6: 23, + REG_X7: 24, + REG_X8: 25, + REG_X9: 26, + REG_X10: 27, + REG_X11: 28, + REG_X12: 29, + REG_X13: 30, + REG_X14: 31, + REG_X15: 32, + // ST registers. %stN => FN. + REG_F0: 33, + REG_F1: 34, + REG_F2: 35, + REG_F3: 36, + REG_F4: 37, + REG_F5: 38, + REG_F6: 39, + REG_F7: 40, + // MMX registers. 
%mmN => MN. + REG_M0: 41, + REG_M1: 42, + REG_M2: 43, + REG_M3: 44, + REG_M4: 45, + REG_M5: 46, + REG_M6: 47, + REG_M7: 48, + // 48 is flags, which doesn't have a name. + REG_ES: 50, + REG_CS: 51, + REG_SS: 52, + REG_DS: 53, + REG_FS: 54, + REG_GS: 55, + // 58 and 59 are {fs,gs}base, which don't have names. + REG_TR: 62, + REG_LDTR: 63, + // 64-66 are mxcsr, fcw, fsw, which don't have names. +} + +// https://www.uclibc.org/docs/psABI-i386.pdf, table 2.14 +var X86DWARFRegisters = map[int16]int16{ + REG_AX: 0, + REG_CX: 1, + REG_DX: 2, + REG_BX: 3, + REG_SP: 4, + REG_BP: 5, + REG_SI: 6, + REG_DI: 7, + // 8 is "Return Address RA", whatever that is. + // 9 is flags, which doesn't have a name. + // ST registers. %stN => FN. + REG_F0: 11, + REG_F1: 12, + REG_F2: 13, + REG_F3: 14, + REG_F4: 15, + REG_F5: 16, + REG_F6: 17, + REG_F7: 18, + // XMM registers. %xmmN => XN. + REG_X0: 21, + REG_X1: 22, + REG_X2: 23, + REG_X3: 24, + REG_X4: 25, + REG_X5: 26, + REG_X6: 27, + REG_X7: 28, + // MMX registers. %mmN => MN. + REG_M0: 29, + REG_M1: 30, + REG_M2: 31, + REG_M3: 32, + REG_M4: 33, + REG_M5: 34, + REG_M6: 35, + REG_M7: 36, + // 39 is mxcsr, which doesn't have a name. + REG_ES: 40, + REG_CS: 41, + REG_SS: 42, + REG_DS: 43, + REG_FS: 44, + REG_GS: 45, + REG_TR: 48, + REG_LDTR: 49, +} diff --git a/src/cmd/internal/obj/x86/aenum.go b/src/cmd/internal/obj/x86/aenum.go new file mode 100644 index 00000000000..013d9e0228c --- /dev/null +++ b/src/cmd/internal/obj/x86/aenum.go @@ -0,0 +1,1136 @@ +// Code generated by x86avxgen. DO NOT EDIT. 
+ +package x86 + +import "cmd/internal/obj" + +//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p x86 + +const ( + AAAA = obj.ABaseAMD64 + obj.A_ARCHSPECIFIC + iota + AAAD + AAAM + AAAS + AADCB + AADCL + AADCQ + AADCW + AADCXL + AADCXQ + AADDB + AADDL + AADDPD + AADDPS + AADDQ + AADDSD + AADDSS + AADDSUBPD + AADDSUBPS + AADDW + AADJSP + AADOXL + AADOXQ + AAESDEC + AAESDECLAST + AAESENC + AAESENCLAST + AAESIMC + AAESKEYGENASSIST + AANDB + AANDL + AANDNL + AANDNPD + AANDNPS + AANDNQ + AANDPD + AANDPS + AANDQ + AANDW + AARPL + ABEXTRL + ABEXTRQ + ABLENDPD + ABLENDPS + ABLSIL + ABLSIQ + ABLSMSKL + ABLSMSKQ + ABLSRL + ABLSRQ + ABOUNDL + ABOUNDW + ABSFL + ABSFQ + ABSFW + ABSRL + ABSRQ + ABSRW + ABSWAPL + ABSWAPQ + ABTCL + ABTCQ + ABTCW + ABTL + ABTQ + ABTRL + ABTRQ + ABTRW + ABTSL + ABTSQ + ABTSW + ABTW + ABYTE + ABZHIL + ABZHIQ + ACDQ + ACLC + ACLD + ACLFLUSH + ACLI + ACLTS + ACMC + ACMOVLCC + ACMOVLCS + ACMOVLEQ + ACMOVLGE + ACMOVLGT + ACMOVLHI + ACMOVLLE + ACMOVLLS + ACMOVLLT + ACMOVLMI + ACMOVLNE + ACMOVLOC + ACMOVLOS + ACMOVLPC + ACMOVLPL + ACMOVLPS + ACMOVQCC + ACMOVQCS + ACMOVQEQ + ACMOVQGE + ACMOVQGT + ACMOVQHI + ACMOVQLE + ACMOVQLS + ACMOVQLT + ACMOVQMI + ACMOVQNE + ACMOVQOC + ACMOVQOS + ACMOVQPC + ACMOVQPL + ACMOVQPS + ACMOVWCC + ACMOVWCS + ACMOVWEQ + ACMOVWGE + ACMOVWGT + ACMOVWHI + ACMOVWLE + ACMOVWLS + ACMOVWLT + ACMOVWMI + ACMOVWNE + ACMOVWOC + ACMOVWOS + ACMOVWPC + ACMOVWPL + ACMOVWPS + ACMPB + ACMPL + ACMPPD + ACMPPS + ACMPQ + ACMPSB + ACMPSD + ACMPSL + ACMPSQ + ACMPSS + ACMPSW + ACMPW + ACMPXCHG8B + ACMPXCHGB + ACMPXCHGL + ACMPXCHGQ + ACMPXCHGW + ACOMISD + ACOMISS + ACPUID + ACQO + ACRC32B + ACRC32Q + ACVTPD2PL + ACVTPD2PS + ACVTPL2PD + ACVTPL2PS + ACVTPS2PD + ACVTPS2PL + ACVTSD2SL + ACVTSD2SQ + ACVTSD2SS + ACVTSL2SD + ACVTSL2SS + ACVTSQ2SD + ACVTSQ2SS + ACVTSS2SD + ACVTSS2SL + ACVTSS2SQ + ACVTTPD2PL + ACVTTPS2PL + ACVTTSD2SL + ACVTTSD2SQ + ACVTTSS2SL + ACVTTSS2SQ + ACWD + ADAA + ADAS + ADECB + ADECL + ADECQ + ADECW + ADIVB + ADIVL + ADIVPD + 
ADIVPS + ADIVQ + ADIVSD + ADIVSS + ADIVW + ADPPD + ADPPS + AEMMS + AENTER + AEXTRACTPS + AF2XM1 + AFABS + AFADDD + AFADDDP + AFADDF + AFADDL + AFADDW + AFCHS + AFCLEX + AFCMOVCC + AFCMOVCS + AFCMOVEQ + AFCMOVHI + AFCMOVLS + AFCMOVNE + AFCMOVNU + AFCMOVUN + AFCOMD + AFCOMDP + AFCOMDPP + AFCOMF + AFCOMFP + AFCOMI + AFCOMIP + AFCOML + AFCOMLP + AFCOMW + AFCOMWP + AFCOS + AFDECSTP + AFDIVD + AFDIVDP + AFDIVF + AFDIVL + AFDIVRD + AFDIVRDP + AFDIVRF + AFDIVRL + AFDIVRW + AFDIVW + AFFREE + AFINCSTP + AFINIT + AFLD1 + AFLDCW + AFLDENV + AFLDL2E + AFLDL2T + AFLDLG2 + AFLDLN2 + AFLDPI + AFLDZ + AFMOVB + AFMOVBP + AFMOVD + AFMOVDP + AFMOVF + AFMOVFP + AFMOVL + AFMOVLP + AFMOVV + AFMOVVP + AFMOVW + AFMOVWP + AFMOVX + AFMOVXP + AFMULD + AFMULDP + AFMULF + AFMULL + AFMULW + AFNOP + AFPATAN + AFPREM + AFPREM1 + AFPTAN + AFRNDINT + AFRSTOR + AFSAVE + AFSCALE + AFSIN + AFSINCOS + AFSQRT + AFSTCW + AFSTENV + AFSTSW + AFSUBD + AFSUBDP + AFSUBF + AFSUBL + AFSUBRD + AFSUBRDP + AFSUBRF + AFSUBRL + AFSUBRW + AFSUBW + AFTST + AFUCOM + AFUCOMI + AFUCOMIP + AFUCOMP + AFUCOMPP + AFXAM + AFXCHD + AFXRSTOR + AFXRSTOR64 + AFXSAVE + AFXSAVE64 + AFXTRACT + AFYL2X + AFYL2XP1 + AHADDPD + AHADDPS + AHLT + AHSUBPD + AHSUBPS + AIDIVB + AIDIVL + AIDIVQ + AIDIVW + AIMUL3Q + AIMULB + AIMULL + AIMULQ + AIMULW + AINB + AINCB + AINCL + AINCQ + AINCW + AINL + AINSB + AINSERTPS + AINSL + AINSW + AINT + AINTO + AINVD + AINVLPG + AINW + AIRETL + AIRETQ + AIRETW + AJCC // >= unsigned + AJCS // < unsigned + AJCXZL + AJCXZQ + AJCXZW + AJEQ // == (zero) + AJGE // >= signed + AJGT // > signed + AJHI // > unsigned + AJLE // <= signed + AJLS // <= unsigned + AJLT // < signed + AJMI // sign bit set (negative) + AJNE // != (nonzero) + AJOC // overflow clear + AJOS // overflow set + AJPC // parity clear + AJPL // sign bit clear (positive) + AJPS // parity set + ALAHF + ALARL + ALARW + ALDDQU + ALDMXCSR + ALEAL + ALEAQ + ALEAVEL + ALEAVEQ + ALEAVEW + ALEAW + ALFENCE + ALOCK + ALODSB + ALODSL + ALODSQ + ALODSW + ALONG + 
ALOOP + ALOOPEQ + ALOOPNE + ALSLL + ALSLW + AMASKMOVOU + AMASKMOVQ + AMAXPD + AMAXPS + AMAXSD + AMAXSS + AMFENCE + AMINPD + AMINPS + AMINSD + AMINSS + AMOVAPD + AMOVAPS + AMOVB + AMOVBLSX + AMOVBLZX + AMOVBQSX + AMOVBQZX + AMOVBWSX + AMOVBWZX + AMOVDDUP + AMOVHLPS + AMOVHPD + AMOVHPS + AMOVL + AMOVLHPS + AMOVLPD + AMOVLPS + AMOVLQSX + AMOVLQZX + AMOVMSKPD + AMOVMSKPS + AMOVNTDQA + AMOVNTIL + AMOVNTIQ + AMOVNTO + AMOVNTPD + AMOVNTPS + AMOVNTQ + AMOVO + AMOVOU + AMOVQ + AMOVQL + AMOVQOZX + AMOVSB + AMOVSD + AMOVSHDUP + AMOVSL + AMOVSLDUP + AMOVSQ + AMOVSS + AMOVSW + AMOVUPD + AMOVUPS + AMOVW + AMOVWLSX + AMOVWLZX + AMOVWQSX + AMOVWQZX + AMPSADBW + AMULB + AMULL + AMULPD + AMULPS + AMULQ + AMULSD + AMULSS + AMULW + AMULXL + AMULXQ + ANEGB + ANEGL + ANEGQ + ANEGW + ANOTB + ANOTL + ANOTQ + ANOTW + AORB + AORL + AORPD + AORPS + AORQ + AORW + AOUTB + AOUTL + AOUTSB + AOUTSL + AOUTSW + AOUTW + APABSB + APABSD + APABSW + APACKSSLW + APACKSSWB + APACKUSDW + APACKUSWB + APADDB + APADDL + APADDQ + APADDSB + APADDSW + APADDUSB + APADDUSW + APADDW + APALIGNR + APAND + APANDN + APAUSE + APAVGB + APAVGW + APBLENDW + APCLMULQDQ + APCMPEQB + APCMPEQL + APCMPEQQ + APCMPEQW + APCMPESTRI + APCMPESTRM + APCMPGTB + APCMPGTL + APCMPGTQ + APCMPGTW + APCMPISTRI + APCMPISTRM + APDEPL + APDEPQ + APEXTL + APEXTQ + APEXTRB + APEXTRD + APEXTRQ + APEXTRW + APHADDD + APHADDSW + APHADDW + APHMINPOSUW + APHSUBD + APHSUBSW + APHSUBW + APINSRB + APINSRD + APINSRQ + APINSRW + APMADDUBSW + APMADDWL + APMAXSB + APMAXSD + APMAXSW + APMAXUB + APMAXUD + APMAXUW + APMINSB + APMINSD + APMINSW + APMINUB + APMINUD + APMINUW + APMOVMSKB + APMOVSXBD + APMOVSXBQ + APMOVSXBW + APMOVSXDQ + APMOVSXWD + APMOVSXWQ + APMOVZXBD + APMOVZXBQ + APMOVZXBW + APMOVZXDQ + APMOVZXWD + APMOVZXWQ + APMULDQ + APMULHRSW + APMULHUW + APMULHW + APMULLD + APMULLW + APMULULQ + APOPAL + APOPAW + APOPCNTL + APOPCNTQ + APOPCNTW + APOPFL + APOPFQ + APOPFW + APOPL + APOPQ + APOPW + APOR + APREFETCHNTA + APREFETCHT0 + APREFETCHT1 + 
APREFETCHT2 + APSADBW + APSHUFB + APSHUFD + APSHUFHW + APSHUFL + APSHUFLW + APSHUFW + APSIGNB + APSIGND + APSIGNW + APSLLL + APSLLO + APSLLQ + APSLLW + APSRAL + APSRAW + APSRLL + APSRLO + APSRLQ + APSRLW + APSUBB + APSUBL + APSUBQ + APSUBSB + APSUBSW + APSUBUSB + APSUBUSW + APSUBW + APTEST + APUNPCKHBW + APUNPCKHLQ + APUNPCKHQDQ + APUNPCKHWL + APUNPCKLBW + APUNPCKLLQ + APUNPCKLQDQ + APUNPCKLWL + APUSHAL + APUSHAW + APUSHFL + APUSHFQ + APUSHFW + APUSHL + APUSHQ + APUSHW + APXOR + AQUAD + ARCLB + ARCLL + ARCLQ + ARCLW + ARCPPS + ARCPSS + ARCRB + ARCRL + ARCRQ + ARCRW + ARDMSR + ARDPMC + ARDTSC + AREP + AREPN + ARETFL + ARETFQ + ARETFW + AROLB + AROLL + AROLQ + AROLW + ARORB + ARORL + ARORQ + ARORW + ARORXL + ARORXQ + AROUNDPD + AROUNDPS + AROUNDSD + AROUNDSS + ARSM + ARSQRTPS + ARSQRTSS + ASAHF + ASALB + ASALL + ASALQ + ASALW + ASARB + ASARL + ASARQ + ASARW + ASARXL + ASARXQ + ASBBB + ASBBL + ASBBQ + ASBBW + ASCASB + ASCASL + ASCASQ + ASCASW + ASETCC + ASETCS + ASETEQ + ASETGE + ASETGT + ASETHI + ASETLE + ASETLS + ASETLT + ASETMI + ASETNE + ASETOC + ASETOS + ASETPC + ASETPL + ASETPS + ASFENCE + ASHLB + ASHLL + ASHLQ + ASHLW + ASHLXL + ASHLXQ + ASHRB + ASHRL + ASHRQ + ASHRW + ASHRXL + ASHRXQ + ASHUFPD + ASHUFPS + ASQRTPD + ASQRTPS + ASQRTSD + ASQRTSS + ASTC + ASTD + ASTI + ASTMXCSR + ASTOSB + ASTOSL + ASTOSQ + ASTOSW + ASUBB + ASUBL + ASUBPD + ASUBPS + ASUBQ + ASUBSD + ASUBSS + ASUBW + ASWAPGS + ASYSCALL + ASYSRET + ATESTB + ATESTL + ATESTQ + ATESTW + AUCOMISD + AUCOMISS + AUNPCKHPD + AUNPCKHPS + AUNPCKLPD + AUNPCKLPS + AVADDPD + AVADDPS + AVADDSD + AVADDSS + AVADDSUBPD + AVADDSUBPS + AVAESDEC + AVAESDECLAST + AVAESENC + AVAESENCLAST + AVAESIMC + AVAESKEYGENASSIST + AVANDNPD + AVANDNPS + AVANDPD + AVANDPS + AVBLENDPD + AVBLENDPS + AVBLENDVPD + AVBLENDVPS + AVBROADCASTF128 + AVBROADCASTI128 + AVBROADCASTSD + AVBROADCASTSS + AVCMPPD + AVCMPPS + AVCMPSD + AVCMPSS + AVCOMISD + AVCOMISS + AVCVTDQ2PD + AVCVTDQ2PS + AVCVTPD2DQX + AVCVTPD2DQY + AVCVTPD2PSX + AVCVTPD2PSY + 
AVCVTPH2PS + AVCVTPS2DQ + AVCVTPS2PD + AVCVTPS2PH + AVCVTSD2SI + AVCVTSD2SIQ + AVCVTSD2SS + AVCVTSI2SDL + AVCVTSI2SDQ + AVCVTSI2SSL + AVCVTSI2SSQ + AVCVTSS2SD + AVCVTSS2SI + AVCVTSS2SIQ + AVCVTTPD2DQX + AVCVTTPD2DQY + AVCVTTPS2DQ + AVCVTTSD2SI + AVCVTTSD2SIQ + AVCVTTSS2SI + AVCVTTSS2SIQ + AVDIVPD + AVDIVPS + AVDIVSD + AVDIVSS + AVDPPD + AVDPPS + AVERR + AVERW + AVEXTRACTF128 + AVEXTRACTI128 + AVEXTRACTPS + AVFMADD132PD + AVFMADD132PS + AVFMADD132SD + AVFMADD132SS + AVFMADD213PD + AVFMADD213PS + AVFMADD213SD + AVFMADD213SS + AVFMADD231PD + AVFMADD231PS + AVFMADD231SD + AVFMADD231SS + AVFMADDSUB132PD + AVFMADDSUB132PS + AVFMADDSUB213PD + AVFMADDSUB213PS + AVFMADDSUB231PD + AVFMADDSUB231PS + AVFMSUB132PD + AVFMSUB132PS + AVFMSUB132SD + AVFMSUB132SS + AVFMSUB213PD + AVFMSUB213PS + AVFMSUB213SD + AVFMSUB213SS + AVFMSUB231PD + AVFMSUB231PS + AVFMSUB231SD + AVFMSUB231SS + AVFMSUBADD132PD + AVFMSUBADD132PS + AVFMSUBADD213PD + AVFMSUBADD213PS + AVFMSUBADD231PD + AVFMSUBADD231PS + AVFNMADD132PD + AVFNMADD132PS + AVFNMADD132SD + AVFNMADD132SS + AVFNMADD213PD + AVFNMADD213PS + AVFNMADD213SD + AVFNMADD213SS + AVFNMADD231PD + AVFNMADD231PS + AVFNMADD231SD + AVFNMADD231SS + AVFNMSUB132PD + AVFNMSUB132PS + AVFNMSUB132SD + AVFNMSUB132SS + AVFNMSUB213PD + AVFNMSUB213PS + AVFNMSUB213SD + AVFNMSUB213SS + AVFNMSUB231PD + AVFNMSUB231PS + AVFNMSUB231SD + AVFNMSUB231SS + AVGATHERDPD + AVGATHERDPS + AVGATHERQPD + AVGATHERQPS + AVHADDPD + AVHADDPS + AVHSUBPD + AVHSUBPS + AVINSERTF128 + AVINSERTI128 + AVINSERTPS + AVLDDQU + AVLDMXCSR + AVMASKMOVDQU + AVMASKMOVPD + AVMASKMOVPS + AVMAXPD + AVMAXPS + AVMAXSD + AVMAXSS + AVMINPD + AVMINPS + AVMINSD + AVMINSS + AVMOVAPD + AVMOVAPS + AVMOVD + AVMOVDDUP + AVMOVDQA + AVMOVDQU + AVMOVHLPS + AVMOVHPD + AVMOVHPS + AVMOVLHPS + AVMOVLPD + AVMOVLPS + AVMOVMSKPD + AVMOVMSKPS + AVMOVNTDQ + AVMOVNTDQA + AVMOVNTPD + AVMOVNTPS + AVMOVQ + AVMOVSD + AVMOVSHDUP + AVMOVSLDUP + AVMOVSS + AVMOVUPD + AVMOVUPS + AVMPSADBW + AVMULPD + AVMULPS + AVMULSD + AVMULSS + 
AVORPD + AVORPS + AVPABSB + AVPABSD + AVPABSW + AVPACKSSDW + AVPACKSSWB + AVPACKUSDW + AVPACKUSWB + AVPADDB + AVPADDD + AVPADDQ + AVPADDSB + AVPADDSW + AVPADDUSB + AVPADDUSW + AVPADDW + AVPALIGNR + AVPAND + AVPANDN + AVPAVGB + AVPAVGW + AVPBLENDD + AVPBLENDVB + AVPBLENDW + AVPBROADCASTB + AVPBROADCASTD + AVPBROADCASTQ + AVPBROADCASTW + AVPCLMULQDQ + AVPCMPEQB + AVPCMPEQD + AVPCMPEQQ + AVPCMPEQW + AVPCMPESTRI + AVPCMPESTRM + AVPCMPGTB + AVPCMPGTD + AVPCMPGTQ + AVPCMPGTW + AVPCMPISTRI + AVPCMPISTRM + AVPERM2F128 + AVPERM2I128 + AVPERMD + AVPERMILPD + AVPERMILPS + AVPERMPD + AVPERMPS + AVPERMQ + AVPEXTRB + AVPEXTRD + AVPEXTRQ + AVPEXTRW + AVPGATHERDD + AVPGATHERDQ + AVPGATHERQD + AVPGATHERQQ + AVPHADDD + AVPHADDSW + AVPHADDW + AVPHMINPOSUW + AVPHSUBD + AVPHSUBSW + AVPHSUBW + AVPINSRB + AVPINSRD + AVPINSRQ + AVPINSRW + AVPMADDUBSW + AVPMADDWD + AVPMASKMOVD + AVPMASKMOVQ + AVPMAXSB + AVPMAXSD + AVPMAXSW + AVPMAXUB + AVPMAXUD + AVPMAXUW + AVPMINSB + AVPMINSD + AVPMINSW + AVPMINUB + AVPMINUD + AVPMINUW + AVPMOVMSKB + AVPMOVSXBD + AVPMOVSXBQ + AVPMOVSXBW + AVPMOVSXDQ + AVPMOVSXWD + AVPMOVSXWQ + AVPMOVZXBD + AVPMOVZXBQ + AVPMOVZXBW + AVPMOVZXDQ + AVPMOVZXWD + AVPMOVZXWQ + AVPMULDQ + AVPMULHRSW + AVPMULHUW + AVPMULHW + AVPMULLD + AVPMULLW + AVPMULUDQ + AVPOR + AVPSADBW + AVPSHUFB + AVPSHUFD + AVPSHUFHW + AVPSHUFLW + AVPSIGNB + AVPSIGND + AVPSIGNW + AVPSLLD + AVPSLLDQ + AVPSLLQ + AVPSLLVD + AVPSLLVQ + AVPSLLW + AVPSRAD + AVPSRAVD + AVPSRAW + AVPSRLD + AVPSRLDQ + AVPSRLQ + AVPSRLVD + AVPSRLVQ + AVPSRLW + AVPSUBB + AVPSUBD + AVPSUBQ + AVPSUBSB + AVPSUBSW + AVPSUBUSB + AVPSUBUSW + AVPSUBW + AVPTEST + AVPUNPCKHBW + AVPUNPCKHDQ + AVPUNPCKHQDQ + AVPUNPCKHWD + AVPUNPCKLBW + AVPUNPCKLDQ + AVPUNPCKLQDQ + AVPUNPCKLWD + AVPXOR + AVRCPPS + AVRCPSS + AVROUNDPD + AVROUNDPS + AVROUNDSD + AVROUNDSS + AVRSQRTPS + AVRSQRTSS + AVSHUFPD + AVSHUFPS + AVSQRTPD + AVSQRTPS + AVSQRTSD + AVSQRTSS + AVSTMXCSR + AVSUBPD + AVSUBPS + AVSUBSD + AVSUBSS + AVTESTPD + AVTESTPS + AVUCOMISD + AVUCOMISS + 
AVUNPCKHPD + AVUNPCKHPS + AVUNPCKLPD + AVUNPCKLPS + AVXORPD + AVXORPS + AVZEROALL + AVZEROUPPER + AWAIT + AWBINVD + AWORD + AWRMSR + AXABORT + AXACQUIRE + AXADDB + AXADDL + AXADDQ + AXADDW + AXBEGIN + AXCHGB + AXCHGL + AXCHGQ + AXCHGW + AXEND + AXGETBV + AXLAT + AXORB + AXORL + AXORPD + AXORPS + AXORQ + AXORW + AXRELEASE + AXTEST + ALAST +) diff --git a/src/cmd/internal/obj/x86/anames.go b/src/cmd/internal/obj/x86/anames.go index 38cc03d84d1..ec7bea1255d 100644 --- a/src/cmd/internal/obj/x86/anames.go +++ b/src/cmd/internal/obj/x86/anames.go @@ -1,4 +1,4 @@ -// Generated by stringer -i a.out.go -o anames.go -p x86 +// Generated by stringer -i aenum.go -o anames.go -p x86 // Do not edit. package x86 @@ -12,351 +12,82 @@ var Anames = []string{ "AAS", "ADCB", "ADCL", + "ADCQ", "ADCW", + "ADCXL", + "ADCXQ", "ADDB", "ADDL", + "ADDPD", + "ADDPS", + "ADDQ", + "ADDSD", + "ADDSS", + "ADDSUBPD", + "ADDSUBPS", "ADDW", "ADJSP", + "ADOXL", + "ADOXQ", + "AESDEC", + "AESDECLAST", + "AESENC", + "AESENCLAST", + "AESIMC", + "AESKEYGENASSIST", "ANDB", "ANDL", + "ANDNL", + "ANDNPD", + "ANDNPS", + "ANDNQ", + "ANDPD", + "ANDPS", + "ANDQ", "ANDW", "ARPL", + "BEXTRL", + "BEXTRQ", + "BLENDPD", + "BLENDPS", + "BLSIL", + "BLSIQ", + "BLSMSKL", + "BLSMSKQ", + "BLSRL", + "BLSRQ", "BOUNDL", "BOUNDW", "BSFL", + "BSFQ", "BSFW", "BSRL", + "BSRQ", "BSRW", - "BTL", - "BTW", + "BSWAPL", + "BSWAPQ", "BTCL", + "BTCQ", "BTCW", + "BTL", + "BTQ", "BTRL", + "BTRQ", "BTRW", "BTSL", + "BTSQ", "BTSW", + "BTW", "BYTE", + "BZHIL", + "BZHIQ", + "CDQ", "CLC", "CLD", + "CLFLUSH", "CLI", "CLTS", "CMC", - "CMPB", - "CMPL", - "CMPW", - "CMPSB", - "CMPSL", - "CMPSW", - "DAA", - "DAS", - "DECB", - "DECL", - "DECQ", - "DECW", - "DIVB", - "DIVL", - "DIVW", - "ENTER", - "HADDPD", - "HADDPS", - "HLT", - "HSUBPD", - "HSUBPS", - "IDIVB", - "IDIVL", - "IDIVW", - "IMULB", - "IMULL", - "IMULW", - "INB", - "INL", - "INW", - "INCB", - "INCL", - "INCQ", - "INCW", - "INSB", - "INSL", - "INSW", - "INT", - "INTO", - "IRETL", - 
"IRETW", - "JCC", - "JCS", - "JCXZL", - "JEQ", - "JGE", - "JGT", - "JHI", - "JLE", - "JLS", - "JLT", - "JMI", - "JNE", - "JOC", - "JOS", - "JPC", - "JPL", - "JPS", - "LAHF", - "LARL", - "LARW", - "LEAL", - "LEAW", - "LEAVEL", - "LEAVEW", - "LOCK", - "LODSB", - "LODSL", - "LODSW", - "LONG", - "LOOP", - "LOOPEQ", - "LOOPNE", - "LSLL", - "LSLW", - "MOVB", - "MOVL", - "MOVW", - "MOVBLSX", - "MOVBLZX", - "MOVBQSX", - "MOVBQZX", - "MOVBWSX", - "MOVBWZX", - "MOVWLSX", - "MOVWLZX", - "MOVWQSX", - "MOVWQZX", - "MOVSB", - "MOVSL", - "MOVSW", - "MULB", - "MULL", - "MULW", - "NEGB", - "NEGL", - "NEGW", - "NOTB", - "NOTL", - "NOTW", - "ORB", - "ORL", - "ORW", - "OUTB", - "OUTL", - "OUTW", - "OUTSB", - "OUTSL", - "OUTSW", - "PAUSE", - "POPAL", - "POPAW", - "POPCNTW", - "POPCNTL", - "POPCNTQ", - "POPFL", - "POPFW", - "POPL", - "POPW", - "PUSHAL", - "PUSHAW", - "PUSHFL", - "PUSHFW", - "PUSHL", - "PUSHW", - "RCLB", - "RCLL", - "RCLW", - "RCRB", - "RCRL", - "RCRW", - "REP", - "REPN", - "ROLB", - "ROLL", - "ROLW", - "RORB", - "RORL", - "RORW", - "SAHF", - "SALB", - "SALL", - "SALW", - "SARB", - "SARL", - "SARW", - "SBBB", - "SBBL", - "SBBW", - "SCASB", - "SCASL", - "SCASW", - "SETCC", - "SETCS", - "SETEQ", - "SETGE", - "SETGT", - "SETHI", - "SETLE", - "SETLS", - "SETLT", - "SETMI", - "SETNE", - "SETOC", - "SETOS", - "SETPC", - "SETPL", - "SETPS", - "CDQ", - "CWD", - "SHLB", - "SHLL", - "SHLW", - "SHRB", - "SHRL", - "SHRW", - "STC", - "STD", - "STI", - "STOSB", - "STOSL", - "STOSW", - "SUBB", - "SUBL", - "SUBW", - "SYSCALL", - "TESTB", - "TESTL", - "TESTW", - "VERR", - "VERW", - "WAIT", - "WORD", - "XCHGB", - "XCHGL", - "XCHGW", - "XLAT", - "XORB", - "XORL", - "XORW", - "FMOVB", - "FMOVBP", - "FMOVD", - "FMOVDP", - "FMOVF", - "FMOVFP", - "FMOVL", - "FMOVLP", - "FMOVV", - "FMOVVP", - "FMOVW", - "FMOVWP", - "FMOVX", - "FMOVXP", - "FCOMD", - "FCOMDP", - "FCOMDPP", - "FCOMF", - "FCOMFP", - "FCOML", - "FCOMLP", - "FCOMW", - "FCOMWP", - "FUCOM", - "FUCOMP", - "FUCOMPP", - "FADDDP", - 
"FADDW", - "FADDL", - "FADDF", - "FADDD", - "FMULDP", - "FMULW", - "FMULL", - "FMULF", - "FMULD", - "FSUBDP", - "FSUBW", - "FSUBL", - "FSUBF", - "FSUBD", - "FSUBRDP", - "FSUBRW", - "FSUBRL", - "FSUBRF", - "FSUBRD", - "FDIVDP", - "FDIVW", - "FDIVL", - "FDIVF", - "FDIVD", - "FDIVRDP", - "FDIVRW", - "FDIVRL", - "FDIVRF", - "FDIVRD", - "FXCHD", - "FFREE", - "FLDCW", - "FLDENV", - "FRSTOR", - "FSAVE", - "FSTCW", - "FSTENV", - "FSTSW", - "F2XM1", - "FABS", - "FCHS", - "FCLEX", - "FCOS", - "FDECSTP", - "FINCSTP", - "FINIT", - "FLD1", - "FLDL2E", - "FLDL2T", - "FLDLG2", - "FLDLN2", - "FLDPI", - "FLDZ", - "FNOP", - "FPATAN", - "FPREM", - "FPREM1", - "FPTAN", - "FRNDINT", - "FSCALE", - "FSIN", - "FSINCOS", - "FSQRT", - "FTST", - "FXAM", - "FXTRACT", - "FYL2X", - "FYL2XP1", - "CMPXCHGB", - "CMPXCHGL", - "CMPXCHGW", - "CMPXCHG8B", - "CPUID", - "INVD", - "INVLPG", - "LFENCE", - "MFENCE", - "MOVNTIL", - "RDMSR", - "RDPMC", - "RDTSC", - "RSM", - "SFENCE", - "SYSRET", - "WBINVD", - "WRMSR", - "XADDB", - "XADDL", - "XADDW", "CMOVLCC", "CMOVLCS", "CMOVLEQ", @@ -405,84 +136,29 @@ var Anames = []string{ "CMOVWPC", "CMOVWPL", "CMOVWPS", - "ADCQ", - "ADDQ", - "ANDQ", - "BSFQ", - "BSRQ", - "BTCQ", - "BTQ", - "BTRQ", - "BTSQ", - "CMPQ", - "CMPSQ", - "CMPXCHGQ", - "CQO", - "DIVQ", - "IDIVQ", - "IMULQ", - "IRETQ", - "JCXZQ", - "LEAQ", - "LEAVEQ", - "LODSQ", - "MOVQ", - "MOVLQSX", - "MOVLQZX", - "MOVNTIQ", - "MOVSQ", - "MULQ", - "NEGQ", - "NOTQ", - "ORQ", - "POPFQ", - "POPQ", - "PUSHFQ", - "PUSHQ", - "RCLQ", - "RCRQ", - "ROLQ", - "RORQ", - "QUAD", - "SALQ", - "SARQ", - "SBBQ", - "SCASQ", - "SHLQ", - "SHRQ", - "STOSQ", - "SUBQ", - "TESTQ", - "XADDQ", - "XCHGQ", - "XORQ", - "XGETBV", - "ADDPD", - "ADDPS", - "ADDSD", - "ADDSS", - "ANDNL", - "ANDNQ", - "ANDNPD", - "ANDNPS", - "ANDPD", - "ANDPS", - "BEXTRL", - "BEXTRQ", - "BLSIL", - "BLSIQ", - "BLSMSKL", - "BLSMSKQ", - "BLSRL", - "BLSRQ", - "BZHIL", - "BZHIQ", + "CMPB", + "CMPL", "CMPPD", "CMPPS", + "CMPQ", + "CMPSB", "CMPSD", + "CMPSL", + 
"CMPSQ", "CMPSS", + "CMPSW", + "CMPW", + "CMPXCHG8B", + "CMPXCHGB", + "CMPXCHGL", + "CMPXCHGQ", + "CMPXCHGW", "COMISD", "COMISS", + "CPUID", + "CQO", + "CRC32B", + "CRC32Q", "CVTPD2PL", "CVTPD2PS", "CVTPL2PD", @@ -505,58 +181,307 @@ var Anames = []string{ "CVTTSD2SQ", "CVTTSS2SL", "CVTTSS2SQ", + "CWD", + "DAA", + "DAS", + "DECB", + "DECL", + "DECQ", + "DECW", + "DIVB", + "DIVL", "DIVPD", "DIVPS", + "DIVQ", "DIVSD", "DIVSS", + "DIVW", + "DPPD", + "DPPS", "EMMS", + "ENTER", + "EXTRACTPS", + "F2XM1", + "FABS", + "FADDD", + "FADDDP", + "FADDF", + "FADDL", + "FADDW", + "FCHS", + "FCLEX", + "FCMOVCC", + "FCMOVCS", + "FCMOVEQ", + "FCMOVHI", + "FCMOVLS", + "FCMOVNE", + "FCMOVNU", + "FCMOVUN", + "FCOMD", + "FCOMDP", + "FCOMDPP", + "FCOMF", + "FCOMFP", + "FCOMI", + "FCOMIP", + "FCOML", + "FCOMLP", + "FCOMW", + "FCOMWP", + "FCOS", + "FDECSTP", + "FDIVD", + "FDIVDP", + "FDIVF", + "FDIVL", + "FDIVRD", + "FDIVRDP", + "FDIVRF", + "FDIVRL", + "FDIVRW", + "FDIVW", + "FFREE", + "FINCSTP", + "FINIT", + "FLD1", + "FLDCW", + "FLDENV", + "FLDL2E", + "FLDL2T", + "FLDLG2", + "FLDLN2", + "FLDPI", + "FLDZ", + "FMOVB", + "FMOVBP", + "FMOVD", + "FMOVDP", + "FMOVF", + "FMOVFP", + "FMOVL", + "FMOVLP", + "FMOVV", + "FMOVVP", + "FMOVW", + "FMOVWP", + "FMOVX", + "FMOVXP", + "FMULD", + "FMULDP", + "FMULF", + "FMULL", + "FMULW", + "FNOP", + "FPATAN", + "FPREM", + "FPREM1", + "FPTAN", + "FRNDINT", + "FRSTOR", + "FSAVE", + "FSCALE", + "FSIN", + "FSINCOS", + "FSQRT", + "FSTCW", + "FSTENV", + "FSTSW", + "FSUBD", + "FSUBDP", + "FSUBF", + "FSUBL", + "FSUBRD", + "FSUBRDP", + "FSUBRF", + "FSUBRL", + "FSUBRW", + "FSUBW", + "FTST", + "FUCOM", + "FUCOMI", + "FUCOMIP", + "FUCOMP", + "FUCOMPP", + "FXAM", + "FXCHD", "FXRSTOR", "FXRSTOR64", "FXSAVE", "FXSAVE64", + "FXTRACT", + "FYL2X", + "FYL2XP1", + "HADDPD", + "HADDPS", + "HLT", + "HSUBPD", + "HSUBPS", + "IDIVB", + "IDIVL", + "IDIVQ", + "IDIVW", + "IMUL3Q", + "IMULB", + "IMULL", + "IMULQ", + "IMULW", + "INB", + "INCB", + "INCL", + "INCQ", + "INCW", + "INL", + 
"INSB", + "INSERTPS", + "INSL", + "INSW", + "INT", + "INTO", + "INVD", + "INVLPG", + "INW", + "IRETL", + "IRETQ", + "IRETW", + "JCC", + "JCS", + "JCXZL", + "JCXZQ", + "JCXZW", + "JEQ", + "JGE", + "JGT", + "JHI", + "JLE", + "JLS", + "JLT", + "JMI", + "JNE", + "JOC", + "JOS", + "JPC", + "JPL", + "JPS", + "LAHF", + "LARL", + "LARW", "LDDQU", "LDMXCSR", + "LEAL", + "LEAQ", + "LEAVEL", + "LEAVEQ", + "LEAVEW", + "LEAW", + "LFENCE", + "LOCK", + "LODSB", + "LODSL", + "LODSQ", + "LODSW", + "LONG", + "LOOP", + "LOOPEQ", + "LOOPNE", + "LSLL", + "LSLW", "MASKMOVOU", "MASKMOVQ", "MAXPD", "MAXPS", "MAXSD", "MAXSS", + "MFENCE", "MINPD", "MINPS", "MINSD", "MINSS", "MOVAPD", "MOVAPS", - "MOVOU", + "MOVB", + "MOVBLSX", + "MOVBLZX", + "MOVBQSX", + "MOVBQZX", + "MOVBWSX", + "MOVBWZX", + "MOVDDUP", "MOVHLPS", "MOVHPD", "MOVHPS", + "MOVL", "MOVLHPS", "MOVLPD", "MOVLPS", + "MOVLQSX", + "MOVLQZX", "MOVMSKPD", "MOVMSKPS", + "MOVNTDQA", + "MOVNTIL", + "MOVNTIQ", "MOVNTO", "MOVNTPD", "MOVNTPS", "MOVNTQ", "MOVO", + "MOVOU", + "MOVQ", + "MOVQL", "MOVQOZX", + "MOVSB", "MOVSD", + "MOVSHDUP", + "MOVSL", + "MOVSLDUP", + "MOVSQ", "MOVSS", + "MOVSW", "MOVUPD", "MOVUPS", + "MOVW", + "MOVWLSX", + "MOVWLZX", + "MOVWQSX", + "MOVWQZX", + "MPSADBW", + "MULB", + "MULL", "MULPD", "MULPS", + "MULQ", "MULSD", "MULSS", + "MULW", "MULXL", "MULXQ", + "NEGB", + "NEGL", + "NEGQ", + "NEGW", + "NOTB", + "NOTL", + "NOTQ", + "NOTW", + "ORB", + "ORL", "ORPD", "ORPS", + "ORQ", + "ORW", + "OUTB", + "OUTL", + "OUTSB", + "OUTSL", + "OUTSW", + "OUTW", + "PABSB", + "PABSD", + "PABSW", "PACKSSLW", "PACKSSWB", + "PACKUSDW", "PACKUSWB", "PADDB", "PADDL", @@ -566,16 +491,26 @@ var Anames = []string{ "PADDUSB", "PADDUSW", "PADDW", + "PALIGNR", "PAND", "PANDN", + "PAUSE", "PAVGB", "PAVGW", + "PBLENDW", + "PCLMULQDQ", "PCMPEQB", "PCMPEQL", + "PCMPEQQ", "PCMPEQW", + "PCMPESTRI", + "PCMPESTRM", "PCMPGTB", "PCMPGTL", + "PCMPGTQ", "PCMPGTW", + "PCMPISTRI", + "PCMPISTRM", "PDEPL", "PDEPQ", "PEXTL", @@ -595,11 +530,20 @@ var Anames = 
[]string{ "PINSRD", "PINSRQ", "PINSRW", + "PMADDUBSW", "PMADDWL", + "PMAXSB", + "PMAXSD", "PMAXSW", "PMAXUB", + "PMAXUD", + "PMAXUW", + "PMINSB", + "PMINSD", "PMINSW", "PMINUB", + "PMINUD", + "PMINUW", "PMOVMSKB", "PMOVSXBD", "PMOVSXBQ", @@ -614,18 +558,38 @@ var Anames = []string{ "PMOVZXWD", "PMOVZXWQ", "PMULDQ", + "PMULHRSW", "PMULHUW", "PMULHW", "PMULLD", "PMULLW", "PMULULQ", + "POPAL", + "POPAW", + "POPCNTL", + "POPCNTQ", + "POPCNTW", + "POPFL", + "POPFQ", + "POPFW", + "POPL", + "POPQ", + "POPW", "POR", + "PREFETCHNTA", + "PREFETCHT0", + "PREFETCHT1", + "PREFETCHT2", "PSADBW", "PSHUFB", + "PSHUFD", "PSHUFHW", "PSHUFL", "PSHUFLW", "PSHUFW", + "PSIGNB", + "PSIGND", + "PSIGNW", "PSLLL", "PSLLO", "PSLLQ", @@ -644,6 +608,7 @@ var Anames = []string{ "PSUBUSB", "PSUBUSW", "PSUBW", + "PTEST", "PUNPCKHBW", "PUNPCKHLQ", "PUNPCKHQDQ", @@ -652,15 +617,97 @@ var Anames = []string{ "PUNPCKLLQ", "PUNPCKLQDQ", "PUNPCKLWL", + "PUSHAL", + "PUSHAW", + "PUSHFL", + "PUSHFQ", + "PUSHFW", + "PUSHL", + "PUSHQ", + "PUSHW", "PXOR", + "QUAD", + "RCLB", + "RCLL", + "RCLQ", + "RCLW", "RCPPS", "RCPSS", + "RCRB", + "RCRL", + "RCRQ", + "RCRW", + "RDMSR", + "RDPMC", + "RDTSC", + "REP", + "REPN", + "RETFL", + "RETFQ", + "RETFW", + "ROLB", + "ROLL", + "ROLQ", + "ROLW", + "RORB", + "RORL", + "RORQ", + "RORW", + "RORXL", + "RORXQ", + "ROUNDPD", + "ROUNDPS", + "ROUNDSD", + "ROUNDSS", + "RSM", "RSQRTPS", "RSQRTSS", + "SAHF", + "SALB", + "SALL", + "SALQ", + "SALW", + "SARB", + "SARL", + "SARQ", + "SARW", "SARXL", "SARXQ", + "SBBB", + "SBBL", + "SBBQ", + "SBBW", + "SCASB", + "SCASL", + "SCASQ", + "SCASW", + "SETCC", + "SETCS", + "SETEQ", + "SETGE", + "SETGT", + "SETHI", + "SETLE", + "SETLS", + "SETLT", + "SETMI", + "SETNE", + "SETOC", + "SETOS", + "SETPC", + "SETPL", + "SETPS", + "SFENCE", + "SHLB", + "SHLL", + "SHLQ", + "SHLW", "SHLXL", "SHLXQ", + "SHRB", + "SHRL", + "SHRQ", + "SHRW", "SHRXL", "SHRXQ", "SHUFPD", @@ -669,100 +716,420 @@ var Anames = []string{ "SQRTPS", "SQRTSD", "SQRTSS", + "STC", + 
"STD", + "STI", "STMXCSR", + "STOSB", + "STOSL", + "STOSQ", + "STOSW", + "SUBB", + "SUBL", "SUBPD", "SUBPS", + "SUBQ", "SUBSD", "SUBSS", + "SUBW", + "SWAPGS", + "SYSCALL", + "SYSRET", + "TESTB", + "TESTL", + "TESTQ", + "TESTW", "UCOMISD", "UCOMISS", "UNPCKHPD", "UNPCKHPS", "UNPCKLPD", "UNPCKLPS", - "XORPD", - "XORPS", - "PCMPESTRI", - "RETFW", - "RETFL", - "RETFQ", - "SWAPGS", - "CRC32B", - "CRC32Q", - "IMUL3Q", - "PREFETCHT0", - "PREFETCHT1", - "PREFETCHT2", - "PREFETCHNTA", - "MOVQL", - "BSWAPL", - "BSWAPQ", - "AESENC", - "AESENCLAST", - "AESDEC", - "AESDECLAST", - "AESIMC", - "AESKEYGENASSIST", - "ROUNDPS", - "ROUNDSS", - "ROUNDPD", - "ROUNDSD", - "MOVDDUP", - "MOVSHDUP", - "MOVSLDUP", - "PSHUFD", - "PCLMULQDQ", - "VZEROUPPER", - "VMOVDQU", - "VMOVNTDQ", - "VMOVDQA", - "VPCMPEQB", - "VPXOR", - "VPMOVMSKB", - "VPAND", - "VPTEST", - "VPBROADCASTB", - "VPSHUFB", - "VPSHUFD", - "VPERM2F128", - "VPALIGNR", - "VPADDQ", - "VPADDD", - "VPSRLDQ", - "VPSLLDQ", - "VPSRLQ", - "VPSLLQ", - "VPSRLD", - "VPSLLD", - "VPOR", - "VPBLENDD", - "VINSERTI128", - "VPERM2I128", - "RORXL", - "RORXQ", - "VBROADCASTSS", + "VADDPD", + "VADDPS", + "VADDSD", + "VADDSS", + "VADDSUBPD", + "VADDSUBPS", + "VAESDEC", + "VAESDECLAST", + "VAESENC", + "VAESENCLAST", + "VAESIMC", + "VAESKEYGENASSIST", + "VANDNPD", + "VANDNPS", + "VANDPD", + "VANDPS", + "VBLENDPD", + "VBLENDPS", + "VBLENDVPD", + "VBLENDVPS", + "VBROADCASTF128", + "VBROADCASTI128", "VBROADCASTSD", + "VBROADCASTSS", + "VCMPPD", + "VCMPPS", + "VCMPSD", + "VCMPSS", + "VCOMISD", + "VCOMISS", + "VCVTDQ2PD", + "VCVTDQ2PS", + "VCVTPD2DQX", + "VCVTPD2DQY", + "VCVTPD2PSX", + "VCVTPD2PSY", + "VCVTPH2PS", + "VCVTPS2DQ", + "VCVTPS2PD", + "VCVTPS2PH", + "VCVTSD2SI", + "VCVTSD2SIQ", + "VCVTSD2SS", + "VCVTSI2SDL", + "VCVTSI2SDQ", + "VCVTSI2SSL", + "VCVTSI2SSQ", + "VCVTSS2SD", + "VCVTSS2SI", + "VCVTSS2SIQ", + "VCVTTPD2DQX", + "VCVTTPD2DQY", + "VCVTTPS2DQ", + "VCVTTSD2SI", + "VCVTTSD2SIQ", + "VCVTTSS2SI", + "VCVTTSS2SIQ", + "VDIVPD", + "VDIVPS", + 
"VDIVSD", + "VDIVSS", + "VDPPD", + "VDPPS", + "VERR", + "VERW", + "VEXTRACTF128", + "VEXTRACTI128", + "VEXTRACTPS", + "VFMADD132PD", + "VFMADD132PS", + "VFMADD132SD", + "VFMADD132SS", + "VFMADD213PD", + "VFMADD213PS", + "VFMADD213SD", + "VFMADD213SS", + "VFMADD231PD", + "VFMADD231PS", + "VFMADD231SD", + "VFMADD231SS", + "VFMADDSUB132PD", + "VFMADDSUB132PS", + "VFMADDSUB213PD", + "VFMADDSUB213PS", + "VFMADDSUB231PD", + "VFMADDSUB231PS", + "VFMSUB132PD", + "VFMSUB132PS", + "VFMSUB132SD", + "VFMSUB132SS", + "VFMSUB213PD", + "VFMSUB213PS", + "VFMSUB213SD", + "VFMSUB213SS", + "VFMSUB231PD", + "VFMSUB231PS", + "VFMSUB231SD", + "VFMSUB231SS", + "VFMSUBADD132PD", + "VFMSUBADD132PS", + "VFMSUBADD213PD", + "VFMSUBADD213PS", + "VFMSUBADD231PD", + "VFMSUBADD231PS", + "VFNMADD132PD", + "VFNMADD132PS", + "VFNMADD132SD", + "VFNMADD132SS", + "VFNMADD213PD", + "VFNMADD213PS", + "VFNMADD213SD", + "VFNMADD213SS", + "VFNMADD231PD", + "VFNMADD231PS", + "VFNMADD231SD", + "VFNMADD231SS", + "VFNMSUB132PD", + "VFNMSUB132PS", + "VFNMSUB132SD", + "VFNMSUB132SS", + "VFNMSUB213PD", + "VFNMSUB213PS", + "VFNMSUB213SD", + "VFNMSUB213SS", + "VFNMSUB231PD", + "VFNMSUB231PS", + "VFNMSUB231SD", + "VFNMSUB231SS", + "VGATHERDPD", + "VGATHERDPS", + "VGATHERQPD", + "VGATHERQPS", + "VHADDPD", + "VHADDPS", + "VHSUBPD", + "VHSUBPS", + "VINSERTF128", + "VINSERTI128", + "VINSERTPS", + "VLDDQU", + "VLDMXCSR", + "VMASKMOVDQU", + "VMASKMOVPD", + "VMASKMOVPS", + "VMAXPD", + "VMAXPS", + "VMAXSD", + "VMAXSS", + "VMINPD", + "VMINPS", + "VMINSD", + "VMINSS", + "VMOVAPD", + "VMOVAPS", + "VMOVD", "VMOVDDUP", + "VMOVDQA", + "VMOVDQU", + "VMOVHLPS", + "VMOVHPD", + "VMOVHPS", + "VMOVLHPS", + "VMOVLPD", + "VMOVLPS", + "VMOVMSKPD", + "VMOVMSKPS", + "VMOVNTDQ", + "VMOVNTDQA", + "VMOVNTPD", + "VMOVNTPS", + "VMOVQ", + "VMOVSD", "VMOVSHDUP", "VMOVSLDUP", - "JCXZW", - "FCMOVCC", - "FCMOVCS", - "FCMOVEQ", - "FCMOVHI", - "FCMOVLS", - "FCMOVNE", - "FCMOVNU", - "FCMOVUN", - "FCOMI", - "FCOMIP", - "FUCOMI", - "FUCOMIP", - "XACQUIRE", 
- "XRELEASE", - "XBEGIN", - "XEND", + "VMOVSS", + "VMOVUPD", + "VMOVUPS", + "VMPSADBW", + "VMULPD", + "VMULPS", + "VMULSD", + "VMULSS", + "VORPD", + "VORPS", + "VPABSB", + "VPABSD", + "VPABSW", + "VPACKSSDW", + "VPACKSSWB", + "VPACKUSDW", + "VPACKUSWB", + "VPADDB", + "VPADDD", + "VPADDQ", + "VPADDSB", + "VPADDSW", + "VPADDUSB", + "VPADDUSW", + "VPADDW", + "VPALIGNR", + "VPAND", + "VPANDN", + "VPAVGB", + "VPAVGW", + "VPBLENDD", + "VPBLENDVB", + "VPBLENDW", + "VPBROADCASTB", + "VPBROADCASTD", + "VPBROADCASTQ", + "VPBROADCASTW", + "VPCLMULQDQ", + "VPCMPEQB", + "VPCMPEQD", + "VPCMPEQQ", + "VPCMPEQW", + "VPCMPESTRI", + "VPCMPESTRM", + "VPCMPGTB", + "VPCMPGTD", + "VPCMPGTQ", + "VPCMPGTW", + "VPCMPISTRI", + "VPCMPISTRM", + "VPERM2F128", + "VPERM2I128", + "VPERMD", + "VPERMILPD", + "VPERMILPS", + "VPERMPD", + "VPERMPS", + "VPERMQ", + "VPEXTRB", + "VPEXTRD", + "VPEXTRQ", + "VPEXTRW", + "VPGATHERDD", + "VPGATHERDQ", + "VPGATHERQD", + "VPGATHERQQ", + "VPHADDD", + "VPHADDSW", + "VPHADDW", + "VPHMINPOSUW", + "VPHSUBD", + "VPHSUBSW", + "VPHSUBW", + "VPINSRB", + "VPINSRD", + "VPINSRQ", + "VPINSRW", + "VPMADDUBSW", + "VPMADDWD", + "VPMASKMOVD", + "VPMASKMOVQ", + "VPMAXSB", + "VPMAXSD", + "VPMAXSW", + "VPMAXUB", + "VPMAXUD", + "VPMAXUW", + "VPMINSB", + "VPMINSD", + "VPMINSW", + "VPMINUB", + "VPMINUD", + "VPMINUW", + "VPMOVMSKB", + "VPMOVSXBD", + "VPMOVSXBQ", + "VPMOVSXBW", + "VPMOVSXDQ", + "VPMOVSXWD", + "VPMOVSXWQ", + "VPMOVZXBD", + "VPMOVZXBQ", + "VPMOVZXBW", + "VPMOVZXDQ", + "VPMOVZXWD", + "VPMOVZXWQ", + "VPMULDQ", + "VPMULHRSW", + "VPMULHUW", + "VPMULHW", + "VPMULLD", + "VPMULLW", + "VPMULUDQ", + "VPOR", + "VPSADBW", + "VPSHUFB", + "VPSHUFD", + "VPSHUFHW", + "VPSHUFLW", + "VPSIGNB", + "VPSIGND", + "VPSIGNW", + "VPSLLD", + "VPSLLDQ", + "VPSLLQ", + "VPSLLVD", + "VPSLLVQ", + "VPSLLW", + "VPSRAD", + "VPSRAVD", + "VPSRAW", + "VPSRLD", + "VPSRLDQ", + "VPSRLQ", + "VPSRLVD", + "VPSRLVQ", + "VPSRLW", + "VPSUBB", + "VPSUBD", + "VPSUBQ", + "VPSUBSB", + "VPSUBSW", + "VPSUBUSB", + 
"VPSUBUSW", + "VPSUBW", + "VPTEST", + "VPUNPCKHBW", + "VPUNPCKHDQ", + "VPUNPCKHQDQ", + "VPUNPCKHWD", + "VPUNPCKLBW", + "VPUNPCKLDQ", + "VPUNPCKLQDQ", + "VPUNPCKLWD", + "VPXOR", + "VRCPPS", + "VRCPSS", + "VROUNDPD", + "VROUNDPS", + "VROUNDSD", + "VROUNDSS", + "VRSQRTPS", + "VRSQRTSS", + "VSHUFPD", + "VSHUFPS", + "VSQRTPD", + "VSQRTPS", + "VSQRTSD", + "VSQRTSS", + "VSTMXCSR", + "VSUBPD", + "VSUBPS", + "VSUBSD", + "VSUBSS", + "VTESTPD", + "VTESTPS", + "VUCOMISD", + "VUCOMISS", + "VUNPCKHPD", + "VUNPCKHPS", + "VUNPCKLPD", + "VUNPCKLPS", + "VXORPD", + "VXORPS", + "VZEROALL", + "VZEROUPPER", + "WAIT", + "WBINVD", + "WORD", + "WRMSR", "XABORT", + "XACQUIRE", + "XADDB", + "XADDL", + "XADDQ", + "XADDW", + "XBEGIN", + "XCHGB", + "XCHGL", + "XCHGQ", + "XCHGW", + "XEND", + "XGETBV", + "XLAT", + "XORB", + "XORL", + "XORPD", + "XORPS", + "XORQ", + "XORW", + "XRELEASE", "XTEST", "LAST", } diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go index 5f3a8c45d53..7b80892cf0f 100644 --- a/src/cmd/internal/obj/x86/asm6.go +++ b/src/cmd/internal/obj/x86/asm6.go @@ -72,14 +72,6 @@ type Optab struct { op [23]uint8 } -type ytab struct { - from uint8 - from3 uint8 - to uint8 - zcase uint8 - zoffset uint8 -} - type Movtab struct { as obj.As ft uint8 @@ -94,6 +86,7 @@ const ( Ynone Yi0 // $0 Yi1 // $1 + Yu2 // $x, x fits in uint2 Yi8 // $x, x fits in int8 Yu8 // $x, x fits in uint8 Yu7 // $x, x in 0..127 (fits in both int8 and uint8) @@ -155,8 +148,10 @@ const ( Ymm Yxr Yxm + Yxvm // VSIB vector array; vm32x/vm64x Yyr Yym + Yyvm // VSIB vector array; vm32y/vm64y Ytls Ytextsize Yindir @@ -209,14 +204,19 @@ const ( Zm_ilo Zib_rr Zil_rr - Zclr Zbyte Zvex_rm_v_r + Zvex_rm_v_ro Zvex_r_v_rm Zvex_v_rm_r Zvex_i_rm_r Zvex_i_r_v Zvex_i_rm_v_r + Zvex + Zvex_rm_r_vo + Zvex_i_r_rm + Zvex_hr_rm_v_r + Zmax ) @@ -233,6 +233,9 @@ const ( Pef3 = 0xf5 /* xmm escape 2 with 16-bit prefix: 66 f3 0f */ Pq3 = 0x67 /* xmm escape 3: 66 48 0f */ Pq4 = 0x68 /* xmm escape 4: 66 0F 38 */ + Pq4w 
= 0x69 /* Pq4 with Rex.w 66 0F 38 */ + Pq5 = 0x6a /* xmm escape 5: F3 0F 38 */ + Pq5w = 0x6b /* Pq5 with Rex.w F3 0F 38 */ Pfw = 0xf4 /* Pf3 with Rex.w: f3 48 0f */ Pw = 0x48 /* Rex.w */ Pw8 = 0x90 // symbolic; exact value doesn't matter @@ -252,6 +255,11 @@ const ( // The P, L, and W fields are chosen to match // their eventual locations in the VEX prefix bytes. + // V field - 4 bits; ignored by encoder + vexNOVSR = 0 // No VEX-SPECIFIED-REGISTER + vexNDS = 0 + vexNDD = 0 + vexDDS = 0 // P field - 2 bits vex66 = 1 << 0 vexF3 = 2 << 0 @@ -271,49 +279,38 @@ const ( vex0F3A = 3 << 3 // Combinations used in the manual. - VEX_128_0F_WIG = vex128 | vex0F | vexWIG - VEX_128_66_0F_W0 = vex128 | vex66 | vex0F | vexW0 - VEX_128_66_0F_W1 = vex128 | vex66 | vex0F | vexW1 - VEX_128_66_0F_WIG = vex128 | vex66 | vex0F | vexWIG - VEX_128_66_0F38_W0 = vex128 | vex66 | vex0F38 | vexW0 - VEX_128_66_0F38_W1 = vex128 | vex66 | vex0F38 | vexW1 - VEX_128_66_0F38_WIG = vex128 | vex66 | vex0F38 | vexWIG - VEX_128_66_0F3A_W0 = vex128 | vex66 | vex0F3A | vexW0 - VEX_128_66_0F3A_W1 = vex128 | vex66 | vex0F3A | vexW1 - VEX_128_66_0F3A_WIG = vex128 | vex66 | vex0F3A | vexWIG - VEX_128_F2_0F_WIG = vex128 | vexF2 | vex0F | vexWIG - VEX_128_F3_0F_WIG = vex128 | vexF3 | vex0F | vexWIG - VEX_256_66_0F_WIG = vex256 | vex66 | vex0F | vexWIG - VEX_256_66_0F38_W0 = vex256 | vex66 | vex0F38 | vexW0 - VEX_256_66_0F38_W1 = vex256 | vex66 | vex0F38 | vexW1 - VEX_256_66_0F38_WIG = vex256 | vex66 | vex0F38 | vexWIG - VEX_256_66_0F3A_W0 = vex256 | vex66 | vex0F3A | vexW0 - VEX_256_66_0F3A_W1 = vex256 | vex66 | vex0F3A | vexW1 - VEX_256_66_0F3A_WIG = vex256 | vex66 | vex0F3A | vexWIG - VEX_256_F2_0F_WIG = vex256 | vexF2 | vex0F | vexWIG - VEX_256_F3_0F_WIG = vex256 | vexF3 | vex0F | vexWIG - VEX_LIG_0F_WIG = vexLIG | vex0F | vexWIG - VEX_LIG_66_0F_WIG = vexLIG | vex66 | vex0F | vexWIG - VEX_LIG_66_0F38_W0 = vexLIG | vex66 | vex0F38 | vexW0 - VEX_LIG_66_0F38_W1 = vexLIG | vex66 | vex0F38 | vexW1 - 
VEX_LIG_66_0F3A_WIG = vexLIG | vex66 | vex0F3A | vexWIG - VEX_LIG_F2_0F_W0 = vexLIG | vexF2 | vex0F | vexW0 - VEX_LIG_F2_0F_W1 = vexLIG | vexF2 | vex0F | vexW1 - VEX_LIG_F2_0F_WIG = vexLIG | vexF2 | vex0F | vexWIG - VEX_LIG_F3_0F_W0 = vexLIG | vexF3 | vex0F | vexW0 - VEX_LIG_F3_0F_W1 = vexLIG | vexF3 | vex0F | vexW1 - VEX_LIG_F3_0F_WIG = vexLIG | vexF3 | vex0F | vexWIG - VEX_LZ_0F_WIG = vexLZ | vex0F | vexWIG - VEX_LZ_0F38_W0 = vexLZ | vex0F38 | vexW0 - VEX_LZ_0F38_W1 = vexLZ | vex0F38 | vexW1 - VEX_LZ_66_0F38_W0 = vexLZ | vex66 | vex0F38 | vexW0 - VEX_LZ_66_0F38_W1 = vexLZ | vex66 | vex0F38 | vexW1 - VEX_LZ_F2_0F38_W0 = vexLZ | vexF2 | vex0F38 | vexW0 - VEX_LZ_F2_0F38_W1 = vexLZ | vexF2 | vex0F38 | vexW1 - VEX_LZ_F2_0F3A_W0 = vexLZ | vexF2 | vex0F3A | vexW0 - VEX_LZ_F2_0F3A_W1 = vexLZ | vexF2 | vex0F3A | vexW1 - VEX_LZ_F3_0F38_W0 = vexLZ | vexF3 | vex0F38 | vexW0 - VEX_LZ_F3_0F38_W1 = vexLZ | vexF3 | vex0F38 | vexW1 + VEX_DDS_LIG_66_0F38_W1 = vexDDS | vexLIG | vex66 | vex0F38 | vexW1 + VEX_NDD_128_66_0F_WIG = vexNDD | vex128 | vex66 | vex0F | vexWIG + VEX_NDD_256_66_0F_WIG = vexNDD | vex256 | vex66 | vex0F | vexWIG + VEX_NDD_LZ_F2_0F38_W0 = vexNDD | vexLZ | vexF2 | vex0F38 | vexW0 + VEX_NDD_LZ_F2_0F38_W1 = vexNDD | vexLZ | vexF2 | vex0F38 | vexW1 + VEX_NDS_128_66_0F_WIG = vexNDS | vex128 | vex66 | vex0F | vexWIG + VEX_NDS_128_66_0F38_WIG = vexNDS | vex128 | vex66 | vex0F38 | vexWIG + VEX_NDS_128_F2_0F_WIG = vexNDS | vex128 | vexF2 | vex0F | vexWIG + VEX_NDS_256_66_0F_WIG = vexNDS | vex256 | vex66 | vex0F | vexWIG + VEX_NDS_256_66_0F38_WIG = vexNDS | vex256 | vex66 | vex0F38 | vexWIG + VEX_NDS_256_66_0F3A_W0 = vexNDS | vex256 | vex66 | vex0F3A | vexW0 + VEX_NDS_256_66_0F3A_WIG = vexNDS | vex256 | vex66 | vex0F3A | vexWIG + VEX_NDS_LZ_0F38_W0 = vexNDS | vexLZ | vex0F38 | vexW0 + VEX_NDS_LZ_0F38_W1 = vexNDS | vexLZ | vex0F38 | vexW1 + VEX_NDS_LZ_66_0F38_W0 = vexNDS | vexLZ | vex66 | vex0F38 | vexW0 + VEX_NDS_LZ_66_0F38_W1 = vexNDS | vexLZ | vex66 | vex0F38 | vexW1 + 
VEX_NDS_LZ_F2_0F38_W0 = vexNDS | vexLZ | vexF2 | vex0F38 | vexW0 + VEX_NDS_LZ_F2_0F38_W1 = vexNDS | vexLZ | vexF2 | vex0F38 | vexW1 + VEX_NDS_LZ_F3_0F38_W0 = vexNDS | vexLZ | vexF3 | vex0F38 | vexW0 + VEX_NDS_LZ_F3_0F38_W1 = vexNDS | vexLZ | vexF3 | vex0F38 | vexW1 + VEX_NOVSR_128_66_0F_WIG = vexNOVSR | vex128 | vex66 | vex0F | vexWIG + VEX_NOVSR_128_66_0F38_W0 = vexNOVSR | vex128 | vex66 | vex0F38 | vexW0 + VEX_NOVSR_128_66_0F38_WIG = vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG + VEX_NOVSR_128_F2_0F_WIG = vexNOVSR | vex128 | vexF2 | vex0F | vexWIG + VEX_NOVSR_128_F3_0F_WIG = vexNOVSR | vex128 | vexF3 | vex0F | vexWIG + VEX_NOVSR_256_66_0F_WIG = vexNOVSR | vex256 | vex66 | vex0F | vexWIG + VEX_NOVSR_256_66_0F38_W0 = vexNOVSR | vex256 | vex66 | vex0F38 | vexW0 + VEX_NOVSR_256_66_0F38_WIG = vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG + VEX_NOVSR_256_F2_0F_WIG = vexNOVSR | vex256 | vexF2 | vex0F | vexWIG + VEX_NOVSR_256_F3_0F_WIG = vexNOVSR | vex256 | vexF3 | vex0F | vexWIG + VEX_NOVSR_LZ_F2_0F3A_W0 = vexNOVSR | vexLZ | vexF2 | vex0F3A | vexW0 + VEX_NOVSR_LZ_F2_0F3A_W1 = vexNOVSR | vexLZ | vexF2 | vex0F3A | vexW1 ) var ycover [Ymax * Ymax]uint8 @@ -323,463 +320,477 @@ var reg [MAXREG]int var regrex [MAXREG + 1]int var ynone = []ytab{ - {Ynone, Ynone, Ynone, Zlit, 1}, + {Zlit, 1, argList{}}, } var ytext = []ytab{ - {Ymb, Ynone, Ytextsize, Zpseudo, 0}, - {Ymb, Yi32, Ytextsize, Zpseudo, 1}, + {Zpseudo, 0, argList{Ymb, Ytextsize}}, + {Zpseudo, 1, argList{Ymb, Yi32, Ytextsize}}, } var ynop = []ytab{ - {Ynone, Ynone, Ynone, Zpseudo, 0}, - {Ynone, Ynone, Yiauto, Zpseudo, 0}, - {Ynone, Ynone, Yml, Zpseudo, 0}, - {Ynone, Ynone, Yrf, Zpseudo, 0}, - {Ynone, Ynone, Yxr, Zpseudo, 0}, - {Yiauto, Ynone, Ynone, Zpseudo, 0}, - {Yml, Ynone, Ynone, Zpseudo, 0}, - {Yrf, Ynone, Ynone, Zpseudo, 0}, - {Yxr, Ynone, Ynone, Zpseudo, 1}, + {Zpseudo, 0, argList{}}, + {Zpseudo, 0, argList{Yiauto}}, + {Zpseudo, 0, argList{Yml}}, + {Zpseudo, 0, argList{Yrf}}, + {Zpseudo, 0, argList{Yxr}}, + 
{Zpseudo, 0, argList{Yiauto}}, + {Zpseudo, 0, argList{Yml}}, + {Zpseudo, 0, argList{Yrf}}, + {Zpseudo, 1, argList{Yxr}}, } var yfuncdata = []ytab{ - {Yi32, Ynone, Ym, Zpseudo, 0}, + {Zpseudo, 0, argList{Yi32, Ym}}, } var ypcdata = []ytab{ - {Yi32, Ynone, Yi32, Zpseudo, 0}, + {Zpseudo, 0, argList{Yi32, Yi32}}, } var yxorb = []ytab{ - {Yi32, Ynone, Yal, Zib_, 1}, - {Yi32, Ynone, Ymb, Zibo_m, 2}, - {Yrb, Ynone, Ymb, Zr_m, 1}, - {Ymb, Ynone, Yrb, Zm_r, 1}, + {Zib_, 1, argList{Yi32, Yal}}, + {Zibo_m, 2, argList{Yi32, Ymb}}, + {Zr_m, 1, argList{Yrb, Ymb}}, + {Zm_r, 1, argList{Ymb, Yrb}}, } var yaddl = []ytab{ - {Yi8, Ynone, Yml, Zibo_m, 2}, - {Yi32, Ynone, Yax, Zil_, 1}, - {Yi32, Ynone, Yml, Zilo_m, 2}, - {Yrl, Ynone, Yml, Zr_m, 1}, - {Yml, Ynone, Yrl, Zm_r, 1}, + {Zibo_m, 2, argList{Yi8, Yml}}, + {Zil_, 1, argList{Yi32, Yax}}, + {Zilo_m, 2, argList{Yi32, Yml}}, + {Zr_m, 1, argList{Yrl, Yml}}, + {Zm_r, 1, argList{Yml, Yrl}}, } var yincl = []ytab{ - {Ynone, Ynone, Yrl, Z_rp, 1}, - {Ynone, Ynone, Yml, Zo_m, 2}, + {Z_rp, 1, argList{Yrl}}, + {Zo_m, 2, argList{Yml}}, } var yincq = []ytab{ - {Ynone, Ynone, Yml, Zo_m, 2}, + {Zo_m, 2, argList{Yml}}, } var ycmpb = []ytab{ - {Yal, Ynone, Yi32, Z_ib, 1}, - {Ymb, Ynone, Yi32, Zm_ibo, 2}, - {Ymb, Ynone, Yrb, Zm_r, 1}, - {Yrb, Ynone, Ymb, Zr_m, 1}, + {Z_ib, 1, argList{Yal, Yi32}}, + {Zm_ibo, 2, argList{Ymb, Yi32}}, + {Zm_r, 1, argList{Ymb, Yrb}}, + {Zr_m, 1, argList{Yrb, Ymb}}, } var ycmpl = []ytab{ - {Yml, Ynone, Yi8, Zm_ibo, 2}, - {Yax, Ynone, Yi32, Z_il, 1}, - {Yml, Ynone, Yi32, Zm_ilo, 2}, - {Yml, Ynone, Yrl, Zm_r, 1}, - {Yrl, Ynone, Yml, Zr_m, 1}, + {Zm_ibo, 2, argList{Yml, Yi8}}, + {Z_il, 1, argList{Yax, Yi32}}, + {Zm_ilo, 2, argList{Yml, Yi32}}, + {Zm_r, 1, argList{Yml, Yrl}}, + {Zr_m, 1, argList{Yrl, Yml}}, } var yshb = []ytab{ - {Yi1, Ynone, Ymb, Zo_m, 2}, - {Yi32, Ynone, Ymb, Zibo_m, 2}, - {Ycx, Ynone, Ymb, Zo_m, 2}, + {Zo_m, 2, argList{Yi1, Ymb}}, + {Zibo_m, 2, argList{Yu8, Ymb}}, + {Zo_m, 2, argList{Ycx, Ymb}}, } var yshl 
= []ytab{ - {Yi1, Ynone, Yml, Zo_m, 2}, - {Yi32, Ynone, Yml, Zibo_m, 2}, - {Ycl, Ynone, Yml, Zo_m, 2}, - {Ycx, Ynone, Yml, Zo_m, 2}, + {Zo_m, 2, argList{Yi1, Yml}}, + {Zibo_m, 2, argList{Yu8, Yml}}, + {Zo_m, 2, argList{Ycl, Yml}}, + {Zo_m, 2, argList{Ycx, Yml}}, } var ytestl = []ytab{ - {Yi32, Ynone, Yax, Zil_, 1}, - {Yi32, Ynone, Yml, Zilo_m, 2}, - {Yrl, Ynone, Yml, Zr_m, 1}, - {Yml, Ynone, Yrl, Zm_r, 1}, + {Zil_, 1, argList{Yi32, Yax}}, + {Zilo_m, 2, argList{Yi32, Yml}}, + {Zr_m, 1, argList{Yrl, Yml}}, + {Zm_r, 1, argList{Yml, Yrl}}, } var ymovb = []ytab{ - {Yrb, Ynone, Ymb, Zr_m, 1}, - {Ymb, Ynone, Yrb, Zm_r, 1}, - {Yi32, Ynone, Yrb, Zib_rp, 1}, - {Yi32, Ynone, Ymb, Zibo_m, 2}, + {Zr_m, 1, argList{Yrb, Ymb}}, + {Zm_r, 1, argList{Ymb, Yrb}}, + {Zib_rp, 1, argList{Yi32, Yrb}}, + {Zibo_m, 2, argList{Yi32, Ymb}}, } var ybtl = []ytab{ - {Yi8, Ynone, Yml, Zibo_m, 2}, - {Yrl, Ynone, Yml, Zr_m, 1}, + {Zibo_m, 2, argList{Yi8, Yml}}, + {Zr_m, 1, argList{Yrl, Yml}}, } var ymovw = []ytab{ - {Yrl, Ynone, Yml, Zr_m, 1}, - {Yml, Ynone, Yrl, Zm_r, 1}, - {Yi0, Ynone, Yrl, Zclr, 1}, - {Yi32, Ynone, Yrl, Zil_rp, 1}, - {Yi32, Ynone, Yml, Zilo_m, 2}, - {Yiauto, Ynone, Yrl, Zaut_r, 2}, + {Zr_m, 1, argList{Yrl, Yml}}, + {Zm_r, 1, argList{Yml, Yrl}}, + {Zil_rp, 1, argList{Yi32, Yrl}}, + {Zilo_m, 2, argList{Yi32, Yml}}, + {Zaut_r, 2, argList{Yiauto, Yrl}}, } var ymovl = []ytab{ - {Yrl, Ynone, Yml, Zr_m, 1}, - {Yml, Ynone, Yrl, Zm_r, 1}, - {Yi0, Ynone, Yrl, Zclr, 1}, - {Yi32, Ynone, Yrl, Zil_rp, 1}, - {Yi32, Ynone, Yml, Zilo_m, 2}, - {Yml, Ynone, Ymr, Zm_r_xm, 1}, // MMX MOVD - {Ymr, Ynone, Yml, Zr_m_xm, 1}, // MMX MOVD - {Yml, Ynone, Yxr, Zm_r_xm, 2}, // XMM MOVD (32 bit) - {Yxr, Ynone, Yml, Zr_m_xm, 2}, // XMM MOVD (32 bit) - {Yiauto, Ynone, Yrl, Zaut_r, 2}, + {Zr_m, 1, argList{Yrl, Yml}}, + {Zm_r, 1, argList{Yml, Yrl}}, + {Zil_rp, 1, argList{Yi32, Yrl}}, + {Zilo_m, 2, argList{Yi32, Yml}}, + {Zm_r_xm, 1, argList{Yml, Ymr}}, // MMX MOVD + {Zr_m_xm, 1, argList{Ymr, Yml}}, // MMX MOVD + 
{Zm_r_xm, 2, argList{Yml, Yxr}}, // XMM MOVD (32 bit) + {Zr_m_xm, 2, argList{Yxr, Yml}}, // XMM MOVD (32 bit) + {Zaut_r, 2, argList{Yiauto, Yrl}}, } var yret = []ytab{ - {Ynone, Ynone, Ynone, Zo_iw, 1}, - {Yi32, Ynone, Ynone, Zo_iw, 1}, + {Zo_iw, 1, argList{}}, + {Zo_iw, 1, argList{Yi32}}, } var ymovq = []ytab{ // valid in 32-bit mode - {Ym, Ynone, Ymr, Zm_r_xm_nr, 1}, // 0x6f MMX MOVQ (shorter encoding) - {Ymr, Ynone, Ym, Zr_m_xm_nr, 1}, // 0x7f MMX MOVQ - {Yxr, Ynone, Ymr, Zm_r_xm_nr, 2}, // Pf2, 0xd6 MOVDQ2Q - {Yxm, Ynone, Yxr, Zm_r_xm_nr, 2}, // Pf3, 0x7e MOVQ xmm1/m64 -> xmm2 - {Yxr, Ynone, Yxm, Zr_m_xm_nr, 2}, // Pe, 0xd6 MOVQ xmm1 -> xmm2/m64 + {Zm_r_xm_nr, 1, argList{Ym, Ymr}}, // 0x6f MMX MOVQ (shorter encoding) + {Zr_m_xm_nr, 1, argList{Ymr, Ym}}, // 0x7f MMX MOVQ + {Zm_r_xm_nr, 2, argList{Yxr, Ymr}}, // Pf2, 0xd6 MOVDQ2Q + {Zm_r_xm_nr, 2, argList{Yxm, Yxr}}, // Pf3, 0x7e MOVQ xmm1/m64 -> xmm2 + {Zr_m_xm_nr, 2, argList{Yxr, Yxm}}, // Pe, 0xd6 MOVQ xmm1 -> xmm2/m64 // valid only in 64-bit mode, usually with 64-bit prefix - {Yrl, Ynone, Yml, Zr_m, 1}, // 0x89 - {Yml, Ynone, Yrl, Zm_r, 1}, // 0x8b - {Yi0, Ynone, Yrl, Zclr, 1}, // 0x31 - {Ys32, Ynone, Yrl, Zilo_m, 2}, // 32 bit signed 0xc7,(0) - {Yi64, Ynone, Yrl, Ziq_rp, 1}, // 0xb8 -- 32/64 bit immediate - {Yi32, Ynone, Yml, Zilo_m, 2}, // 0xc7,(0) - {Ymm, Ynone, Ymr, Zm_r_xm, 1}, // 0x6e MMX MOVD - {Ymr, Ynone, Ymm, Zr_m_xm, 1}, // 0x7e MMX MOVD - {Yml, Ynone, Yxr, Zm_r_xm, 2}, // Pe, 0x6e MOVD xmm load - {Yxr, Ynone, Yml, Zr_m_xm, 2}, // Pe, 0x7e MOVD xmm store - {Yiauto, Ynone, Yrl, Zaut_r, 1}, // 0 built-in LEAQ + {Zr_m, 1, argList{Yrl, Yml}}, // 0x89 + {Zm_r, 1, argList{Yml, Yrl}}, // 0x8b + {Zilo_m, 2, argList{Ys32, Yrl}}, // 32 bit signed 0xc7,(0) + {Ziq_rp, 1, argList{Yi64, Yrl}}, // 0xb8 -- 32/64 bit immediate + {Zilo_m, 2, argList{Yi32, Yml}}, // 0xc7,(0) + {Zm_r_xm, 1, argList{Ymm, Ymr}}, // 0x6e MMX MOVD + {Zr_m_xm, 1, argList{Ymr, Ymm}}, // 0x7e MMX MOVD + {Zm_r_xm, 2, argList{Yml, Yxr}}, // 
Pe, 0x6e MOVD xmm load + {Zr_m_xm, 2, argList{Yxr, Yml}}, // Pe, 0x7e MOVD xmm store + {Zaut_r, 1, argList{Yiauto, Yrl}}, // 0 built-in LEAQ } var ym_rl = []ytab{ - {Ym, Ynone, Yrl, Zm_r, 1}, + {Zm_r, 1, argList{Ym, Yrl}}, } var yrl_m = []ytab{ - {Yrl, Ynone, Ym, Zr_m, 1}, + {Zr_m, 1, argList{Yrl, Ym}}, } var ymb_rl = []ytab{ - {Ymb, Ynone, Yrl, Zmb_r, 1}, + {Zmb_r, 1, argList{Ymb, Yrl}}, } var yml_rl = []ytab{ - {Yml, Ynone, Yrl, Zm_r, 1}, + {Zm_r, 1, argList{Yml, Yrl}}, } var yrl_ml = []ytab{ - {Yrl, Ynone, Yml, Zr_m, 1}, + {Zr_m, 1, argList{Yrl, Yml}}, } var yml_mb = []ytab{ - {Yrb, Ynone, Ymb, Zr_m, 1}, - {Ymb, Ynone, Yrb, Zm_r, 1}, + {Zr_m, 1, argList{Yrb, Ymb}}, + {Zm_r, 1, argList{Ymb, Yrb}}, } var yrb_mb = []ytab{ - {Yrb, Ynone, Ymb, Zr_m, 1}, + {Zr_m, 1, argList{Yrb, Ymb}}, } var yxchg = []ytab{ - {Yax, Ynone, Yrl, Z_rp, 1}, - {Yrl, Ynone, Yax, Zrp_, 1}, - {Yrl, Ynone, Yml, Zr_m, 1}, - {Yml, Ynone, Yrl, Zm_r, 1}, + {Z_rp, 1, argList{Yax, Yrl}}, + {Zrp_, 1, argList{Yrl, Yax}}, + {Zr_m, 1, argList{Yrl, Yml}}, + {Zm_r, 1, argList{Yml, Yrl}}, } var ydivl = []ytab{ - {Yml, Ynone, Ynone, Zm_o, 2}, + {Zm_o, 2, argList{Yml}}, } var ydivb = []ytab{ - {Ymb, Ynone, Ynone, Zm_o, 2}, + {Zm_o, 2, argList{Ymb}}, } var yimul = []ytab{ - {Yml, Ynone, Ynone, Zm_o, 2}, - {Yi8, Ynone, Yrl, Zib_rr, 1}, - {Yi32, Ynone, Yrl, Zil_rr, 1}, - {Yml, Ynone, Yrl, Zm_r, 2}, + {Zm_o, 2, argList{Yml}}, + {Zib_rr, 1, argList{Yi8, Yrl}}, + {Zil_rr, 1, argList{Yi32, Yrl}}, + {Zm_r, 2, argList{Yml, Yrl}}, } var yimul3 = []ytab{ - {Yi8, Yml, Yrl, Zibm_r, 2}, + {Zibm_r, 2, argList{Yi8, Yml, Yrl}}, } var ybyte = []ytab{ - {Yi64, Ynone, Ynone, Zbyte, 1}, + {Zbyte, 1, argList{Yi64}}, } var yin = []ytab{ - {Yi32, Ynone, Ynone, Zib_, 1}, - {Ynone, Ynone, Ynone, Zlit, 1}, + {Zib_, 1, argList{Yi32}}, + {Zlit, 1, argList{}}, } var yint = []ytab{ - {Yi32, Ynone, Ynone, Zib_, 1}, + {Zib_, 1, argList{Yi32}}, } var ypushl = []ytab{ - {Yrl, Ynone, Ynone, Zrp_, 1}, - {Ym, Ynone, Ynone, Zm_o, 2}, - {Yi8, 
Ynone, Ynone, Zib_, 1}, - {Yi32, Ynone, Ynone, Zil_, 1}, + {Zrp_, 1, argList{Yrl}}, + {Zm_o, 2, argList{Ym}}, + {Zib_, 1, argList{Yi8}}, + {Zil_, 1, argList{Yi32}}, } var ypopl = []ytab{ - {Ynone, Ynone, Yrl, Z_rp, 1}, - {Ynone, Ynone, Ym, Zo_m, 2}, + {Z_rp, 1, argList{Yrl}}, + {Zo_m, 2, argList{Ym}}, +} + +var yclflush = []ytab{ + {Zo_m, 2, argList{Ym}}, } var ybswap = []ytab{ - {Ynone, Ynone, Yrl, Z_rp, 2}, + {Z_rp, 2, argList{Yrl}}, } var yscond = []ytab{ - {Ynone, Ynone, Ymb, Zo_m, 2}, + {Zo_m, 2, argList{Ymb}}, } var yjcond = []ytab{ - {Ynone, Ynone, Ybr, Zbr, 0}, - {Yi0, Ynone, Ybr, Zbr, 0}, - {Yi1, Ynone, Ybr, Zbr, 1}, + {Zbr, 0, argList{Ybr}}, + {Zbr, 0, argList{Yi0, Ybr}}, + {Zbr, 1, argList{Yi1, Ybr}}, } var yloop = []ytab{ - {Ynone, Ynone, Ybr, Zloop, 1}, + {Zloop, 1, argList{Ybr}}, } var ycall = []ytab{ - {Ynone, Ynone, Yml, Zcallindreg, 0}, - {Yrx, Ynone, Yrx, Zcallindreg, 2}, - {Ynone, Ynone, Yindir, Zcallind, 2}, - {Ynone, Ynone, Ybr, Zcall, 0}, - {Ynone, Ynone, Yi32, Zcallcon, 1}, + {Zcallindreg, 0, argList{Yml}}, + {Zcallindreg, 2, argList{Yrx, Yrx}}, + {Zcallind, 2, argList{Yindir}}, + {Zcall, 0, argList{Ybr}}, + {Zcallcon, 1, argList{Yi32}}, } var yduff = []ytab{ - {Ynone, Ynone, Yi32, Zcallduff, 1}, + {Zcallduff, 1, argList{Yi32}}, } var yjmp = []ytab{ - {Ynone, Ynone, Yml, Zo_m64, 2}, - {Ynone, Ynone, Ybr, Zjmp, 0}, - {Ynone, Ynone, Yi32, Zjmpcon, 1}, + {Zo_m64, 2, argList{Yml}}, + {Zjmp, 0, argList{Ybr}}, + {Zjmpcon, 1, argList{Yi32}}, } var yfmvd = []ytab{ - {Ym, Ynone, Yf0, Zm_o, 2}, - {Yf0, Ynone, Ym, Zo_m, 2}, - {Yrf, Ynone, Yf0, Zm_o, 2}, - {Yf0, Ynone, Yrf, Zo_m, 2}, + {Zm_o, 2, argList{Ym, Yf0}}, + {Zo_m, 2, argList{Yf0, Ym}}, + {Zm_o, 2, argList{Yrf, Yf0}}, + {Zo_m, 2, argList{Yf0, Yrf}}, } var yfmvdp = []ytab{ - {Yf0, Ynone, Ym, Zo_m, 2}, - {Yf0, Ynone, Yrf, Zo_m, 2}, + {Zo_m, 2, argList{Yf0, Ym}}, + {Zo_m, 2, argList{Yf0, Yrf}}, } var yfmvf = []ytab{ - {Ym, Ynone, Yf0, Zm_o, 2}, - {Yf0, Ynone, Ym, Zo_m, 2}, + {Zm_o, 2, argList{Ym, 
Yf0}}, + {Zo_m, 2, argList{Yf0, Ym}}, } var yfmvx = []ytab{ - {Ym, Ynone, Yf0, Zm_o, 2}, + {Zm_o, 2, argList{Ym, Yf0}}, } var yfmvp = []ytab{ - {Yf0, Ynone, Ym, Zo_m, 2}, + {Zo_m, 2, argList{Yf0, Ym}}, } var yfcmv = []ytab{ - {Yrf, Ynone, Yf0, Zm_o, 2}, + {Zm_o, 2, argList{Yrf, Yf0}}, } var yfadd = []ytab{ - {Ym, Ynone, Yf0, Zm_o, 2}, - {Yrf, Ynone, Yf0, Zm_o, 2}, - {Yf0, Ynone, Yrf, Zo_m, 2}, + {Zm_o, 2, argList{Ym, Yf0}}, + {Zm_o, 2, argList{Yrf, Yf0}}, + {Zo_m, 2, argList{Yf0, Yrf}}, } var yfxch = []ytab{ - {Yf0, Ynone, Yrf, Zo_m, 2}, - {Yrf, Ynone, Yf0, Zm_o, 2}, + {Zo_m, 2, argList{Yf0, Yrf}}, + {Zm_o, 2, argList{Yrf, Yf0}}, } var ycompp = []ytab{ - {Yf0, Ynone, Yrf, Zo_m, 2}, /* botch is really f0,f1 */ + {Zo_m, 2, argList{Yf0, Yrf}}, /* botch is really f0,f1 */ } var ystsw = []ytab{ - {Ynone, Ynone, Ym, Zo_m, 2}, - {Ynone, Ynone, Yax, Zlit, 1}, + {Zo_m, 2, argList{Ym}}, + {Zlit, 1, argList{Yax}}, } -var ysvrs = []ytab{ - {Ynone, Ynone, Ym, Zo_m, 2}, - {Ym, Ynone, Ynone, Zm_o, 2}, +var ysvrs_mo = []ytab{ + {Zm_o, 2, argList{Ym}}, +} + +// unaryDst version of "ysvrs_mo". 
+var ysvrs_om = []ytab{ + {Zo_m, 2, argList{Ym}}, } var ymm = []ytab{ - {Ymm, Ynone, Ymr, Zm_r_xm, 1}, - {Yxm, Ynone, Yxr, Zm_r_xm, 2}, + {Zm_r_xm, 1, argList{Ymm, Ymr}}, + {Zm_r_xm, 2, argList{Yxm, Yxr}}, } var yxm = []ytab{ - {Yxm, Ynone, Yxr, Zm_r_xm, 1}, + {Zm_r_xm, 1, argList{Yxm, Yxr}}, } var yxm_q4 = []ytab{ - {Yxm, Ynone, Yxr, Zm_r, 1}, + {Zm_r, 1, argList{Yxm, Yxr}}, } var yxcvm1 = []ytab{ - {Yxm, Ynone, Yxr, Zm_r_xm, 2}, - {Yxm, Ynone, Ymr, Zm_r_xm, 2}, + {Zm_r_xm, 2, argList{Yxm, Yxr}}, + {Zm_r_xm, 2, argList{Yxm, Ymr}}, } var yxcvm2 = []ytab{ - {Yxm, Ynone, Yxr, Zm_r_xm, 2}, - {Ymm, Ynone, Yxr, Zm_r_xm, 2}, + {Zm_r_xm, 2, argList{Yxm, Yxr}}, + {Zm_r_xm, 2, argList{Ymm, Yxr}}, } var yxr = []ytab{ - {Yxr, Ynone, Yxr, Zm_r_xm, 1}, + {Zm_r_xm, 1, argList{Yxr, Yxr}}, } var yxr_ml = []ytab{ - {Yxr, Ynone, Yml, Zr_m_xm, 1}, + {Zr_m_xm, 1, argList{Yxr, Yml}}, } var ymr = []ytab{ - {Ymr, Ynone, Ymr, Zm_r, 1}, + {Zm_r, 1, argList{Ymr, Ymr}}, } var ymr_ml = []ytab{ - {Ymr, Ynone, Yml, Zr_m_xm, 1}, + {Zr_m_xm, 1, argList{Ymr, Yml}}, } var yxcmpi = []ytab{ - {Yxm, Yxr, Yi8, Zm_r_i_xm, 2}, + {Zm_r_i_xm, 2, argList{Yxm, Yxr, Yi8}}, } var yxmov = []ytab{ - {Yxm, Ynone, Yxr, Zm_r_xm, 1}, - {Yxr, Ynone, Yxm, Zr_m_xm, 1}, + {Zm_r_xm, 1, argList{Yxm, Yxr}}, + {Zr_m_xm, 1, argList{Yxr, Yxm}}, } var yxcvfl = []ytab{ - {Yxm, Ynone, Yrl, Zm_r_xm, 1}, + {Zm_r_xm, 1, argList{Yxm, Yrl}}, } var yxcvlf = []ytab{ - {Yml, Ynone, Yxr, Zm_r_xm, 1}, + {Zm_r_xm, 1, argList{Yml, Yxr}}, } var yxcvfq = []ytab{ - {Yxm, Ynone, Yrl, Zm_r_xm, 2}, + {Zm_r_xm, 2, argList{Yxm, Yrl}}, } var yxcvqf = []ytab{ - {Yml, Ynone, Yxr, Zm_r_xm, 2}, + {Zm_r_xm, 2, argList{Yml, Yxr}}, } var yps = []ytab{ - {Ymm, Ynone, Ymr, Zm_r_xm, 1}, - {Yi8, Ynone, Ymr, Zibo_m_xm, 2}, - {Yxm, Ynone, Yxr, Zm_r_xm, 2}, - {Yi8, Ynone, Yxr, Zibo_m_xm, 3}, + {Zm_r_xm, 1, argList{Ymm, Ymr}}, + {Zibo_m_xm, 2, argList{Yi8, Ymr}}, + {Zm_r_xm, 2, argList{Yxm, Yxr}}, + {Zibo_m_xm, 3, argList{Yi8, Yxr}}, } var yxrrl = []ytab{ - {Yxr, 
Ynone, Yrl, Zm_r, 1}, + {Zm_r, 1, argList{Yxr, Yrl}}, } var ymrxr = []ytab{ - {Ymr, Ynone, Yxr, Zm_r, 1}, - {Yxm, Ynone, Yxr, Zm_r_xm, 1}, + {Zm_r, 1, argList{Ymr, Yxr}}, + {Zm_r_xm, 1, argList{Yxm, Yxr}}, } var ymshuf = []ytab{ - {Yi8, Ymm, Ymr, Zibm_r, 2}, + {Zibm_r, 2, argList{Yi8, Ymm, Ymr}}, } var ymshufb = []ytab{ - {Yxm, Ynone, Yxr, Zm2_r, 2}, + {Zm2_r, 2, argList{Yxm, Yxr}}, } +// It should never have more than 1 entry, +// because some optab entries you opcode secuences that +// are longer than 2 bytes (zoffset=2 here), +// ROUNDPD and ROUNDPS and recently added BLENDPD, +// to name a few. var yxshuf = []ytab{ - {Yu8, Yxm, Yxr, Zibm_r, 2}, + {Zibm_r, 2, argList{Yu8, Yxm, Yxr}}, } var yextrw = []ytab{ - {Yu8, Yxr, Yrl, Zibm_r, 2}, + {Zibm_r, 2, argList{Yu8, Yxr, Yrl}}, } var yextr = []ytab{ - {Yu8, Yxr, Ymm, Zibr_m, 3}, + {Zibr_m, 3, argList{Yu8, Yxr, Ymm}}, } var yinsrw = []ytab{ - {Yu8, Yml, Yxr, Zibm_r, 2}, + {Zibm_r, 2, argList{Yu8, Yml, Yxr}}, } var yinsr = []ytab{ - {Yu8, Ymm, Yxr, Zibm_r, 3}, + {Zibm_r, 3, argList{Yu8, Ymm, Yxr}}, } var ypsdq = []ytab{ - {Yi8, Ynone, Yxr, Zibo_m, 2}, + {Zibo_m, 2, argList{Yi8, Yxr}}, } var ymskb = []ytab{ - {Yxr, Ynone, Yrl, Zm_r_xm, 2}, - {Ymr, Ynone, Yrl, Zm_r_xm, 1}, + {Zm_r_xm, 2, argList{Yxr, Yrl}}, + {Zm_r_xm, 1, argList{Ymr, Yrl}}, } var ycrc32l = []ytab{ - {Yml, Ynone, Yrl, Zlitm_r, 0}, + {Zlitm_r, 0, argList{Yml, Yrl}}, } var yprefetch = []ytab{ - {Ym, Ynone, Ynone, Zm_o, 2}, + {Zm_o, 2, argList{Ym}}, } var yaes = []ytab{ - {Yxm, Ynone, Yxr, Zlitm_r, 2}, + {Zlitm_r, 2, argList{Yxm, Yxr}}, } var yxbegin = []ytab{ - {Ynone, Ynone, Ybr, Zjmp, 1}, + {Zjmp, 1, argList{Ybr}}, } var yxabort = []ytab{ - {Yu8, Ynone, Ynone, Zib_, 1}, + {Zib_, 1, argList{Yu8}}, } var ylddqu = []ytab{ - {Ym, Ynone, Yxr, Zm_r, 1}, + {Zm_r, 1, argList{Ym, Yxr}}, +} + +var ypalignr = []ytab{ + {Zibm_r, 2, argList{Yu8, Yxm, Yxr}}, } // VEX instructions that come in two forms: @@ -796,89 +807,257 @@ var ylddqu = []ytab{ // VPXOR ymm2/m256, 
ymmV, ymm1 // VEX.NDS.256.66.0F.WIG EF /r // -// The NDS/NDD/DDS part can be dropped, producing this -// Optab entry: +// Produce this Optab entry: // -// {AVPXOR, yvex_xy3, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0xEF, VEX_256_66_0F_WIG, 0xEF}} +// {AVPXOR, yvex_xy3, Pvex, [23]uint8{VEX_NDS_128_66_0F_WIG, 0xEF, VEX_NDS_256_66_0F_WIG, 0xEF}} // var yvex_xy3 = []ytab{ - {Yxm, Yxr, Yxr, Zvex_rm_v_r, 2}, - {Yym, Yyr, Yyr, Zvex_rm_v_r, 2}, + {Zvex_rm_v_r, 2, argList{Yxm, Yxr, Yxr}}, + {Zvex_rm_v_r, 2, argList{Yym, Yyr, Yyr}}, +} + +var yvex_x3 = []ytab{ + {Zvex_rm_v_r, 2, argList{Yxm, Yxr, Yxr}}, } var yvex_ri3 = []ytab{ - {Yi8, Ymb, Yrl, Zvex_i_rm_r, 2}, + {Zvex_i_rm_r, 2, argList{Yi8, Ymb, Yrl}}, } var yvex_xyi3 = []ytab{ - {Yu8, Yxm, Yxr, Zvex_i_rm_r, 2}, - {Yu8, Yym, Yyr, Zvex_i_rm_r, 2}, - {Yi8, Yxm, Yxr, Zvex_i_rm_r, 2}, - {Yi8, Yym, Yyr, Zvex_i_rm_r, 2}, + {Zvex_i_rm_r, 2, argList{Yu8, Yxm, Yxr}}, + {Zvex_i_rm_r, 2, argList{Yu8, Yym, Yyr}}, + {Zvex_i_rm_r, 2, argList{Yi8, Yxm, Yxr}}, + {Zvex_i_rm_r, 2, argList{Yi8, Yym, Yyr}}, } -var yvex_yyi4 = []ytab{ //TODO don't hide 4 op, some version have xmm version - {Yym, Yyr, Yyr, Zvex_i_rm_v_r, 2}, +var yvex_yyi4 = []ytab{ + {Zvex_i_rm_v_r, 2, argList{Yu8, Yym, Yyr, Yyr}}, } var yvex_xyi4 = []ytab{ - {Yxm, Yyr, Yyr, Zvex_i_rm_v_r, 2}, + {Zvex_i_rm_v_r, 2, argList{Yu8, Yxm, Yyr, Yyr}}, } var yvex_shift = []ytab{ - {Yi8, Yxr, Yxr, Zvex_i_r_v, 3}, - {Yi8, Yyr, Yyr, Zvex_i_r_v, 3}, - {Yxm, Yxr, Yxr, Zvex_rm_v_r, 2}, - {Yxm, Yyr, Yyr, Zvex_rm_v_r, 2}, + {Zvex_i_r_v, 3, argList{Yi8, Yxr, Yxr}}, + {Zvex_i_r_v, 3, argList{Yi8, Yyr, Yyr}}, + {Zvex_rm_v_r, 2, argList{Yxm, Yxr, Yxr}}, + {Zvex_rm_v_r, 2, argList{Yxm, Yyr, Yyr}}, } var yvex_shift_dq = []ytab{ - {Yi8, Yxr, Yxr, Zvex_i_r_v, 3}, - {Yi8, Yyr, Yyr, Zvex_i_r_v, 3}, + {Zvex_i_r_v, 3, argList{Yi8, Yxr, Yxr}}, + {Zvex_i_r_v, 3, argList{Yi8, Yyr, Yyr}}, } var yvex_r3 = []ytab{ - {Yml, Yrl, Yrl, Zvex_rm_v_r, 2}, + {Zvex_rm_v_r, 2, argList{Yml, Yrl, Yrl}}, } var yvex_vmr3 = 
[]ytab{ - {Yrl, Yml, Yrl, Zvex_v_rm_r, 2}, + {Zvex_v_rm_r, 2, argList{Yrl, Yml, Yrl}}, } var yvex_xy2 = []ytab{ - {Yxm, Ynone, Yxr, Zvex_rm_v_r, 2}, - {Yym, Ynone, Yyr, Zvex_rm_v_r, 2}, + {Zvex_rm_v_r, 2, argList{Yxm, Yxr}}, + {Zvex_rm_v_r, 2, argList{Yym, Yyr}}, } var yvex_xyr2 = []ytab{ - {Yxr, Ynone, Yrl, Zvex_rm_v_r, 2}, - {Yyr, Ynone, Yrl, Zvex_rm_v_r, 2}, + {Zvex_rm_v_r, 2, argList{Yxr, Yrl}}, + {Zvex_rm_v_r, 2, argList{Yyr, Yrl}}, } var yvex_vmovdqa = []ytab{ - {Yxm, Ynone, Yxr, Zvex_rm_v_r, 2}, - {Yxr, Ynone, Yxm, Zvex_r_v_rm, 2}, - {Yym, Ynone, Yyr, Zvex_rm_v_r, 2}, - {Yyr, Ynone, Yym, Zvex_r_v_rm, 2}, + {Zvex_rm_v_r, 2, argList{Yxm, Yxr}}, + {Zvex_r_v_rm, 2, argList{Yxr, Yxm}}, + {Zvex_rm_v_r, 2, argList{Yym, Yyr}}, + {Zvex_r_v_rm, 2, argList{Yyr, Yym}}, } var yvex_vmovntdq = []ytab{ - {Yxr, Ynone, Ym, Zvex_r_v_rm, 2}, - {Yyr, Ynone, Ym, Zvex_r_v_rm, 2}, + {Zvex_r_v_rm, 2, argList{Yxr, Ym}}, + {Zvex_r_v_rm, 2, argList{Yyr, Ym}}, } var yvex_vpbroadcast = []ytab{ - {Yxm, Ynone, Yxr, Zvex_rm_v_r, 2}, - {Yxm, Ynone, Yyr, Zvex_rm_v_r, 2}, + {Zvex_rm_v_r, 2, argList{Yxm, Yxr}}, + {Zvex_rm_v_r, 2, argList{Yxm, Yyr}}, } var yvex_vpbroadcast_sd = []ytab{ - {Yxm, Ynone, Yyr, Zvex_rm_v_r, 2}, + {Zvex_rm_v_r, 2, argList{Yxm, Yyr}}, +} + +var yvex_vpextrw = []ytab{ + {Zvex_i_rm_r, 2, argList{Yi8, Yxr, Yrl}}, + {Zvex_i_r_rm, 2, argList{Yi8, Yxr, Yml}}, +} + +var yvex_m = []ytab{ + {Zvex_rm_v_ro, 3, argList{Ym}}, +} + +var yvex_xx3 = []ytab{ + {Zvex_rm_v_r, 2, argList{Yxr, Yxr, Yxr}}, +} + +var yvex_yi3 = []ytab{ + {Zvex_i_r_rm, 2, argList{Yi8, Yyr, Yxm}}, +} + +var yvex_mxy = []ytab{ + {Zvex_rm_v_r, 2, argList{Ym, Yxr}}, + {Zvex_rm_v_r, 2, argList{Ym, Yyr}}, +} + +var yvex_yy3 = []ytab{ + {Zvex_rm_v_r, 2, argList{Yym, Yyr, Yyr}}, +} + +var yvex_xi3 = []ytab{ + {Zvex_i_rm_r, 2, argList{Yi8, Yxm, Yxr}}, +} + +var yvex_vpermpd = []ytab{ + {Zvex_i_rm_r, 2, argList{Yi8, Yym, Yyr}}, +} + +var yvex_vpermilp = []ytab{ + {Zvex_i_rm_r, 2, argList{Yi8, Yxm, Yxr}}, + {Zvex_rm_v_r, 
2, argList{Yxm, Yxr, Yxr}}, + {Zvex_i_rm_r, 2, argList{Yi8, Yym, Yyr}}, + {Zvex_rm_v_r, 2, argList{Yym, Yyr, Yyr}}, +} + +var yvex_vcvtps2ph = []ytab{ + {Zvex_i_r_rm, 2, argList{Yi8, Yyr, Yxm}}, + {Zvex_i_r_rm, 2, argList{Yi8, Yxr, Yxm}}, +} + +var yvex_vbroadcastf = []ytab{ + {Zvex_rm_v_r, 2, argList{Ym, Yyr}}, +} + +var yvex_vmovd = []ytab{ + {Zvex_r_v_rm, 2, argList{Yxr, Yml}}, + {Zvex_rm_v_r, 2, argList{Yml, Yxr}}, +} + +var yvex_x2 = []ytab{ + {Zvex_rm_v_r, 2, argList{Yxm, Yxr}}, +} + +var yvex_y2 = []ytab{ + {Zvex_rm_v_r, 2, argList{Yym, Yxr}}, +} + +var yvex = []ytab{ + {Zvex, 2, argList{}}, +} + +var yvex_xx2 = []ytab{ + {Zvex_rm_v_r, 2, argList{Yxr, Yxr}}, +} + +var yvex_vpalignr = []ytab{ + {Zvex_i_rm_v_r, 2, argList{Yu8, Yxm, Yxr, Yxr}}, + {Zvex_i_rm_v_r, 2, argList{Yu8, Yym, Yyr, Yyr}}, +} + +var yvex_rxi4 = []ytab{ + {Zvex_i_rm_v_r, 2, argList{Yu8, Yml, Yxr, Yxr}}, +} + +var yvex_xxi4 = []ytab{ + {Zvex_i_rm_v_r, 2, argList{Yu8, Yxm, Yxr, Yxr}}, +} + +var yvex_xy4 = []ytab{ + {Zvex_hr_rm_v_r, 2, argList{Yxr, Yxm, Yxr, Yxr}}, + {Zvex_hr_rm_v_r, 2, argList{Yyr, Yym, Yyr, Yyr}}, +} + +var yvex_vpbroadcast_ss = []ytab{ + {Zvex_rm_v_r, 2, argList{Ym, Yxr}}, + {Zvex_rm_v_r, 2, argList{Yxr, Yxr}}, + {Zvex_rm_v_r, 2, argList{Ym, Yyr}}, + {Zvex_rm_v_r, 2, argList{Yxr, Yyr}}, +} + +var yvex_vblendvpd = []ytab{ + {Zvex_r_v_rm, 2, argList{Yxr, Yxr, Yml}}, + {Zvex_r_v_rm, 2, argList{Yyr, Yyr, Yml}}, + {Zvex_rm_v_r, 2, argList{Ym, Yxr, Yxr}}, + {Zvex_rm_v_r, 2, argList{Ym, Yyr, Yyr}}, +} + +var yvex_vmov = []ytab{ + {Zvex_r_v_rm, 2, argList{Yxr, Ym}}, + {Zvex_rm_v_r, 2, argList{Ym, Yxr}}, + {Zvex_rm_v_r, 2, argList{Yxr, Yxr, Yxr}}, + {Zvex_rm_v_r, 2, argList{Yxr, Yxr, Yxr}}, +} + +var yvex_vps = []ytab{ + {Zvex_rm_v_r, 2, argList{Yxm, Yxr, Yxr}}, + {Zvex_i_r_v, 3, argList{Yi8, Yxr, Yxr}}, + {Zvex_rm_v_r, 2, argList{Yxm, Yyr, Yyr}}, + {Zvex_i_r_v, 3, argList{Yi8, Yyr, Yyr}}, +} + +var yvex_r2 = []ytab{ + {Zvex_rm_r_vo, 3, argList{Yml, Yrl}}, +} + +var yvex_vpextr = 
[]ytab{ + {Zvex_i_r_rm, 2, argList{Yi8, Yxr, Yml}}, +} + +var yvex_rx3 = []ytab{ + {Zvex_rm_v_r, 2, argList{Yml, Yxr, Yxr}}, +} + +var yvex_vcvtsd2si = []ytab{ + {Zvex_rm_v_r, 2, argList{Yxm, Yrl}}, +} + +var yvex_vmovhpd = []ytab{ + {Zvex_r_v_rm, 2, argList{Yxr, Ym}}, + {Zvex_rm_v_r, 2, argList{Ym, Yxr, Yxr}}, +} + +var yvex_vmovq = []ytab{ + {Zvex_r_v_rm, 2, argList{Yxr, Yml}}, + {Zvex_rm_v_r, 2, argList{Ym, Yxr}}, + {Zvex_rm_v_r, 2, argList{Yml, Yxr}}, + {Zvex_rm_v_r, 2, argList{Yxr, Yxr}}, + {Zvex_r_v_rm, 2, argList{Yxr, Yxm}}, +} + +var yvpgatherdq = []ytab{ + {Zvex_v_rm_r, 2, argList{Yxr, Yxvm, Yxr}}, + {Zvex_v_rm_r, 2, argList{Yyr, Yxvm, Yyr}}, +} + +var yvpgatherqq = []ytab{ + {Zvex_v_rm_r, 2, argList{Yxr, Yxvm, Yxr}}, + {Zvex_v_rm_r, 2, argList{Yyr, Yyvm, Yyr}}, +} + +var yvgatherqps = []ytab{ + {Zvex_v_rm_r, 2, argList{Yxr, Yxvm, Yxr}}, + {Zvex_v_rm_r, 2, argList{Yxr, Yyvm, Yxr}}, } var ymmxmm0f38 = []ytab{ - {Ymm, Ynone, Ymr, Zlitm_r, 3}, - {Yxm, Ynone, Yxr, Zlitm_r, 5}, + {Zlitm_r, 3, argList{Ymm, Ymr}}, + {Zlitm_r, 5, argList{Yxm, Yxr}}, +} + +var yextractps = []ytab{ + {Zibr_m, 2, argList{Yu2, Yxr, Yml}}, } /* @@ -949,6 +1128,8 @@ var optab = {AADCL, yaddl, Px, [23]uint8{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}}, {AADCQ, yaddl, Pw, [23]uint8{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}}, {AADCW, yaddl, Pe, [23]uint8{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}}, + {AADCXL, yml_rl, Pq4, [23]uint8{0xf6}}, + {AADCXQ, yml_rl, Pq4w, [23]uint8{0xf6}}, {AADDB, yxorb, Pb, [23]uint8{0x04, 0x80, 00, 0x00, 0x02}}, {AADDL, yaddl, Px, [23]uint8{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}}, {AADDPD, yxm, Pq, [23]uint8{0x58}}, @@ -956,7 +1137,11 @@ var optab = {AADDQ, yaddl, Pw, [23]uint8{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}}, {AADDSD, yxm, Pf2, [23]uint8{0x58}}, {AADDSS, yxm, Pf3, [23]uint8{0x58}}, + {AADDSUBPD, yxm, Pq, [23]uint8{0xd0}}, + {AADDSUBPS, yxm, Pf2, [23]uint8{0xd0}}, {AADDW, yaddl, Pe, [23]uint8{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}}, + {AADOXL, yml_rl, Pq5, 
[23]uint8{0xf6}}, + {AADOXQ, yml_rl, Pq5w, [23]uint8{0xf6}}, {AADJSP, nil, 0, [23]uint8{}}, {AANDB, yxorb, Pb, [23]uint8{0x24, 0x80, 04, 0x20, 0x22}}, {AANDL, yaddl, Px, [23]uint8{0x83, 04, 0x25, 0x81, 04, 0x21, 0x23}}, @@ -994,6 +1179,7 @@ var optab = {ACDQ, ynone, Px, [23]uint8{0x99}}, {ACLC, ynone, Px, [23]uint8{0xf8}}, {ACLD, ynone, Px, [23]uint8{0xfc}}, + {ACLFLUSH, yclflush, Pm, [23]uint8{0xae, 07}}, {ACLI, ynone, Px, [23]uint8{0xfa}}, {ACLTS, ynone, Pm, [23]uint8{0x06}}, {ACMC, ynone, Px, [23]uint8{0xf5}}, @@ -1098,12 +1284,15 @@ var optab = {ADIVSD, yxm, Pf2, [23]uint8{0x5e}}, {ADIVSS, yxm, Pf3, [23]uint8{0x5e}}, {ADIVW, ydivl, Pe, [23]uint8{0xf7, 06}}, + {ADPPD, yxshuf, Pq, [23]uint8{0x3a, 0x41, 0}}, + {ADPPS, yxshuf, Pq, [23]uint8{0x3a, 0x40, 0}}, {AEMMS, ynone, Pm, [23]uint8{0x77}}, + {AEXTRACTPS, yextractps, Pq, [23]uint8{0x3a, 0x17, 0}}, {AENTER, nil, 0, [23]uint8{}}, /* botch */ - {AFXRSTOR, ysvrs, Pm, [23]uint8{0xae, 01, 0xae, 01}}, - {AFXSAVE, ysvrs, Pm, [23]uint8{0xae, 00, 0xae, 00}}, - {AFXRSTOR64, ysvrs, Pw, [23]uint8{0x0f, 0xae, 01, 0x0f, 0xae, 01}}, - {AFXSAVE64, ysvrs, Pw, [23]uint8{0x0f, 0xae, 00, 0x0f, 0xae, 00}}, + {AFXRSTOR, ysvrs_mo, Pm, [23]uint8{0xae, 01, 0xae, 01}}, + {AFXSAVE, ysvrs_om, Pm, [23]uint8{0xae, 00, 0xae, 00}}, + {AFXRSTOR64, ysvrs_mo, Pw, [23]uint8{0x0f, 0xae, 01, 0x0f, 0xae, 01}}, + {AFXSAVE64, ysvrs_om, Pw, [23]uint8{0x0f, 0xae, 00, 0x0f, 0xae, 00}}, {AHLT, ynone, Px, [23]uint8{0xf4}}, {AIDIVB, ydivb, Pb, [23]uint8{0xf6, 07}}, {AIDIVL, ydivl, Px, [23]uint8{0xf7, 07}}, @@ -1122,6 +1311,7 @@ var optab = {AINL, yin, Px, [23]uint8{0xe5, 0xed}}, {AINSB, ynone, Pb, [23]uint8{0x6c}}, {AINSL, ynone, Px, [23]uint8{0x6d}}, + {AINSERTPS, yxshuf, Pq, [23]uint8{0x3a, 0x21, 0}}, {AINSW, ynone, Pe, [23]uint8{0x6d}}, {AINT, yint, Px, [23]uint8{0xcd}}, {AINTO, ynone, P32, [23]uint8{0xce}}, @@ -1157,7 +1347,7 @@ var optab = {ALARL, yml_rl, Pm, [23]uint8{0x02}}, {ALARW, yml_rl, Pq, [23]uint8{0x02}}, {ALDDQU, ylddqu, Pf2, [23]uint8{0xf0}}, 
- {ALDMXCSR, ysvrs, Pm, [23]uint8{0xae, 02, 0xae, 02}}, + {ALDMXCSR, ysvrs_mo, Pm, [23]uint8{0xae, 02, 0xae, 02}}, {ALEAL, ym_rl, Px, [23]uint8{0x8d}}, {ALEAQ, ym_rl, Pw, [23]uint8{0x8d}}, {ALEAVEL, ynone, P32, [23]uint8{0xc9}}, @@ -1199,7 +1389,7 @@ var optab = {AMOVHLPS, yxr, Pm, [23]uint8{0x12}}, {AMOVHPD, yxmov, Pe, [23]uint8{0x16, 0x17}}, {AMOVHPS, yxmov, Pm, [23]uint8{0x16, 0x17}}, - {AMOVL, ymovl, Px, [23]uint8{0x89, 0x8b, 0x31, 0xb8, 0xc7, 00, 0x6e, 0x7e, Pe, 0x6e, Pe, 0x7e, 0}}, + {AMOVL, ymovl, Px, [23]uint8{0x89, 0x8b, 0xb8, 0xc7, 00, 0x6e, 0x7e, Pe, 0x6e, Pe, 0x7e, 0}}, {AMOVLHPS, yxr, Pm, [23]uint8{0x16}}, {AMOVLPD, yxmov, Pe, [23]uint8{0x12, 0x13}}, {AMOVLPS, yxmov, Pm, [23]uint8{0x12, 0x13}}, @@ -1208,10 +1398,11 @@ var optab = {AMOVMSKPD, yxrrl, Pq, [23]uint8{0x50}}, {AMOVMSKPS, yxrrl, Pm, [23]uint8{0x50}}, {AMOVNTO, yxr_ml, Pe, [23]uint8{0xe7}}, + {AMOVNTDQA, ylddqu, Pq4, [23]uint8{0x2a}}, {AMOVNTPD, yxr_ml, Pe, [23]uint8{0x2b}}, {AMOVNTPS, yxr_ml, Pm, [23]uint8{0x2b}}, {AMOVNTQ, ymr_ml, Pm, [23]uint8{0xe7}}, - {AMOVQ, ymovq, Pw8, [23]uint8{0x6f, 0x7f, Pf2, 0xd6, Pf3, 0x7e, Pe, 0xd6, 0x89, 0x8b, 0x31, 0xc7, 00, 0xb8, 0xc7, 00, 0x6e, 0x7e, Pe, 0x6e, Pe, 0x7e, 0}}, + {AMOVQ, ymovq, Pw8, [23]uint8{0x6f, 0x7f, Pf2, 0xd6, Pf3, 0x7e, Pe, 0xd6, 0x89, 0x8b, 0xc7, 00, 0xb8, 0xc7, 00, 0x6e, 0x7e, Pe, 0x6e, Pe, 0x7e, 0}}, {AMOVQOZX, ymrxr, Pf3, [23]uint8{0xd6, 0x7e}}, {AMOVSB, ynone, Pb, [23]uint8{0xa4}}, {AMOVSD, yxmov, Pf2, [23]uint8{0x10, 0x11}}, @@ -1221,11 +1412,12 @@ var optab = {AMOVSW, ynone, Pe, [23]uint8{0xa5}}, {AMOVUPD, yxmov, Pe, [23]uint8{0x10, 0x11}}, {AMOVUPS, yxmov, Pm, [23]uint8{0x10, 0x11}}, - {AMOVW, ymovw, Pe, [23]uint8{0x89, 0x8b, 0x31, 0xb8, 0xc7, 00, 0}}, + {AMOVW, ymovw, Pe, [23]uint8{0x89, 0x8b, 0xb8, 0xc7, 00, 0}}, {AMOVWLSX, yml_rl, Pm, [23]uint8{0xbf}}, {AMOVWLZX, yml_rl, Pm, [23]uint8{0xb7}}, {AMOVWQSX, yml_rl, Pw, [23]uint8{0x0f, 0xbf}}, {AMOVWQZX, yml_rl, Pw, [23]uint8{0x0f, 0xb7}}, + {AMPSADBW, yxshuf, Pq, [23]uint8{0x3a, 
0x42, 0}}, {AMULB, ydivb, Pb, [23]uint8{0xf6, 04}}, {AMULL, ydivl, Px, [23]uint8{0xf7, 04}}, {AMULPD, yxm, Pe, [23]uint8{0x59}}, @@ -1255,8 +1447,12 @@ var optab = {AOUTSL, ynone, Px, [23]uint8{0x6f}}, {AOUTSW, ynone, Pe, [23]uint8{0x6f}}, {AOUTW, yin, Pe, [23]uint8{0xe7, 0xef}}, + {APABSB, yxm_q4, Pq4, [23]uint8{0x1c}}, + {APABSD, yxm_q4, Pq4, [23]uint8{0x1e}}, + {APABSW, yxm_q4, Pq4, [23]uint8{0x1d}}, {APACKSSLW, ymm, Py1, [23]uint8{0x6b, Pe, 0x6b}}, {APACKSSWB, ymm, Py1, [23]uint8{0x63, Pe, 0x63}}, + {APACKUSDW, yxm_q4, Pq4, [23]uint8{0x2b}}, {APACKUSWB, ymm, Py1, [23]uint8{0x67, Pe, 0x67}}, {APADDB, ymm, Py1, [23]uint8{0xfc, Pe, 0xfc}}, {APADDL, ymm, Py1, [23]uint8{0xfe, Pe, 0xfe}}, @@ -1266,17 +1462,23 @@ var optab = {APADDUSB, ymm, Py1, [23]uint8{0xdc, Pe, 0xdc}}, {APADDUSW, ymm, Py1, [23]uint8{0xdd, Pe, 0xdd}}, {APADDW, ymm, Py1, [23]uint8{0xfd, Pe, 0xfd}}, + {APALIGNR, ypalignr, Pq, [23]uint8{0x3a, 0x0f}}, {APAND, ymm, Py1, [23]uint8{0xdb, Pe, 0xdb}}, {APANDN, ymm, Py1, [23]uint8{0xdf, Pe, 0xdf}}, {APAUSE, ynone, Px, [23]uint8{0xf3, 0x90}}, {APAVGB, ymm, Py1, [23]uint8{0xe0, Pe, 0xe0}}, {APAVGW, ymm, Py1, [23]uint8{0xe3, Pe, 0xe3}}, + {APBLENDW, yxshuf, Pq, [23]uint8{0x3a, 0x0e, 0}}, {APCMPEQB, ymm, Py1, [23]uint8{0x74, Pe, 0x74}}, {APCMPEQL, ymm, Py1, [23]uint8{0x76, Pe, 0x76}}, + {APCMPEQQ, yxm_q4, Pq4, [23]uint8{0x29}}, {APCMPEQW, ymm, Py1, [23]uint8{0x75, Pe, 0x75}}, {APCMPGTB, ymm, Py1, [23]uint8{0x64, Pe, 0x64}}, {APCMPGTL, ymm, Py1, [23]uint8{0x66, Pe, 0x66}}, + {APCMPGTQ, yxm_q4, Pq4, [23]uint8{0x37}}, {APCMPGTW, ymm, Py1, [23]uint8{0x65, Pe, 0x65}}, + {APCMPISTRI, yxshuf, Pq, [23]uint8{0x3a, 0x63, 0}}, + {APCMPISTRM, yxshuf, Pq, [23]uint8{0x3a, 0x62, 0}}, {APEXTRW, yextrw, Pq, [23]uint8{0xc5, 00}}, {APEXTRB, yextr, Pq, [23]uint8{0x3a, 0x14, 00}}, {APEXTRD, yextr, Pq, [23]uint8{0x3a, 0x16, 00}}, @@ -1292,11 +1494,20 @@ var optab = {APINSRB, yinsr, Pq, [23]uint8{0x3a, 0x20, 00}}, {APINSRD, yinsr, Pq, [23]uint8{0x3a, 0x22, 00}}, {APINSRQ, yinsr, Pq3, 
[23]uint8{0x3a, 0x22, 00}}, + {APMADDUBSW, yxm_q4, Pq4, [23]uint8{0x04}}, {APMADDWL, ymm, Py1, [23]uint8{0xf5, Pe, 0xf5}}, + {APMAXSB, yxm_q4, Pq4, [23]uint8{0x3c}}, + {APMAXSD, yxm_q4, Pq4, [23]uint8{0x3d}}, {APMAXSW, yxm, Pe, [23]uint8{0xee}}, {APMAXUB, yxm, Pe, [23]uint8{0xde}}, + {APMAXUD, yxm_q4, Pq4, [23]uint8{0x3f}}, + {APMAXUW, yxm_q4, Pq4, [23]uint8{0x3e}}, + {APMINSB, yxm_q4, Pq4, [23]uint8{0x38}}, + {APMINSD, yxm_q4, Pq4, [23]uint8{0x39}}, {APMINSW, yxm, Pe, [23]uint8{0xea}}, {APMINUB, yxm, Pe, [23]uint8{0xda}}, + {APMINUD, yxm_q4, Pq4, [23]uint8{0x3b}}, + {APMINUW, yxm_q4, Pq4, [23]uint8{0x3a}}, {APMOVMSKB, ymskb, Px, [23]uint8{Pe, 0xd7, 0xd7}}, {APMOVSXBD, yxm_q4, Pq4, [23]uint8{0x21}}, {APMOVSXBQ, yxm_q4, Pq4, [23]uint8{0x22}}, @@ -1311,6 +1522,7 @@ var optab = {APMOVZXWD, yxm_q4, Pq4, [23]uint8{0x33}}, {APMOVZXWQ, yxm_q4, Pq4, [23]uint8{0x34}}, {APMULDQ, yxm_q4, Pq4, [23]uint8{0x28}}, + {APMULHRSW, yxm_q4, Pq4, [23]uint8{0x0b}}, {APMULHUW, ymm, Py1, [23]uint8{0xe4, Pe, 0xe4}}, {APMULHW, ymm, Py1, [23]uint8{0xe5, Pe, 0xe5}}, {APMULLD, yxm_q4, Pq4, [23]uint8{0x40}}, @@ -1334,6 +1546,9 @@ var optab = {APSHUFLW, yxshuf, Pf2, [23]uint8{0x70, 00}}, {APSHUFW, ymshuf, Pm, [23]uint8{0x70, 00}}, {APSHUFB, ymshufb, Pq, [23]uint8{0x38, 0x00}}, + {APSIGNB, yxm_q4, Pq4, [23]uint8{0x08}}, + {APSIGND, yxm_q4, Pq4, [23]uint8{0x0a}}, + {APSIGNW, yxm_q4, Pq4, [23]uint8{0x09}}, {APSLLO, ypsdq, Pq, [23]uint8{0x73, 07}}, {APSLLL, yps, Py3, [23]uint8{0xf2, 0x72, 06, Pe, 0xf2, Pe, 0x72, 06}}, {APSLLQ, yps, Py3, [23]uint8{0xf3, 0x73, 06, Pe, 0xf3, Pe, 0x73, 06}}, @@ -1352,6 +1567,7 @@ var optab = {APSUBUSB, yxm, Pe, [23]uint8{0xd8}}, {APSUBUSW, yxm, Pe, [23]uint8{0xd9}}, {APSUBW, yxm, Pe, [23]uint8{0xf9}}, + {APTEST, yxm_q4, Pq4, [23]uint8{0x17}}, {APUNPCKHBW, ymm, Py1, [23]uint8{0x68, Pe, 0x68}}, {APUNPCKHLQ, ymm, Py1, [23]uint8{0x6a, Pe, 0x6a}}, {APUNPCKHQDQ, yxm, Pe, [23]uint8{0x6d}}, @@ -1446,7 +1662,7 @@ var optab = {ASTC, ynone, Px, [23]uint8{0xf9}}, {ASTD, ynone, Px, 
[23]uint8{0xfd}}, {ASTI, ynone, Px, [23]uint8{0xfb}}, - {ASTMXCSR, ysvrs, Pm, [23]uint8{0xae, 03, 0xae, 03}}, + {ASTMXCSR, ysvrs_om, Pm, [23]uint8{0xae, 03, 0xae, 03}}, {ASTOSB, ynone, Pb, [23]uint8{0xaa}}, {ASTOSL, ynone, Px, [23]uint8{0xab}}, {ASTOSQ, ynone, Pw, [23]uint8{0xab}}, @@ -1557,12 +1773,12 @@ var optab = {AFDIVRD, yfadd, Px, [23]uint8{0xdc, 07, 0xd8, 07, 0xdc, 06}}, {AFXCHD, yfxch, Px, [23]uint8{0xd9, 01, 0xd9, 01}}, {AFFREE, nil, 0, [23]uint8{}}, - {AFLDCW, ysvrs, Px, [23]uint8{0xd9, 05, 0xd9, 05}}, - {AFLDENV, ysvrs, Px, [23]uint8{0xd9, 04, 0xd9, 04}}, - {AFRSTOR, ysvrs, Px, [23]uint8{0xdd, 04, 0xdd, 04}}, - {AFSAVE, ysvrs, Px, [23]uint8{0xdd, 06, 0xdd, 06}}, - {AFSTCW, ysvrs, Px, [23]uint8{0xd9, 07, 0xd9, 07}}, - {AFSTENV, ysvrs, Px, [23]uint8{0xd9, 06, 0xd9, 06}}, + {AFLDCW, ysvrs_mo, Px, [23]uint8{0xd9, 05, 0xd9, 05}}, + {AFLDENV, ysvrs_mo, Px, [23]uint8{0xd9, 04, 0xd9, 04}}, + {AFRSTOR, ysvrs_mo, Px, [23]uint8{0xdd, 04, 0xdd, 04}}, + {AFSAVE, ysvrs_om, Px, [23]uint8{0xdd, 06, 0xdd, 06}}, + {AFSTCW, ysvrs_om, Px, [23]uint8{0xd9, 07, 0xd9, 07}}, + {AFSTENV, ysvrs_om, Px, [23]uint8{0xd9, 06, 0xd9, 06}}, {AFSTSW, ystsw, Px, [23]uint8{0xdd, 07, 0xdf, 0xe0}}, {AF2XM1, ynone, Px, [23]uint8{0xd9, 0xf0}}, {AFABS, ynone, Px, [23]uint8{0xd9, 0xe1}}, @@ -1638,63 +1854,13 @@ var optab = {APSHUFD, yxshuf, Pq, [23]uint8{0x70, 0}}, {APCLMULQDQ, yxshuf, Pq, [23]uint8{0x3a, 0x44, 0}}, {APCMPESTRI, yxshuf, Pq, [23]uint8{0x3a, 0x61, 0}}, + {APCMPESTRM, yxshuf, Pq, [23]uint8{0x3a, 0x60, 0}}, {AMOVDDUP, yxm, Pf2, [23]uint8{0x12}}, {AMOVSHDUP, yxm, Pf3, [23]uint8{0x16}}, {AMOVSLDUP, yxm, Pf3, [23]uint8{0x12}}, - {AANDNL, yvex_r3, Pvex, [23]uint8{VEX_LZ_0F38_W0, 0xF2}}, - {AANDNQ, yvex_r3, Pvex, [23]uint8{VEX_LZ_0F38_W1, 0xF2}}, - {ABEXTRL, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_0F38_W0, 0xF7}}, - {ABEXTRQ, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_0F38_W1, 0xF7}}, - {ABZHIL, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_0F38_W0, 0xF5}}, - {ABZHIQ, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_0F38_W1, 
0xF5}}, - {AMULXL, yvex_r3, Pvex, [23]uint8{VEX_LZ_F2_0F38_W0, 0xF6}}, - {AMULXQ, yvex_r3, Pvex, [23]uint8{VEX_LZ_F2_0F38_W1, 0xF6}}, - {APDEPL, yvex_r3, Pvex, [23]uint8{VEX_LZ_F2_0F38_W0, 0xF5}}, - {APDEPQ, yvex_r3, Pvex, [23]uint8{VEX_LZ_F2_0F38_W1, 0xF5}}, - {APEXTL, yvex_r3, Pvex, [23]uint8{VEX_LZ_F3_0F38_W0, 0xF5}}, - {APEXTQ, yvex_r3, Pvex, [23]uint8{VEX_LZ_F3_0F38_W1, 0xF5}}, - {ASARXL, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_F3_0F38_W0, 0xF7}}, - {ASARXQ, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_F3_0F38_W1, 0xF7}}, - {ASHLXL, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_66_0F38_W0, 0xF7}}, - {ASHLXQ, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_66_0F38_W1, 0xF7}}, - {ASHRXL, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_F2_0F38_W0, 0xF7}}, - {ASHRXQ, yvex_vmr3, Pvex, [23]uint8{VEX_LZ_F2_0F38_W1, 0xF7}}, - - {AVZEROUPPER, ynone, Px, [23]uint8{0xc5, 0xf8, 0x77}}, - {AVMOVDQU, yvex_vmovdqa, Pvex, [23]uint8{VEX_128_F3_0F_WIG, 0x6F, VEX_128_F3_0F_WIG, 0x7F, VEX_256_F3_0F_WIG, 0x6F, VEX_256_F3_0F_WIG, 0x7F}}, - {AVMOVDQA, yvex_vmovdqa, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x6F, VEX_128_66_0F_WIG, 0x7F, VEX_256_66_0F_WIG, 0x6F, VEX_256_66_0F_WIG, 0x7F}}, - {AVMOVNTDQ, yvex_vmovntdq, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0xE7, VEX_256_66_0F_WIG, 0xE7}}, - {AVPCMPEQB, yvex_xy3, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x74, VEX_256_66_0F_WIG, 0x74}}, - {AVPXOR, yvex_xy3, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0xEF, VEX_256_66_0F_WIG, 0xEF}}, - {AVPMOVMSKB, yvex_xyr2, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0xD7, VEX_256_66_0F_WIG, 0xD7}}, - {AVPAND, yvex_xy3, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0xDB, VEX_256_66_0F_WIG, 0xDB}}, - {AVPBROADCASTB, yvex_vpbroadcast, Pvex, [23]uint8{VEX_128_66_0F38_W0, 0x78, VEX_256_66_0F38_W0, 0x78}}, - {AVPTEST, yvex_xy2, Pvex, [23]uint8{VEX_128_66_0F38_WIG, 0x17, VEX_256_66_0F38_WIG, 0x17}}, - {AVPSHUFB, yvex_xy3, Pvex, [23]uint8{VEX_128_66_0F38_WIG, 0x00, VEX_256_66_0F38_WIG, 0x00}}, - {AVPSHUFD, yvex_xyi3, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x70, VEX_256_66_0F_WIG, 0x70, VEX_128_66_0F_WIG, 0x70, 
VEX_256_66_0F_WIG, 0x70}}, - {AVPOR, yvex_xy3, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0xeb, VEX_256_66_0F_WIG, 0xeb}}, - {AVPADDQ, yvex_xy3, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0xd4, VEX_256_66_0F_WIG, 0xd4}}, - {AVPADDD, yvex_xy3, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0xfe, VEX_256_66_0F_WIG, 0xfe}}, - {AVPSLLD, yvex_shift, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x72, 0xf0, VEX_256_66_0F_WIG, 0x72, 0xf0, VEX_128_66_0F_WIG, 0xf2, VEX_256_66_0F_WIG, 0xf2}}, - {AVPSLLQ, yvex_shift, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x73, 0xf0, VEX_256_66_0F_WIG, 0x73, 0xf0, VEX_128_66_0F_WIG, 0xf3, VEX_256_66_0F_WIG, 0xf3}}, - {AVPSRLD, yvex_shift, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x72, 0xd0, VEX_256_66_0F_WIG, 0x72, 0xd0, VEX_128_66_0F_WIG, 0xd2, VEX_256_66_0F_WIG, 0xd2}}, - {AVPSRLQ, yvex_shift, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x73, 0xd0, VEX_256_66_0F_WIG, 0x73, 0xd0, VEX_128_66_0F_WIG, 0xd3, VEX_256_66_0F_WIG, 0xd3}}, - {AVPSRLDQ, yvex_shift_dq, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x73, 0xd8, VEX_256_66_0F_WIG, 0x73, 0xd8}}, - {AVPSLLDQ, yvex_shift_dq, Pvex, [23]uint8{VEX_128_66_0F_WIG, 0x73, 0xf8, VEX_256_66_0F_WIG, 0x73, 0xf8}}, - {AVPERM2F128, yvex_yyi4, Pvex, [23]uint8{VEX_256_66_0F3A_W0, 0x06}}, - {AVPALIGNR, yvex_yyi4, Pvex, [23]uint8{VEX_256_66_0F3A_WIG, 0x0f}}, - {AVPBLENDD, yvex_yyi4, Pvex, [23]uint8{VEX_256_66_0F3A_WIG, 0x02}}, - {AVINSERTI128, yvex_xyi4, Pvex, [23]uint8{VEX_256_66_0F3A_WIG, 0x38}}, - {AVPERM2I128, yvex_yyi4, Pvex, [23]uint8{VEX_256_66_0F3A_WIG, 0x46}}, - {ARORXL, yvex_ri3, Pvex, [23]uint8{VEX_LZ_F2_0F3A_W0, 0xf0}}, - {ARORXQ, yvex_ri3, Pvex, [23]uint8{VEX_LZ_F2_0F3A_W1, 0xf0}}, - {AVBROADCASTSD, yvex_vpbroadcast_sd, Pvex, [23]uint8{VEX_256_66_0F38_W0, 0x19}}, - {AVBROADCASTSS, yvex_vpbroadcast, Pvex, [23]uint8{VEX_128_66_0F38_W0, 0x18, VEX_256_66_0F38_W0, 0x18}}, - {AVMOVDDUP, yvex_xy2, Pvex, [23]uint8{VEX_128_F2_0F_WIG, 0x12, VEX_256_F2_0F_WIG, 0x12}}, - {AVMOVSHDUP, yvex_xy2, Pvex, [23]uint8{VEX_128_F3_0F_WIG, 0x16, VEX_256_F3_0F_WIG, 0x16}}, - {AVMOVSLDUP, 
yvex_xy2, Pvex, [23]uint8{VEX_128_F3_0F_WIG, 0x12, VEX_256_F3_0F_WIG, 0x12}}, - + {ABLENDPD, yxshuf, Pq, [23]uint8{0x3a, 0x0d, 0}}, + {ABLENDPS, yxshuf, Pq, [23]uint8{0x3a, 0x0c, 0}}, {AXACQUIRE, ynone, Px, [23]uint8{0xf2}}, {AXRELEASE, ynone, Px, [23]uint8{0xf3}}, {AXBEGIN, yxbegin, Px, [23]uint8{0xc7, 0xf8}}, @@ -1706,20 +1872,61 @@ var optab = {obj.APCDATA, ypcdata, Px, [23]uint8{0, 0}}, {obj.ADUFFCOPY, yduff, Px, [23]uint8{0xe8}}, {obj.ADUFFZERO, yduff, Px, [23]uint8{0xe8}}, + + // AVX2 gather instructions. + // Added as a part of VSIB support implementation, + // when x86avxgen will output these, they will be moved to + // vex_optabs.go where they belong. + {AVGATHERDPD, yvpgatherdq, Pvex, [23]uint8{ + vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0x92, + vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0x92, + }}, + {AVGATHERQPD, yvpgatherqq, Pvex, [23]uint8{ + vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0x93, + vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0x93, + }}, + {AVGATHERDPS, yvpgatherqq, Pvex, [23]uint8{ + vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0x92, + vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0x92, + }}, + {AVGATHERQPS, yvgatherqps, Pvex, [23]uint8{ + vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0x93, + vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0x93, + }}, + {AVPGATHERDD, yvpgatherqq, Pvex, [23]uint8{ + vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0x90, + vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0x90, + }}, + {AVPGATHERQD, yvgatherqps, Pvex, [23]uint8{ + vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0x91, + vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0x91, + }}, + {AVPGATHERDQ, yvpgatherdq, Pvex, [23]uint8{ + vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0x90, + vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0x90, + }}, + {AVPGATHERQQ, yvpgatherqq, Pvex, [23]uint8{ + vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0x91, + vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0x91, + }}, + {obj.AEND, nil, 0, [23]uint8{}}, {0, nil, 0, [23]uint8{}}, } var opindex [(ALAST + 1) & 
obj.AMask]*Optab -// isextern reports whether s describes an external symbol that must avoid pc-relative addressing. +// useAbs reports whether s describes a symbol that must avoid pc-relative addressing. // This happens on systems like Solaris that call .so functions instead of system calls. // It does not seem to be necessary for any other systems. This is probably working // around a Solaris-specific bug that should be fixed differently, but we don't know // what that bug is. And this does fix it. -func isextern(s *obj.LSym) bool { - // All the Solaris dynamic imports from libc.so begin with "libc_". - return strings.HasPrefix(s.Name, "libc_") +func useAbs(ctxt *obj.Link, s *obj.LSym) bool { + if ctxt.Headtype == objabi.Hsolaris { + // All the Solaris dynamic imports from libc.so begin with "libc_". + return strings.HasPrefix(s.Name, "libc_") + } + return ctxt.Arch.Family == sys.I386 && !ctxt.Flag_shared } // single-instruction no-ops of various lengths. @@ -1980,6 +2187,13 @@ func instinit(ctxt *obj.Link) { deferreturn = ctxt.Lookup("runtime.deferreturn") } + for i := range vexOptab { + c := vexOptab[i].as + if opindex[c&obj.AMask] != nil { + ctxt.Diag("phase error in vexOptab: %d (%v)", i, c) + } + opindex[c&obj.AMask] = &vexOptab[i] + } for i := 1; optab[i].as != 0; i++ { c := optab[i].as if opindex[c&obj.AMask] != nil { @@ -1992,25 +2206,33 @@ func instinit(ctxt *obj.Link) { ycover[i*Ymax+i] = 1 } + ycover[Yi0*Ymax+Yu2] = 1 + ycover[Yi1*Ymax+Yu2] = 1 + ycover[Yi0*Ymax+Yi8] = 1 ycover[Yi1*Ymax+Yi8] = 1 + ycover[Yu2*Ymax+Yi8] = 1 ycover[Yu7*Ymax+Yi8] = 1 ycover[Yi0*Ymax+Yu7] = 1 ycover[Yi1*Ymax+Yu7] = 1 + ycover[Yu2*Ymax+Yu7] = 1 ycover[Yi0*Ymax+Yu8] = 1 ycover[Yi1*Ymax+Yu8] = 1 + ycover[Yu2*Ymax+Yu8] = 1 ycover[Yu7*Ymax+Yu8] = 1 ycover[Yi0*Ymax+Ys32] = 1 ycover[Yi1*Ymax+Ys32] = 1 + ycover[Yu2*Ymax+Ys32] = 1 ycover[Yu7*Ymax+Ys32] = 1 ycover[Yu8*Ymax+Ys32] = 1 ycover[Yi8*Ymax+Ys32] = 1 ycover[Yi0*Ymax+Yi32] = 1 ycover[Yi1*Ymax+Yi32] = 1 + ycover[Yu2*Ymax+Yi32] 
= 1 ycover[Yu7*Ymax+Yi32] = 1 ycover[Yu8*Ymax+Yi32] = 1 ycover[Yi8*Ymax+Yi32] = 1 @@ -2019,6 +2241,7 @@ func instinit(ctxt *obj.Link) { ycover[Yi0*Ymax+Yi64] = 1 ycover[Yi1*Ymax+Yi64] = 1 ycover[Yu7*Ymax+Yi64] = 1 + ycover[Yu2*Ymax+Yi64] = 1 ycover[Yu8*Ymax+Yi64] = 1 ycover[Yi8*Ymax+Yi64] = 1 ycover[Ys32*Ymax+Yi64] = 1 @@ -2225,7 +2448,7 @@ func prefixof(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int { return 0x26 case REG_TLS: - if ctxt.Flag_shared { + if ctxt.Flag_shared && ctxt.Headtype != objabi.Hwindows { // When building for inclusion into a shared library, an instruction of the form // MOV 0(CX)(TLS*1), AX // becomes @@ -2268,6 +2491,18 @@ func oclass(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int { // Can't use SP as the index register return Yxxx } + if a.Index >= REG_X0 && a.Index <= REG_X15 { + if ctxt.Arch.Family == sys.I386 && a.Index > REG_X7 { + return Yxxx + } + return Yxvm + } + if a.Index >= REG_Y0 && a.Index <= REG_Y15 { + if ctxt.Arch.Family == sys.I386 && a.Index > REG_Y7 { + return Yxxx + } + return Yyvm + } if ctxt.Arch.Family == sys.AMD64 { // Offset must fit in a 32-bit signed field (or fit in a 32-bit unsigned field // where the sign extension doesn't matter). @@ -2306,7 +2541,7 @@ func oclass(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int { case obj.NAME_EXTERN, obj.NAME_STATIC: - if a.Sym != nil && isextern(a.Sym) || (ctxt.Arch.Family == sys.I386 && !ctxt.Flag_shared) { + if a.Sym != nil && useAbs(ctxt, a.Sym) { return Yi32 } return Yiauto // use pc-relative addressing @@ -2340,15 +2575,14 @@ func oclass(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int { v = int64(int32(v)) } if v == 0 { - if p.Mark&PRESERVEFLAGS != 0 { - // If PRESERVEFLAGS is set, avoid MOV $0, AX turning into XOR AX, AX. - return Yu7 - } return Yi0 } if v == 1 { return Yi1 } + if v >= 0 && v <= 3 { + return Yu2 + } if v >= 0 && v <= 127 { return Yu7 } @@ -2681,9 +2915,11 @@ func (a *AsmBuf) Reset() { a.off = 0 } // At returns the byte at offset i. 
func (a *AsmBuf) At(i int) byte { return a.buf[i] } +// asmidx emits SIB byte. func (asmbuf *AsmBuf) asmidx(ctxt *obj.Link, scale int, index int, base int) { var i int + // X/Y index register is used in VSIB. switch index { default: goto bad @@ -2699,7 +2935,23 @@ func (asmbuf *AsmBuf) asmidx(ctxt *obj.Link, scale int, index int, base int) { REG_R12, REG_R13, REG_R14, - REG_R15: + REG_R15, + REG_X8, + REG_X9, + REG_X10, + REG_X11, + REG_X12, + REG_X13, + REG_X14, + REG_X15, + REG_Y8, + REG_Y9, + REG_Y10, + REG_Y11, + REG_Y12, + REG_Y13, + REG_Y14, + REG_Y15: if ctxt.Arch.Family == sys.I386 { goto bad } @@ -2711,7 +2963,23 @@ func (asmbuf *AsmBuf) asmidx(ctxt *obj.Link, scale int, index int, base int) { REG_BX, REG_BP, REG_SI, - REG_DI: + REG_DI, + REG_X0, + REG_X1, + REG_X2, + REG_X3, + REG_X4, + REG_X5, + REG_X6, + REG_X7, + REG_Y0, + REG_Y1, + REG_Y2, + REG_Y3, + REG_Y4, + REG_Y5, + REG_Y6, + REG_Y7: i = reg[index] << 3 } @@ -2807,7 +3075,7 @@ func vaddr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r *obj.Reloc) int64 { if a.Name == obj.NAME_GOTREF { r.Siz = 4 r.Type = objabi.R_GOTPCREL - } else if isextern(s) || (ctxt.Arch.Family != sys.AMD64 && !ctxt.Flag_shared) { + } else if useAbs(ctxt, s) { r.Siz = 4 r.Type = objabi.R_ADDR } else { @@ -2890,7 +3158,7 @@ func (asmbuf *AsmBuf) asmandsz(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, a case obj.NAME_EXTERN, obj.NAME_GOTREF, obj.NAME_STATIC: - if !isextern(a.Sym) && ctxt.Arch.Family == sys.AMD64 { + if !useAbs(ctxt, a.Sym) && ctxt.Arch.Family == sys.AMD64 { goto bad } if ctxt.Arch.Family == sys.I386 && ctxt.Flag_shared { @@ -2960,7 +3228,7 @@ func (asmbuf *AsmBuf) asmandsz(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, a asmbuf.rexflag |= regrex[base]&Rxb | rex if base == REG_NONE || (REG_CS <= base && base <= REG_GS) || base == REG_TLS { - if (a.Sym == nil || !isextern(a.Sym)) && base == REG_NONE && (a.Name == obj.NAME_STATIC || a.Name == obj.NAME_EXTERN || a.Name == obj.NAME_GOTREF) || ctxt.Arch.Family != 
sys.AMD64 { + if (a.Sym == nil || !useAbs(ctxt, a.Sym)) && base == REG_NONE && (a.Name == obj.NAME_STATIC || a.Name == obj.NAME_EXTERN || a.Name == obj.NAME_GOTREF) || ctxt.Arch.Family != sys.AMD64 { if a.Name == obj.NAME_GOTREF && (a.Offset != 0 || a.Index != 0 || a.Scale != 0) { ctxt.Diag("%v has offset against gotref", p) } @@ -3322,6 +3590,35 @@ func (asmbuf *AsmBuf) asmvex(ctxt *obj.Link, rm, v, r *obj.Addr, vex, opcode uin asmbuf.Put1(opcode) } +// regIndex returns register index that fits in 4 bits. +// +// Examples: +// REG_X15 => 15 +// REG_R9 => 9 +// REG_AX => 0 +// +func regIndex(r int16) int { + lower3bits := reg[r] + high4bit := regrex[r] & Rxr << 1 + return lower3bits | high4bit +} + +// avx2gatherValid returns true if p satisfies AVX2 gather constraints. +// Reports errors via ctxt. +func avx2gatherValid(ctxt *obj.Link, p *obj.Prog) bool { + // If any pair of the index, mask, or destination registers + // are the same, this instruction results a #UD fault. + index := regIndex(p.GetFrom3().Index) + mask := regIndex(p.From.Reg) + dest := regIndex(p.To.Reg) + if dest == mask || dest == index || mask == index { + ctxt.Diag("mask, index, and destination registers should be distinct: %v", p) + return false + } + + return true +} + func (asmbuf *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { o := opindex[p.As&obj.AMask] @@ -3344,9 +3641,10 @@ func (asmbuf *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { // Similarly SHRQ CX, AX:DX is really SHRQ CX(DX*0), AX. // Change encoding generated by assemblers and compilers and remove. 
if (p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_REG) && p.From.Index != REG_NONE && p.From.Scale == 0 { - p.From3 = new(obj.Addr) - p.From3.Type = obj.TYPE_REG - p.From3.Reg = p.From.Index + p.SetFrom3(obj.Addr{ + Type: obj.TYPE_REG, + Reg: p.From.Index, + }) p.From.Index = 0 } @@ -3355,8 +3653,7 @@ func (asmbuf *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { switch p.As { case AIMUL3Q, APEXTRW, APINSRW, APINSRD, APINSRQ, APSHUFHW, APSHUFL, APSHUFW, ASHUFPD, ASHUFPS, AAESKEYGENASSIST, APSHUFD, APCLMULQDQ: if p.From3Type() == obj.TYPE_NONE { - p.From3 = new(obj.Addr) - *p.From3 = p.From + p.SetFrom3(p.From) p.From = obj.Addr{} p.From.Type = obj.TYPE_CONST p.From.Offset = p.To.Offset @@ -3364,12 +3661,23 @@ func (asmbuf *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { } case ACMPSD, ACMPSS, ACMPPS, ACMPPD: if p.From3Type() == obj.TYPE_NONE { - p.From3 = new(obj.Addr) - *p.From3 = p.To + p.SetFrom3(p.To) p.To = obj.Addr{} p.To.Type = obj.TYPE_CONST - p.To.Offset = p.From3.Offset - p.From3.Offset = 0 + p.To.Offset = p.GetFrom3().Offset + p.GetFrom3().Offset = 0 + } + + case AVGATHERDPD, + AVGATHERQPD, + AVGATHERDPS, + AVGATHERQPS, + AVPGATHERDD, + AVPGATHERQD, + AVPGATHERDQ, + AVPGATHERQQ: + if !avx2gatherValid(ctxt, p) { + return } } @@ -3381,10 +3689,7 @@ func (asmbuf *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { } ft := int(p.Ft) * Ymax - f3t := Ynone * Ymax - if p.From3 != nil { - f3t = oclass(ctxt, p, p.From3) * Ymax - } + var f3t int tt := int(p.Tt) * Ymax xo := obj.Bool2int(o.op[0] == 0x0f) @@ -3396,9 +3701,22 @@ func (asmbuf *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { var r *obj.Reloc var rel obj.Reloc var v int64 - for i := range o.ytab { - yt := &o.ytab[i] - if ycover[ft+int(yt.from)] != 0 && ycover[f3t+int(yt.from3)] != 0 && ycover[tt+int(yt.to)] != 0 { + + args := make([]int, 0, 6) + if ft != Ynone*Ymax { + args = append(args, ft) + } + for i := range p.RestArgs { + args = 
append(args, oclass(ctxt, p, &p.RestArgs[i])*Ymax) + } + if tt != Ynone*Ymax { + args = append(args, tt) + } + + for _, yt := range o.ytab { + if !yt.match(args) { + z += int(yt.zoffset) + xo + } else { switch o.prefix { case Px1: /* first option valid only in 32-bit mode */ if ctxt.Arch.Family == sys.AMD64 && z == 0 { @@ -3415,6 +3733,17 @@ func (asmbuf *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { case Pq4: /* 66 0F 38 */ asmbuf.Put3(0x66, 0x0F, 0x38) + case Pq4w: /* 66 0F 38 + REX.W */ + asmbuf.rexflag |= Pw + asmbuf.Put3(0x66, 0x0F, 0x38) + + case Pq5: /* F3 0F 38 */ + asmbuf.Put3(0xF3, 0x0F, 0x38) + + case Pq5w: /* F3 0F 38 + REX.W */ + asmbuf.rexflag |= Pw + asmbuf.Put3(0xF3, 0x0F, 0x38) + case Pf2, /* xmm opcode escape */ Pf3: asmbuf.Put2(o.prefix, Pm) @@ -3545,7 +3874,7 @@ func (asmbuf *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { case Zm_r_i_xm: asmbuf.mediaop(ctxt, o, op, int(yt.zoffset), z) - asmbuf.asmand(ctxt, cursym, p, &p.From, p.From3) + asmbuf.asmand(ctxt, cursym, p, &p.From, p.GetFrom3()) asmbuf.Put1(byte(p.To.Offset)) case Zibm_r, Zibr_m: @@ -3559,9 +3888,9 @@ func (asmbuf *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { asmbuf.Put1(byte(op)) } if yt.zcase == Zibr_m { - asmbuf.asmand(ctxt, cursym, p, &p.To, p.From3) + asmbuf.asmand(ctxt, cursym, p, &p.To, p.GetFrom3()) } else { - asmbuf.asmand(ctxt, cursym, p, p.From3, &p.To) + asmbuf.asmand(ctxt, cursym, p, p.GetFrom3(), &p.To) } asmbuf.Put1(byte(p.From.Offset)) @@ -3582,39 +3911,62 @@ func (asmbuf *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { asmbuf.Put1(byte(op)) asmbuf.asmand(ctxt, cursym, p, &p.To, &p.From) + case Zvex: + asmbuf.asmvex(ctxt, &p.From, p.GetFrom3(), &p.To, o.op[z], o.op[z+1]) + case Zvex_rm_v_r: - asmbuf.asmvex(ctxt, &p.From, p.From3, &p.To, o.op[z], o.op[z+1]) + asmbuf.asmvex(ctxt, &p.From, p.GetFrom3(), &p.To, o.op[z], o.op[z+1]) asmbuf.asmand(ctxt, cursym, p, &p.From, &p.To) + case Zvex_rm_v_ro: + 
asmbuf.asmvex(ctxt, &p.From, p.GetFrom3(), &p.To, o.op[z], o.op[z+1]) + asmbuf.asmando(ctxt, cursym, p, &p.From, int(o.op[z+2])) + case Zvex_i_r_v: - asmbuf.asmvex(ctxt, p.From3, &p.To, nil, o.op[z], o.op[z+1]) + asmbuf.asmvex(ctxt, p.GetFrom3(), &p.To, nil, o.op[z], o.op[z+1]) regnum := byte(0x7) - if p.From3.Reg >= REG_X0 && p.From3.Reg <= REG_X15 { - regnum &= byte(p.From3.Reg - REG_X0) + if p.GetFrom3().Reg >= REG_X0 && p.GetFrom3().Reg <= REG_X15 { + regnum &= byte(p.GetFrom3().Reg - REG_X0) } else { - regnum &= byte(p.From3.Reg - REG_Y0) + regnum &= byte(p.GetFrom3().Reg - REG_Y0) } asmbuf.Put1(byte(o.op[z+2]) | regnum) asmbuf.Put1(byte(p.From.Offset)) case Zvex_i_rm_v_r: - asmbuf.asmvex(ctxt, &p.From, p.From3, &p.To, o.op[z], o.op[z+1]) - asmbuf.asmand(ctxt, cursym, p, &p.From, &p.To) - asmbuf.Put1(byte(p.From3.Offset)) + imm, from, from3, to := unpackOps4(p) + asmbuf.asmvex(ctxt, from, from3, to, o.op[z], o.op[z+1]) + asmbuf.asmand(ctxt, cursym, p, from, to) + asmbuf.Put1(byte(imm.Offset)) case Zvex_i_rm_r: - asmbuf.asmvex(ctxt, p.From3, nil, &p.To, o.op[z], o.op[z+1]) - asmbuf.asmand(ctxt, cursym, p, p.From3, &p.To) + asmbuf.asmvex(ctxt, p.GetFrom3(), nil, &p.To, o.op[z], o.op[z+1]) + asmbuf.asmand(ctxt, cursym, p, p.GetFrom3(), &p.To) asmbuf.Put1(byte(p.From.Offset)) case Zvex_v_rm_r: - asmbuf.asmvex(ctxt, p.From3, &p.From, &p.To, o.op[z], o.op[z+1]) - asmbuf.asmand(ctxt, cursym, p, p.From3, &p.To) + asmbuf.asmvex(ctxt, p.GetFrom3(), &p.From, &p.To, o.op[z], o.op[z+1]) + asmbuf.asmand(ctxt, cursym, p, p.GetFrom3(), &p.To) case Zvex_r_v_rm: - asmbuf.asmvex(ctxt, &p.To, p.From3, &p.From, o.op[z], o.op[z+1]) + asmbuf.asmvex(ctxt, &p.To, p.GetFrom3(), &p.From, o.op[z], o.op[z+1]) asmbuf.asmand(ctxt, cursym, p, &p.To, &p.From) + case Zvex_rm_r_vo: + asmbuf.asmvex(ctxt, &p.From, &p.To, p.GetFrom3(), o.op[z], o.op[z+1]) + asmbuf.asmando(ctxt, cursym, p, &p.From, int(o.op[z+2])) + + case Zvex_i_r_rm: + asmbuf.asmvex(ctxt, &p.To, nil, p.GetFrom3(), o.op[z], 
o.op[z+1]) + asmbuf.asmand(ctxt, cursym, p, &p.To, p.GetFrom3()) + asmbuf.Put1(byte(p.From.Offset)) + + case Zvex_hr_rm_v_r: + hr, from, from3, to := unpackOps4(p) + asmbuf.asmvex(ctxt, from, from3, to, o.op[z], o.op[z+1]) + asmbuf.asmand(ctxt, cursym, p, from, to) + asmbuf.Put1(byte(regIndex(hr.Reg) << 4)) + case Zr_m_xm: asmbuf.mediaop(ctxt, o, op, int(yt.zoffset), z) asmbuf.asmand(ctxt, cursym, p, &p.To, &p.From) @@ -3779,11 +4131,6 @@ func (asmbuf *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { asmbuf.rexflag |= regrex[p.From.Reg] & (Rxb | 0x40) asmbuf.Put1(byte(op + reg[p.From.Reg])) - case Zclr: - asmbuf.rexflag &^= Pw - asmbuf.Put1(byte(op)) - asmbuf.asmand(ctxt, cursym, p, &p.To, &p.To) - case Zcallcon, Zjmpcon: if yt.zcase == Zcallcon { asmbuf.Put1(byte(op)) @@ -3814,6 +4161,7 @@ func (asmbuf *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { case Zcall, Zcallduff: if p.To.Sym == nil { ctxt.Diag("call without target") + ctxt.DiagFlush() log.Fatalf("bad code") } @@ -3854,6 +4202,7 @@ func (asmbuf *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { if p.To.Sym != nil { if yt.zcase != Zjmp { ctxt.Diag("branch to ATEXT") + ctxt.DiagFlush() log.Fatalf("bad code") } @@ -3875,6 +4224,7 @@ func (asmbuf *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { if q == nil { ctxt.Diag("jmp/branch/loop without target") + ctxt.DiagFlush() log.Fatalf("bad code") } @@ -3967,7 +4317,10 @@ func (asmbuf *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { return } - z += int(yt.zoffset) + xo + } + f3t = Ynone * Ymax + if p.GetFrom3() != nil { + f3t = oclass(ctxt, p, p.GetFrom3()) * Ymax } for mo := ymovtab; mo[0].as != 0; mo = mo[1:] { var pp obj.Prog @@ -4046,7 +4399,7 @@ func (asmbuf *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { case obj.TYPE_CONST: asmbuf.Put2(0x0f, t[0]) - asmbuf.asmandsz(ctxt, cursym, p, &p.To, reg[p.From3.Reg], regrex[p.From3.Reg], 0) + asmbuf.asmandsz(ctxt, cursym, p, &p.To, 
reg[p.GetFrom3().Reg], regrex[p.GetFrom3().Reg], 0) asmbuf.Put1(byte(p.From.Offset)) case obj.TYPE_REG: @@ -4056,7 +4409,7 @@ func (asmbuf *AsmBuf) doasm(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { case REG_CL, REG_CX: asmbuf.Put2(0x0f, t[1]) - asmbuf.asmandsz(ctxt, cursym, p, &p.To, reg[p.From3.Reg], regrex[p.From3.Reg], 0) + asmbuf.asmandsz(ctxt, cursym, p, &p.To, reg[p.GetFrom3().Reg], regrex[p.GetFrom3().Reg], 0) } } @@ -4369,6 +4722,7 @@ func byteswapreg(ctxt *obj.Link, a *obj.Addr) int { return REG_DX default: ctxt.Diag("impossible byte register") + ctxt.DiagFlush() log.Fatalf("bad code") return 0 } @@ -4607,3 +4961,8 @@ func (asmbuf *AsmBuf) asmins(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog) { } } } + +// Extract 4 operands from p. +func unpackOps4(p *obj.Prog) (*obj.Addr, *obj.Addr, *obj.Addr, *obj.Addr) { + return &p.From, &p.RestArgs[0], &p.RestArgs[1], &p.To +} diff --git a/src/cmd/internal/obj/x86/obj6.go b/src/cmd/internal/obj/x86/obj6.go index d34f0aeaa63..7b5e4769dcd 100644 --- a/src/cmd/internal/obj/x86/obj6.go +++ b/src/cmd/internal/obj/x86/obj6.go @@ -201,8 +201,8 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { } if ctxt.Headtype == objabi.Hnacl && ctxt.Arch.Family == sys.AMD64 { - if p.From3 != nil { - nacladdr(ctxt, p, p.From3) + if p.GetFrom3() != nil { + nacladdr(ctxt, p, p.GetFrom3()) } nacladdr(ctxt, p, &p.From) nacladdr(ctxt, p, &p.To) @@ -322,7 +322,7 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { // $LEA $offset($reg), $reg // CALL $reg // (we use LEAx rather than ADDx because ADDx clobbers - // flags and duffzero on 386 does not otherwise do so) + // flags and duffzero on 386 does not otherwise do so). 
var sym *obj.LSym if p.As == obj.ADUFFZERO { sym = ctxt.Lookup("runtime.duffzero") @@ -398,7 +398,7 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { q.From.Reg = reg } } - if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN { + if p.GetFrom3() != nil && p.GetFrom3().Name == obj.NAME_EXTERN { ctxt.Diag("don't know how to handle %v with -dynlink", p) } var source *obj.Addr @@ -436,7 +436,9 @@ func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { p2.As = p.As p2.Scond = p.Scond p2.From = p.From - p2.From3 = p.From3 + if p.RestArgs != nil { + p2.RestArgs = append(p2.RestArgs, p.RestArgs...) + } p2.Reg = p.Reg p2.To = p.To // p.To.Type was set to TYPE_BRANCH above, but that makes checkaddr @@ -522,13 +524,13 @@ func rewriteToPcrel(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { } } - if !isName(&p.From) && !isName(&p.To) && (p.From3 == nil || !isName(p.From3)) { + if !isName(&p.From) && !isName(&p.To) && (p.GetFrom3() == nil || !isName(p.GetFrom3())) { return } var dst int16 = REG_CX if (p.As == ALEAL || p.As == AMOVL) && p.To.Reg != p.From.Reg && p.To.Reg != p.From.Index { dst = p.To.Reg - // Why? See the comment near the top of rewriteToUseGot above. + // Why? See the comment near the top of rewriteToUseGot above. // AMOVLs might be introduced by the GOT rewrites. 
} q := obj.Appendp(p, newprog) @@ -543,7 +545,7 @@ func rewriteToPcrel(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { r.As = p.As r.Scond = p.Scond r.From = p.From - r.From3 = p.From3 + r.RestArgs = p.RestArgs r.Reg = p.Reg r.To = p.To if isName(&p.From) { @@ -552,8 +554,8 @@ func rewriteToPcrel(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { if isName(&p.To) { r.To.Reg = dst } - if p.From3 != nil && isName(p.From3) { - r.From3.Reg = dst + if p.GetFrom3() != nil && isName(p.GetFrom3()) { + r.GetFrom3().Reg = dst } obj.Nopout(p) } @@ -857,12 +859,12 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { case obj.NAME_PARAM: p.From.Offset += int64(deltasp) + int64(pcsize) } - if p.From3 != nil { - switch p.From3.Name { + if p.GetFrom3() != nil { + switch p.GetFrom3().Name { case obj.NAME_AUTO: - p.From3.Offset += int64(deltasp) - int64(bpsize) + p.GetFrom3().Offset += int64(deltasp) - int64(bpsize) case obj.NAME_PARAM: - p.From3.Offset += int64(deltasp) + int64(pcsize) + p.GetFrom3().Offset += int64(deltasp) + int64(pcsize) } } switch p.To.Name { @@ -1183,6 +1185,7 @@ func stacksplit(ctxt *obj.Link, cursym *obj.LSym, p *obj.Prog, newprog obj.ProgA var unaryDst = map[obj.As]bool{ ABSWAPL: true, ABSWAPQ: true, + ACLFLUSH: true, ACMPXCHG8B: true, ADECB: true, ADECL: true, @@ -1231,28 +1234,31 @@ var unaryDst = map[obj.As]bool{ } var Linkamd64 = obj.LinkArch{ - Arch: sys.ArchAMD64, - Init: instinit, - Preprocess: preprocess, - Assemble: span6, - Progedit: progedit, - UnaryDst: unaryDst, + Arch: sys.ArchAMD64, + Init: instinit, + Preprocess: preprocess, + Assemble: span6, + Progedit: progedit, + UnaryDst: unaryDst, + DWARFRegisters: AMD64DWARFRegisters, } var Linkamd64p32 = obj.LinkArch{ - Arch: sys.ArchAMD64P32, - Init: instinit, - Preprocess: preprocess, - Assemble: span6, - Progedit: progedit, - UnaryDst: unaryDst, + Arch: sys.ArchAMD64P32, + Init: instinit, + Preprocess: preprocess, + Assemble: span6, + Progedit: progedit, + 
UnaryDst: unaryDst, + DWARFRegisters: AMD64DWARFRegisters, } var Link386 = obj.LinkArch{ - Arch: sys.Arch386, - Init: instinit, - Preprocess: preprocess, - Assemble: span6, - Progedit: progedit, - UnaryDst: unaryDst, + Arch: sys.Arch386, + Init: instinit, + Preprocess: preprocess, + Assemble: span6, + Progedit: progedit, + UnaryDst: unaryDst, + DWARFRegisters: X86DWARFRegisters, } diff --git a/src/cmd/internal/obj/x86/vex_optabs.go b/src/cmd/internal/obj/x86/vex_optabs.go new file mode 100644 index 00000000000..b0b54fe4425 --- /dev/null +++ b/src/cmd/internal/obj/x86/vex_optabs.go @@ -0,0 +1,382 @@ +// Code generated by x86avxgen. DO NOT EDIT. + +package x86 + +var vexOptab = []Optab{ + {AANDNL, yvex_r3, Pvex, [23]uint8{vexNDS | vexLZ | vex0F38 | vexW0, 0xF2}}, + {AANDNQ, yvex_r3, Pvex, [23]uint8{vexNDS | vexLZ | vex0F38 | vexW1, 0xF2}}, + {ABEXTRL, yvex_vmr3, Pvex, [23]uint8{vexNDS | vexLZ | vex0F38 | vexW0, 0xF7}}, + {ABEXTRQ, yvex_vmr3, Pvex, [23]uint8{vexNDS | vexLZ | vex0F38 | vexW1, 0xF7}}, + {ABLSIL, yvex_r2, Pvex, [23]uint8{vexNDD | vexLZ | vex0F38 | vexW0, 0xF3, 03}}, + {ABLSIQ, yvex_r2, Pvex, [23]uint8{vexNDD | vexLZ | vex0F38 | vexW1, 0xF3, 03}}, + {ABLSMSKL, yvex_r2, Pvex, [23]uint8{vexNDD | vexLZ | vex0F38 | vexW0, 0xF3, 02}}, + {ABLSMSKQ, yvex_r2, Pvex, [23]uint8{vexNDD | vexLZ | vex0F38 | vexW1, 0xF3, 02}}, + {ABLSRL, yvex_r2, Pvex, [23]uint8{vexNDD | vexLZ | vex0F38 | vexW0, 0xF3, 01}}, + {ABLSRQ, yvex_r2, Pvex, [23]uint8{vexNDD | vexLZ | vex0F38 | vexW1, 0xF3, 01}}, + {ABZHIL, yvex_vmr3, Pvex, [23]uint8{vexNDS | vexLZ | vex0F38 | vexW0, 0xF5}}, + {ABZHIQ, yvex_vmr3, Pvex, [23]uint8{vexNDS | vexLZ | vex0F38 | vexW1, 0xF5}}, + {AMULXL, yvex_r3, Pvex, [23]uint8{vexNDD | vexLZ | vexF2 | vex0F38 | vexW0, 0xF6}}, + {AMULXQ, yvex_r3, Pvex, [23]uint8{vexNDD | vexLZ | vexF2 | vex0F38 | vexW1, 0xF6}}, + {APDEPL, yvex_r3, Pvex, [23]uint8{vexNDS | vexLZ | vexF2 | vex0F38 | vexW0, 0xF5}}, + {APDEPQ, yvex_r3, Pvex, [23]uint8{vexNDS | vexLZ | vexF2 | vex0F38 | 
vexW1, 0xF5}}, + {APEXTL, yvex_r3, Pvex, [23]uint8{vexNDS | vexLZ | vexF3 | vex0F38 | vexW0, 0xF5}}, + {APEXTQ, yvex_r3, Pvex, [23]uint8{vexNDS | vexLZ | vexF3 | vex0F38 | vexW1, 0xF5}}, + {ARORXL, yvex_ri3, Pvex, [23]uint8{vexNOVSR | vexLZ | vexF2 | vex0F3A | vexW0, 0xF0}}, + {ARORXQ, yvex_ri3, Pvex, [23]uint8{vexNOVSR | vexLZ | vexF2 | vex0F3A | vexW1, 0xF0}}, + {ASARXL, yvex_vmr3, Pvex, [23]uint8{vexNDS | vexLZ | vexF3 | vex0F38 | vexW0, 0xF7}}, + {ASARXQ, yvex_vmr3, Pvex, [23]uint8{vexNDS | vexLZ | vexF3 | vex0F38 | vexW1, 0xF7}}, + {ASHLXL, yvex_vmr3, Pvex, [23]uint8{vexNDS | vexLZ | vex66 | vex0F38 | vexW0, 0xF7}}, + {ASHLXQ, yvex_vmr3, Pvex, [23]uint8{vexNDS | vexLZ | vex66 | vex0F38 | vexW1, 0xF7}}, + {ASHRXL, yvex_vmr3, Pvex, [23]uint8{vexNDS | vexLZ | vexF2 | vex0F38 | vexW0, 0xF7}}, + {ASHRXQ, yvex_vmr3, Pvex, [23]uint8{vexNDS | vexLZ | vexF2 | vex0F38 | vexW1, 0xF7}}, + {AVADDPD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x58, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x58}}, + {AVADDPS, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex0F | vexWIG, 0x58, vexNDS | vex256 | vex0F | vexWIG, 0x58}}, + {AVADDSD, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF2 | vex0F | vexWIG, 0x58}}, + {AVADDSS, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF3 | vex0F | vexWIG, 0x58}}, + {AVADDSUBPD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xD0, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xD0}}, + {AVADDSUBPS, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vexF2 | vex0F | vexWIG, 0xD0, vexNDS | vex256 | vexF2 | vex0F | vexWIG, 0xD0}}, + {AVAESDEC, yvex_x3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0xDE}}, + {AVAESDECLAST, yvex_x3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0xDF}}, + {AVAESENC, yvex_x3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0xDC}}, + {AVAESENCLAST, yvex_x3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0xDD}}, + {AVAESIMC, yvex_x2, Pvex, 
[23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0xDB}}, + {AVAESKEYGENASSIST, yvex_xi3, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F3A | vexWIG, 0xDF}}, + {AVANDNPD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x55, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x55}}, + {AVANDNPS, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex0F | vexWIG, 0x55, vexNDS | vex256 | vex0F | vexWIG, 0x55}}, + {AVANDPD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x54, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x54}}, + {AVANDPS, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex0F | vexWIG, 0x54, vexNDS | vex256 | vex0F | vexWIG, 0x54}}, + {AVBLENDPD, yvex_vpalignr, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F3A | vexWIG, 0x0D, vexNDS | vex256 | vex66 | vex0F3A | vexWIG, 0x0D}}, + {AVBLENDPS, yvex_vpalignr, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F3A | vexWIG, 0x0C, vexNDS | vex256 | vex66 | vex0F3A | vexWIG, 0x0C}}, + {AVBLENDVPD, yvex_xy4, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F3A | vexW0, 0x4B, vexNDS | vex256 | vex66 | vex0F3A | vexW0, 0x4B}}, + {AVBLENDVPS, yvex_xy4, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F3A | vexW0, 0x4A, vexNDS | vex256 | vex66 | vex0F3A | vexW0, 0x4A}}, + {AVBROADCASTF128, yvex_vbroadcastf, Pvex, [23]uint8{vexNOVSR | vex256 | vex66 | vex0F38 | vexW0, 0x1A}}, + {AVBROADCASTI128, yvex_vbroadcastf, Pvex, [23]uint8{vexNOVSR | vex256 | vex66 | vex0F38 | vexW0, 0x5A}}, + {AVBROADCASTSD, yvex_vpbroadcast_sd, Pvex, [23]uint8{vexNOVSR | vex256 | vex66 | vex0F38 | vexW0, 0x19, vexNOVSR | vex256 | vex66 | vex0F38 | vexW0, 0x19}}, + {AVBROADCASTSS, yvex_vpbroadcast_ss, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexW0, 0x18, vexNOVSR | vex128 | vex66 | vex0F38 | vexW0, 0x18, vexNOVSR | vex256 | vex66 | vex0F38 | vexW0, 0x18, vexNOVSR | vex256 | vex66 | vex0F38 | vexW0, 0x18}}, + {AVCMPPD, yvex_vpalignr, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xC2, vexNDS | vex256 | 
vex66 | vex0F | vexWIG, 0xC2}}, + {AVCMPPS, yvex_vpalignr, Pvex, [23]uint8{vexNDS | vex128 | vex0F | vexWIG, 0xC2, vexNDS | vex256 | vex0F | vexWIG, 0xC2}}, + {AVCMPSD, yvex_xxi4, Pvex, [23]uint8{vexNDS | vexLIG | vexF2 | vex0F | vexWIG, 0xC2}}, + {AVCMPSS, yvex_xxi4, Pvex, [23]uint8{vexNDS | vexLIG | vexF3 | vex0F | vexWIG, 0xC2}}, + {AVCOMISD, yvex_x2, Pvex, [23]uint8{vexNOVSR | vexLIG | vex66 | vex0F | vexWIG, 0x2F}}, + {AVCOMISS, yvex_x2, Pvex, [23]uint8{vexNOVSR | vexLIG | vex0F | vexWIG, 0x2F}}, + {AVCVTDQ2PD, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vexF3 | vex0F | vexWIG, 0xE6, vexNOVSR | vex256 | vexF3 | vex0F | vexWIG, 0xE6}}, + {AVCVTDQ2PS, yvex_xy2, Pvex, [23]uint8{vexNOVSR | vex128 | vex0F | vexWIG, 0x5B, vexNOVSR | vex256 | vex0F | vexWIG, 0x5B}}, + {AVCVTPD2DQX, yvex_x2, Pvex, [23]uint8{vexNOVSR | vex128 | vexF2 | vex0F | vexWIG, 0xE6}}, + {AVCVTPD2DQY, yvex_y2, Pvex, [23]uint8{vexNOVSR | vex256 | vexF2 | vex0F | vexWIG, 0xE6}}, + {AVCVTPD2PSX, yvex_x2, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0x5A}}, + {AVCVTPD2PSY, yvex_y2, Pvex, [23]uint8{vexNOVSR | vex256 | vex66 | vex0F | vexWIG, 0x5A}}, + {AVCVTPH2PS, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexW0, 0x13, vexNOVSR | vex256 | vex66 | vex0F38 | vexW0, 0x13}}, + {AVCVTPS2DQ, yvex_xy2, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0x5B, vexNOVSR | vex256 | vex66 | vex0F | vexWIG, 0x5B}}, + {AVCVTPS2PD, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex0F | vexWIG, 0x5A, vexNOVSR | vex256 | vex0F | vexWIG, 0x5A}}, + {AVCVTPS2PH, yvex_vcvtps2ph, Pvex, [23]uint8{vexNOVSR | vex256 | vex66 | vex0F3A | vexW0, 0x1D, vexNOVSR | vex128 | vex66 | vex0F3A | vexW0, 0x1D}}, + {AVCVTSD2SI, yvex_vcvtsd2si, Pvex, [23]uint8{vexNOVSR | vexLIG | vexF2 | vex0F | vexW0, 0x2D}}, + {AVCVTSD2SIQ, yvex_vcvtsd2si, Pvex, [23]uint8{vexNOVSR | vexLIG | vexF2 | vex0F | vexW1, 0x2D}}, + {AVCVTSD2SS, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF2 
| vex0F | vexWIG, 0x5A}}, + {AVCVTSI2SDL, yvex_rx3, Pvex, [23]uint8{vexNDS | vexLIG | vexF2 | vex0F | vexW0, 0x2A}}, + {AVCVTSI2SDQ, yvex_rx3, Pvex, [23]uint8{vexNDS | vexLIG | vexF2 | vex0F | vexW1, 0x2A}}, + {AVCVTSI2SSL, yvex_rx3, Pvex, [23]uint8{vexNDS | vexLIG | vexF3 | vex0F | vexW0, 0x2A}}, + {AVCVTSI2SSQ, yvex_rx3, Pvex, [23]uint8{vexNDS | vexLIG | vexF3 | vex0F | vexW1, 0x2A}}, + {AVCVTSS2SD, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF3 | vex0F | vexWIG, 0x5A}}, + {AVCVTSS2SI, yvex_vcvtsd2si, Pvex, [23]uint8{vexNOVSR | vexLIG | vexF3 | vex0F | vexW0, 0x2D}}, + {AVCVTSS2SIQ, yvex_vcvtsd2si, Pvex, [23]uint8{vexNOVSR | vexLIG | vexF3 | vex0F | vexW1, 0x2D}}, + {AVCVTTPD2DQX, yvex_x2, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0xE6}}, + {AVCVTTPD2DQY, yvex_y2, Pvex, [23]uint8{vexNOVSR | vex256 | vex66 | vex0F | vexWIG, 0xE6}}, + {AVCVTTPS2DQ, yvex_xy2, Pvex, [23]uint8{vexNOVSR | vex128 | vexF3 | vex0F | vexWIG, 0x5B, vexNOVSR | vex256 | vexF3 | vex0F | vexWIG, 0x5B}}, + {AVCVTTSD2SI, yvex_vcvtsd2si, Pvex, [23]uint8{vexNOVSR | vexLIG | vexF2 | vex0F | vexW0, 0x2C}}, + {AVCVTTSD2SIQ, yvex_vcvtsd2si, Pvex, [23]uint8{vexNOVSR | vexLIG | vexF2 | vex0F | vexW1, 0x2C}}, + {AVCVTTSS2SI, yvex_vcvtsd2si, Pvex, [23]uint8{vexNOVSR | vexLIG | vexF3 | vex0F | vexW0, 0x2C}}, + {AVCVTTSS2SIQ, yvex_vcvtsd2si, Pvex, [23]uint8{vexNOVSR | vexLIG | vexF3 | vex0F | vexW1, 0x2C}}, + {AVDIVPD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x5E, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x5E}}, + {AVDIVPS, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex0F | vexWIG, 0x5E, vexNDS | vex256 | vex0F | vexWIG, 0x5E}}, + {AVDIVSD, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF2 | vex0F | vexWIG, 0x5E}}, + {AVDIVSS, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF3 | vex0F | vexWIG, 0x5E}}, + {AVDPPD, yvex_xxi4, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F3A | vexWIG, 0x41}}, + {AVDPPS, yvex_vpalignr, Pvex, [23]uint8{vexNDS | vex128 | vex66 | 
vex0F3A | vexWIG, 0x40, vexNDS | vex256 | vex66 | vex0F3A | vexWIG, 0x40}}, + {AVEXTRACTF128, yvex_yi3, Pvex, [23]uint8{vexNOVSR | vex256 | vex66 | vex0F3A | vexW0, 0x19}}, + {AVEXTRACTI128, yvex_yi3, Pvex, [23]uint8{vexNOVSR | vex256 | vex66 | vex0F3A | vexW0, 0x39}}, + {AVEXTRACTPS, yvex_vpextr, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F3A | vexWIG, 0x17}}, + {AVFMADD132PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0x98, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0x98}}, + {AVFMADD132PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0x98, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0x98}}, + {AVFMADD132SD, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW1, 0x99}}, + {AVFMADD132SS, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW0, 0x99}}, + {AVFMADD213PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0xA8, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0xA8}}, + {AVFMADD213PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0xA8, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0xA8}}, + {AVFMADD213SD, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW1, 0xA9}}, + {AVFMADD213SS, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW0, 0xA9}}, + {AVFMADD231PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0xB8, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0xB8}}, + {AVFMADD231PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0xB8, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0xB8}}, + {AVFMADD231SD, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW1, 0xB9}}, + {AVFMADD231SS, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW0, 0xB9}}, + {AVFMADDSUB132PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0x96, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0x96}}, + {AVFMADDSUB132PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | 
vex66 | vex0F38 | vexW0, 0x96, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0x96}}, + {AVFMADDSUB213PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0xA6, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0xA6}}, + {AVFMADDSUB213PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0xA6, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0xA6}}, + {AVFMADDSUB231PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0xB6, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0xB6}}, + {AVFMADDSUB231PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0xB6, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0xB6}}, + {AVFMSUB132PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0x9A, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0x9A}}, + {AVFMSUB132PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0x9A, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0x9A}}, + {AVFMSUB132SD, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW1, 0x9B}}, + {AVFMSUB132SS, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW0, 0x9B}}, + {AVFMSUB213PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0xAA, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0xAA}}, + {AVFMSUB213PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0xAA, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0xAA}}, + {AVFMSUB213SD, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW1, 0xAB}}, + {AVFMSUB213SS, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW0, 0xAB}}, + {AVFMSUB231PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0xBA, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0xBA}}, + {AVFMSUB231PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0xBA, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0xBA}}, + {AVFMSUB231SD, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW1, 0xBB}}, + 
{AVFMSUB231SS, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW0, 0xBB}}, + {AVFMSUBADD132PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0x97, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0x97}}, + {AVFMSUBADD132PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0x97, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0x97}}, + {AVFMSUBADD213PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0xA7, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0xA7}}, + {AVFMSUBADD213PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0xA7, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0xA7}}, + {AVFMSUBADD231PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0xB7, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0xB7}}, + {AVFMSUBADD231PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0xB7, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0xB7}}, + {AVFNMADD132PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0x9C, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0x9C}}, + {AVFNMADD132PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0x9C, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0x9C}}, + {AVFNMADD132SD, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW1, 0x9D}}, + {AVFNMADD132SS, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW0, 0x9D}}, + {AVFNMADD213PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0xAC, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0xAC}}, + {AVFNMADD213PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0xAC, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0xAC}}, + {AVFNMADD213SD, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW1, 0xAD}}, + {AVFNMADD213SS, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW0, 0xAD}}, + {AVFNMADD231PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | 
vexW1, 0xBC, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0xBC}}, + {AVFNMADD231PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0xBC, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0xBC}}, + {AVFNMADD231SD, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW1, 0xBD}}, + {AVFNMADD231SS, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW0, 0xBD}}, + {AVFNMSUB132PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0x9E, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0x9E}}, + {AVFNMSUB132PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0x9E, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0x9E}}, + {AVFNMSUB132SD, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW1, 0x9F}}, + {AVFNMSUB132SS, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW0, 0x9F}}, + {AVFNMSUB213PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0xAE, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0xAE}}, + {AVFNMSUB213PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0xAE, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0xAE}}, + {AVFNMSUB213SD, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW1, 0xAF}}, + {AVFNMSUB213SS, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW0, 0xAF}}, + {AVFNMSUB231PD, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW1, 0xBE, vexDDS | vex256 | vex66 | vex0F38 | vexW1, 0xBE}}, + {AVFNMSUB231PS, yvex_xy3, Pvex, [23]uint8{vexDDS | vex128 | vex66 | vex0F38 | vexW0, 0xBE, vexDDS | vex256 | vex66 | vex0F38 | vexW0, 0xBE}}, + {AVFNMSUB231SD, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW1, 0xBF}}, + {AVFNMSUB231SS, yvex_x3, Pvex, [23]uint8{vexDDS | vexLIG | vex66 | vex0F38 | vexW0, 0xBF}}, + {AVHADDPD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x7C, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x7C}}, + {AVHADDPS, yvex_xy3, Pvex, 
[23]uint8{vexNDS | vex128 | vexF2 | vex0F | vexWIG, 0x7C, vexNDS | vex256 | vexF2 | vex0F | vexWIG, 0x7C}}, + {AVHSUBPD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x7D, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x7D}}, + {AVHSUBPS, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vexF2 | vex0F | vexWIG, 0x7D, vexNDS | vex256 | vexF2 | vex0F | vexWIG, 0x7D}}, + {AVINSERTF128, yvex_xyi4, Pvex, [23]uint8{vexNDS | vex256 | vex66 | vex0F3A | vexW0, 0x18}}, + {AVINSERTI128, yvex_xyi4, Pvex, [23]uint8{vexNDS | vex256 | vex66 | vex0F3A | vexW0, 0x38}}, + {AVINSERTPS, yvex_xxi4, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F3A | vexWIG, 0x21}}, + {AVLDDQU, yvex_mxy, Pvex, [23]uint8{vexNOVSR | vex128 | vexF2 | vex0F | vexWIG, 0xF0, vexNOVSR | vex256 | vexF2 | vex0F | vexWIG, 0xF0}}, + {AVLDMXCSR, yvex_m, Pvex, [23]uint8{vexNOVSR | vexLZ | vex0F | vexWIG, 0xAE, 02}}, + {AVMASKMOVDQU, yvex_xx2, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0xF7}}, + {AVMASKMOVPD, yvex_vblendvpd, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexW0, 0x2F, vexNDS | vex256 | vex66 | vex0F38 | vexW0, 0x2F, vexNDS | vex128 | vex66 | vex0F38 | vexW0, 0x2D, vexNDS | vex256 | vex66 | vex0F38 | vexW0, 0x2D}}, + {AVMASKMOVPS, yvex_vblendvpd, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexW0, 0x2E, vexNDS | vex256 | vex66 | vex0F38 | vexW0, 0x2E, vexNDS | vex128 | vex66 | vex0F38 | vexW0, 0x2C, vexNDS | vex256 | vex66 | vex0F38 | vexW0, 0x2C}}, + {AVMAXPD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x5F, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x5F}}, + {AVMAXPS, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex0F | vexWIG, 0x5F, vexNDS | vex256 | vex0F | vexWIG, 0x5F}}, + {AVMAXSD, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF2 | vex0F | vexWIG, 0x5F}}, + {AVMAXSS, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF3 | vex0F | vexWIG, 0x5F}}, + {AVMINPD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x5D, 
vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x5D}}, + {AVMINPS, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex0F | vexWIG, 0x5D, vexNDS | vex256 | vex0F | vexWIG, 0x5D}}, + {AVMINSD, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF2 | vex0F | vexWIG, 0x5D}}, + {AVMINSS, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF3 | vex0F | vexWIG, 0x5D}}, + {AVMOVAPD, yvex_vmovdqa, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0x28, vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0x29, vexNOVSR | vex256 | vex66 | vex0F | vexWIG, 0x28, vexNOVSR | vex256 | vex66 | vex0F | vexWIG, 0x29}}, + {AVMOVAPS, yvex_vmovdqa, Pvex, [23]uint8{vexNOVSR | vex128 | vex0F | vexWIG, 0x28, vexNOVSR | vex128 | vex0F | vexWIG, 0x29, vexNOVSR | vex256 | vex0F | vexWIG, 0x28, vexNOVSR | vex256 | vex0F | vexWIG, 0x29}}, + {AVMOVD, yvex_vmovd, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexW0, 0x7E, vexNOVSR | vex128 | vex66 | vex0F | vexW0, 0x6E}}, + {AVMOVDDUP, yvex_xy2, Pvex, [23]uint8{vexNOVSR | vex128 | vexF2 | vex0F | vexWIG, 0x12, vexNOVSR | vex256 | vexF2 | vex0F | vexWIG, 0x12}}, + {AVMOVDQA, yvex_vmovdqa, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0x6F, vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0x7F, vexNOVSR | vex256 | vex66 | vex0F | vexWIG, 0x6F, vexNOVSR | vex256 | vex66 | vex0F | vexWIG, 0x7F}}, + {AVMOVDQU, yvex_vmovdqa, Pvex, [23]uint8{vexNOVSR | vex128 | vexF3 | vex0F | vexWIG, 0x6F, vexNOVSR | vex128 | vexF3 | vex0F | vexWIG, 0x7F, vexNOVSR | vex256 | vexF3 | vex0F | vexWIG, 0x6F, vexNOVSR | vex256 | vexF3 | vex0F | vexWIG, 0x7F}}, + {AVMOVHLPS, yvex_xx3, Pvex, [23]uint8{vexNDS | vex128 | vex0F | vexWIG, 0x12}}, + {AVMOVHPD, yvex_vmovhpd, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0x17, vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x16}}, + {AVMOVHPS, yvex_vmovhpd, Pvex, [23]uint8{vexNOVSR | vex128 | vex0F | vexWIG, 0x17, vexNDS | vex128 | vex0F | vexWIG, 0x16}}, + {AVMOVLHPS, yvex_xx3, Pvex, [23]uint8{vexNDS | vex128 | vex0F | vexWIG, 
0x16}}, + {AVMOVLPD, yvex_vmovhpd, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0x13, vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x12}}, + {AVMOVLPS, yvex_vmovhpd, Pvex, [23]uint8{vexNOVSR | vex128 | vex0F | vexWIG, 0x13, vexNDS | vex128 | vex0F | vexWIG, 0x12}}, + {AVMOVMSKPD, yvex_xyr2, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0x50, vexNOVSR | vex256 | vex66 | vex0F | vexWIG, 0x50}}, + {AVMOVMSKPS, yvex_xyr2, Pvex, [23]uint8{vexNOVSR | vex128 | vex0F | vexWIG, 0x50, vexNOVSR | vex256 | vex0F | vexWIG, 0x50}}, + {AVMOVNTDQ, yvex_vmovntdq, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0xE7, vexNOVSR | vex256 | vex66 | vex0F | vexWIG, 0xE7}}, + {AVMOVNTDQA, yvex_mxy, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x2A, vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG, 0x2A}}, + {AVMOVNTPD, yvex_vmovntdq, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0x2B, vexNOVSR | vex256 | vex66 | vex0F | vexWIG, 0x2B}}, + {AVMOVNTPS, yvex_vmovntdq, Pvex, [23]uint8{vexNOVSR | vex128 | vex0F | vexWIG, 0x2B, vexNOVSR | vex256 | vex0F | vexWIG, 0x2B}}, + {AVMOVQ, yvex_vmovq, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexW1, 0x7E, vexNOVSR | vex128 | vexF3 | vex0F | vexWIG, 0x7E, vexNOVSR | vex128 | vex66 | vex0F | vexW1, 0x6E, vexNOVSR | vex128 | vexF3 | vex0F | vexWIG, 0x7E, vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0xD6}}, + {AVMOVSD, yvex_vmov, Pvex, [23]uint8{vexNOVSR | vexLIG | vexF2 | vex0F | vexWIG, 0x11, vexNOVSR | vexLIG | vexF2 | vex0F | vexWIG, 0x10, vexNDS | vexLIG | vexF2 | vex0F | vexWIG, 0x10, vexNDS | vexLIG | vexF2 | vex0F | vexWIG, 0x11}}, + {AVMOVSHDUP, yvex_xy2, Pvex, [23]uint8{vexNOVSR | vex128 | vexF3 | vex0F | vexWIG, 0x16, vexNOVSR | vex256 | vexF3 | vex0F | vexWIG, 0x16}}, + {AVMOVSLDUP, yvex_xy2, Pvex, [23]uint8{vexNOVSR | vex128 | vexF3 | vex0F | vexWIG, 0x12, vexNOVSR | vex256 | vexF3 | vex0F | vexWIG, 0x12}}, + {AVMOVSS, yvex_vmov, Pvex, [23]uint8{vexNOVSR | vexLIG | vexF3 
| vex0F | vexWIG, 0x11, vexNOVSR | vexLIG | vexF3 | vex0F | vexWIG, 0x10, vexNDS | vexLIG | vexF3 | vex0F | vexWIG, 0x10, vexNDS | vexLIG | vexF3 | vex0F | vexWIG, 0x11}}, + {AVMOVUPD, yvex_vmovdqa, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0x10, vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0x11, vexNOVSR | vex256 | vex66 | vex0F | vexWIG, 0x10, vexNOVSR | vex256 | vex66 | vex0F | vexWIG, 0x11}}, + {AVMOVUPS, yvex_vmovdqa, Pvex, [23]uint8{vexNOVSR | vex128 | vex0F | vexWIG, 0x10, vexNOVSR | vex128 | vex0F | vexWIG, 0x11, vexNOVSR | vex256 | vex0F | vexWIG, 0x10, vexNOVSR | vex256 | vex0F | vexWIG, 0x11}}, + {AVMPSADBW, yvex_vpalignr, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F3A | vexWIG, 0x42, vexNDS | vex256 | vex66 | vex0F3A | vexWIG, 0x42}}, + {AVMULPD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x59, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x59}}, + {AVMULPS, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex0F | vexWIG, 0x59, vexNDS | vex256 | vex0F | vexWIG, 0x59}}, + {AVMULSD, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF2 | vex0F | vexWIG, 0x59}}, + {AVMULSS, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF3 | vex0F | vexWIG, 0x59}}, + {AVORPD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x56, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x56}}, + {AVORPS, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex0F | vexWIG, 0x56, vexNDS | vex256 | vex0F | vexWIG, 0x56}}, + {AVPABSB, yvex_xy2, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x1C, vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG, 0x1C}}, + {AVPABSD, yvex_xy2, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x1E, vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG, 0x1E}}, + {AVPABSW, yvex_xy2, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x1D, vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG, 0x1D}}, + {AVPACKSSDW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x6B, vexNDS | vex256 | 
vex66 | vex0F | vexWIG, 0x6B}}, + {AVPACKSSWB, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x63, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x63}}, + {AVPACKUSDW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x2B, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x2B}}, + {AVPACKUSWB, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x67, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x67}}, + {AVPADDB, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xFC, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xFC}}, + {AVPADDD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xFE, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xFE}}, + {AVPADDQ, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xD4, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xD4}}, + {AVPADDSB, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xEC, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xEC}}, + {AVPADDSW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xED, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xED}}, + {AVPADDUSB, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xDC, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xDC}}, + {AVPADDUSW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xDD, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xDD}}, + {AVPADDW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xFD, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xFD}}, + {AVPALIGNR, yvex_vpalignr, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F3A | vexWIG, 0x0F, vexNDS | vex256 | vex66 | vex0F3A | vexWIG, 0x0F}}, + {AVPAND, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xDB, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xDB}}, + {AVPANDN, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xDF, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xDF}}, + {AVPAVGB, yvex_xy3, Pvex, 
[23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xE0, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xE0}}, + {AVPAVGW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xE3, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xE3}}, + {AVPBLENDD, yvex_vpalignr, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F3A | vexW0, 0x02, vexNDS | vex256 | vex66 | vex0F3A | vexW0, 0x02}}, + {AVPBLENDVB, yvex_xy4, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F3A | vexW0, 0x4C, vexNDS | vex256 | vex66 | vex0F3A | vexW0, 0x4C}}, + {AVPBLENDW, yvex_vpalignr, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F3A | vexWIG, 0x0E, vexNDS | vex256 | vex66 | vex0F3A | vexWIG, 0x0E}}, + {AVPBROADCASTB, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexW0, 0x78, vexNOVSR | vex256 | vex66 | vex0F38 | vexW0, 0x78}}, + {AVPBROADCASTD, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexW0, 0x58, vexNOVSR | vex256 | vex66 | vex0F38 | vexW0, 0x58}}, + {AVPBROADCASTQ, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexW0, 0x59, vexNOVSR | vex256 | vex66 | vex0F38 | vexW0, 0x59}}, + {AVPBROADCASTW, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexW0, 0x79, vexNOVSR | vex256 | vex66 | vex0F38 | vexW0, 0x79}}, + {AVPCLMULQDQ, yvex_xxi4, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F3A | vexWIG, 0x44}}, + {AVPCMPEQB, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x74, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x74}}, + {AVPCMPEQD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x76, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x76}}, + {AVPCMPEQQ, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x29, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x29}}, + {AVPCMPEQW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x75, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x75}}, + {AVPCMPESTRI, yvex_xi3, Pvex, 
[23]uint8{vexNOVSR | vex128 | vex66 | vex0F3A | vexWIG, 0x61}}, + {AVPCMPESTRM, yvex_xi3, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F3A | vexWIG, 0x60}}, + {AVPCMPGTB, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x64, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x64}}, + {AVPCMPGTD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x66, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x66}}, + {AVPCMPGTQ, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x37, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x37}}, + {AVPCMPGTW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x65, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x65}}, + {AVPCMPISTRI, yvex_xi3, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F3A | vexWIG, 0x63}}, + {AVPCMPISTRM, yvex_xi3, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F3A | vexWIG, 0x62}}, + {AVPERM2F128, yvex_yyi4, Pvex, [23]uint8{vexNDS | vex256 | vex66 | vex0F3A | vexW0, 0x06}}, + {AVPERM2I128, yvex_yyi4, Pvex, [23]uint8{vexNDS | vex256 | vex66 | vex0F3A | vexW0, 0x46}}, + {AVPERMD, yvex_yy3, Pvex, [23]uint8{vexNDS | vex256 | vex66 | vex0F38 | vexW0, 0x36}}, + {AVPERMILPD, yvex_vpermilp, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F3A | vexW0, 0x05, vexNDS | vex128 | vex66 | vex0F38 | vexW0, 0x0D, vexNOVSR | vex256 | vex66 | vex0F3A | vexW0, 0x05, vexNDS | vex256 | vex66 | vex0F38 | vexW0, 0x0D}}, + {AVPERMILPS, yvex_vpermilp, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F3A | vexW0, 0x04, vexNDS | vex128 | vex66 | vex0F38 | vexW0, 0x0C, vexNOVSR | vex256 | vex66 | vex0F3A | vexW0, 0x04, vexNDS | vex256 | vex66 | vex0F38 | vexW0, 0x0C}}, + {AVPERMPD, yvex_vpermpd, Pvex, [23]uint8{vexNOVSR | vex256 | vex66 | vex0F3A | vexW1, 0x01}}, + {AVPERMPS, yvex_yy3, Pvex, [23]uint8{vexNDS | vex256 | vex66 | vex0F38 | vexW0, 0x16}}, + {AVPERMQ, yvex_vpermpd, Pvex, [23]uint8{vexNOVSR | vex256 | vex66 | vex0F3A | vexW1, 0x00}}, + {AVPEXTRB, yvex_vpextr, Pvex, 
[23]uint8{vexNOVSR | vex128 | vex66 | vex0F3A | vexW0, 0x14}}, + {AVPEXTRD, yvex_vpextr, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F3A | vexW0, 0x16}}, + {AVPEXTRQ, yvex_vpextr, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F3A | vexW1, 0x16}}, + {AVPEXTRW, yvex_vpextrw, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexW0, 0xC5, vexNOVSR | vex128 | vex66 | vex0F3A | vexW0, 0x15}}, + {AVPHADDD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x02, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x02}}, + {AVPHADDSW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x03, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x03}}, + {AVPHADDW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x01, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x01}}, + {AVPHMINPOSUW, yvex_x2, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x41}}, + {AVPHSUBD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x06, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x06}}, + {AVPHSUBSW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x07, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x07}}, + {AVPHSUBW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x05, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x05}}, + {AVPINSRB, yvex_rxi4, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F3A | vexW0, 0x20}}, + {AVPINSRD, yvex_rxi4, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F3A | vexW0, 0x22}}, + {AVPINSRQ, yvex_rxi4, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F3A | vexW1, 0x22}}, + {AVPINSRW, yvex_rxi4, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexW0, 0xC4}}, + {AVPMADDUBSW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x04, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x04}}, + {AVPMADDWD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xF5, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xF5}}, + 
{AVPMASKMOVD, yvex_vblendvpd, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexW0, 0x8E, vexNDS | vex256 | vex66 | vex0F38 | vexW0, 0x8E, vexNDS | vex128 | vex66 | vex0F38 | vexW0, 0x8C, vexNDS | vex256 | vex66 | vex0F38 | vexW0, 0x8C}}, + {AVPMASKMOVQ, yvex_vblendvpd, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexW1, 0x8E, vexNDS | vex256 | vex66 | vex0F38 | vexW1, 0x8E, vexNDS | vex128 | vex66 | vex0F38 | vexW1, 0x8C, vexNDS | vex256 | vex66 | vex0F38 | vexW1, 0x8C}}, + {AVPMAXSB, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x3C, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x3C}}, + {AVPMAXSD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x3D, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x3D}}, + {AVPMAXSW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xEE, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xEE}}, + {AVPMAXUB, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xDE, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xDE}}, + {AVPMAXUD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x3F, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x3F}}, + {AVPMAXUW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x3E, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x3E}}, + {AVPMINSB, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x38, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x38}}, + {AVPMINSD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x39, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x39}}, + {AVPMINSW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xEA, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xEA}}, + {AVPMINUB, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xDA, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xDA}}, + {AVPMINUD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x3B, vexNDS | vex256 | vex66 | 
vex0F38 | vexWIG, 0x3B}}, + {AVPMINUW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x3A, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x3A}}, + {AVPMOVMSKB, yvex_xyr2, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0xD7, vexNOVSR | vex256 | vex66 | vex0F | vexWIG, 0xD7}}, + {AVPMOVSXBD, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x21, vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG, 0x21}}, + {AVPMOVSXBQ, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x22, vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG, 0x22}}, + {AVPMOVSXBW, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x20, vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG, 0x20}}, + {AVPMOVSXDQ, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x25, vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG, 0x25}}, + {AVPMOVSXWD, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x23, vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG, 0x23}}, + {AVPMOVSXWQ, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x24, vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG, 0x24}}, + {AVPMOVZXBD, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x31, vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG, 0x31}}, + {AVPMOVZXBQ, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x32, vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG, 0x32}}, + {AVPMOVZXBW, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x30, vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG, 0x30}}, + {AVPMOVZXDQ, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x35, vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG, 0x35}}, + {AVPMOVZXWD, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x33, vexNOVSR | vex256 | vex66 | vex0F38 | 
vexWIG, 0x33}}, + {AVPMOVZXWQ, yvex_vpbroadcast, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x34, vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG, 0x34}}, + {AVPMULDQ, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x28, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x28}}, + {AVPMULHRSW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x0B, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x0B}}, + {AVPMULHUW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xE4, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xE4}}, + {AVPMULHW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xE5, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xE5}}, + {AVPMULLD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x40, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x40}}, + {AVPMULLW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xD5, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xD5}}, + {AVPMULUDQ, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xF4, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xF4}}, + {AVPOR, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xEB, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xEB}}, + {AVPSADBW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xF6, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xF6}}, + {AVPSHUFB, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x00, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x00}}, + {AVPSHUFD, yvex_xyi3, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0x70, vexNOVSR | vex256 | vex66 | vex0F | vexWIG, 0x70, vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0x70, vexNOVSR | vex256 | vex66 | vex0F | vexWIG, 0x70}}, + {AVPSHUFHW, yvex_xyi3, Pvex, [23]uint8{vexNOVSR | vex128 | vexF3 | vex0F | vexWIG, 0x70, vexNOVSR | vex256 | vexF3 | vex0F | vexWIG, 0x70}}, + {AVPSHUFLW, yvex_xyi3, Pvex, [23]uint8{vexNOVSR | 
vex128 | vexF2 | vex0F | vexWIG, 0x70, vexNOVSR | vex256 | vexF2 | vex0F | vexWIG, 0x70}}, + {AVPSIGNB, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x08, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x08}}, + {AVPSIGND, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x0A, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x0A}}, + {AVPSIGNW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexWIG, 0x09, vexNDS | vex256 | vex66 | vex0F38 | vexWIG, 0x09}}, + {AVPSLLD, yvex_shift, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x72, 0xF0, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x72, 0xF0, vexNDD | vex128 | vex66 | vex0F | vexWIG, 0xF2, vexNDD | vex256 | vex66 | vex0F | vexWIG, 0xF2}}, + {AVPSLLDQ, yvex_shift_dq, Pvex, [23]uint8{vexNDD | vex128 | vex66 | vex0F | vexWIG, 0x73, 0xF8, vexNDD | vex256 | vex66 | vex0F | vexWIG, 0x73, 0xF8}}, + {AVPSLLQ, yvex_shift, Pvex, [23]uint8{vexNDD | vex128 | vex66 | vex0F | vexWIG, 0x73, 0xF0, vexNDD | vex256 | vex66 | vex0F | vexWIG, 0x73, 0xF0, vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xF3, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xF3}}, + {AVPSLLVD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexW0, 0x47, vexNDS | vex256 | vex66 | vex0F38 | vexW0, 0x47}}, + {AVPSLLVQ, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexW1, 0x47, vexNDS | vex256 | vex66 | vex0F38 | vexW1, 0x47}}, + {AVPSLLW, yvex_vps, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xF1, vexNDD | vex128 | vex66 | vex0F | vexWIG, 0x71, 0xF0, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xF1, vexNDD | vex256 | vex66 | vex0F | vexWIG, 0x71, 0xF0}}, + {AVPSRAD, yvex_vps, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xE2, vexNDD | vex128 | vex66 | vex0F | vexWIG, 0x72, 0xE0, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xE2, vexNDD | vex256 | vex66 | vex0F | vexWIG, 0x72, 0xE0}}, + {AVPSRAVD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 
| vexW0, 0x46, vexNDS | vex256 | vex66 | vex0F38 | vexW0, 0x46}}, + {AVPSRAW, yvex_vps, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xE1, vexNDD | vex128 | vex66 | vex0F | vexWIG, 0x71, 0xE0, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xE1, vexNDD | vex256 | vex66 | vex0F | vexWIG, 0x71, 0xE0}}, + {AVPSRLD, yvex_shift, Pvex, [23]uint8{vexNDD | vex128 | vex66 | vex0F | vexWIG, 0x72, 0xD0, vexNDD | vex256 | vex66 | vex0F | vexWIG, 0x72, 0xD0, vexNDD | vex128 | vex66 | vex0F | vexWIG, 0xD2, vexNDD | vex256 | vex66 | vex0F | vexWIG, 0xD2}}, + {AVPSRLDQ, yvex_shift_dq, Pvex, [23]uint8{vexNDD | vex128 | vex66 | vex0F | vexWIG, 0x73, 0xD8, vexNDD | vex256 | vex66 | vex0F | vexWIG, 0x73, 0xD8}}, + {AVPSRLQ, yvex_shift, Pvex, [23]uint8{vexNDD | vex128 | vex66 | vex0F | vexWIG, 0x73, 0xD0, vexNDD | vex256 | vex66 | vex0F | vexWIG, 0x73, 0xD0, vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xD3, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xD3}}, + {AVPSRLVD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexW0, 0x45, vexNDS | vex256 | vex66 | vex0F38 | vexW0, 0x45}}, + {AVPSRLVQ, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F38 | vexW1, 0x45, vexNDS | vex256 | vex66 | vex0F38 | vexW1, 0x45}}, + {AVPSRLW, yvex_vps, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xD1, vexNDD | vex128 | vex66 | vex0F | vexWIG, 0x71, 0xD0, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xD1, vexNDD | vex256 | vex66 | vex0F | vexWIG, 0x71, 0xD0}}, + {AVPSUBB, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xF8, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xF8}}, + {AVPSUBD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xFA, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xFA}}, + {AVPSUBQ, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xFB, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xFB}}, + {AVPSUBSB, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xE8, vexNDS | vex256 | vex66 | 
vex0F | vexWIG, 0xE8}}, + {AVPSUBSW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xE9, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xE9}}, + {AVPSUBUSB, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xD8, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xD8}}, + {AVPSUBUSW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xD9, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xD9}}, + {AVPSUBW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xF9, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xF9}}, + {AVPTEST, yvex_xy2, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexWIG, 0x17, vexNOVSR | vex256 | vex66 | vex0F38 | vexWIG, 0x17}}, + {AVPUNPCKHBW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x68, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x68}}, + {AVPUNPCKHDQ, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x6A, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x6A}}, + {AVPUNPCKHQDQ, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x6D, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x6D}}, + {AVPUNPCKHWD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x69, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x69}}, + {AVPUNPCKLBW, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x60, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x60}}, + {AVPUNPCKLDQ, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x62, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x62}}, + {AVPUNPCKLQDQ, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x6C, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x6C}}, + {AVPUNPCKLWD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x61, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x61}}, + {AVPXOR, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xEF, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xEF}}, + {AVRCPPS, yvex_xy2, 
Pvex, [23]uint8{vexNOVSR | vex128 | vex0F | vexWIG, 0x53, vexNOVSR | vex256 | vex0F | vexWIG, 0x53}}, + {AVRCPSS, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF3 | vex0F | vexWIG, 0x53}}, + {AVROUNDPD, yvex_xyi3, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F3A | vexWIG, 0x09, vexNOVSR | vex256 | vex66 | vex0F3A | vexWIG, 0x09}}, + {AVROUNDPS, yvex_xyi3, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F3A | vexWIG, 0x08, vexNOVSR | vex256 | vex66 | vex0F3A | vexWIG, 0x08}}, + {AVROUNDSD, yvex_xxi4, Pvex, [23]uint8{vexNDS | vexLIG | vex66 | vex0F3A | vexWIG, 0x0B}}, + {AVROUNDSS, yvex_xxi4, Pvex, [23]uint8{vexNDS | vexLIG | vex66 | vex0F3A | vexWIG, 0x0A}}, + {AVRSQRTPS, yvex_xy2, Pvex, [23]uint8{vexNOVSR | vex128 | vex0F | vexWIG, 0x52, vexNOVSR | vex256 | vex0F | vexWIG, 0x52}}, + {AVRSQRTSS, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF3 | vex0F | vexWIG, 0x52}}, + {AVSHUFPD, yvex_vpalignr, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0xC6, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0xC6}}, + {AVSHUFPS, yvex_vpalignr, Pvex, [23]uint8{vexNDS | vex128 | vex0F | vexWIG, 0xC6, vexNDS | vex256 | vex0F | vexWIG, 0xC6}}, + {AVSQRTPD, yvex_xy2, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F | vexWIG, 0x51, vexNOVSR | vex256 | vex66 | vex0F | vexWIG, 0x51}}, + {AVSQRTPS, yvex_xy2, Pvex, [23]uint8{vexNOVSR | vex128 | vex0F | vexWIG, 0x51, vexNOVSR | vex256 | vex0F | vexWIG, 0x51}}, + {AVSQRTSD, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF2 | vex0F | vexWIG, 0x51}}, + {AVSQRTSS, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF3 | vex0F | vexWIG, 0x51}}, + {AVSTMXCSR, yvex_m, Pvex, [23]uint8{vexNOVSR | vexLZ | vex0F | vexWIG, 0xAE, 03}}, + {AVSUBPD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x5C, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x5C}}, + {AVSUBPS, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex0F | vexWIG, 0x5C, vexNDS | vex256 | vex0F | vexWIG, 0x5C}}, + {AVSUBSD, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF2 | 
vex0F | vexWIG, 0x5C}}, + {AVSUBSS, yvex_x3, Pvex, [23]uint8{vexNDS | vexLIG | vexF3 | vex0F | vexWIG, 0x5C}}, + {AVTESTPD, yvex_xy2, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexW0, 0x0F, vexNOVSR | vex256 | vex66 | vex0F38 | vexW0, 0x0F}}, + {AVTESTPS, yvex_xy2, Pvex, [23]uint8{vexNOVSR | vex128 | vex66 | vex0F38 | vexW0, 0x0E, vexNOVSR | vex256 | vex66 | vex0F38 | vexW0, 0x0E}}, + {AVUCOMISD, yvex_x2, Pvex, [23]uint8{vexNOVSR | vexLIG | vex66 | vex0F | vexWIG, 0x2E}}, + {AVUCOMISS, yvex_x2, Pvex, [23]uint8{vexNOVSR | vexLIG | vex0F | vexWIG, 0x2E}}, + {AVUNPCKHPD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x15, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x15}}, + {AVUNPCKHPS, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex0F | vexWIG, 0x15, vexNDS | vex256 | vex0F | vexWIG, 0x15}}, + {AVUNPCKLPD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x14, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x14}}, + {AVUNPCKLPS, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex0F | vexWIG, 0x14, vexNDS | vex256 | vex0F | vexWIG, 0x14}}, + {AVXORPD, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex66 | vex0F | vexWIG, 0x57, vexNDS | vex256 | vex66 | vex0F | vexWIG, 0x57}}, + {AVXORPS, yvex_xy3, Pvex, [23]uint8{vexNDS | vex128 | vex0F | vexWIG, 0x57, vexNDS | vex256 | vex0F | vexWIG, 0x57}}, + {AVZEROALL, yvex, Pvex, [23]uint8{vexNOVSR | vex256 | vex0F | vexWIG, 0x77}}, + {AVZEROUPPER, yvex, Pvex, [23]uint8{vexNOVSR | vex128 | vex0F | vexWIG, 0x77}}, +} diff --git a/src/cmd/internal/obj/x86/ytab.go b/src/cmd/internal/obj/x86/ytab.go new file mode 100644 index 00000000000..dbbef4730e8 --- /dev/null +++ b/src/cmd/internal/obj/x86/ytab.go @@ -0,0 +1,40 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package x86 + +type argList [6]uint8 + +type ytab struct { + zcase uint8 + zoffset uint8 + + // Last arg is usually destination. + // For unary instructions unaryDst is used to determine + // if single argument is a source or destination. + args argList +} + +// Returns true if yt is compatible with args. +// +// Elements from args and yt.args are used to +// to index ycover table like `ycover[args[i]+yt.args[i]]`. +// This means that args should contain values that already +// multiplied by Ymax. +func (yt *ytab) match(args []int) bool { + // Trailing Yxxx check is required to avoid a case + // where shorter arg list is matched. + // If we had exact yt.args length, it could be `yt.argc != len(args)`. + if len(args) < len(yt.args) && yt.args[len(args)] != Yxxx { + return false + } + + for i := range args { + if ycover[args[i]+int(yt.args[i])] == 0 { + return false + } + } + + return true +} diff --git a/src/cmd/internal/objabi/autotype.go b/src/cmd/internal/objabi/autotype.go index 17c42931310..1b46b0ffece 100644 --- a/src/cmd/internal/objabi/autotype.go +++ b/src/cmd/internal/objabi/autotype.go @@ -34,4 +34,5 @@ package objabi const ( A_AUTO = 1 + iota A_PARAM + A_DELETED_AUTO ) diff --git a/src/cmd/internal/objabi/flag.go b/src/cmd/internal/objabi/flag.go index e349b413249..1bd4bc9063a 100644 --- a/src/cmd/internal/objabi/flag.go +++ b/src/cmd/internal/objabi/flag.go @@ -9,30 +9,13 @@ import ( "fmt" "os" "strconv" + "strings" ) -func Flagfn2(string, string, func(string, string)) { panic("flag") } - func Flagcount(name, usage string, val *int) { flag.Var((*count)(val), name, usage) } -func Flagint32(name, usage string, val *int32) { - flag.Var((*int32Value)(val), name, usage) -} - -func Flagint64(name, usage string, val *int64) { - flag.Int64Var(val, name, *val, usage) -} - -func Flagstr(name, usage string, val *string) { - flag.StringVar(val, name, *val, usage) -} - -func Flagfn0(name, usage string, f func()) { - flag.Var(fn0(f), name, usage) -} - func 
Flagfn1(name, usage string, f func(string)) { flag.Var(fn1(f), name, usage) } @@ -49,6 +32,44 @@ func Flagparse(usage func()) { flag.Parse() } +func AddVersionFlag() { + flag.Var(versionFlag{}, "V", "print version and exit") +} + +var buildID string // filled in by linker + +type versionFlag struct{} + +func (versionFlag) IsBoolFlag() bool { return true } +func (versionFlag) Get() interface{} { return nil } +func (versionFlag) String() string { return "" } +func (versionFlag) Set(s string) error { + name := os.Args[0] + name = name[strings.LastIndex(name, `/`)+1:] + name = name[strings.LastIndex(name, `\`)+1:] + name = strings.TrimSuffix(name, ".exe") + p := Expstring() + if p == DefaultExpstring() { + p = "" + } + sep := "" + if p != "" { + sep = " " + } + + // The go command invokes -V=full to get a unique identifier + // for this tool. It is assumed that the release version is sufficient + // for releases, but during development we include the full + // build ID of the binary, so that if the compiler is changed and + // rebuilt, we notice and rebuild all packages. + if s == "full" && strings.HasPrefix(Version, "devel") { + p += " buildID=" + buildID + } + fmt.Printf("%s version %s%s%s\n", name, Version, sep, p) + os.Exit(0) + return nil +} + // count is a flag.Value that is like a flag.Bool and a flag.Int. // If used as -name, it increments the count, but -name=x sets the count. // Used for verbose flag -v. 
@@ -74,22 +95,18 @@ func (c *count) Set(s string) error { return nil } +func (c *count) Get() interface{} { + return int(*c) +} + func (c *count) IsBoolFlag() bool { return true } -type int32Value int32 - -func (i *int32Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = int32Value(v) - return err +func (c *count) IsCountFlag() bool { + return true } -func (i *int32Value) Get() interface{} { return int32(*i) } - -func (i *int32Value) String() string { return fmt.Sprint(*i) } - type fn0 func() func (f fn0) Set(s string) error { diff --git a/src/cmd/internal/objabi/reloctype.go b/src/cmd/internal/objabi/reloctype.go index 179f049de7d..2e0b916f7c1 100644 --- a/src/cmd/internal/objabi/reloctype.go +++ b/src/cmd/internal/objabi/reloctype.go @@ -99,8 +99,15 @@ const ( // of a JMP instruction, by encoding the address into the instruction. // The stack nosplit check ignores this since it is not a function call. R_JMPMIPS - // R_DWARFREF resolves to the offset of the symbol from its section. - R_DWARFREF + + // R_DWARFSECREF resolves to the offset of the symbol from its section. + // Target of relocation must be size 4 (in current implementation). + R_DWARFSECREF + + // R_DWARFFILEREF resolves to an index into the DWARF .debug_line + // file table for the specified file symbol. Must be applied to an + // attribute of form DW_FORM_data4. + R_DWARFFILEREF // Platform dependent relocations. Architectures with fixed width instructions // have the inherent issue that a 32-bit (or 64-bit!) displacement cannot be @@ -183,6 +190,9 @@ const ( // R_ADDRMIPSTLS (only used on mips64) resolves to the low 16 bits of a TLS // address (offset from thread pointer), by encoding it into the instruction. R_ADDRMIPSTLS + // R_ADDRCUOFF resolves to a pointer-sized offset from the start of the + // symbol's DWARF compile unit. + R_ADDRCUOFF ) // IsDirectJump returns whether r is a relocation for a direct jump. 
diff --git a/src/cmd/internal/objabi/reloctype_string.go b/src/cmd/internal/objabi/reloctype_string.go index 182d03f78c1..a6efe9cad04 100644 --- a/src/cmd/internal/objabi/reloctype_string.go +++ b/src/cmd/internal/objabi/reloctype_string.go @@ -4,9 +4,9 @@ package objabi import "fmt" -const _RelocType_name = "R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDROFFR_WEAKADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_METHODOFFR_POWER_TOCR_GOTPCRELR_JMPMIPSR_DWARFREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_PCRELDBLR_ADDRMIPSUR_ADDRMIPSTLS" +const _RelocType_name = "R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDROFFR_WEAKADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_METHODOFFR_POWER_TOCR_GOTPCRELR_JMPMIPSR_DWARFSECREFR_DWARFFILEREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_PCRELDBLR_ADDRMIPSUR_ADDRMIPSTLSR_ADDRCUOFF" -var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 47, 60, 66, 72, 81, 92, 101, 112, 122, 129, 136, 144, 152, 160, 166, 172, 178, 188, 197, 208, 219, 229, 238, 248, 262, 276, 292, 306, 320, 331, 345, 360, 377, 395, 416, 426, 437, 450} +var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 47, 60, 66, 72, 81, 92, 101, 112, 122, 129, 136, 144, 152, 160, 166, 172, 178, 188, 197, 208, 219, 229, 238, 251, 265, 279, 293, 309, 323, 337, 348, 362, 377, 394, 412, 433, 443, 454, 467, 478} func (i RelocType) String() string { i -= 1 diff --git a/src/cmd/internal/objabi/symkind.go b/src/cmd/internal/objabi/symkind.go index b037e9e4ed1..ea180d0bf86 100644 --- a/src/cmd/internal/objabi/symkind.go +++ 
b/src/cmd/internal/objabi/symkind.go @@ -52,9 +52,10 @@ const ( SBSS // Statically data that is initially all 0s and does not contain pointers SNOPTRBSS - // Thread-local data that is initally all 0s + // Thread-local data that is initially all 0s STLSBSS // Debugging data SDWARFINFO SDWARFRANGE + SDWARFLOC ) diff --git a/src/cmd/internal/objabi/symkind_string.go b/src/cmd/internal/objabi/symkind_string.go index 5123dc7097f..3064c8ee051 100644 --- a/src/cmd/internal/objabi/symkind_string.go +++ b/src/cmd/internal/objabi/symkind_string.go @@ -4,9 +4,9 @@ package objabi import "fmt" -const _SymKind_name = "SxxxSTEXTSRODATASNOPTRDATASDATASBSSSNOPTRBSSSTLSBSSSDWARFINFOSDWARFRANGE" +const _SymKind_name = "SxxxSTEXTSRODATASNOPTRDATASDATASBSSSNOPTRBSSSTLSBSSSDWARFINFOSDWARFRANGESDWARFLOC" -var _SymKind_index = [...]uint8{0, 4, 9, 16, 26, 31, 35, 44, 51, 61, 72} +var _SymKind_index = [...]uint8{0, 4, 9, 16, 26, 31, 35, 44, 51, 61, 72, 81} func (i SymKind) String() string { if i >= SymKind(len(_SymKind_index)-1) { diff --git a/src/cmd/internal/objabi/util.go b/src/cmd/internal/objabi/util.go index 1da05021f50..f8949e05a2b 100644 --- a/src/cmd/internal/objabi/util.go +++ b/src/cmd/internal/objabi/util.go @@ -24,6 +24,7 @@ var ( GOOS = envOr("GOOS", defaultGOOS) GO386 = envOr("GO386", defaultGO386) GOARM = goarm() + GOMIPS = gomips() Version = version ) @@ -41,6 +42,15 @@ func goarm() int { panic("unreachable") } +func gomips() string { + switch v := envOr("GOMIPS", defaultGOMIPS); v { + case "hardfloat", "softfloat": + return v + } + log.Fatalf("Invalid GOMIPS value. 
Must be hardfloat or softfloat.") + panic("unreachable") +} + func Getgoextlinkenabled() string { return envOr("GO_EXTLINK_ENABLED", defaultGO_EXTLINK_ENABLED) } diff --git a/src/cmd/internal/objfile/disasm.go b/src/cmd/internal/objfile/disasm.go index d61cb27182d..6ddf8d6cd7d 100644 --- a/src/cmd/internal/objfile/disasm.go +++ b/src/cmd/internal/objfile/disasm.go @@ -22,6 +22,7 @@ import ( "text/tabwriter" "golang.org/x/arch/arm/armasm" + "golang.org/x/arch/arm64/arm64asm" "golang.org/x/arch/ppc64/ppc64asm" "golang.org/x/arch/x86/x86asm" ) @@ -39,23 +40,23 @@ type Disasm struct { } // Disasm returns a disassembler for the file f. -func (f *File) Disasm() (*Disasm, error) { - syms, err := f.Symbols() +func (e *Entry) Disasm() (*Disasm, error) { + syms, err := e.Symbols() if err != nil { return nil, err } - pcln, err := f.PCLineTable() + pcln, err := e.PCLineTable() if err != nil { return nil, err } - textStart, textBytes, err := f.Text() + textStart, textBytes, err := e.Text() if err != nil { return nil, err } - goarch := f.GOARCH() + goarch := e.GOARCH() disasm := disasms[goarch] byteOrder := byteOrders[goarch] if disasm == nil || byteOrder == nil { @@ -242,7 +243,7 @@ func (d *Disasm) Print(w io.Writer, filter *regexp.Regexp, start, end uint64, pr fmt.Fprintf(tw, " %s:%d\t%#x\t", base(file), line, pc) } - if size%4 != 0 || d.goarch == "386" || d.goarch == "amd64" { + if size%4 != 0 || d.goarch == "386" || d.goarch == "amd64" || d.goarch == "amd64p32" { // Print instruction as bytes. 
fmt.Fprintf(tw, "%x", code[i:i+size]) } else { @@ -291,7 +292,7 @@ func (d *Disasm) Decode(start, end uint64, relocs []Reloc, f func(pc, size uint6 } } -type lookupFunc func(addr uint64) (sym string, base uint64) +type lookupFunc = func(addr uint64) (sym string, base uint64) type disasmFunc func(code []byte, pc uint64, lookup lookupFunc, ord binary.ByteOrder) (text string, size int) func disasm_386(code []byte, pc uint64, lookup lookupFunc, _ binary.ByteOrder) (string, int) { @@ -303,7 +304,7 @@ func disasm_amd64(code []byte, pc uint64, lookup lookupFunc, _ binary.ByteOrder) } func disasm_x86(code []byte, pc uint64, lookup lookupFunc, arch int) (string, int) { - inst, err := x86asm.Decode(code, 64) + inst, err := x86asm.Decode(code, arch) var text string size := inst.Len if err != nil || size == 0 || inst.Op == 0 { @@ -348,6 +349,17 @@ func disasm_arm(code []byte, pc uint64, lookup lookupFunc, _ binary.ByteOrder) ( return text, size } +func disasm_arm64(code []byte, pc uint64, lookup lookupFunc, byteOrder binary.ByteOrder) (string, int) { + inst, err := arm64asm.Decode(code) + var text string + if err != nil || inst.Op == 0 { + text = "?" 
+ } else { + text = arm64asm.GoSyntax(inst, pc, lookup, textReader{code, pc}) + } + return text, 4 +} + func disasm_ppc64(code []byte, pc uint64, lookup lookupFunc, byteOrder binary.ByteOrder) (string, int) { inst, err := ppc64asm.Decode(code, byteOrder) var text string @@ -362,20 +374,24 @@ func disasm_ppc64(code []byte, pc uint64, lookup lookupFunc, byteOrder binary.By } var disasms = map[string]disasmFunc{ - "386": disasm_386, - "amd64": disasm_amd64, - "arm": disasm_arm, - "ppc64": disasm_ppc64, - "ppc64le": disasm_ppc64, + "386": disasm_386, + "amd64": disasm_amd64, + "amd64p32": disasm_amd64, + "arm": disasm_arm, + "arm64": disasm_arm64, + "ppc64": disasm_ppc64, + "ppc64le": disasm_ppc64, } var byteOrders = map[string]binary.ByteOrder{ - "386": binary.LittleEndian, - "amd64": binary.LittleEndian, - "arm": binary.LittleEndian, - "ppc64": binary.BigEndian, - "ppc64le": binary.LittleEndian, - "s390x": binary.BigEndian, + "386": binary.LittleEndian, + "amd64": binary.LittleEndian, + "amd64p32": binary.LittleEndian, + "arm": binary.LittleEndian, + "arm64": binary.LittleEndian, + "ppc64": binary.BigEndian, + "ppc64le": binary.LittleEndian, + "s390x": binary.BigEndian, } type Liner interface { diff --git a/src/cmd/internal/objfile/elf.go b/src/cmd/internal/objfile/elf.go index 4ab7e6deb81..7d5162a1e89 100644 --- a/src/cmd/internal/objfile/elf.go +++ b/src/cmd/internal/objfile/elf.go @@ -11,14 +11,14 @@ import ( "debug/elf" "encoding/binary" "fmt" - "os" + "io" ) type elfFile struct { elf *elf.File } -func openElf(r *os.File) (rawFile, error) { +func openElf(r io.ReaderAt) (rawFile, error) { f, err := elf.NewFile(r) if err != nil { return nil, err @@ -99,6 +99,8 @@ func (f *elfFile) goarch() string { return "amd64" case elf.EM_ARM: return "arm" + case elf.EM_AARCH64: + return "arm64" case elf.EM_PPC64: if f.elf.ByteOrder == binary.LittleEndian { return "ppc64le" diff --git a/src/cmd/internal/objfile/goobj.go b/src/cmd/internal/objfile/goobj.go index 
e075604abdf..51fa6e873fc 100644 --- a/src/cmd/internal/objfile/goobj.go +++ b/src/cmd/internal/objfile/goobj.go @@ -22,12 +22,33 @@ type goobjFile struct { f *os.File // the underlying .o or .a file } -func openGoobj(r *os.File) (rawFile, error) { +func openGoFile(r *os.File) (*File, error) { f, err := goobj.Parse(r, `""`) if err != nil { return nil, err } - return &goobjFile{goobj: f, f: r}, nil + rf := &goobjFile{goobj: f, f: r} + if len(f.Native) == 0 { + return &File{r, []*Entry{&Entry{raw: rf}}}, nil + } + entries := make([]*Entry, len(f.Native)+1) + entries[0] = &Entry{ + raw: rf, + } +L: + for i, nr := range f.Native { + for _, try := range openers { + if raw, err := try(nr); err == nil { + entries[i+1] = &Entry{ + name: nr.Name, + raw: raw, + } + continue L + } + } + return nil, fmt.Errorf("open %s: unrecognized archive member %s", r.Name(), nr.Name) + } + return &File{r, entries}, nil } func goobjName(id goobj.SymID) string { @@ -81,7 +102,7 @@ func (f *goobjFile) symbols() ([]Sym, error) { } func (f *goobjFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) { - // Should never be called. We implement Liner below, callers + // Should never be called. We implement Liner below, callers // should use that instead. return 0, nil, nil, fmt.Errorf("pcln not available in go object file") } @@ -90,7 +111,7 @@ func (f *goobjFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) // Returns "",0,nil if unknown. // This function implements the Liner interface in preference to pcln() above. func (f *goobjFile) PCToLine(pc uint64) (string, int, *gosym.Func) { - // TODO: this is really inefficient. Binary search? Memoize last result? + // TODO: this is really inefficient. Binary search? Memoize last result? 
var arch *sys.Arch for _, a := range sys.Archs { if a.Name == f.goobj.Arch { diff --git a/src/cmd/internal/objfile/macho.go b/src/cmd/internal/objfile/macho.go index 1d22a09b13d..7a8999e5ba3 100644 --- a/src/cmd/internal/objfile/macho.go +++ b/src/cmd/internal/objfile/macho.go @@ -10,7 +10,7 @@ import ( "debug/dwarf" "debug/macho" "fmt" - "os" + "io" "sort" ) @@ -20,7 +20,7 @@ type machoFile struct { macho *macho.File } -func openMacho(r *os.File) (rawFile, error) { +func openMacho(r io.ReaderAt) (rawFile, error) { f, err := macho.NewFile(r) if err != nil { return nil, err @@ -30,7 +30,7 @@ func openMacho(r *os.File) (rawFile, error) { func (f *machoFile) symbols() ([]Sym, error) { if f.macho.Symtab == nil { - return nil, fmt.Errorf("missing symbol table") + return nil, nil } // Build sorted list of addresses of all symbols. diff --git a/src/cmd/internal/objfile/objfile.go b/src/cmd/internal/objfile/objfile.go index 2bf6363f292..10307be0721 100644 --- a/src/cmd/internal/objfile/objfile.go +++ b/src/cmd/internal/objfile/objfile.go @@ -9,6 +9,7 @@ import ( "debug/dwarf" "debug/gosym" "fmt" + "io" "os" "sort" ) @@ -24,8 +25,13 @@ type rawFile interface { // A File is an opened executable file. type File struct { - r *os.File - raw rawFile + r *os.File + entries []*Entry +} + +type Entry struct { + name string + raw rawFile } // A Sym is a symbol defined in an executable file. 
@@ -50,9 +56,8 @@ type RelocStringer interface { String(insnOffset uint64) string } -var openers = []func(*os.File) (rawFile, error){ +var openers = []func(io.ReaderAt) (rawFile, error){ openElf, - openGoobj, openMacho, openPE, openPlan9, @@ -65,9 +70,12 @@ func Open(name string) (*File, error) { if err != nil { return nil, err } + if f, err := openGoFile(r); err == nil { + return f, nil + } for _, try := range openers { if raw, err := try(r); err == nil { - return &File{r, raw}, nil + return &File{r, []*Entry{&Entry{raw: raw}}}, nil } } r.Close() @@ -78,8 +86,44 @@ func (f *File) Close() error { return f.r.Close() } +func (f *File) Entries() []*Entry { + return f.entries +} + func (f *File) Symbols() ([]Sym, error) { - syms, err := f.raw.symbols() + return f.entries[0].Symbols() +} + +func (f *File) PCLineTable() (Liner, error) { + return f.entries[0].PCLineTable() +} + +func (f *File) Text() (uint64, []byte, error) { + return f.entries[0].Text() +} + +func (f *File) GOARCH() string { + return f.entries[0].GOARCH() +} + +func (f *File) LoadAddress() (uint64, error) { + return f.entries[0].LoadAddress() +} + +func (f *File) DWARF() (*dwarf.Data, error) { + return f.entries[0].DWARF() +} + +func (f *File) Disasm() (*Disasm, error) { + return f.entries[0].Disasm() +} + +func (e *Entry) Name() string { + return e.name +} + +func (e *Entry) Symbols() ([]Sym, error) { + syms, err := e.raw.symbols() if err != nil { return nil, err } @@ -93,37 +137,37 @@ func (x byAddr) Less(i, j int) bool { return x[i].Addr < x[j].Addr } func (x byAddr) Len() int { return len(x) } func (x byAddr) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (f *File) PCLineTable() (Liner, error) { +func (e *Entry) PCLineTable() (Liner, error) { // If the raw file implements Liner directly, use that. // Currently, only Go intermediate objects and archives (goobj) use this path. 
- if pcln, ok := f.raw.(Liner); ok { + if pcln, ok := e.raw.(Liner); ok { return pcln, nil } // Otherwise, read the pcln tables and build a Liner out of that. - textStart, symtab, pclntab, err := f.raw.pcln() + textStart, symtab, pclntab, err := e.raw.pcln() if err != nil { return nil, err } return gosym.NewTable(symtab, gosym.NewLineTable(pclntab, textStart)) } -func (f *File) Text() (uint64, []byte, error) { - return f.raw.text() +func (e *Entry) Text() (uint64, []byte, error) { + return e.raw.text() } -func (f *File) GOARCH() string { - return f.raw.goarch() +func (e *Entry) GOARCH() string { + return e.raw.goarch() } // LoadAddress returns the expected load address of the file. // This differs from the actual load address for a position-independent // executable. -func (f *File) LoadAddress() (uint64, error) { - return f.raw.loadAddress() +func (e *Entry) LoadAddress() (uint64, error) { + return e.raw.loadAddress() } // DWARF returns DWARF debug data for the file, if any. // This is for cmd/pprof to locate cgo functions. 
-func (f *File) DWARF() (*dwarf.Data, error) { - return f.raw.dwarf() +func (e *Entry) DWARF() (*dwarf.Data, error) { + return e.raw.dwarf() } diff --git a/src/cmd/internal/objfile/pe.go b/src/cmd/internal/objfile/pe.go index 46b23172422..80db6f0f187 100644 --- a/src/cmd/internal/objfile/pe.go +++ b/src/cmd/internal/objfile/pe.go @@ -10,7 +10,7 @@ import ( "debug/dwarf" "debug/pe" "fmt" - "os" + "io" "sort" ) @@ -18,17 +18,11 @@ type peFile struct { pe *pe.File } -func openPE(r *os.File) (rawFile, error) { +func openPE(r io.ReaderAt) (rawFile, error) { f, err := pe.NewFile(r) if err != nil { return nil, err } - switch f.OptionalHeader.(type) { - case *pe.OptionalHeader32, *pe.OptionalHeader64: - // ok - default: - return nil, fmt.Errorf("unrecognized PE format") - } return &peFile{f}, nil } diff --git a/src/cmd/internal/objfile/plan9obj.go b/src/cmd/internal/objfile/plan9obj.go index 3e34f65ae7e..da0b345f532 100644 --- a/src/cmd/internal/objfile/plan9obj.go +++ b/src/cmd/internal/objfile/plan9obj.go @@ -11,7 +11,7 @@ import ( "debug/plan9obj" "errors" "fmt" - "os" + "io" "sort" ) @@ -28,7 +28,7 @@ type plan9File struct { plan9 *plan9obj.File } -func openPlan9(r *os.File) (rawFile, error) { +func openPlan9(r io.ReaderAt) (rawFile, error) { f, err := plan9obj.NewFile(r) if err != nil { return nil, err diff --git a/src/cmd/internal/src/pos.go b/src/cmd/internal/src/pos.go index a1ea3fcdac7..10fa924c0be 100644 --- a/src/cmd/internal/src/pos.go +++ b/src/cmd/internal/src/pos.go @@ -79,15 +79,15 @@ func (p Pos) AbsFilename() string { return p.base.AbsFilename() } func (p Pos) SymFilename() string { return p.base.SymFilename() } func (p Pos) String() string { - return p.Format(true) + return p.Format(true, true) } // Format formats a position as "filename:line" or "filename:line:column", -// controlled by the showCol flag. 
-// If the position is relative to a line directive, the original position -// is appended in square brackets without column (since the column doesn't -// change). -func (p Pos) Format(showCol bool) string { +// controlled by the showCol flag. A position relative to a line directive +// is always formatted without column information. In that case, if showOrig +// is set, the original position (again controlled by showCol) is appended +// in square brackets: "filename:line[origfile:origline:origcolumn]". +func (p Pos) Format(showCol, showOrig bool) string { if !p.IsKnown() { return "" } @@ -105,8 +105,11 @@ func (p Pos) Format(showCol bool) string { // that's provided via a line directive). // TODO(gri) This may not be true if we have an inlining base. // We may want to differentiate at some point. - return format(p.RelFilename(), p.RelLine(), 0, false) + - "[" + format(p.Filename(), p.Line(), p.Col(), showCol) + "]" + s := format(p.RelFilename(), p.RelLine(), 0, false) + if showOrig { + s += "[" + format(p.Filename(), p.Line(), p.Col(), showCol) + "]" + } + return s } // format formats a (filename, line, col) tuple as "filename:line" (showCol @@ -155,8 +158,8 @@ func NewFileBase(filename, absFilename string) *PosBase { // NewLinePragmaBase returns a new *PosBase for a line pragma of the form // //line filename:line // at position pos. 
-func NewLinePragmaBase(pos Pos, filename string, line uint) *PosBase { - return &PosBase{pos, filename, filename, FileSymPrefix + filename, line - 1, -1} +func NewLinePragmaBase(pos Pos, filename, absFilename string, line uint) *PosBase { + return &PosBase{pos, filename, absFilename, FileSymPrefix + absFilename, line - 1, -1} } // NewInliningBase returns a copy of the old PosBase with the given inlining diff --git a/src/cmd/internal/src/pos_test.go b/src/cmd/internal/src/pos_test.go index a101bc10b12..b06d3825361 100644 --- a/src/cmd/internal/src/pos_test.go +++ b/src/cmd/internal/src/pos_test.go @@ -12,16 +12,16 @@ import ( func TestPos(t *testing.T) { f0 := NewFileBase("", "") f1 := NewFileBase("f1", "f1") - f2 := NewLinePragmaBase(Pos{}, "f2", 10) - f3 := NewLinePragmaBase(MakePos(f1, 10, 1), "f3", 100) - f4 := NewLinePragmaBase(MakePos(f3, 10, 1), "f4", 100) + f2 := NewLinePragmaBase(Pos{}, "f2", "f2", 10) + f3 := NewLinePragmaBase(MakePos(f1, 10, 1), "f3", "f3", 100) + f4 := NewLinePragmaBase(MakePos(f3, 10, 1), "f4", "f4", 100) // line directives from issue #19392 fp := NewFileBase("p.go", "p.go") - fc := NewLinePragmaBase(MakePos(fp, 3, 0), "c.go", 10) - ft := NewLinePragmaBase(MakePos(fp, 6, 0), "t.go", 20) - fv := NewLinePragmaBase(MakePos(fp, 9, 0), "v.go", 30) - ff := NewLinePragmaBase(MakePos(fp, 12, 0), "f.go", 40) + fc := NewLinePragmaBase(MakePos(fp, 3, 0), "c.go", "c.go", 10) + ft := NewLinePragmaBase(MakePos(fp, 6, 0), "t.go", "t.go", 20) + fv := NewLinePragmaBase(MakePos(fp, 9, 0), "v.go", "v.go", 30) + ff := NewLinePragmaBase(MakePos(fp, 12, 0), "f.go", "f.go", 40) for _, test := range []struct { pos Pos diff --git a/src/cmd/internal/src/xpos_test.go b/src/cmd/internal/src/xpos_test.go index 4cfeedcd056..8ac9c9dc4ec 100644 --- a/src/cmd/internal/src/xpos_test.go +++ b/src/cmd/internal/src/xpos_test.go @@ -19,7 +19,7 @@ func TestNoXPos(t *testing.T) { func TestConversion(t *testing.T) { b1 := NewFileBase("b1", "b1") b2 := NewFileBase("b2", "b2") 
- b3 := NewLinePragmaBase(MakePos(b1, 10, 0), "b3", 123) + b3 := NewLinePragmaBase(MakePos(b1, 10, 0), "b3", "b3", 123) var tab PosTable for _, want := range []Pos{ diff --git a/src/cmd/internal/test2json/test2json.go b/src/cmd/internal/test2json/test2json.go new file mode 100644 index 00000000000..3e09c8d9151 --- /dev/null +++ b/src/cmd/internal/test2json/test2json.go @@ -0,0 +1,413 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package test2json implements conversion of test binary output to JSON. +// It is used by cmd/test2json and cmd/go. +// +// See the cmd/test2json documentation for details of the JSON encoding. +package test2json + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Mode controls details of the conversion. +type Mode int + +const ( + Timestamp Mode = 1 << iota // include Time in events +) + +// event is the JSON struct we emit. +type event struct { + Time *time.Time `json:",omitempty"` + Action string + Package string `json:",omitempty"` + Test string `json:",omitempty"` + Elapsed *float64 `json:",omitempty"` + Output *textBytes `json:",omitempty"` +} + +// textBytes is a hack to get JSON to emit a []byte as a string +// without actually copying it to a string. +// It implements encoding.TextMarshaler, which returns its text form as a []byte, +// and then json encodes that text form as a string (which was our goal). +type textBytes []byte + +func (b textBytes) MarshalText() ([]byte, error) { return b, nil } + +// A converter holds the state of a test-to-JSON conversion. +// It implements io.WriteCloser; the caller writes test output in, +// and the converter writes JSON output to w. 
+type converter struct { + w io.Writer // JSON output stream + pkg string // package to name in events + mode Mode // mode bits + start time.Time // time converter started + testName string // name of current test, for output attribution + report []*event // pending test result reports (nested for subtests) + result string // overall test result if seen + input lineBuffer // input buffer + output lineBuffer // output buffer +} + +// inBuffer and outBuffer are the input and output buffer sizes. +// They're variables so that they can be reduced during testing. +// +// The input buffer needs to be able to hold any single test +// directive line we want to recognize, like: +// +// --- PASS: very/nested/s/u/b/t/e/s/t +// +// If anyone reports a test directive line > 4k not working, it will +// be defensible to suggest they restructure their test or test names. +// +// The output buffer must be >= utf8.UTFMax, so that it can +// accumulate any single UTF8 sequence. Lines that fit entirely +// within the output buffer are emitted in single output events. +// Otherwise they are split into multiple events. +// The output buffer size therefore limits the size of the encoding +// of a single JSON output event. 1k seems like a reasonable balance +// between wanting to avoid splitting an output line and not wanting to +// generate enormous output events. +var ( + inBuffer = 4096 + outBuffer = 1024 +) + +// NewConverter returns a "test to json" converter. +// Writes on the returned writer are written as JSON to w, +// with minimal delay. +// +// The writes to w are whole JSON events ending in \n, +// so that it is safe to run multiple tests writing to multiple converters +// writing to a single underlying output stream w. +// As long as the underlying output w can handle concurrent writes +// from multiple goroutines, the result will be a JSON stream +// describing the relative ordering of execution in all the concurrent tests. 
+// +// The mode flag adjusts the behavior of the converter. +// Passing ModeTime includes event timestamps and elapsed times. +// +// The pkg string, if present, specifies the import path to +// report in the JSON stream. +func NewConverter(w io.Writer, pkg string, mode Mode) io.WriteCloser { + c := new(converter) + *c = converter{ + w: w, + pkg: pkg, + mode: mode, + start: time.Now(), + input: lineBuffer{ + b: make([]byte, 0, inBuffer), + line: c.handleInputLine, + part: c.output.write, + }, + output: lineBuffer{ + b: make([]byte, 0, outBuffer), + line: c.writeOutputEvent, + part: c.writeOutputEvent, + }, + } + return c +} + +// Write writes the test input to the converter. +func (c *converter) Write(b []byte) (int, error) { + c.input.write(b) + return len(b), nil +} + +var ( + bigPass = []byte("PASS\n") + bigFail = []byte("FAIL\n") + + updates = [][]byte{ + []byte("=== RUN "), + []byte("=== PAUSE "), + []byte("=== CONT "), + } + + reports = [][]byte{ + []byte("--- PASS: "), + []byte("--- FAIL: "), + []byte("--- SKIP: "), + } + + fourSpace = []byte(" ") + + skipLinePrefix = []byte("? \t") + skipLineSuffix = []byte("\t[no test files]\n") +) + +// handleInputLine handles a single whole test output line. +// It must write the line to c.output but may choose to do so +// before or after emitting other events. +func (c *converter) handleInputLine(line []byte) { + // Final PASS or FAIL. + if bytes.Equal(line, bigPass) || bytes.Equal(line, bigFail) { + c.flushReport(0) + c.output.write(line) + if bytes.Equal(line, bigPass) { + c.result = "pass" + } else { + c.result = "fail" + } + return + } + + // Special case for entirely skipped test binary: "? \tpkgname\t[no test files]\n" is only line. + // Report it as plain output but remember to say skip in the final summary. 
+ if bytes.HasPrefix(line, skipLinePrefix) && bytes.HasSuffix(line, skipLineSuffix) && len(c.report) == 0 { + c.result = "skip" + } + + // "=== RUN " + // "=== PAUSE " + // "=== CONT " + origLine := line + ok := false + indent := 0 + for _, magic := range updates { + if bytes.HasPrefix(line, magic) { + ok = true + break + } + } + if !ok { + // "--- PASS: " + // "--- FAIL: " + // "--- SKIP: " + // but possibly indented. + for bytes.HasPrefix(line, fourSpace) { + line = line[4:] + indent++ + } + for _, magic := range reports { + if bytes.HasPrefix(line, magic) { + ok = true + break + } + } + } + + if !ok { + // Not a special test output line. + c.output.write(origLine) + return + } + + // Parse out action and test name. + action := strings.ToLower(strings.TrimSuffix(strings.TrimSpace(string(line[4:4+6])), ":")) + name := strings.TrimSpace(string(line[4+6:])) + + e := &event{Action: action} + if line[0] == '-' { // PASS or FAIL report + // Parse out elapsed time. + if i := strings.Index(name, " ("); i >= 0 { + if strings.HasSuffix(name, "s)") { + t, err := strconv.ParseFloat(name[i+2:len(name)-2], 64) + if err == nil { + if c.mode&Timestamp != 0 { + e.Elapsed = &t + } + } + } + name = name[:i] + } + if len(c.report) < indent { + // Nested deeper than expected. + // Treat this line as plain output. + return + } + // Flush reports at this indentation level or deeper. + c.flushReport(indent) + e.Test = name + c.testName = name + c.report = append(c.report, e) + c.output.write(origLine) + return + } + // === update. + // Finish any pending PASS/FAIL reports. + c.flushReport(0) + c.testName = name + + if action == "pause" { + // For a pause, we want to write the pause notification before + // delivering the pause event, just so it doesn't look like the test + // is generating output immediately after being paused. 
+ c.output.write(origLine) + } + c.writeEvent(e) + if action != "pause" { + c.output.write(origLine) + } + + return +} + +// flushReport flushes all pending PASS/FAIL reports at levels >= depth. +func (c *converter) flushReport(depth int) { + c.testName = "" + for len(c.report) > depth { + e := c.report[len(c.report)-1] + c.report = c.report[:len(c.report)-1] + c.writeEvent(e) + } +} + +// Close marks the end of the go test output. +// It flushes any pending input and then output (only partial lines at this point) +// and then emits the final overall package-level pass/fail event. +func (c *converter) Close() error { + c.input.flush() + c.output.flush() + e := &event{Action: "fail"} + if c.result != "" { + e.Action = c.result + } + if c.mode&Timestamp != 0 { + dt := time.Since(c.start).Round(1 * time.Millisecond).Seconds() + e.Elapsed = &dt + } + c.writeEvent(e) + return nil +} + +// writeOutputEvent writes a single output event with the given bytes. +func (c *converter) writeOutputEvent(out []byte) { + c.writeEvent(&event{ + Action: "output", + Output: (*textBytes)(&out), + }) +} + +// writeEvent writes a single event. +// It adds the package, time (if requested), and test name (if needed). +func (c *converter) writeEvent(e *event) { + e.Package = c.pkg + if c.mode&Timestamp != 0 { + t := time.Now() + e.Time = &t + } + if e.Test == "" { + e.Test = c.testName + } + js, err := json.Marshal(e) + if err != nil { + // Should not happen - event is valid for json.Marshal. + c.w.Write([]byte(fmt.Sprintf("testjson internal error: %v\n", err))) + return + } + js = append(js, '\n') + c.w.Write(js) +} + +// A lineBuffer is an I/O buffer that reacts to writes by invoking +// input-processing callbacks on whole lines or (for long lines that +// have been split) line fragments. +// +// It should be initialized with b set to a buffer of length 0 but non-zero capacity, +// and line and part set to the desired input processors. 
+// The lineBuffer will call line(x) for any whole line x (including the final newline) +// that fits entirely in cap(b). It will handle input lines longer than cap(b) by +// calling part(x) for sections of the line. The line will be split at UTF8 boundaries, +// and the final call to part for a long line includes the final newline. +type lineBuffer struct { + b []byte // buffer + mid bool // whether we're in the middle of a long line + line func([]byte) // line callback + part func([]byte) // partial line callback +} + +// write writes b to the buffer. +func (l *lineBuffer) write(b []byte) { + for len(b) > 0 { + // Copy what we can into b. + m := copy(l.b[len(l.b):cap(l.b)], b) + l.b = l.b[:len(l.b)+m] + b = b[m:] + + // Process lines in b. + i := 0 + for i < len(l.b) { + j := bytes.IndexByte(l.b[i:], '\n') + if j < 0 { + break + } + e := i + j + 1 + if l.mid { + // Found the end of a partial line. + l.part(l.b[i:e]) + l.mid = false + } else { + // Found a whole line. + l.line(l.b[i:e]) + } + i = e + } + + // Whatever's left in l.b is a line fragment. + if i == 0 && len(l.b) == cap(l.b) { + // The whole buffer is a fragment. + // Emit it as the beginning (or continuation) of a partial line. + t := trimUTF8(l.b) + l.part(l.b[:t]) + l.b = l.b[:copy(l.b, l.b[t:])] + l.mid = true + } + + // There's room for more input. + // Slide it down in hope of completing the line. + if i > 0 { + l.b = l.b[:copy(l.b, l.b[i:])] + } + } +} + +// flush flushes the line buffer. +func (l *lineBuffer) flush() { + if len(l.b) > 0 { + // Must be a line without a \n, so a partial line. + l.part(l.b) + l.b = l.b[:0] + } +} + +// trimUTF8 returns a length t as close to len(b) as possible such that b[:t] +// does not end in the middle of a possibly-valid UTF-8 sequence. +// +// If a large text buffer must be split before position i at the latest, +// splitting at position trimUTF(b[:i]) avoids splitting a UTF-8 sequence. 
+func trimUTF8(b []byte) int { + // Scan backward to find non-continuation byte. + for i := 1; i < utf8.UTFMax && i <= len(b); i++ { + if c := b[len(b)-i]; c&0xc0 != 0x80 { + switch { + case c&0xe0 == 0xc0: + if i < 2 { + return len(b) - i + } + case c&0xf0 == 0xe0: + if i < 3 { + return len(b) - i + } + case c&0xf8 == 0xf0: + if i < 4 { + return len(b) - i + } + } + break + } + } + return len(b) +} diff --git a/src/cmd/internal/test2json/test2json_test.go b/src/cmd/internal/test2json/test2json_test.go new file mode 100644 index 00000000000..4683907888c --- /dev/null +++ b/src/cmd/internal/test2json/test2json_test.go @@ -0,0 +1,277 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test2json + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "path/filepath" + "reflect" + "strings" + "testing" + "unicode/utf8" +) + +var update = flag.Bool("update", false, "rewrite testdata/*.json files") + +func TestGolden(t *testing.T) { + files, err := filepath.Glob("testdata/*.test") + if err != nil { + t.Fatal(err) + } + for _, file := range files { + name := strings.TrimSuffix(filepath.Base(file), ".test") + t.Run(name, func(t *testing.T) { + orig, err := ioutil.ReadFile(file) + if err != nil { + t.Fatal(err) + } + + // Test one line written to c at a time. + // Assume that's the most likely to be handled correctly. + var buf bytes.Buffer + c := NewConverter(&buf, "", 0) + in := append([]byte{}, orig...) 
+ for _, line := range bytes.SplitAfter(in, []byte("\n")) { + writeAndKill(c, line) + } + c.Close() + + if *update { + js := strings.TrimSuffix(file, ".test") + ".json" + t.Logf("rewriting %s", js) + if err := ioutil.WriteFile(js, buf.Bytes(), 0666); err != nil { + t.Fatal(err) + } + return + } + + want, err := ioutil.ReadFile(strings.TrimSuffix(file, ".test") + ".json") + if err != nil { + t.Fatal(err) + } + diffJSON(t, buf.Bytes(), want) + if t.Failed() { + // If the line-at-a-time conversion fails, no point testing boundary conditions. + return + } + + // Write entire input in bulk. + t.Run("bulk", func(t *testing.T) { + buf.Reset() + c = NewConverter(&buf, "", 0) + in = append([]byte{}, orig...) + writeAndKill(c, in) + c.Close() + diffJSON(t, buf.Bytes(), want) + }) + + // Write 2 bytes at a time on even boundaries. + t.Run("even2", func(t *testing.T) { + buf.Reset() + c = NewConverter(&buf, "", 0) + in = append([]byte{}, orig...) + for i := 0; i < len(in); i += 2 { + if i+2 <= len(in) { + writeAndKill(c, in[i:i+2]) + } else { + writeAndKill(c, in[i:]) + } + } + c.Close() + diffJSON(t, buf.Bytes(), want) + }) + + // Write 2 bytes at a time on odd boundaries. + t.Run("odd2", func(t *testing.T) { + buf.Reset() + c = NewConverter(&buf, "", 0) + in = append([]byte{}, orig...) + if len(in) > 0 { + writeAndKill(c, in[:1]) + } + for i := 1; i < len(in); i += 2 { + if i+2 <= len(in) { + writeAndKill(c, in[i:i+2]) + } else { + writeAndKill(c, in[i:]) + } + } + c.Close() + diffJSON(t, buf.Bytes(), want) + }) + + // Test with very small output buffers, to check that + // UTF8 sequences are not broken up. + for b := 5; b <= 8; b++ { + t.Run(fmt.Sprintf("tiny%d", b), func(t *testing.T) { + oldIn := inBuffer + oldOut := outBuffer + defer func() { + inBuffer = oldIn + outBuffer = oldOut + }() + inBuffer = 64 + outBuffer = b + buf.Reset() + c = NewConverter(&buf, "", 0) + in = append([]byte{}, orig...) 
+ writeAndKill(c, in) + c.Close() + diffJSON(t, buf.Bytes(), want) + }) + } + }) + } +} + +// writeAndKill writes b to w and then fills b with Zs. +// The filling makes sure that if w is holding onto b for +// future use, that future use will have obviously wrong data. +func writeAndKill(w io.Writer, b []byte) { + w.Write(b) + for i := range b { + b[i] = 'Z' + } +} + +// diffJSON diffs the stream we have against the stream we want +// and fails the test with a useful message if they don't match. +func diffJSON(t *testing.T, have, want []byte) { + t.Helper() + type event map[string]interface{} + + // Parse into events, one per line. + parseEvents := func(b []byte) ([]event, []string) { + t.Helper() + var events []event + var lines []string + for _, line := range bytes.SplitAfter(b, []byte("\n")) { + if len(line) > 0 { + line = bytes.TrimSpace(line) + var e event + err := json.Unmarshal(line, &e) + if err != nil { + t.Errorf("unmarshal %s: %v", b, err) + continue + } + events = append(events, e) + lines = append(lines, string(line)) + } + } + return events, lines + } + haveEvents, haveLines := parseEvents(have) + wantEvents, wantLines := parseEvents(want) + if t.Failed() { + return + } + + // Make sure the events we have match the events we want. + // At each step we're matching haveEvents[i] against wantEvents[j]. + // i and j can move independently due to choices about exactly + // how to break up text in "output" events. + i := 0 + j := 0 + + // Fail reports a failure at the current i,j and stops the test. + // It shows the events around the current positions, + // with the current positions marked. 
+ fail := func() { + var buf bytes.Buffer + show := func(i int, lines []string) { + for k := -2; k < 5; k++ { + marker := "" + if k == 0 { + marker = "» " + } + if 0 <= i+k && i+k < len(lines) { + fmt.Fprintf(&buf, "\t%s%s\n", marker, lines[i+k]) + } + } + if i >= len(lines) { + // show marker after end of input + fmt.Fprintf(&buf, "\t» \n") + } + } + fmt.Fprintf(&buf, "have:\n") + show(i, haveLines) + fmt.Fprintf(&buf, "want:\n") + show(j, wantLines) + t.Fatal(buf.String()) + } + + var outputTest string // current "Test" key in "output" events + var wantOutput, haveOutput string // collected "Output" of those events + + // getTest returns the "Test" setting, or "" if it is missing. + getTest := func(e event) string { + s, _ := e["Test"].(string) + return s + } + + // checkOutput collects output from the haveEvents for the current outputTest + // and then checks that the collected output matches the wanted output. + checkOutput := func() { + for i < len(haveEvents) && haveEvents[i]["Action"] == "output" && getTest(haveEvents[i]) == outputTest { + haveOutput += haveEvents[i]["Output"].(string) + i++ + } + if haveOutput != wantOutput { + t.Errorf("output mismatch for Test=%q:\nhave %q\nwant %q", outputTest, haveOutput, wantOutput) + fail() + } + haveOutput = "" + wantOutput = "" + } + + // Walk through wantEvents matching against haveEvents. 
+ for j = range wantEvents { + e := wantEvents[j] + if e["Action"] == "output" && getTest(e) == outputTest { + wantOutput += e["Output"].(string) + continue + } + checkOutput() + if e["Action"] == "output" { + outputTest = getTest(e) + wantOutput += e["Output"].(string) + continue + } + if i >= len(haveEvents) { + t.Errorf("early end of event stream: missing event") + fail() + } + if !reflect.DeepEqual(haveEvents[i], e) { + t.Errorf("events out of sync") + fail() + } + i++ + } + checkOutput() + if i < len(haveEvents) { + t.Errorf("extra events in stream") + fail() + } +} + +func TestTrimUTF8(t *testing.T) { + s := "hello α ☺ 😂 world" // α is 2-byte, ☺ is 3-byte, 😂 is 4-byte + b := []byte(s) + for i := 0; i < len(s); i++ { + j := trimUTF8(b[:i]) + u := string([]rune(s[:j])) + string([]rune(s[j:])) + if u != s { + t.Errorf("trimUTF8(%q) = %d (-%d), not at boundary (split: %q %q)", s[:i], j, i-j, s[:j], s[j:]) + } + if utf8.FullRune(b[j:i]) { + t.Errorf("trimUTF8(%q) = %d (-%d), too early (missed: %q)", s[:j], j, i-j, s[j:i]) + } + } +} diff --git a/src/cmd/internal/test2json/testdata/ascii.json b/src/cmd/internal/test2json/testdata/ascii.json new file mode 100644 index 00000000000..67fccfc1121 --- /dev/null +++ b/src/cmd/internal/test2json/testdata/ascii.json @@ -0,0 +1,10 @@ +{"Action":"run","Test":"TestAscii"} +{"Action":"output","Test":"TestAscii","Output":"=== RUN TestAscii\n"} +{"Action":"output","Test":"TestAscii","Output":"I can eat glass, and it doesn't hurt me. I can eat glass, and it doesn't hurt me.\n"} +{"Action":"output","Test":"TestAscii","Output":"I CAN EAT GLASS, AND IT DOESN'T HURT ME. I CAN EAT GLASS, AND IT DOESN'T HURT ME.\n"} +{"Action":"output","Test":"TestAscii","Output":"--- PASS: TestAscii\n"} +{"Action":"output","Test":"TestAscii","Output":" i can eat glass, and it doesn't hurt me. i can eat glass, and it doesn't hurt me.\n"} +{"Action":"output","Test":"TestAscii","Output":" V PNA RNG TYNFF, NAQ VG QBRFA'G UHEG ZR. 
V PNA RNG TYNFF, NAQ VG QBRFA'G UHEG ZR.\n"} +{"Action":"pass","Test":"TestAscii"} +{"Action":"output","Output":"PASS\n"} +{"Action":"pass"} diff --git a/src/cmd/internal/test2json/testdata/ascii.test b/src/cmd/internal/test2json/testdata/ascii.test new file mode 100644 index 00000000000..4ff7453430c --- /dev/null +++ b/src/cmd/internal/test2json/testdata/ascii.test @@ -0,0 +1,7 @@ +=== RUN TestAscii +I can eat glass, and it doesn't hurt me. I can eat glass, and it doesn't hurt me. +I CAN EAT GLASS, AND IT DOESN'T HURT ME. I CAN EAT GLASS, AND IT DOESN'T HURT ME. +--- PASS: TestAscii + i can eat glass, and it doesn't hurt me. i can eat glass, and it doesn't hurt me. + V PNA RNG TYNFF, NAQ VG QBRFA'G UHEG ZR. V PNA RNG TYNFF, NAQ VG QBRFA'G UHEG ZR. +PASS diff --git a/src/cmd/internal/test2json/testdata/smiley.json b/src/cmd/internal/test2json/testdata/smiley.json new file mode 100644 index 00000000000..afa990d7c03 --- /dev/null +++ b/src/cmd/internal/test2json/testdata/smiley.json @@ -0,0 +1,182 @@ +{"Action":"run","Test":"Test☺☹"} +{"Action":"output","Test":"Test☺☹","Output":"=== RUN Test☺☹\n"} +{"Action":"output","Test":"Test☺☹","Output":"=== PAUSE Test☺☹\n"} +{"Action":"pause","Test":"Test☺☹"} +{"Action":"run","Test":"Test☺☹Asm"} +{"Action":"output","Test":"Test☺☹Asm","Output":"=== RUN Test☺☹Asm\n"} +{"Action":"output","Test":"Test☺☹Asm","Output":"=== PAUSE Test☺☹Asm\n"} +{"Action":"pause","Test":"Test☺☹Asm"} +{"Action":"run","Test":"Test☺☹Dirs"} +{"Action":"output","Test":"Test☺☹Dirs","Output":"=== RUN Test☺☹Dirs\n"} +{"Action":"output","Test":"Test☺☹Dirs","Output":"=== PAUSE Test☺☹Dirs\n"} +{"Action":"pause","Test":"Test☺☹Dirs"} +{"Action":"run","Test":"TestTags"} +{"Action":"output","Test":"TestTags","Output":"=== RUN TestTags\n"} +{"Action":"output","Test":"TestTags","Output":"=== PAUSE TestTags\n"} +{"Action":"pause","Test":"TestTags"} +{"Action":"run","Test":"Test☺☹Verbose"} +{"Action":"output","Test":"Test☺☹Verbose","Output":"=== RUN Test☺☹Verbose\n"} 
+{"Action":"output","Test":"Test☺☹Verbose","Output":"=== PAUSE Test☺☹Verbose\n"} +{"Action":"pause","Test":"Test☺☹Verbose"} +{"Action":"cont","Test":"Test☺☹"} +{"Action":"output","Test":"Test☺☹","Output":"=== CONT Test☺☹\n"} +{"Action":"cont","Test":"TestTags"} +{"Action":"output","Test":"TestTags","Output":"=== CONT TestTags\n"} +{"Action":"cont","Test":"Test☺☹Verbose"} +{"Action":"output","Test":"Test☺☹Verbose","Output":"=== CONT Test☺☹Verbose\n"} +{"Action":"run","Test":"TestTags/testtag"} +{"Action":"output","Test":"TestTags/testtag","Output":"=== RUN TestTags/testtag\n"} +{"Action":"output","Test":"TestTags/testtag","Output":"=== PAUSE TestTags/testtag\n"} +{"Action":"pause","Test":"TestTags/testtag"} +{"Action":"cont","Test":"Test☺☹Dirs"} +{"Action":"output","Test":"Test☺☹Dirs","Output":"=== CONT Test☺☹Dirs\n"} +{"Action":"cont","Test":"Test☺☹Asm"} +{"Action":"output","Test":"Test☺☹Asm","Output":"=== CONT Test☺☹Asm\n"} +{"Action":"run","Test":"Test☺☹/0"} +{"Action":"output","Test":"Test☺☹/0","Output":"=== RUN Test☺☹/0\n"} +{"Action":"output","Test":"Test☺☹/0","Output":"=== PAUSE Test☺☹/0\n"} +{"Action":"pause","Test":"Test☺☹/0"} +{"Action":"run","Test":"Test☺☹/1"} +{"Action":"output","Test":"Test☺☹/1","Output":"=== RUN Test☺☹/1\n"} +{"Action":"output","Test":"Test☺☹/1","Output":"=== PAUSE Test☺☹/1\n"} +{"Action":"pause","Test":"Test☺☹/1"} +{"Action":"run","Test":"Test☺☹/2"} +{"Action":"output","Test":"Test☺☹/2","Output":"=== RUN Test☺☹/2\n"} +{"Action":"output","Test":"Test☺☹/2","Output":"=== PAUSE Test☺☹/2\n"} +{"Action":"pause","Test":"Test☺☹/2"} +{"Action":"run","Test":"Test☺☹/3"} +{"Action":"output","Test":"Test☺☹/3","Output":"=== RUN Test☺☹/3\n"} +{"Action":"output","Test":"Test☺☹/3","Output":"=== PAUSE Test☺☹/3\n"} +{"Action":"pause","Test":"Test☺☹/3"} +{"Action":"run","Test":"Test☺☹/4"} +{"Action":"output","Test":"Test☺☹/4","Output":"=== RUN Test☺☹/4\n"} +{"Action":"run","Test":"TestTags/x_testtag_y"} 
+{"Action":"output","Test":"TestTags/x_testtag_y","Output":"=== RUN TestTags/x_testtag_y\n"} +{"Action":"output","Test":"Test☺☹/4","Output":"=== PAUSE Test☺☹/4\n"} +{"Action":"pause","Test":"Test☺☹/4"} +{"Action":"run","Test":"Test☺☹/5"} +{"Action":"output","Test":"Test☺☹/5","Output":"=== RUN Test☺☹/5\n"} +{"Action":"output","Test":"Test☺☹/5","Output":"=== PAUSE Test☺☹/5\n"} +{"Action":"pause","Test":"Test☺☹/5"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":"=== PAUSE TestTags/x_testtag_y\n"} +{"Action":"pause","Test":"TestTags/x_testtag_y"} +{"Action":"run","Test":"Test☺☹/6"} +{"Action":"output","Test":"Test☺☹/6","Output":"=== RUN Test☺☹/6\n"} +{"Action":"run","Test":"TestTags/x,testtag,y"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":"=== RUN TestTags/x,testtag,y\n"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":"=== PAUSE TestTags/x,testtag,y\n"} +{"Action":"pause","Test":"TestTags/x,testtag,y"} +{"Action":"run","Test":"Test☺☹Dirs/testingpkg"} +{"Action":"output","Test":"Test☺☹Dirs/testingpkg","Output":"=== RUN Test☺☹Dirs/testingpkg\n"} +{"Action":"output","Test":"Test☺☹/6","Output":"=== PAUSE Test☺☹/6\n"} +{"Action":"pause","Test":"Test☺☹/6"} +{"Action":"cont","Test":"TestTags/x,testtag,y"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":"=== CONT TestTags/x,testtag,y\n"} +{"Action":"output","Test":"Test☺☹Dirs/testingpkg","Output":"=== PAUSE Test☺☹Dirs/testingpkg\n"} +{"Action":"pause","Test":"Test☺☹Dirs/testingpkg"} +{"Action":"run","Test":"Test☺☹Dirs/divergent"} +{"Action":"output","Test":"Test☺☹Dirs/divergent","Output":"=== RUN Test☺☹Dirs/divergent\n"} +{"Action":"run","Test":"Test☺☹/7"} +{"Action":"output","Test":"Test☺☹/7","Output":"=== RUN Test☺☹/7\n"} +{"Action":"output","Test":"Test☺☹/7","Output":"=== PAUSE Test☺☹/7\n"} +{"Action":"pause","Test":"Test☺☹/7"} +{"Action":"output","Test":"Test☺☹Dirs/divergent","Output":"=== PAUSE Test☺☹Dirs/divergent\n"} +{"Action":"pause","Test":"Test☺☹Dirs/divergent"} 
+{"Action":"cont","Test":"TestTags/x_testtag_y"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":"=== CONT TestTags/x_testtag_y\n"} +{"Action":"cont","Test":"TestTags/testtag"} +{"Action":"output","Test":"TestTags/testtag","Output":"=== CONT TestTags/testtag\n"} +{"Action":"run","Test":"Test☺☹Dirs/buildtag"} +{"Action":"output","Test":"Test☺☹Dirs/buildtag","Output":"=== RUN Test☺☹Dirs/buildtag\n"} +{"Action":"output","Test":"Test☺☹Dirs/buildtag","Output":"=== PAUSE Test☺☹Dirs/buildtag\n"} +{"Action":"pause","Test":"Test☺☹Dirs/buildtag"} +{"Action":"cont","Test":"Test☺☹/0"} +{"Action":"output","Test":"Test☺☹/0","Output":"=== CONT Test☺☹/0\n"} +{"Action":"cont","Test":"Test☺☹/4"} +{"Action":"output","Test":"Test☺☹/4","Output":"=== CONT Test☺☹/4\n"} +{"Action":"run","Test":"Test☺☹Dirs/incomplete"} +{"Action":"output","Test":"Test☺☹Dirs/incomplete","Output":"=== RUN Test☺☹Dirs/incomplete\n"} +{"Action":"output","Test":"Test☺☹Dirs/incomplete","Output":"=== PAUSE Test☺☹Dirs/incomplete\n"} +{"Action":"pause","Test":"Test☺☹Dirs/incomplete"} +{"Action":"run","Test":"Test☺☹Dirs/cgo"} +{"Action":"output","Test":"Test☺☹Dirs/cgo","Output":"=== RUN Test☺☹Dirs/cgo\n"} +{"Action":"output","Test":"Test☺☹Dirs/cgo","Output":"=== PAUSE Test☺☹Dirs/cgo\n"} +{"Action":"pause","Test":"Test☺☹Dirs/cgo"} +{"Action":"cont","Test":"Test☺☹/7"} +{"Action":"output","Test":"Test☺☹/7","Output":"=== CONT Test☺☹/7\n"} +{"Action":"cont","Test":"Test☺☹/6"} +{"Action":"output","Test":"Test☺☹/6","Output":"=== CONT Test☺☹/6\n"} +{"Action":"output","Test":"Test☺☹Verbose","Output":"--- PASS: Test☺☹Verbose (0.04s)\n"} +{"Action":"pass","Test":"Test☺☹Verbose"} +{"Action":"cont","Test":"Test☺☹/5"} +{"Action":"output","Test":"Test☺☹/5","Output":"=== CONT Test☺☹/5\n"} +{"Action":"cont","Test":"Test☺☹/3"} +{"Action":"output","Test":"Test☺☹/3","Output":"=== CONT Test☺☹/3\n"} +{"Action":"cont","Test":"Test☺☹/2"} +{"Action":"output","Test":"Test☺☹/2","Output":"=== CONT Test☺☹/2\n"} 
+{"Action":"output","Test":"TestTags","Output":"--- PASS: TestTags (0.00s)\n"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":" --- PASS: TestTags/x_testtag_y (0.04s)\n"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":" \tvet_test.go:187: -tags=x testtag y\n"} +{"Action":"pass","Test":"TestTags/x_testtag_y"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":" --- PASS: TestTags/x,testtag,y (0.04s)\n"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":" \tvet_test.go:187: -tags=x,testtag,y\n"} +{"Action":"pass","Test":"TestTags/x,testtag,y"} +{"Action":"output","Test":"TestTags/testtag","Output":" --- PASS: TestTags/testtag (0.04s)\n"} +{"Action":"output","Test":"TestTags/testtag","Output":" \tvet_test.go:187: -tags=testtag\n"} +{"Action":"pass","Test":"TestTags/testtag"} +{"Action":"pass","Test":"TestTags"} +{"Action":"cont","Test":"Test☺☹/1"} +{"Action":"output","Test":"Test☺☹/1","Output":"=== CONT Test☺☹/1\n"} +{"Action":"cont","Test":"Test☺☹Dirs/testingpkg"} +{"Action":"output","Test":"Test☺☹Dirs/testingpkg","Output":"=== CONT Test☺☹Dirs/testingpkg\n"} +{"Action":"cont","Test":"Test☺☹Dirs/buildtag"} +{"Action":"output","Test":"Test☺☹Dirs/buildtag","Output":"=== CONT Test☺☹Dirs/buildtag\n"} +{"Action":"cont","Test":"Test☺☹Dirs/divergent"} +{"Action":"output","Test":"Test☺☹Dirs/divergent","Output":"=== CONT Test☺☹Dirs/divergent\n"} +{"Action":"cont","Test":"Test☺☹Dirs/incomplete"} +{"Action":"output","Test":"Test☺☹Dirs/incomplete","Output":"=== CONT Test☺☹Dirs/incomplete\n"} +{"Action":"cont","Test":"Test☺☹Dirs/cgo"} +{"Action":"output","Test":"Test☺☹Dirs/cgo","Output":"=== CONT Test☺☹Dirs/cgo\n"} +{"Action":"output","Test":"Test☺☹","Output":"--- PASS: Test☺☹ (0.39s)\n"} +{"Action":"output","Test":"Test☺☹/5","Output":" --- PASS: Test☺☹/5 (0.07s)\n"} +{"Action":"output","Test":"Test☺☹/5","Output":" \tvet_test.go:114: φιλεσ: [\"testdata/copylock_func.go\" \"testdata/rangeloop.go\"]\n"} +{"Action":"pass","Test":"Test☺☹/5"} 
+{"Action":"output","Test":"Test☺☹/3","Output":" --- PASS: Test☺☹/3 (0.07s)\n"} +{"Action":"output","Test":"Test☺☹/3","Output":" \tvet_test.go:114: φιλεσ: [\"testdata/composite.go\" \"testdata/nilfunc.go\"]\n"} +{"Action":"pass","Test":"Test☺☹/3"} +{"Action":"output","Test":"Test☺☹/6","Output":" --- PASS: Test☺☹/6 (0.07s)\n"} +{"Action":"output","Test":"Test☺☹/6","Output":" \tvet_test.go:114: φιλεσ: [\"testdata/copylock_range.go\" \"testdata/shadow.go\"]\n"} +{"Action":"pass","Test":"Test☺☹/6"} +{"Action":"output","Test":"Test☺☹/2","Output":" --- PASS: Test☺☹/2 (0.07s)\n"} +{"Action":"output","Test":"Test☺☹/2","Output":" \tvet_test.go:114: φιλεσ: [\"testdata/bool.go\" \"testdata/method.go\" \"testdata/unused.go\"]\n"} +{"Action":"pass","Test":"Test☺☹/2"} +{"Action":"output","Test":"Test☺☹/0","Output":" --- PASS: Test☺☹/0 (0.13s)\n"} +{"Action":"output","Test":"Test☺☹/0","Output":" \tvet_test.go:114: φιλεσ: [\"testdata/assign.go\" \"testdata/httpresponse.go\" \"testdata/structtag.go\"]\n"} +{"Action":"pass","Test":"Test☺☹/0"} +{"Action":"output","Test":"Test☺☹/4","Output":" --- PASS: Test☺☹/4 (0.16s)\n"} +{"Action":"output","Test":"Test☺☹/4","Output":" \tvet_test.go:114: φιλεσ: [\"testdata/copylock.go\" \"testdata/print.go\"]\n"} +{"Action":"pass","Test":"Test☺☹/4"} +{"Action":"output","Test":"Test☺☹/1","Output":" --- PASS: Test☺☹/1 (0.07s)\n"} +{"Action":"output","Test":"Test☺☹/1","Output":" \tvet_test.go:114: φιλεσ: [\"testdata/atomic.go\" \"testdata/lostcancel.go\" \"testdata/unsafeptr.go\"]\n"} +{"Action":"pass","Test":"Test☺☹/1"} +{"Action":"output","Test":"Test☺☹/7","Output":" --- PASS: Test☺☹/7 (0.19s)\n"} +{"Action":"output","Test":"Test☺☹/7","Output":" \tvet_test.go:114: φιλεσ: [\"testdata/deadcode.go\" \"testdata/shift.go\"]\n"} +{"Action":"pass","Test":"Test☺☹/7"} +{"Action":"pass","Test":"Test☺☹"} +{"Action":"output","Test":"Test☺☹Dirs","Output":"--- PASS: Test☺☹Dirs (0.01s)\n"} +{"Action":"output","Test":"Test☺☹Dirs/testingpkg","Output":" --- PASS: 
Test☺☹Dirs/testingpkg (0.06s)\n"} +{"Action":"pass","Test":"Test☺☹Dirs/testingpkg"} +{"Action":"output","Test":"Test☺☹Dirs/divergent","Output":" --- PASS: Test☺☹Dirs/divergent (0.05s)\n"} +{"Action":"pass","Test":"Test☺☹Dirs/divergent"} +{"Action":"output","Test":"Test☺☹Dirs/buildtag","Output":" --- PASS: Test☺☹Dirs/buildtag (0.06s)\n"} +{"Action":"pass","Test":"Test☺☹Dirs/buildtag"} +{"Action":"output","Test":"Test☺☹Dirs/incomplete","Output":" --- PASS: Test☺☹Dirs/incomplete (0.05s)\n"} +{"Action":"pass","Test":"Test☺☹Dirs/incomplete"} +{"Action":"output","Test":"Test☺☹Dirs/cgo","Output":" --- PASS: Test☺☹Dirs/cgo (0.04s)\n"} +{"Action":"pass","Test":"Test☺☹Dirs/cgo"} +{"Action":"pass","Test":"Test☺☹Dirs"} +{"Action":"output","Test":"Test☺☹Asm","Output":"--- PASS: Test☺☹Asm (0.75s)\n"} +{"Action":"pass","Test":"Test☺☹Asm"} +{"Action":"output","Output":"PASS\n"} +{"Action":"output","Output":"ok \tcmd/vet\t(cached)\n"} +{"Action":"pass"} diff --git a/src/cmd/internal/test2json/testdata/smiley.test b/src/cmd/internal/test2json/testdata/smiley.test new file mode 100644 index 00000000000..05edf5a312f --- /dev/null +++ b/src/cmd/internal/test2json/testdata/smiley.test @@ -0,0 +1,97 @@ +=== RUN Test☺☹ +=== PAUSE Test☺☹ +=== RUN Test☺☹Asm +=== PAUSE Test☺☹Asm +=== RUN Test☺☹Dirs +=== PAUSE Test☺☹Dirs +=== RUN TestTags +=== PAUSE TestTags +=== RUN Test☺☹Verbose +=== PAUSE Test☺☹Verbose +=== CONT Test☺☹ +=== CONT TestTags +=== CONT Test☺☹Verbose +=== RUN TestTags/testtag +=== PAUSE TestTags/testtag +=== CONT Test☺☹Dirs +=== CONT Test☺☹Asm +=== RUN Test☺☹/0 +=== PAUSE Test☺☹/0 +=== RUN Test☺☹/1 +=== PAUSE Test☺☹/1 +=== RUN Test☺☹/2 +=== PAUSE Test☺☹/2 +=== RUN Test☺☹/3 +=== PAUSE Test☺☹/3 +=== RUN Test☺☹/4 +=== RUN TestTags/x_testtag_y +=== PAUSE Test☺☹/4 +=== RUN Test☺☹/5 +=== PAUSE Test☺☹/5 +=== PAUSE TestTags/x_testtag_y +=== RUN Test☺☹/6 +=== RUN TestTags/x,testtag,y +=== PAUSE TestTags/x,testtag,y +=== RUN Test☺☹Dirs/testingpkg +=== PAUSE Test☺☹/6 +=== CONT 
TestTags/x,testtag,y +=== PAUSE Test☺☹Dirs/testingpkg +=== RUN Test☺☹Dirs/divergent +=== RUN Test☺☹/7 +=== PAUSE Test☺☹/7 +=== PAUSE Test☺☹Dirs/divergent +=== CONT TestTags/x_testtag_y +=== CONT TestTags/testtag +=== RUN Test☺☹Dirs/buildtag +=== PAUSE Test☺☹Dirs/buildtag +=== CONT Test☺☹/0 +=== CONT Test☺☹/4 +=== RUN Test☺☹Dirs/incomplete +=== PAUSE Test☺☹Dirs/incomplete +=== RUN Test☺☹Dirs/cgo +=== PAUSE Test☺☹Dirs/cgo +=== CONT Test☺☹/7 +=== CONT Test☺☹/6 +--- PASS: Test☺☹Verbose (0.04s) +=== CONT Test☺☹/5 +=== CONT Test☺☹/3 +=== CONT Test☺☹/2 +--- PASS: TestTags (0.00s) + --- PASS: TestTags/x_testtag_y (0.04s) + vet_test.go:187: -tags=x testtag y + --- PASS: TestTags/x,testtag,y (0.04s) + vet_test.go:187: -tags=x,testtag,y + --- PASS: TestTags/testtag (0.04s) + vet_test.go:187: -tags=testtag +=== CONT Test☺☹/1 +=== CONT Test☺☹Dirs/testingpkg +=== CONT Test☺☹Dirs/buildtag +=== CONT Test☺☹Dirs/divergent +=== CONT Test☺☹Dirs/incomplete +=== CONT Test☺☹Dirs/cgo +--- PASS: Test☺☹ (0.39s) + --- PASS: Test☺☹/5 (0.07s) + vet_test.go:114: φιλεσ: ["testdata/copylock_func.go" "testdata/rangeloop.go"] + --- PASS: Test☺☹/3 (0.07s) + vet_test.go:114: φιλεσ: ["testdata/composite.go" "testdata/nilfunc.go"] + --- PASS: Test☺☹/6 (0.07s) + vet_test.go:114: φιλεσ: ["testdata/copylock_range.go" "testdata/shadow.go"] + --- PASS: Test☺☹/2 (0.07s) + vet_test.go:114: φιλεσ: ["testdata/bool.go" "testdata/method.go" "testdata/unused.go"] + --- PASS: Test☺☹/0 (0.13s) + vet_test.go:114: φιλεσ: ["testdata/assign.go" "testdata/httpresponse.go" "testdata/structtag.go"] + --- PASS: Test☺☹/4 (0.16s) + vet_test.go:114: φιλεσ: ["testdata/copylock.go" "testdata/print.go"] + --- PASS: Test☺☹/1 (0.07s) + vet_test.go:114: φιλεσ: ["testdata/atomic.go" "testdata/lostcancel.go" "testdata/unsafeptr.go"] + --- PASS: Test☺☹/7 (0.19s) + vet_test.go:114: φιλεσ: ["testdata/deadcode.go" "testdata/shift.go"] +--- PASS: Test☺☹Dirs (0.01s) + --- PASS: Test☺☹Dirs/testingpkg (0.06s) + --- PASS: Test☺☹Dirs/divergent 
(0.05s) + --- PASS: Test☺☹Dirs/buildtag (0.06s) + --- PASS: Test☺☹Dirs/incomplete (0.05s) + --- PASS: Test☺☹Dirs/cgo (0.04s) +--- PASS: Test☺☹Asm (0.75s) +PASS +ok cmd/vet (cached) diff --git a/src/cmd/internal/test2json/testdata/unicode.json b/src/cmd/internal/test2json/testdata/unicode.json new file mode 100644 index 00000000000..9cfb5f2d498 --- /dev/null +++ b/src/cmd/internal/test2json/testdata/unicode.json @@ -0,0 +1,10 @@ +{"Action":"run","Test":"TestUnicode"} +{"Action":"output","Test":"TestUnicode","Output":"=== RUN TestUnicode\n"} +{"Action":"output","Test":"TestUnicode","Output":"Μπορώ να φάω σπασμένα γυαλιά χωρίς να πάθω τίποτα. Μπορώ να φάω σπασμένα γυαλιά χωρίς να πάθω τίποτα.\n"} +{"Action":"output","Test":"TestUnicode","Output":"私はガラスを食べられます。それは私を傷つけません。私はガラスを食べられます。それは私を傷つけません。\n"} +{"Action":"output","Test":"TestUnicode","Output":"--- PASS: TestUnicode\n"} +{"Action":"output","Test":"TestUnicode","Output":" ฉันกินกระจกได้ แต่มันไม่ทำให้ฉันเจ็บ ฉันกินกระจกได้ แต่มันไม่ทำให้ฉันเจ็บ\n"} +{"Action":"output","Test":"TestUnicode","Output":" אני יכול לאכול זכוכית וזה לא מזיק לי. אני יכול לאכול זכוכית וזה לא מזיק לי.\n"} +{"Action":"pass","Test":"TestUnicode"} +{"Action":"output","Output":"PASS\n"} +{"Action":"pass"} diff --git a/src/cmd/internal/test2json/testdata/unicode.test b/src/cmd/internal/test2json/testdata/unicode.test new file mode 100644 index 00000000000..58c620d5f74 --- /dev/null +++ b/src/cmd/internal/test2json/testdata/unicode.test @@ -0,0 +1,7 @@ +=== RUN TestUnicode +Μπορώ να φάω σπασμένα γυαλιά χωρίς να πάθω τίποτα. Μπορώ να φάω σπασμένα γυαλιά χωρίς να πάθω τίποτα. +私はガラスを食べられます。それは私を傷つけません。私はガラスを食べられます。それは私を傷つけません。 +--- PASS: TestUnicode + ฉันกินกระจกได้ แต่มันไม่ทำให้ฉันเจ็บ ฉันกินกระจกได้ แต่มันไม่ทำให้ฉันเจ็บ + אני יכול לאכול זכוכית וזה לא מזיק לי. אני יכול לאכול זכוכית וזה לא מזיק לי. 
+PASS diff --git a/src/cmd/internal/test2json/testdata/vet.json b/src/cmd/internal/test2json/testdata/vet.json new file mode 100644 index 00000000000..8c5921d686f --- /dev/null +++ b/src/cmd/internal/test2json/testdata/vet.json @@ -0,0 +1,182 @@ +{"Action":"run","Test":"TestVet"} +{"Action":"output","Test":"TestVet","Output":"=== RUN TestVet\n"} +{"Action":"output","Test":"TestVet","Output":"=== PAUSE TestVet\n"} +{"Action":"pause","Test":"TestVet"} +{"Action":"run","Test":"TestVetAsm"} +{"Action":"output","Test":"TestVetAsm","Output":"=== RUN TestVetAsm\n"} +{"Action":"output","Test":"TestVetAsm","Output":"=== PAUSE TestVetAsm\n"} +{"Action":"pause","Test":"TestVetAsm"} +{"Action":"run","Test":"TestVetDirs"} +{"Action":"output","Test":"TestVetDirs","Output":"=== RUN TestVetDirs\n"} +{"Action":"output","Test":"TestVetDirs","Output":"=== PAUSE TestVetDirs\n"} +{"Action":"pause","Test":"TestVetDirs"} +{"Action":"run","Test":"TestTags"} +{"Action":"output","Test":"TestTags","Output":"=== RUN TestTags\n"} +{"Action":"output","Test":"TestTags","Output":"=== PAUSE TestTags\n"} +{"Action":"pause","Test":"TestTags"} +{"Action":"run","Test":"TestVetVerbose"} +{"Action":"output","Test":"TestVetVerbose","Output":"=== RUN TestVetVerbose\n"} +{"Action":"output","Test":"TestVetVerbose","Output":"=== PAUSE TestVetVerbose\n"} +{"Action":"pause","Test":"TestVetVerbose"} +{"Action":"cont","Test":"TestVet"} +{"Action":"output","Test":"TestVet","Output":"=== CONT TestVet\n"} +{"Action":"cont","Test":"TestTags"} +{"Action":"output","Test":"TestTags","Output":"=== CONT TestTags\n"} +{"Action":"cont","Test":"TestVetVerbose"} +{"Action":"output","Test":"TestVetVerbose","Output":"=== CONT TestVetVerbose\n"} +{"Action":"run","Test":"TestTags/testtag"} +{"Action":"output","Test":"TestTags/testtag","Output":"=== RUN TestTags/testtag\n"} +{"Action":"output","Test":"TestTags/testtag","Output":"=== PAUSE TestTags/testtag\n"} +{"Action":"pause","Test":"TestTags/testtag"} 
+{"Action":"cont","Test":"TestVetDirs"} +{"Action":"output","Test":"TestVetDirs","Output":"=== CONT TestVetDirs\n"} +{"Action":"cont","Test":"TestVetAsm"} +{"Action":"output","Test":"TestVetAsm","Output":"=== CONT TestVetAsm\n"} +{"Action":"run","Test":"TestVet/0"} +{"Action":"output","Test":"TestVet/0","Output":"=== RUN TestVet/0\n"} +{"Action":"output","Test":"TestVet/0","Output":"=== PAUSE TestVet/0\n"} +{"Action":"pause","Test":"TestVet/0"} +{"Action":"run","Test":"TestVet/1"} +{"Action":"output","Test":"TestVet/1","Output":"=== RUN TestVet/1\n"} +{"Action":"output","Test":"TestVet/1","Output":"=== PAUSE TestVet/1\n"} +{"Action":"pause","Test":"TestVet/1"} +{"Action":"run","Test":"TestVet/2"} +{"Action":"output","Test":"TestVet/2","Output":"=== RUN TestVet/2\n"} +{"Action":"output","Test":"TestVet/2","Output":"=== PAUSE TestVet/2\n"} +{"Action":"pause","Test":"TestVet/2"} +{"Action":"run","Test":"TestVet/3"} +{"Action":"output","Test":"TestVet/3","Output":"=== RUN TestVet/3\n"} +{"Action":"output","Test":"TestVet/3","Output":"=== PAUSE TestVet/3\n"} +{"Action":"pause","Test":"TestVet/3"} +{"Action":"run","Test":"TestVet/4"} +{"Action":"output","Test":"TestVet/4","Output":"=== RUN TestVet/4\n"} +{"Action":"run","Test":"TestTags/x_testtag_y"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":"=== RUN TestTags/x_testtag_y\n"} +{"Action":"output","Test":"TestVet/4","Output":"=== PAUSE TestVet/4\n"} +{"Action":"pause","Test":"TestVet/4"} +{"Action":"run","Test":"TestVet/5"} +{"Action":"output","Test":"TestVet/5","Output":"=== RUN TestVet/5\n"} +{"Action":"output","Test":"TestVet/5","Output":"=== PAUSE TestVet/5\n"} +{"Action":"pause","Test":"TestVet/5"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":"=== PAUSE TestTags/x_testtag_y\n"} +{"Action":"pause","Test":"TestTags/x_testtag_y"} +{"Action":"run","Test":"TestVet/6"} +{"Action":"output","Test":"TestVet/6","Output":"=== RUN TestVet/6\n"} +{"Action":"run","Test":"TestTags/x,testtag,y"} 
+{"Action":"output","Test":"TestTags/x,testtag,y","Output":"=== RUN TestTags/x,testtag,y\n"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":"=== PAUSE TestTags/x,testtag,y\n"} +{"Action":"pause","Test":"TestTags/x,testtag,y"} +{"Action":"run","Test":"TestVetDirs/testingpkg"} +{"Action":"output","Test":"TestVetDirs/testingpkg","Output":"=== RUN TestVetDirs/testingpkg\n"} +{"Action":"output","Test":"TestVet/6","Output":"=== PAUSE TestVet/6\n"} +{"Action":"pause","Test":"TestVet/6"} +{"Action":"cont","Test":"TestTags/x,testtag,y"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":"=== CONT TestTags/x,testtag,y\n"} +{"Action":"output","Test":"TestVetDirs/testingpkg","Output":"=== PAUSE TestVetDirs/testingpkg\n"} +{"Action":"pause","Test":"TestVetDirs/testingpkg"} +{"Action":"run","Test":"TestVetDirs/divergent"} +{"Action":"output","Test":"TestVetDirs/divergent","Output":"=== RUN TestVetDirs/divergent\n"} +{"Action":"run","Test":"TestVet/7"} +{"Action":"output","Test":"TestVet/7","Output":"=== RUN TestVet/7\n"} +{"Action":"output","Test":"TestVet/7","Output":"=== PAUSE TestVet/7\n"} +{"Action":"pause","Test":"TestVet/7"} +{"Action":"output","Test":"TestVetDirs/divergent","Output":"=== PAUSE TestVetDirs/divergent\n"} +{"Action":"pause","Test":"TestVetDirs/divergent"} +{"Action":"cont","Test":"TestTags/x_testtag_y"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":"=== CONT TestTags/x_testtag_y\n"} +{"Action":"cont","Test":"TestTags/testtag"} +{"Action":"output","Test":"TestTags/testtag","Output":"=== CONT TestTags/testtag\n"} +{"Action":"run","Test":"TestVetDirs/buildtag"} +{"Action":"output","Test":"TestVetDirs/buildtag","Output":"=== RUN TestVetDirs/buildtag\n"} +{"Action":"output","Test":"TestVetDirs/buildtag","Output":"=== PAUSE TestVetDirs/buildtag\n"} +{"Action":"pause","Test":"TestVetDirs/buildtag"} +{"Action":"cont","Test":"TestVet/0"} +{"Action":"output","Test":"TestVet/0","Output":"=== CONT TestVet/0\n"} 
+{"Action":"cont","Test":"TestVet/4"} +{"Action":"output","Test":"TestVet/4","Output":"=== CONT TestVet/4\n"} +{"Action":"run","Test":"TestVetDirs/incomplete"} +{"Action":"output","Test":"TestVetDirs/incomplete","Output":"=== RUN TestVetDirs/incomplete\n"} +{"Action":"output","Test":"TestVetDirs/incomplete","Output":"=== PAUSE TestVetDirs/incomplete\n"} +{"Action":"pause","Test":"TestVetDirs/incomplete"} +{"Action":"run","Test":"TestVetDirs/cgo"} +{"Action":"output","Test":"TestVetDirs/cgo","Output":"=== RUN TestVetDirs/cgo\n"} +{"Action":"output","Test":"TestVetDirs/cgo","Output":"=== PAUSE TestVetDirs/cgo\n"} +{"Action":"pause","Test":"TestVetDirs/cgo"} +{"Action":"cont","Test":"TestVet/7"} +{"Action":"output","Test":"TestVet/7","Output":"=== CONT TestVet/7\n"} +{"Action":"cont","Test":"TestVet/6"} +{"Action":"output","Test":"TestVet/6","Output":"=== CONT TestVet/6\n"} +{"Action":"output","Test":"TestVetVerbose","Output":"--- PASS: TestVetVerbose (0.04s)\n"} +{"Action":"pass","Test":"TestVetVerbose"} +{"Action":"cont","Test":"TestVet/5"} +{"Action":"output","Test":"TestVet/5","Output":"=== CONT TestVet/5\n"} +{"Action":"cont","Test":"TestVet/3"} +{"Action":"output","Test":"TestVet/3","Output":"=== CONT TestVet/3\n"} +{"Action":"cont","Test":"TestVet/2"} +{"Action":"output","Test":"TestVet/2","Output":"=== CONT TestVet/2\n"} +{"Action":"output","Test":"TestTags","Output":"--- PASS: TestTags (0.00s)\n"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":" --- PASS: TestTags/x_testtag_y (0.04s)\n"} +{"Action":"output","Test":"TestTags/x_testtag_y","Output":" \tvet_test.go:187: -tags=x testtag y\n"} +{"Action":"pass","Test":"TestTags/x_testtag_y"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":" --- PASS: TestTags/x,testtag,y (0.04s)\n"} +{"Action":"output","Test":"TestTags/x,testtag,y","Output":" \tvet_test.go:187: -tags=x,testtag,y\n"} +{"Action":"pass","Test":"TestTags/x,testtag,y"} +{"Action":"output","Test":"TestTags/testtag","Output":" --- 
PASS: TestTags/testtag (0.04s)\n"} +{"Action":"output","Test":"TestTags/testtag","Output":" \tvet_test.go:187: -tags=testtag\n"} +{"Action":"pass","Test":"TestTags/testtag"} +{"Action":"pass","Test":"TestTags"} +{"Action":"cont","Test":"TestVet/1"} +{"Action":"output","Test":"TestVet/1","Output":"=== CONT TestVet/1\n"} +{"Action":"cont","Test":"TestVetDirs/testingpkg"} +{"Action":"output","Test":"TestVetDirs/testingpkg","Output":"=== CONT TestVetDirs/testingpkg\n"} +{"Action":"cont","Test":"TestVetDirs/buildtag"} +{"Action":"output","Test":"TestVetDirs/buildtag","Output":"=== CONT TestVetDirs/buildtag\n"} +{"Action":"cont","Test":"TestVetDirs/divergent"} +{"Action":"output","Test":"TestVetDirs/divergent","Output":"=== CONT TestVetDirs/divergent\n"} +{"Action":"cont","Test":"TestVetDirs/incomplete"} +{"Action":"output","Test":"TestVetDirs/incomplete","Output":"=== CONT TestVetDirs/incomplete\n"} +{"Action":"cont","Test":"TestVetDirs/cgo"} +{"Action":"output","Test":"TestVetDirs/cgo","Output":"=== CONT TestVetDirs/cgo\n"} +{"Action":"output","Test":"TestVet","Output":"--- PASS: TestVet (0.39s)\n"} +{"Action":"output","Test":"TestVet/5","Output":" --- PASS: TestVet/5 (0.07s)\n"} +{"Action":"output","Test":"TestVet/5","Output":" \tvet_test.go:114: files: [\"testdata/copylock_func.go\" \"testdata/rangeloop.go\"]\n"} +{"Action":"pass","Test":"TestVet/5"} +{"Action":"output","Test":"TestVet/3","Output":" --- PASS: TestVet/3 (0.07s)\n"} +{"Action":"output","Test":"TestVet/3","Output":" \tvet_test.go:114: files: [\"testdata/composite.go\" \"testdata/nilfunc.go\"]\n"} +{"Action":"pass","Test":"TestVet/3"} +{"Action":"output","Test":"TestVet/6","Output":" --- PASS: TestVet/6 (0.07s)\n"} +{"Action":"output","Test":"TestVet/6","Output":" \tvet_test.go:114: files: [\"testdata/copylock_range.go\" \"testdata/shadow.go\"]\n"} +{"Action":"pass","Test":"TestVet/6"} +{"Action":"output","Test":"TestVet/2","Output":" --- PASS: TestVet/2 (0.07s)\n"} 
+{"Action":"output","Test":"TestVet/2","Output":" \tvet_test.go:114: files: [\"testdata/bool.go\" \"testdata/method.go\" \"testdata/unused.go\"]\n"} +{"Action":"pass","Test":"TestVet/2"} +{"Action":"output","Test":"TestVet/0","Output":" --- PASS: TestVet/0 (0.13s)\n"} +{"Action":"output","Test":"TestVet/0","Output":" \tvet_test.go:114: files: [\"testdata/assign.go\" \"testdata/httpresponse.go\" \"testdata/structtag.go\"]\n"} +{"Action":"pass","Test":"TestVet/0"} +{"Action":"output","Test":"TestVet/4","Output":" --- PASS: TestVet/4 (0.16s)\n"} +{"Action":"output","Test":"TestVet/4","Output":" \tvet_test.go:114: files: [\"testdata/copylock.go\" \"testdata/print.go\"]\n"} +{"Action":"pass","Test":"TestVet/4"} +{"Action":"output","Test":"TestVet/1","Output":" --- PASS: TestVet/1 (0.07s)\n"} +{"Action":"output","Test":"TestVet/1","Output":" \tvet_test.go:114: files: [\"testdata/atomic.go\" \"testdata/lostcancel.go\" \"testdata/unsafeptr.go\"]\n"} +{"Action":"pass","Test":"TestVet/1"} +{"Action":"output","Test":"TestVet/7","Output":" --- PASS: TestVet/7 (0.19s)\n"} +{"Action":"output","Test":"TestVet/7","Output":" \tvet_test.go:114: files: [\"testdata/deadcode.go\" \"testdata/shift.go\"]\n"} +{"Action":"pass","Test":"TestVet/7"} +{"Action":"pass","Test":"TestVet"} +{"Action":"output","Test":"TestVetDirs","Output":"--- PASS: TestVetDirs (0.01s)\n"} +{"Action":"output","Test":"TestVetDirs/testingpkg","Output":" --- PASS: TestVetDirs/testingpkg (0.06s)\n"} +{"Action":"pass","Test":"TestVetDirs/testingpkg"} +{"Action":"output","Test":"TestVetDirs/divergent","Output":" --- PASS: TestVetDirs/divergent (0.05s)\n"} +{"Action":"pass","Test":"TestVetDirs/divergent"} +{"Action":"output","Test":"TestVetDirs/buildtag","Output":" --- PASS: TestVetDirs/buildtag (0.06s)\n"} +{"Action":"pass","Test":"TestVetDirs/buildtag"} +{"Action":"output","Test":"TestVetDirs/incomplete","Output":" --- PASS: TestVetDirs/incomplete (0.05s)\n"} +{"Action":"pass","Test":"TestVetDirs/incomplete"} 
+{"Action":"output","Test":"TestVetDirs/cgo","Output":" --- PASS: TestVetDirs/cgo (0.04s)\n"} +{"Action":"pass","Test":"TestVetDirs/cgo"} +{"Action":"pass","Test":"TestVetDirs"} +{"Action":"output","Test":"TestVetAsm","Output":"--- PASS: TestVetAsm (0.75s)\n"} +{"Action":"pass","Test":"TestVetAsm"} +{"Action":"output","Output":"PASS\n"} +{"Action":"output","Output":"ok \tcmd/vet\t(cached)\n"} +{"Action":"pass"} diff --git a/src/cmd/internal/test2json/testdata/vet.test b/src/cmd/internal/test2json/testdata/vet.test new file mode 100644 index 00000000000..3389559cb81 --- /dev/null +++ b/src/cmd/internal/test2json/testdata/vet.test @@ -0,0 +1,97 @@ +=== RUN TestVet +=== PAUSE TestVet +=== RUN TestVetAsm +=== PAUSE TestVetAsm +=== RUN TestVetDirs +=== PAUSE TestVetDirs +=== RUN TestTags +=== PAUSE TestTags +=== RUN TestVetVerbose +=== PAUSE TestVetVerbose +=== CONT TestVet +=== CONT TestTags +=== CONT TestVetVerbose +=== RUN TestTags/testtag +=== PAUSE TestTags/testtag +=== CONT TestVetDirs +=== CONT TestVetAsm +=== RUN TestVet/0 +=== PAUSE TestVet/0 +=== RUN TestVet/1 +=== PAUSE TestVet/1 +=== RUN TestVet/2 +=== PAUSE TestVet/2 +=== RUN TestVet/3 +=== PAUSE TestVet/3 +=== RUN TestVet/4 +=== RUN TestTags/x_testtag_y +=== PAUSE TestVet/4 +=== RUN TestVet/5 +=== PAUSE TestVet/5 +=== PAUSE TestTags/x_testtag_y +=== RUN TestVet/6 +=== RUN TestTags/x,testtag,y +=== PAUSE TestTags/x,testtag,y +=== RUN TestVetDirs/testingpkg +=== PAUSE TestVet/6 +=== CONT TestTags/x,testtag,y +=== PAUSE TestVetDirs/testingpkg +=== RUN TestVetDirs/divergent +=== RUN TestVet/7 +=== PAUSE TestVet/7 +=== PAUSE TestVetDirs/divergent +=== CONT TestTags/x_testtag_y +=== CONT TestTags/testtag +=== RUN TestVetDirs/buildtag +=== PAUSE TestVetDirs/buildtag +=== CONT TestVet/0 +=== CONT TestVet/4 +=== RUN TestVetDirs/incomplete +=== PAUSE TestVetDirs/incomplete +=== RUN TestVetDirs/cgo +=== PAUSE TestVetDirs/cgo +=== CONT TestVet/7 +=== CONT TestVet/6 +--- PASS: TestVetVerbose (0.04s) +=== CONT TestVet/5 
+=== CONT TestVet/3 +=== CONT TestVet/2 +--- PASS: TestTags (0.00s) + --- PASS: TestTags/x_testtag_y (0.04s) + vet_test.go:187: -tags=x testtag y + --- PASS: TestTags/x,testtag,y (0.04s) + vet_test.go:187: -tags=x,testtag,y + --- PASS: TestTags/testtag (0.04s) + vet_test.go:187: -tags=testtag +=== CONT TestVet/1 +=== CONT TestVetDirs/testingpkg +=== CONT TestVetDirs/buildtag +=== CONT TestVetDirs/divergent +=== CONT TestVetDirs/incomplete +=== CONT TestVetDirs/cgo +--- PASS: TestVet (0.39s) + --- PASS: TestVet/5 (0.07s) + vet_test.go:114: files: ["testdata/copylock_func.go" "testdata/rangeloop.go"] + --- PASS: TestVet/3 (0.07s) + vet_test.go:114: files: ["testdata/composite.go" "testdata/nilfunc.go"] + --- PASS: TestVet/6 (0.07s) + vet_test.go:114: files: ["testdata/copylock_range.go" "testdata/shadow.go"] + --- PASS: TestVet/2 (0.07s) + vet_test.go:114: files: ["testdata/bool.go" "testdata/method.go" "testdata/unused.go"] + --- PASS: TestVet/0 (0.13s) + vet_test.go:114: files: ["testdata/assign.go" "testdata/httpresponse.go" "testdata/structtag.go"] + --- PASS: TestVet/4 (0.16s) + vet_test.go:114: files: ["testdata/copylock.go" "testdata/print.go"] + --- PASS: TestVet/1 (0.07s) + vet_test.go:114: files: ["testdata/atomic.go" "testdata/lostcancel.go" "testdata/unsafeptr.go"] + --- PASS: TestVet/7 (0.19s) + vet_test.go:114: files: ["testdata/deadcode.go" "testdata/shift.go"] +--- PASS: TestVetDirs (0.01s) + --- PASS: TestVetDirs/testingpkg (0.06s) + --- PASS: TestVetDirs/divergent (0.05s) + --- PASS: TestVetDirs/buildtag (0.06s) + --- PASS: TestVetDirs/incomplete (0.05s) + --- PASS: TestVetDirs/cgo (0.04s) +--- PASS: TestVetAsm (0.75s) +PASS +ok cmd/vet (cached) diff --git a/src/cmd/link/doc.go b/src/cmd/link/doc.go index 16fddf2345d..d61b66a9389 100644 --- a/src/cmd/link/doc.go +++ b/src/cmd/link/doc.go @@ -36,7 +36,7 @@ Flags: -T address Set text segment address. -V - Print the linker version and exit. + Print linker version and exit. 
-X importpath.name=value Set the value of the string variable in importpath named name to value. Note that before Go 1.5 this option took two separate arguments. diff --git a/src/cmd/link/internal/amd64/asm.go b/src/cmd/link/internal/amd64/asm.go index fab4e37e985..aad3c53cb61 100644 --- a/src/cmd/link/internal/amd64/asm.go +++ b/src/cmd/link/internal/amd64/asm.go @@ -32,7 +32,9 @@ package amd64 import ( "cmd/internal/objabi" + "cmd/internal/sys" "cmd/link/internal/ld" + "cmd/link/internal/sym" "debug/elf" "log" ) @@ -41,12 +43,12 @@ func PADDR(x uint32) uint32 { return x &^ 0x80000000 } -func Addcall(ctxt *ld.Link, s *ld.Symbol, t *ld.Symbol) int64 { - s.Attr |= ld.AttrReachable +func Addcall(ctxt *ld.Link, s *sym.Symbol, t *sym.Symbol) int64 { + s.Attr |= sym.AttrReachable i := s.Size s.Size += 4 - ld.Symgrow(s, s.Size) - r := ld.Addrel(s) + s.Grow(s.Size) + r := s.AddRel() r.Sym = t r.Off = int32(i) r.Type = objabi.R_CALL @@ -59,80 +61,82 @@ func gentext(ctxt *ld.Link) { return } addmoduledata := ctxt.Syms.Lookup("runtime.addmoduledata", 0) - if addmoduledata.Type == ld.STEXT && ld.Buildmode != ld.BuildmodePlugin { + if addmoduledata.Type == sym.STEXT && ctxt.BuildMode != ld.BuildModePlugin { // we're linking a module containing the runtime -> no need for // an init function return } - addmoduledata.Attr |= ld.AttrReachable + addmoduledata.Attr |= sym.AttrReachable initfunc := ctxt.Syms.Lookup("go.link.addmoduledata", 0) - initfunc.Type = ld.STEXT - initfunc.Attr |= ld.AttrLocal - initfunc.Attr |= ld.AttrReachable + initfunc.Type = sym.STEXT + initfunc.Attr |= sym.AttrLocal + initfunc.Attr |= sym.AttrReachable o := func(op ...uint8) { for _, op1 := range op { - ld.Adduint8(ctxt, initfunc, op1) + initfunc.AddUint8(op1) } } // 0000000000000000 : // 0: 48 8d 3d 00 00 00 00 lea 0x0(%rip),%rdi # 7 // 3: R_X86_64_PC32 runtime.firstmoduledata-0x4 o(0x48, 0x8d, 0x3d) - ld.Addpcrelplus(ctxt, initfunc, ctxt.Moduledata, 0) + initfunc.AddPCRelPlus(ctxt.Arch, 
ctxt.Moduledata, 0) // 7: e8 00 00 00 00 callq c // 8: R_X86_64_PLT32 runtime.addmoduledata-0x4 o(0xe8) Addcall(ctxt, initfunc, addmoduledata) // c: c3 retq o(0xc3) - if ld.Buildmode == ld.BuildmodePlugin { + if ctxt.BuildMode == ld.BuildModePlugin { ctxt.Textp = append(ctxt.Textp, addmoduledata) } ctxt.Textp = append(ctxt.Textp, initfunc) initarray_entry := ctxt.Syms.Lookup("go.link.addmoduledatainit", 0) - initarray_entry.Attr |= ld.AttrReachable - initarray_entry.Attr |= ld.AttrLocal - initarray_entry.Type = ld.SINITARR - ld.Addaddr(ctxt, initarray_entry, initfunc) + initarray_entry.Attr |= sym.AttrReachable + initarray_entry.Attr |= sym.AttrLocal + initarray_entry.Type = sym.SINITARR + initarray_entry.AddAddr(ctxt.Arch, initfunc) } -func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { +func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { targ := r.Sym switch r.Type { default: if r.Type >= 256 { - ld.Errorf(s, "unexpected relocation type %d", r.Type) + ld.Errorf(s, "unexpected relocation type %d (%s)", r.Type, sym.RelocName(ctxt.Arch, r.Type)) return false } // Handle relocations found in ELF object files. - case 256 + ld.R_X86_64_PC32: - if targ.Type == ld.SDYNIMPORT { + case 256 + objabi.RelocType(elf.R_X86_64_PC32): + if targ.Type == sym.SDYNIMPORT { ld.Errorf(s, "unexpected R_X86_64_PC32 relocation for dynamic symbol %s", targ.Name) } - if targ.Type == 0 || targ.Type == ld.SXREF { + // TODO(mwhudson): the test of VisibilityHidden here probably doesn't make + // sense and should be removed when someone has thought about it properly. 
+ if (targ.Type == 0 || targ.Type == sym.SXREF) && !targ.Attr.VisibilityHidden() { ld.Errorf(s, "unknown symbol %s in pcrel", targ.Name) } r.Type = objabi.R_PCREL r.Add += 4 return true - case 256 + ld.R_X86_64_PC64: - if targ.Type == ld.SDYNIMPORT { + case 256 + objabi.RelocType(elf.R_X86_64_PC64): + if targ.Type == sym.SDYNIMPORT { ld.Errorf(s, "unexpected R_X86_64_PC64 relocation for dynamic symbol %s", targ.Name) } - if targ.Type == 0 || targ.Type == ld.SXREF { + if targ.Type == 0 || targ.Type == sym.SXREF { ld.Errorf(s, "unknown symbol %s in pcrel", targ.Name) } r.Type = objabi.R_PCREL r.Add += 8 return true - case 256 + ld.R_X86_64_PLT32: + case 256 + objabi.RelocType(elf.R_X86_64_PLT32): r.Type = objabi.R_PCREL r.Add += 4 - if targ.Type == ld.SDYNIMPORT { + if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) r.Add += int64(targ.Plt) @@ -140,8 +144,8 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { return true - case 256 + ld.R_X86_64_GOTPCREL, 256 + ld.R_X86_64_GOTPCRELX, 256 + ld.R_X86_64_REX_GOTPCRELX: - if targ.Type != ld.SDYNIMPORT { + case 256 + objabi.RelocType(elf.R_X86_64_GOTPCREL), 256 + objabi.RelocType(elf.R_X86_64_GOTPCRELX), 256 + objabi.RelocType(elf.R_X86_64_REX_GOTPCRELX): + if targ.Type != sym.SDYNIMPORT { // have symbol if r.Off >= 2 && s.P[r.Off-2] == 0x8b { // turn MOVQ of GOT entry into LEAQ of symbol itself @@ -163,8 +167,8 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { r.Add += int64(targ.Got) return true - case 256 + ld.R_X86_64_64: - if targ.Type == ld.SDYNIMPORT { + case 256 + objabi.RelocType(elf.R_X86_64_64): + if targ.Type == sym.SDYNIMPORT { ld.Errorf(s, "unexpected R_X86_64_64 relocation for dynamic symbol %s", targ.Name) } r.Type = objabi.R_ADDR @@ -177,13 +181,13 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { // TODO: What is the difference between all these? 
r.Type = objabi.R_ADDR - if targ.Type == ld.SDYNIMPORT { + if targ.Type == sym.SDYNIMPORT { ld.Errorf(s, "unexpected reloc for dynamic symbol %s", targ.Name) } return true case 512 + ld.MACHO_X86_64_RELOC_BRANCH*2 + 1: - if targ.Type == ld.SDYNIMPORT { + if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) r.Add = int64(targ.Plt) @@ -200,13 +204,13 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { 512 + ld.MACHO_X86_64_RELOC_SIGNED_4*2 + 1: r.Type = objabi.R_PCREL - if targ.Type == ld.SDYNIMPORT { + if targ.Type == sym.SDYNIMPORT { ld.Errorf(s, "unexpected pc-relative reloc for dynamic symbol %s", targ.Name) } return true case 512 + ld.MACHO_X86_64_RELOC_GOT_LOAD*2 + 1: - if targ.Type != ld.SDYNIMPORT { + if targ.Type != sym.SDYNIMPORT { // have symbol // turn MOVQ of GOT entry into LEAQ of symbol itself if r.Off < 2 || s.P[r.Off-2] != 0x8b { @@ -222,7 +226,7 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { // fall through case 512 + ld.MACHO_X86_64_RELOC_GOT*2 + 1: - if targ.Type != ld.SDYNIMPORT { + if targ.Type != sym.SDYNIMPORT { ld.Errorf(s, "unexpected GOT reloc for non-dynamic symbol %s", targ.Name) } addgotsym(ctxt, targ) @@ -235,24 +239,19 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { switch r.Type { case objabi.R_CALL, objabi.R_PCREL: - if targ.Type != ld.SDYNIMPORT { + if targ.Type != sym.SDYNIMPORT { // nothing to do, the relocation will be laid out in reloc return true } - if ld.Headtype == objabi.Hwindows { - // nothing to do, the relocation will be laid out in pereloc1 - return true - } else { - // for both ELF and Mach-O - addpltsym(ctxt, targ) - r.Sym = ctxt.Syms.Lookup(".plt", 0) - r.Add = int64(targ.Plt) - return true - } + // for both ELF and Mach-O + addpltsym(ctxt, targ) + r.Sym = ctxt.Syms.Lookup(".plt", 0) + r.Add = int64(targ.Plt) + return true case objabi.R_ADDR: - if s.Type == ld.STEXT && ld.Iself { - if ld.Headtype == objabi.Hsolaris { + if 
s.Type == sym.STEXT && ctxt.IsELF { + if ctxt.HeadType == objabi.Hsolaris { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) r.Add += int64(targ.Plt) @@ -269,7 +268,7 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { } // Process dynamic relocations for the data sections. - if ld.Buildmode == ld.BuildmodePIE && ld.Linkmode == ld.LinkInternal { + if ctxt.BuildMode == ld.BuildModePIE && ctxt.LinkMode == ld.LinkInternal { // When internally linking, generate dynamic relocations // for all typical R_ADDR relocations. The exception // are those R_ADDR that are created as part of generating @@ -312,30 +311,30 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { // linking, in which case the relocation will be // prepared in the 'reloc' phase and passed to the // external linker in the 'asmb' phase. - if s.Type != ld.SDATA && s.Type != ld.SRODATA { + if s.Type != sym.SDATA && s.Type != sym.SRODATA { break } } - if ld.Iself { + if ctxt.IsELF { // TODO: We generate a R_X86_64_64 relocation for every R_ADDR, even // though it would be more efficient (for the dynamic linker) if we // generated R_X86_RELATIVE instead. ld.Adddynsym(ctxt, targ) rela := ctxt.Syms.Lookup(".rela", 0) - ld.Addaddrplus(ctxt, rela, s, int64(r.Off)) + rela.AddAddrPlus(ctxt.Arch, s, int64(r.Off)) if r.Siz == 8 { - ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_X86_64_64)) + rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(targ.Dynid), uint32(elf.R_X86_64_64))) } else { // TODO: never happens, remove. 
- ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_X86_64_32)) + rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(targ.Dynid), uint32(elf.R_X86_64_32))) } - ld.Adduint64(ctxt, rela, uint64(r.Add)) + rela.AddUint64(ctxt.Arch, uint64(r.Add)) r.Type = 256 // ignore during relocsym return true } - if ld.Headtype == objabi.Hdarwin && s.Size == int64(ld.SysArch.PtrSize) && r.Off == 0 { + if ctxt.HeadType == objabi.Hdarwin && s.Size == int64(ctxt.Arch.PtrSize) && r.Off == 0 { // Mach-O relocations are a royal pain to lay out. // They use a compact stateful bytecode representation // that is too much bother to deal with. @@ -349,104 +348,94 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { ld.Adddynsym(ctxt, targ) got := ctxt.Syms.Lookup(".got", 0) - s.Type = got.Type | ld.SSUB + s.Type = got.Type + s.Attr |= sym.AttrSubSymbol s.Outer = got s.Sub = got.Sub got.Sub = s s.Value = got.Size - ld.Adduint64(ctxt, got, 0) - ld.Adduint32(ctxt, ctxt.Syms.Lookup(".linkedit.got", 0), uint32(targ.Dynid)) + got.AddUint64(ctxt.Arch, 0) + ctxt.Syms.Lookup(".linkedit.got", 0).AddUint32(ctxt.Arch, uint32(targ.Dynid)) r.Type = 256 // ignore during relocsym return true } - - if ld.Headtype == objabi.Hwindows { - // nothing to do, the relocation will be laid out in pereloc1 - return true - } } return false } -func elfreloc1(ctxt *ld.Link, r *ld.Reloc, sectoff int64) int { - ld.Thearch.Vput(uint64(sectoff)) +func elfreloc1(ctxt *ld.Link, r *sym.Reloc, sectoff int64) bool { + ctxt.Out.Write64(uint64(sectoff)) elfsym := r.Xsym.ElfsymForReloc() switch r.Type { default: - return -1 - + return false case objabi.R_ADDR: if r.Siz == 4 { - ld.Thearch.Vput(ld.R_X86_64_32 | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_X86_64_32) | uint64(elfsym)<<32) } else if r.Siz == 8 { - ld.Thearch.Vput(ld.R_X86_64_64 | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_X86_64_64) | uint64(elfsym)<<32) } else { - return -1 + return false } - case objabi.R_TLS_LE: if r.Siz == 
4 { - ld.Thearch.Vput(ld.R_X86_64_TPOFF32 | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_X86_64_TPOFF32) | uint64(elfsym)<<32) } else { - return -1 + return false } - case objabi.R_TLS_IE: if r.Siz == 4 { - ld.Thearch.Vput(ld.R_X86_64_GOTTPOFF | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_X86_64_GOTTPOFF) | uint64(elfsym)<<32) } else { - return -1 + return false } - case objabi.R_CALL: if r.Siz == 4 { - if r.Xsym.Type == ld.SDYNIMPORT { + if r.Xsym.Type == sym.SDYNIMPORT { if ctxt.DynlinkingGo() { - ld.Thearch.Vput(ld.R_X86_64_PLT32 | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_X86_64_PLT32) | uint64(elfsym)<<32) } else { - ld.Thearch.Vput(ld.R_X86_64_GOTPCREL | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_X86_64_GOTPCREL) | uint64(elfsym)<<32) } } else { - ld.Thearch.Vput(ld.R_X86_64_PC32 | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_X86_64_PC32) | uint64(elfsym)<<32) } } else { - return -1 + return false } - case objabi.R_PCREL: if r.Siz == 4 { - if r.Xsym.Type == ld.SDYNIMPORT && r.Xsym.ElfType == elf.STT_FUNC { - ld.Thearch.Vput(ld.R_X86_64_PLT32 | uint64(elfsym)<<32) + if r.Xsym.Type == sym.SDYNIMPORT && r.Xsym.ElfType == elf.STT_FUNC { + ctxt.Out.Write64(uint64(elf.R_X86_64_PLT32) | uint64(elfsym)<<32) } else { - ld.Thearch.Vput(ld.R_X86_64_PC32 | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_X86_64_PC32) | uint64(elfsym)<<32) } } else { - return -1 + return false } - case objabi.R_GOTPCREL: if r.Siz == 4 { - ld.Thearch.Vput(ld.R_X86_64_GOTPCREL | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_X86_64_GOTPCREL) | uint64(elfsym)<<32) } else { - return -1 + return false } } - ld.Thearch.Vput(uint64(r.Xadd)) - return 0 + ctxt.Out.Write64(uint64(r.Xadd)) + return true } -func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { +func machoreloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, sectoff int64) bool { var v uint32 rs := r.Xsym - if rs.Type == ld.SHOSTOBJ || r.Type == objabi.R_PCREL 
|| r.Type == objabi.R_GOTPCREL { + if rs.Type == sym.SHOSTOBJ || r.Type == objabi.R_PCREL || r.Type == objabi.R_GOTPCREL { if rs.Dynid < 0 { - ld.Errorf(s, "reloc %d to non-macho symbol %s type=%d", r.Type, rs.Name, rs.Type) - return -1 + ld.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", r.Type, sym.RelocName(arch, r.Type), rs.Name, rs.Type, rs.Type) + return false } v = uint32(rs.Dynid) @@ -454,14 +443,14 @@ func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { } else { v = uint32(rs.Sect.Extnum) if v == 0 { - ld.Errorf(s, "reloc %d to symbol %s in non-macho section %s type=%d", r.Type, rs.Name, rs.Sect.Name, rs.Type) - return -1 + ld.Errorf(s, "reloc %d (%s) to symbol %s in non-macho section %s type=%d (%s)", r.Type, sym.RelocName(arch, r.Type), rs.Name, rs.Sect.Name, rs.Type, rs.Type) + return false } } switch r.Type { default: - return -1 + return false case objabi.R_ADDR: v |= ld.MACHO_X86_64_RELOC_UNSIGNED << 28 @@ -481,7 +470,7 @@ func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { switch r.Siz { default: - return -1 + return false case 1: v |= 0 << 25 @@ -496,29 +485,29 @@ func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { v |= 3 << 25 } - ld.Thearch.Lput(uint32(sectoff)) - ld.Thearch.Lput(v) - return 0 + out.Write32(uint32(sectoff)) + out.Write32(v) + return true } -func pereloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) bool { +func pereloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, sectoff int64) bool { var v uint32 rs := r.Xsym if rs.Dynid < 0 { - ld.Errorf(s, "reloc %d to non-coff symbol %s type=%d", r.Type, rs.Name, rs.Type) + ld.Errorf(s, "reloc %d (%s) to non-coff symbol %s type=%d (%s)", r.Type, sym.RelocName(arch, r.Type), rs.Name, rs.Type, rs.Type) return false } - ld.Thearch.Lput(uint32(sectoff)) - ld.Thearch.Lput(uint32(rs.Dynid)) + out.Write32(uint32(sectoff)) + out.Write32(uint32(rs.Dynid)) switch r.Type { default: return false - case objabi.R_DWARFREF: + case 
objabi.R_DWARFSECREF: v = ld.IMAGE_REL_AMD64_SECREL case objabi.R_ADDR: @@ -533,16 +522,16 @@ func pereloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) bool { v = ld.IMAGE_REL_AMD64_REL32 } - ld.Thearch.Wput(uint16(v)) + out.Write16(uint16(v)) return true } -func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { - return -1 +func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { + return false } -func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 { +func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 { log.Fatalf("unexpected relocation variant") return t } @@ -552,36 +541,36 @@ func elfsetupplt(ctxt *ld.Link) { got := ctxt.Syms.Lookup(".got.plt", 0) if plt.Size == 0 { // pushq got+8(IP) - ld.Adduint8(ctxt, plt, 0xff) + plt.AddUint8(0xff) - ld.Adduint8(ctxt, plt, 0x35) - ld.Addpcrelplus(ctxt, plt, got, 8) + plt.AddUint8(0x35) + plt.AddPCRelPlus(ctxt.Arch, got, 8) // jmpq got+16(IP) - ld.Adduint8(ctxt, plt, 0xff) + plt.AddUint8(0xff) - ld.Adduint8(ctxt, plt, 0x25) - ld.Addpcrelplus(ctxt, plt, got, 16) + plt.AddUint8(0x25) + plt.AddPCRelPlus(ctxt.Arch, got, 16) // nopl 0(AX) - ld.Adduint32(ctxt, plt, 0x00401f0f) + plt.AddUint32(ctxt.Arch, 0x00401f0f) // assume got->size == 0 too - ld.Addaddrplus(ctxt, got, ctxt.Syms.Lookup(".dynamic", 0), 0) + got.AddAddrPlus(ctxt.Arch, ctxt.Syms.Lookup(".dynamic", 0), 0) - ld.Adduint64(ctxt, got, 0) - ld.Adduint64(ctxt, got, 0) + got.AddUint64(ctxt.Arch, 0) + got.AddUint64(ctxt.Arch, 0) } } -func addpltsym(ctxt *ld.Link, s *ld.Symbol) { +func addpltsym(ctxt *ld.Link, s *sym.Symbol) { if s.Plt >= 0 { return } ld.Adddynsym(ctxt, s) - if ld.Iself { + if ctxt.IsELF { plt := ctxt.Syms.Lookup(".plt", 0) got := ctxt.Syms.Lookup(".got.plt", 0) rela := ctxt.Syms.Lookup(".rela.plt", 0) @@ -590,32 +579,32 @@ func addpltsym(ctxt *ld.Link, s *ld.Symbol) { } // jmpq *got+size(IP) - ld.Adduint8(ctxt, plt, 0xff) + plt.AddUint8(0xff) - ld.Adduint8(ctxt, 
plt, 0x25) - ld.Addpcrelplus(ctxt, plt, got, got.Size) + plt.AddUint8(0x25) + plt.AddPCRelPlus(ctxt.Arch, got, got.Size) // add to got: pointer to current pos in plt - ld.Addaddrplus(ctxt, got, plt, plt.Size) + got.AddAddrPlus(ctxt.Arch, plt, plt.Size) // pushq $x - ld.Adduint8(ctxt, plt, 0x68) + plt.AddUint8(0x68) - ld.Adduint32(ctxt, plt, uint32((got.Size-24-8)/8)) + plt.AddUint32(ctxt.Arch, uint32((got.Size-24-8)/8)) // jmpq .plt - ld.Adduint8(ctxt, plt, 0xe9) + plt.AddUint8(0xe9) - ld.Adduint32(ctxt, plt, uint32(-(plt.Size + 4))) + plt.AddUint32(ctxt.Arch, uint32(-(plt.Size + 4))) // rela - ld.Addaddrplus(ctxt, rela, got, got.Size-8) + rela.AddAddrPlus(ctxt.Arch, got, got.Size-8) - ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_X86_64_JMP_SLOT)) - ld.Adduint64(ctxt, rela, 0) + rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(s.Dynid), uint32(elf.R_X86_64_JMP_SLOT))) + rela.AddUint64(ctxt.Arch, 0) s.Plt = int32(plt.Size - 16) - } else if ld.Headtype == objabi.Hdarwin { + } else if ctxt.HeadType == objabi.Hdarwin { // To do lazy symbol lookup right, we're supposed // to tell the dynamic loader which library each // symbol comes from and format the link info @@ -629,20 +618,20 @@ func addpltsym(ctxt *ld.Link, s *ld.Symbol) { addgotsym(ctxt, s) plt := ctxt.Syms.Lookup(".plt", 0) - ld.Adduint32(ctxt, ctxt.Syms.Lookup(".linkedit.plt", 0), uint32(s.Dynid)) + ctxt.Syms.Lookup(".linkedit.plt", 0).AddUint32(ctxt.Arch, uint32(s.Dynid)) // jmpq *got+size(IP) s.Plt = int32(plt.Size) - ld.Adduint8(ctxt, plt, 0xff) - ld.Adduint8(ctxt, plt, 0x25) - ld.Addpcrelplus(ctxt, plt, ctxt.Syms.Lookup(".got", 0), int64(s.Got)) + plt.AddUint8(0xff) + plt.AddUint8(0x25) + plt.AddPCRelPlus(ctxt.Arch, ctxt.Syms.Lookup(".got", 0), int64(s.Got)) } else { ld.Errorf(s, "addpltsym: unsupported binary format") } } -func addgotsym(ctxt *ld.Link, s *ld.Symbol) { +func addgotsym(ctxt *ld.Link, s *sym.Symbol) { if s.Got >= 0 { return } @@ -650,15 +639,15 @@ func addgotsym(ctxt 
*ld.Link, s *ld.Symbol) { ld.Adddynsym(ctxt, s) got := ctxt.Syms.Lookup(".got", 0) s.Got = int32(got.Size) - ld.Adduint64(ctxt, got, 0) + got.AddUint64(ctxt.Arch, 0) - if ld.Iself { + if ctxt.IsELF { rela := ctxt.Syms.Lookup(".rela", 0) - ld.Addaddrplus(ctxt, rela, got, int64(s.Got)) - ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_X86_64_GLOB_DAT)) - ld.Adduint64(ctxt, rela, 0) - } else if ld.Headtype == objabi.Hdarwin { - ld.Adduint32(ctxt, ctxt.Syms.Lookup(".linkedit.got", 0), uint32(s.Dynid)) + rela.AddAddrPlus(ctxt.Arch, got, int64(s.Got)) + rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(s.Dynid), uint32(elf.R_X86_64_GLOB_DAT))) + rela.AddUint64(ctxt.Arch, 0) + } else if ctxt.HeadType == objabi.Hdarwin { + ctxt.Syms.Lookup(".linkedit.got", 0).AddUint32(ctxt.Arch, uint32(s.Dynid)) } else { ld.Errorf(s, "addgotsym: unsupported binary format") } @@ -673,16 +662,16 @@ func asmb(ctxt *ld.Link) { ctxt.Logf("%5.2f codeblk\n", ld.Cputime()) } - if ld.Iself { + if ctxt.IsELF { ld.Asmbelfsetup() } sect := ld.Segtext.Sections[0] - ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) + ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) // 0xCC is INT $3 - breakpoint instruction ld.CodeblkPad(ctxt, int64(sect.Vaddr), int64(sect.Length), []byte{0xCC}) for _, sect = range ld.Segtext.Sections[1:] { - ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) + ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length)) } @@ -690,14 +679,14 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f rodatblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segrodata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segrodata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen)) } if ld.Segrelrodata.Filelen > 0 { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f relrodatblk\n", ld.Cputime()) } - 
ld.Cseek(int64(ld.Segrelrodata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segrelrodata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segrelrodata.Vaddr), int64(ld.Segrelrodata.Filelen)) } @@ -705,20 +694,20 @@ func asmb(ctxt *ld.Link) { ctxt.Logf("%5.2f datblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segdata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segdata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen)) - ld.Cseek(int64(ld.Segdwarf.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segdwarf.Fileoff)) ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen)) machlink := int64(0) - if ld.Headtype == objabi.Hdarwin { + if ctxt.HeadType == objabi.Hdarwin { machlink = ld.Domacholink(ctxt) } - switch ld.Headtype { + switch ctxt.HeadType { default: - ld.Errorf(nil, "unknown header type %v", ld.Headtype) + ld.Errorf(nil, "unknown header type %v", ctxt.HeadType) fallthrough case objabi.Hplan9: @@ -748,7 +737,7 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f sym\n", ld.Cputime()) } - switch ld.Headtype { + switch ctxt.HeadType { default: case objabi.Hplan9: *ld.FlagS = true @@ -772,36 +761,33 @@ func asmb(ctxt *ld.Link) { symo = ld.Rnd(symo, ld.PEFILEALIGN) } - ld.Cseek(symo) - switch ld.Headtype { + ctxt.Out.SeekSet(symo) + switch ctxt.HeadType { default: - if ld.Iself { - ld.Cseek(symo) + if ctxt.IsELF { + ctxt.Out.SeekSet(symo) ld.Asmelfsym(ctxt) - ld.Cflush() - ld.Cwrite(ld.Elfstrdat) + ctxt.Out.Flush() + ctxt.Out.Write(ld.Elfstrdat) if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f dwarf\n", ld.Cputime()) } - if ld.Linkmode == ld.LinkExternal { + if ctxt.LinkMode == ld.LinkExternal { ld.Elfemitreloc(ctxt) } } case objabi.Hplan9: ld.Asmplan9sym(ctxt) - ld.Cflush() + ctxt.Out.Flush() sym := ctxt.Syms.Lookup("pclntab", 0) if sym != nil { ld.Lcsize = int32(len(sym.P)) - for i := 0; int32(i) < ld.Lcsize; i++ { - ld.Cput(sym.P[i]) - } - - ld.Cflush() + ctxt.Out.Write(sym.P) + ctxt.Out.Flush() } case objabi.Hwindows: @@ -810,7 +796,7 @@ func 
asmb(ctxt *ld.Link) { } case objabi.Hdarwin: - if ld.Linkmode == ld.LinkExternal { + if ctxt.LinkMode == ld.LinkExternal { ld.Machoemitreloc(ctxt) } } @@ -819,23 +805,23 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f headr\n", ld.Cputime()) } - ld.Cseek(0) - switch ld.Headtype { + ctxt.Out.SeekSet(0) + switch ctxt.HeadType { default: case objabi.Hplan9: /* plan9 */ magic := int32(4*26*26 + 7) - magic |= 0x00008000 /* fat header */ - ld.Lputb(uint32(magic)) /* magic */ - ld.Lputb(uint32(ld.Segtext.Filelen)) /* sizes */ - ld.Lputb(uint32(ld.Segdata.Filelen)) - ld.Lputb(uint32(ld.Segdata.Length - ld.Segdata.Filelen)) - ld.Lputb(uint32(ld.Symsize)) /* nsyms */ + magic |= 0x00008000 /* fat header */ + ctxt.Out.Write32b(uint32(magic)) /* magic */ + ctxt.Out.Write32b(uint32(ld.Segtext.Filelen)) /* sizes */ + ctxt.Out.Write32b(uint32(ld.Segdata.Filelen)) + ctxt.Out.Write32b(uint32(ld.Segdata.Length - ld.Segdata.Filelen)) + ctxt.Out.Write32b(uint32(ld.Symsize)) /* nsyms */ vl := ld.Entryvalue(ctxt) - ld.Lputb(PADDR(uint32(vl))) /* va of entry */ - ld.Lputb(uint32(ld.Spsize)) /* sp offsets */ - ld.Lputb(uint32(ld.Lcsize)) /* line offsets */ - ld.Vputb(uint64(vl)) /* va of entry */ + ctxt.Out.Write32b(PADDR(uint32(vl))) /* va of entry */ + ctxt.Out.Write32b(uint32(ld.Spsize)) /* sp offsets */ + ctxt.Out.Write32b(uint32(ld.Lcsize)) /* line offsets */ + ctxt.Out.Write64b(uint64(vl)) /* va of entry */ case objabi.Hdarwin: ld.Asmbmacho(ctxt) @@ -853,10 +839,10 @@ func asmb(ctxt *ld.Link) { ld.Asmbpe(ctxt) } - ld.Cflush() + ctxt.Out.Flush() } -func tlsIEtoLE(s *ld.Symbol, off, size int) { +func tlsIEtoLE(s *sym.Symbol, off, size int) { // Transform the PC-relative instruction into a constant load. 
// That is, // diff --git a/src/cmd/link/internal/amd64/obj.go b/src/cmd/link/internal/amd64/obj.go index ef69c269c15..87e809166a3 100644 --- a/src/cmd/link/internal/amd64/obj.go +++ b/src/cmd/link/internal/amd64/obj.go @@ -37,48 +37,46 @@ import ( "fmt" ) -func Init() { - ld.SysArch = sys.ArchAMD64 +func Init() (*sys.Arch, ld.Arch) { + arch := sys.ArchAMD64 if objabi.GOARCH == "amd64p32" { - ld.SysArch = sys.ArchAMD64P32 + arch = sys.ArchAMD64P32 } - ld.Thearch.Funcalign = funcAlign - ld.Thearch.Maxalign = maxAlign - ld.Thearch.Minalign = minAlign - ld.Thearch.Dwarfregsp = dwarfRegSP - ld.Thearch.Dwarfreglr = dwarfRegLR + theArch := ld.Arch{ + Funcalign: funcAlign, + Maxalign: maxAlign, + Minalign: minAlign, + Dwarfregsp: dwarfRegSP, + Dwarfreglr: dwarfRegLR, - ld.Thearch.Adddynrel = adddynrel - ld.Thearch.Archinit = archinit - ld.Thearch.Archreloc = archreloc - ld.Thearch.Archrelocvariant = archrelocvariant - ld.Thearch.Asmb = asmb - ld.Thearch.Elfreloc1 = elfreloc1 - ld.Thearch.Elfsetupplt = elfsetupplt - ld.Thearch.Gentext = gentext - ld.Thearch.Machoreloc1 = machoreloc1 - ld.Thearch.PEreloc1 = pereloc1 - ld.Thearch.Lput = ld.Lputl - ld.Thearch.Wput = ld.Wputl - ld.Thearch.Vput = ld.Vputl - ld.Thearch.Append16 = ld.Append16l - ld.Thearch.Append32 = ld.Append32l - ld.Thearch.Append64 = ld.Append64l - ld.Thearch.TLSIEtoLE = tlsIEtoLE + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Asmb: asmb, + Elfreloc1: elfreloc1, + Elfsetupplt: elfsetupplt, + Gentext: gentext, + Machoreloc1: machoreloc1, + PEreloc1: pereloc1, + TLSIEtoLE: tlsIEtoLE, - ld.Thearch.Linuxdynld = "/lib64/ld-linux-x86-64.so.2" - ld.Thearch.Freebsddynld = "/libexec/ld-elf.so.1" - ld.Thearch.Openbsddynld = "/usr/libexec/ld.so" - ld.Thearch.Netbsddynld = "/libexec/ld.elf_so" - ld.Thearch.Dragonflydynld = "/usr/libexec/ld-elf.so.2" - ld.Thearch.Solarisdynld = "/lib/amd64/ld.so.1" + Linuxdynld: "/lib64/ld-linux-x86-64.so.2", + Freebsddynld: 
"/libexec/ld-elf.so.1", + Openbsddynld: "/usr/libexec/ld.so", + Netbsddynld: "/libexec/ld.elf_so", + Dragonflydynld: "/usr/libexec/ld-elf.so.2", + Solarisdynld: "/lib/amd64/ld.so.1", + } + + return arch, theArch } func archinit(ctxt *ld.Link) { - switch ld.Headtype { + switch ctxt.HeadType { default: - ld.Exitf("unknown -H option: %v", ld.Headtype) + ld.Exitf("unknown -H option: %v", ctxt.HeadType) case objabi.Hplan9: /* plan 9 */ ld.HEADR = 32 + 8 @@ -94,8 +92,6 @@ func archinit(ctxt *ld.Link) { } case objabi.Hdarwin: /* apple MACH */ - ld.Machoinit() - ld.HEADR = ld.INITIAL_MACHO_HEADR if *ld.FlagRound == -1 { *ld.FlagRound = 4096 diff --git a/src/cmd/link/internal/arm/asm.go b/src/cmd/link/internal/arm/asm.go index 0f281c15998..93c2a856901 100644 --- a/src/cmd/link/internal/arm/asm.go +++ b/src/cmd/link/internal/arm/asm.go @@ -32,7 +32,10 @@ package arm import ( "cmd/internal/objabi" + "cmd/internal/sys" "cmd/link/internal/ld" + "cmd/link/internal/sym" + "debug/elf" "fmt" "log" ) @@ -63,24 +66,24 @@ func gentext(ctxt *ld.Link) { return } addmoduledata := ctxt.Syms.Lookup("runtime.addmoduledata", 0) - if addmoduledata.Type == ld.STEXT && ld.Buildmode != ld.BuildmodePlugin { + if addmoduledata.Type == sym.STEXT && ctxt.BuildMode != ld.BuildModePlugin { // we're linking a module containing the runtime -> no need for // an init function return } - addmoduledata.Attr |= ld.AttrReachable + addmoduledata.Attr |= sym.AttrReachable initfunc := ctxt.Syms.Lookup("go.link.addmoduledata", 0) - initfunc.Type = ld.STEXT - initfunc.Attr |= ld.AttrLocal - initfunc.Attr |= ld.AttrReachable + initfunc.Type = sym.STEXT + initfunc.Attr |= sym.AttrLocal + initfunc.Attr |= sym.AttrReachable o := func(op uint32) { - ld.Adduint32(ctxt, initfunc, op) + initfunc.AddUint32(ctxt.Arch, op) } o(0xe59f0004) o(0xe08f0000) o(0xeafffffe) - rel := ld.Addrel(initfunc) + rel := initfunc.AddRel() rel.Off = 8 rel.Siz = 4 rel.Sym = ctxt.Syms.Lookup("runtime.addmoduledata", 0) @@ -88,22 +91,22 @@ func 
gentext(ctxt *ld.Link) { rel.Add = 0xeafffffe // vomit o(0x00000000) - rel = ld.Addrel(initfunc) + rel = initfunc.AddRel() rel.Off = 12 rel.Siz = 4 rel.Sym = ctxt.Moduledata rel.Type = objabi.R_PCREL rel.Add = 4 - if ld.Buildmode == ld.BuildmodePlugin { + if ctxt.BuildMode == ld.BuildModePlugin { ctxt.Textp = append(ctxt.Textp, addmoduledata) } ctxt.Textp = append(ctxt.Textp, initfunc) initarray_entry := ctxt.Syms.Lookup("go.link.addmoduledatainit", 0) - initarray_entry.Attr |= ld.AttrReachable - initarray_entry.Attr |= ld.AttrLocal - initarray_entry.Type = ld.SINITARR - ld.Addaddr(ctxt, initarray_entry, initfunc) + initarray_entry.Attr |= sym.AttrReachable + initarray_entry.Attr |= sym.AttrLocal + initarray_entry.Type = sym.SINITARR + initarray_entry.AddAddr(ctxt.Arch, initfunc) } // Preserve highest 8 bits of a, and do addition to lower 24-bit @@ -112,21 +115,21 @@ func braddoff(a int32, b int32) int32 { return int32((uint32(a))&0xff000000 | 0x00ffffff&uint32(a+b)) } -func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { +func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { targ := r.Sym switch r.Type { default: if r.Type >= 256 { - ld.Errorf(s, "unexpected relocation type %d", r.Type) + ld.Errorf(s, "unexpected relocation type %d (%s)", r.Type, sym.RelocName(ctxt.Arch, r.Type)) return false } // Handle relocations found in ELF object files. 
- case 256 + ld.R_ARM_PLT32: + case 256 + objabi.RelocType(elf.R_ARM_PLT32): r.Type = objabi.R_CALLARM - if targ.Type == ld.SDYNIMPORT { + if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) r.Add = int64(braddoff(int32(r.Add), targ.Plt/4)) @@ -134,12 +137,12 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { return true - case 256 + ld.R_ARM_THM_PC22: // R_ARM_THM_CALL + case 256 + objabi.RelocType(elf.R_ARM_THM_PC22): // R_ARM_THM_CALL ld.Exitf("R_ARM_THM_CALL, are you using -marm?") return false - case 256 + ld.R_ARM_GOT32: // R_ARM_GOT_BREL - if targ.Type != ld.SDYNIMPORT { + case 256 + objabi.RelocType(elf.R_ARM_GOT32): // R_ARM_GOT_BREL + if targ.Type != sym.SDYNIMPORT { addgotsyminternal(ctxt, targ) } else { addgotsym(ctxt, targ) @@ -150,8 +153,8 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { r.Add += int64(targ.Got) return true - case 256 + ld.R_ARM_GOT_PREL: // GOT(nil) + A - nil - if targ.Type != ld.SDYNIMPORT { + case 256 + objabi.RelocType(elf.R_ARM_GOT_PREL): // GOT(nil) + A - nil + if targ.Type != sym.SDYNIMPORT { addgotsyminternal(ctxt, targ) } else { addgotsym(ctxt, targ) @@ -162,21 +165,21 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { r.Add += int64(targ.Got) + 4 return true - case 256 + ld.R_ARM_GOTOFF: // R_ARM_GOTOFF32 + case 256 + objabi.RelocType(elf.R_ARM_GOTOFF): // R_ARM_GOTOFF32 r.Type = objabi.R_GOTOFF return true - case 256 + ld.R_ARM_GOTPC: // R_ARM_BASE_PREL + case 256 + objabi.RelocType(elf.R_ARM_GOTPC): // R_ARM_BASE_PREL r.Type = objabi.R_PCREL r.Sym = ctxt.Syms.Lookup(".got", 0) r.Add += 4 return true - case 256 + ld.R_ARM_CALL: + case 256 + objabi.RelocType(elf.R_ARM_CALL): r.Type = objabi.R_CALLARM - if targ.Type == ld.SDYNIMPORT { + if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) r.Add = int64(braddoff(int32(r.Add), targ.Plt/4)) @@ -184,21 +187,21 @@ func adddynrel(ctxt *ld.Link, s 
*ld.Symbol, r *ld.Reloc) bool { return true - case 256 + ld.R_ARM_REL32: // R_ARM_REL32 + case 256 + objabi.RelocType(elf.R_ARM_REL32): // R_ARM_REL32 r.Type = objabi.R_PCREL r.Add += 4 return true - case 256 + ld.R_ARM_ABS32: - if targ.Type == ld.SDYNIMPORT { + case 256 + objabi.RelocType(elf.R_ARM_ABS32): + if targ.Type == sym.SDYNIMPORT { ld.Errorf(s, "unexpected R_ARM_ABS32 relocation for dynamic symbol %s", targ.Name) } r.Type = objabi.R_ADDR return true // we can just ignore this, because we are targeting ARM V5+ anyway - case 256 + ld.R_ARM_V4BX: + case 256 + objabi.RelocType(elf.R_ARM_V4BX): if r.Sym != nil { // R_ARM_V4BX is ABS relocation, so this symbol is a dummy symbol, ignore it r.Sym.Type = 0 @@ -207,10 +210,10 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { r.Sym = nil return true - case 256 + ld.R_ARM_PC24, - 256 + ld.R_ARM_JUMP24: + case 256 + objabi.RelocType(elf.R_ARM_PC24), + 256 + objabi.RelocType(elf.R_ARM_JUMP24): r.Type = objabi.R_CALLARM - if targ.Type == ld.SDYNIMPORT { + if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) r.Add = int64(braddoff(int32(r.Add), targ.Plt/4)) @@ -220,7 +223,7 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { } // Handle references to ELF symbols from our own object files. 
- if targ.Type != ld.SDYNIMPORT { + if targ.Type != sym.SDYNIMPORT { return true } @@ -232,15 +235,15 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { return true case objabi.R_ADDR: - if s.Type != ld.SDATA { + if s.Type != sym.SDATA { break } - if ld.Iself { + if ctxt.IsELF { ld.Adddynsym(ctxt, targ) rel := ctxt.Syms.Lookup(".rel", 0) - ld.Addaddrplus(ctxt, rel, s, int64(r.Off)) - ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(targ.Dynid), ld.R_ARM_GLOB_DAT)) // we need a nil + A dynamic reloc - r.Type = objabi.R_CONST // write r->add during relocsym + rel.AddAddrPlus(ctxt.Arch, s, int64(r.Off)) + rel.AddUint32(ctxt.Arch, ld.ELF32_R_INFO(uint32(targ.Dynid), uint32(elf.R_ARM_GLOB_DAT))) // we need a nil + A dynamic reloc + r.Type = objabi.R_CONST // write r->add during relocsym r.Sym = nil return true } @@ -249,54 +252,48 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { return false } -func elfreloc1(ctxt *ld.Link, r *ld.Reloc, sectoff int64) int { - ld.Thearch.Lput(uint32(sectoff)) +func elfreloc1(ctxt *ld.Link, r *sym.Reloc, sectoff int64) bool { + ctxt.Out.Write32(uint32(sectoff)) elfsym := r.Xsym.ElfsymForReloc() switch r.Type { default: - return -1 - + return false case objabi.R_ADDR: if r.Siz == 4 { - ld.Thearch.Lput(ld.R_ARM_ABS32 | uint32(elfsym)<<8) + ctxt.Out.Write32(uint32(elf.R_ARM_ABS32) | uint32(elfsym)<<8) } else { - return -1 + return false } - case objabi.R_PCREL: if r.Siz == 4 { - ld.Thearch.Lput(ld.R_ARM_REL32 | uint32(elfsym)<<8) + ctxt.Out.Write32(uint32(elf.R_ARM_REL32) | uint32(elfsym)<<8) } else { - return -1 + return false } - case objabi.R_CALLARM: if r.Siz == 4 { if r.Add&0xff000000 == 0xeb000000 { // BL - ld.Thearch.Lput(ld.R_ARM_CALL | uint32(elfsym)<<8) + ctxt.Out.Write32(uint32(elf.R_ARM_CALL) | uint32(elfsym)<<8) } else { - ld.Thearch.Lput(ld.R_ARM_JUMP24 | uint32(elfsym)<<8) + ctxt.Out.Write32(uint32(elf.R_ARM_JUMP24) | uint32(elfsym)<<8) } } else { - return -1 + return false } - case 
objabi.R_TLS_LE: - ld.Thearch.Lput(ld.R_ARM_TLS_LE32 | uint32(elfsym)<<8) - + ctxt.Out.Write32(uint32(elf.R_ARM_TLS_LE32) | uint32(elfsym)<<8) case objabi.R_TLS_IE: - ld.Thearch.Lput(ld.R_ARM_TLS_IE32 | uint32(elfsym)<<8) - + ctxt.Out.Write32(uint32(elf.R_ARM_TLS_IE32) | uint32(elfsym)<<8) case objabi.R_GOTPCREL: if r.Siz == 4 { - ld.Thearch.Lput(ld.R_ARM_GOT_PREL | uint32(elfsym)<<8) + ctxt.Out.Write32(uint32(elf.R_ARM_GOT_PREL) | uint32(elfsym)<<8) } else { - return -1 + return false } } - return 0 + return true } func elfsetupplt(ctxt *ld.Link) { @@ -304,40 +301,40 @@ func elfsetupplt(ctxt *ld.Link) { got := ctxt.Syms.Lookup(".got.plt", 0) if plt.Size == 0 { // str lr, [sp, #-4]! - ld.Adduint32(ctxt, plt, 0xe52de004) + plt.AddUint32(ctxt.Arch, 0xe52de004) // ldr lr, [pc, #4] - ld.Adduint32(ctxt, plt, 0xe59fe004) + plt.AddUint32(ctxt.Arch, 0xe59fe004) // add lr, pc, lr - ld.Adduint32(ctxt, plt, 0xe08fe00e) + plt.AddUint32(ctxt.Arch, 0xe08fe00e) // ldr pc, [lr, #8]! - ld.Adduint32(ctxt, plt, 0xe5bef008) + plt.AddUint32(ctxt.Arch, 0xe5bef008) // .word &GLOBAL_OFFSET_TABLE[0] - . 
- ld.Addpcrelplus(ctxt, plt, got, 4) + plt.AddPCRelPlus(ctxt.Arch, got, 4) // the first .plt entry requires 3 .plt.got entries - ld.Adduint32(ctxt, got, 0) + got.AddUint32(ctxt.Arch, 0) - ld.Adduint32(ctxt, got, 0) - ld.Adduint32(ctxt, got, 0) + got.AddUint32(ctxt.Arch, 0) + got.AddUint32(ctxt.Arch, 0) } } -func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { +func machoreloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, sectoff int64) bool { var v uint32 rs := r.Xsym if r.Type == objabi.R_PCREL { - if rs.Type == ld.SHOSTOBJ { + if rs.Type == sym.SHOSTOBJ { ld.Errorf(s, "pc-relative relocation of external symbol is not supported") - return -1 + return false } if r.Siz != 4 { - return -1 + return false } // emit a pair of "scattered" relocations that @@ -354,17 +351,17 @@ func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { o2 |= ld.MACHO_ARM_RELOC_PAIR << 24 o2 |= 2 << 28 // size = 4 - ld.Thearch.Lput(o1) - ld.Thearch.Lput(uint32(ld.Symaddr(rs))) - ld.Thearch.Lput(o2) - ld.Thearch.Lput(uint32(s.Value + int64(r.Off))) - return 0 + out.Write32(o1) + out.Write32(uint32(ld.Symaddr(rs))) + out.Write32(o2) + out.Write32(uint32(s.Value + int64(r.Off))) + return true } - if rs.Type == ld.SHOSTOBJ || r.Type == objabi.R_CALLARM { + if rs.Type == sym.SHOSTOBJ || r.Type == objabi.R_CALLARM { if rs.Dynid < 0 { - ld.Errorf(s, "reloc %d to non-macho symbol %s type=%d", r.Type, rs.Name, rs.Type) - return -1 + ld.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", r.Type, sym.RelocName(arch, r.Type), rs.Name, rs.Type, rs.Type) + return false } v = uint32(rs.Dynid) @@ -372,14 +369,14 @@ func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { } else { v = uint32(rs.Sect.Extnum) if v == 0 { - ld.Errorf(s, "reloc %d to symbol %s in non-macho section %s type=%d", r.Type, rs.Name, rs.Sect.Name, rs.Type) - return -1 + ld.Errorf(s, "reloc %d (%s) to symbol %s in non-macho section %s type=%d (%s)", r.Type, sym.RelocName(arch, 
r.Type), rs.Name, rs.Sect.Name, rs.Type, rs.Type) + return false } } switch r.Type { default: - return -1 + return false case objabi.R_ADDR: v |= ld.MACHO_GENERIC_RELOC_VANILLA << 28 @@ -391,8 +388,7 @@ func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { switch r.Siz { default: - return -1 - + return false case 1: v |= 0 << 25 @@ -406,9 +402,9 @@ func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { v |= 3 << 25 } - ld.Thearch.Lput(uint32(sectoff)) - ld.Thearch.Lput(v) - return 0 + out.Write32(uint32(sectoff)) + out.Write32(v) + return true } // sign extend a 24-bit integer @@ -428,7 +424,7 @@ func immrot(v uint32) uint32 { } // Convert the direct jump relocation r to refer to a trampoline if the target is too far -func trampoline(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol) { +func trampoline(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol) { switch r.Type { case objabi.R_CALLARM: // r.Add is the instruction @@ -439,11 +435,11 @@ func trampoline(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol) { // look up existing trampolines first. if we found one within the range // of direct call, we can reuse it. otherwise create a new one. 
offset := (signext24(r.Add&0xffffff) + 2) * 4 - var tramp *ld.Symbol + var tramp *sym.Symbol for i := 0; ; i++ { name := r.Sym.Name + fmt.Sprintf("%+d-tramp%d", offset, i) tramp = ctxt.Syms.Lookup(name, int(r.Sym.Version)) - if tramp.Type == ld.SDYNIMPORT { + if tramp.Type == sym.SDYNIMPORT { // don't reuse trampoline defined in other module continue } @@ -468,37 +464,37 @@ func trampoline(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol) { if immrot(uint32(offset)) == 0 { ld.Errorf(s, "odd offset in dynlink direct call: %v+%d", r.Sym, offset) } - gentrampdyn(tramp, r.Sym, int64(offset)) - } else if ld.Buildmode == ld.BuildmodeCArchive || ld.Buildmode == ld.BuildmodeCShared || ld.Buildmode == ld.BuildmodePIE { - gentramppic(tramp, r.Sym, int64(offset)) + gentrampdyn(ctxt.Arch, tramp, r.Sym, int64(offset)) + } else if ctxt.BuildMode == ld.BuildModeCArchive || ctxt.BuildMode == ld.BuildModeCShared || ctxt.BuildMode == ld.BuildModePIE { + gentramppic(ctxt.Arch, tramp, r.Sym, int64(offset)) } else { - gentramp(tramp, r.Sym, int64(offset)) + gentramp(ctxt.Arch, ctxt.LinkMode, tramp, r.Sym, int64(offset)) } } // modify reloc to point to tramp, which will be resolved later r.Sym = tramp r.Add = r.Add&0xff000000 | 0xfffffe // clear the offset embedded in the instruction - r.Done = 0 + r.Done = false } default: - ld.Errorf(s, "trampoline called with non-jump reloc: %v", r.Type) + ld.Errorf(s, "trampoline called with non-jump reloc: %d (%s)", r.Type, sym.RelocName(ctxt.Arch, r.Type)) } } // generate a trampoline to target+offset -func gentramp(tramp, target *ld.Symbol, offset int64) { +func gentramp(arch *sys.Arch, linkmode ld.LinkMode, tramp, target *sym.Symbol, offset int64) { tramp.Size = 12 // 3 instructions tramp.P = make([]byte, tramp.Size) t := ld.Symaddr(target) + int64(offset) o1 := uint32(0xe5900000 | 11<<12 | 15<<16) // MOVW (R15), R11 // R15 is actual pc + 8 o2 := uint32(0xe12fff10 | 11) // JMP (R11) o3 := uint32(t) // WORD $target - 
ld.SysArch.ByteOrder.PutUint32(tramp.P, o1) - ld.SysArch.ByteOrder.PutUint32(tramp.P[4:], o2) - ld.SysArch.ByteOrder.PutUint32(tramp.P[8:], o3) + arch.ByteOrder.PutUint32(tramp.P, o1) + arch.ByteOrder.PutUint32(tramp.P[4:], o2) + arch.ByteOrder.PutUint32(tramp.P[8:], o3) - if ld.Linkmode == ld.LinkExternal { - r := ld.Addrel(tramp) + if linkmode == ld.LinkExternal { + r := tramp.AddRel() r.Off = 8 r.Type = objabi.R_ADDR r.Siz = 4 @@ -508,19 +504,19 @@ func gentramp(tramp, target *ld.Symbol, offset int64) { } // generate a trampoline to target+offset in position independent code -func gentramppic(tramp, target *ld.Symbol, offset int64) { +func gentramppic(arch *sys.Arch, tramp, target *sym.Symbol, offset int64) { tramp.Size = 16 // 4 instructions tramp.P = make([]byte, tramp.Size) o1 := uint32(0xe5900000 | 11<<12 | 15<<16 | 4) // MOVW 4(R15), R11 // R15 is actual pc + 8 o2 := uint32(0xe0800000 | 11<<12 | 15<<16 | 11) // ADD R15, R11, R11 o3 := uint32(0xe12fff10 | 11) // JMP (R11) o4 := uint32(0) // WORD $(target-pc) // filled in with relocation - ld.SysArch.ByteOrder.PutUint32(tramp.P, o1) - ld.SysArch.ByteOrder.PutUint32(tramp.P[4:], o2) - ld.SysArch.ByteOrder.PutUint32(tramp.P[8:], o3) - ld.SysArch.ByteOrder.PutUint32(tramp.P[12:], o4) + arch.ByteOrder.PutUint32(tramp.P, o1) + arch.ByteOrder.PutUint32(tramp.P[4:], o2) + arch.ByteOrder.PutUint32(tramp.P[8:], o3) + arch.ByteOrder.PutUint32(tramp.P[12:], o4) - r := ld.Addrel(tramp) + r := tramp.AddRel() r.Off = 12 r.Type = objabi.R_PCREL r.Siz = 4 @@ -529,7 +525,7 @@ func gentramppic(tramp, target *ld.Symbol, offset int64) { } // generate a trampoline to target+offset in dynlink mode (using GOT) -func gentrampdyn(tramp, target *ld.Symbol, offset int64) { +func gentrampdyn(arch *sys.Arch, tramp, target *sym.Symbol, offset int64) { tramp.Size = 20 // 5 instructions o1 := uint32(0xe5900000 | 11<<12 | 15<<16 | 8) // MOVW 8(R15), R11 // R15 is actual pc + 8 o2 := uint32(0xe0800000 | 11<<12 | 15<<16 | 11) // ADD R15, R11, 
R11 @@ -546,16 +542,16 @@ func gentrampdyn(tramp, target *ld.Symbol, offset int64) { o1 = uint32(0xe5900000 | 11<<12 | 15<<16 | 12) // MOVW 12(R15), R11 } tramp.P = make([]byte, tramp.Size) - ld.SysArch.ByteOrder.PutUint32(tramp.P, o1) - ld.SysArch.ByteOrder.PutUint32(tramp.P[4:], o2) - ld.SysArch.ByteOrder.PutUint32(tramp.P[8:], o3) - ld.SysArch.ByteOrder.PutUint32(tramp.P[12:], o4) - ld.SysArch.ByteOrder.PutUint32(tramp.P[16:], o5) + arch.ByteOrder.PutUint32(tramp.P, o1) + arch.ByteOrder.PutUint32(tramp.P[4:], o2) + arch.ByteOrder.PutUint32(tramp.P[8:], o3) + arch.ByteOrder.PutUint32(tramp.P[12:], o4) + arch.ByteOrder.PutUint32(tramp.P[16:], o5) if offset != 0 { - ld.SysArch.ByteOrder.PutUint32(tramp.P[20:], o6) + arch.ByteOrder.PutUint32(tramp.P[20:], o6) } - r := ld.Addrel(tramp) + r := tramp.AddRel() r.Off = 16 r.Type = objabi.R_GOTPCREL r.Siz = 4 @@ -568,11 +564,11 @@ func gentrampdyn(tramp, target *ld.Symbol, offset int64) { } } -func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { - if ld.Linkmode == ld.LinkExternal { +func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { + if ctxt.LinkMode == ld.LinkExternal { switch r.Type { case objabi.R_CALLARM: - r.Done = 0 + r.Done = false // set up addend for eventual relocation via outer symbol. rs := r.Sym @@ -584,7 +580,7 @@ func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { rs = rs.Outer } - if rs.Type != ld.SHOSTOBJ && rs.Type != ld.SDYNIMPORT && rs.Sect == nil { + if rs.Type != sym.SHOSTOBJ && rs.Type != sym.SDYNIMPORT && rs.Sect == nil { ld.Errorf(s, "missing section for %s", rs.Name) } r.Xsym = rs @@ -594,7 +590,7 @@ func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { // the section load address. // we need to compensate that by removing the instruction's address // from addend. 
- if ld.Headtype == objabi.Hdarwin { + if ctxt.HeadType == objabi.Hdarwin { r.Xadd -= ld.Symaddr(s) + int64(r.Off) } @@ -603,20 +599,19 @@ func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { } *val = int64(braddoff(int32(0xff000000&uint32(r.Add)), int32(0xffffff&uint32(r.Xadd/4)))) - return 0 + return true } - return -1 + return false } switch r.Type { case objabi.R_CONST: *val = r.Add - return 0 - + return true case objabi.R_GOTOFF: *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)) - return 0 + return true // The following three arch specific relocations are only for generation of // Linux/ARM ELF's PLT entry (3 assembler instruction) @@ -625,18 +620,15 @@ func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { ld.Errorf(s, ".got.plt should be placed after .plt section.") } *val = 0xe28fc600 + (0xff & (int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ctxt.Syms.Lookup(".plt", 0))+int64(r.Off))+r.Add)) >> 20)) - return 0 - + return true case objabi.R_PLT1: // add ip, ip, #0xYY000 *val = 0xe28cca00 + (0xff & (int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ctxt.Syms.Lookup(".plt", 0))+int64(r.Off))+r.Add+4)) >> 12)) - return 0 - + return true case objabi.R_PLT2: // ldr pc, [ip, #0xZZZ]! 
*val = 0xe5bcf000 + (0xfff & int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ctxt.Syms.Lookup(".plt", 0))+int64(r.Off))+r.Add+8))) - return 0 - + return true case objabi.R_CALLARM: // bl XXXXXX or b YYYYYY // r.Add is the instruction // low 24-bit encodes the target address @@ -646,40 +638,40 @@ func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { } *val = int64(braddoff(int32(0xff000000&uint32(r.Add)), int32(0xffffff&t))) - return 0 + return true } - return -1 + return false } -func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 { +func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 { log.Fatalf("unexpected relocation variant") return t } -func addpltreloc(ctxt *ld.Link, plt *ld.Symbol, got *ld.Symbol, sym *ld.Symbol, typ objabi.RelocType) *ld.Reloc { - r := ld.Addrel(plt) +func addpltreloc(ctxt *ld.Link, plt *sym.Symbol, got *sym.Symbol, s *sym.Symbol, typ objabi.RelocType) *sym.Reloc { + r := plt.AddRel() r.Sym = got r.Off = int32(plt.Size) r.Siz = 4 r.Type = typ - r.Add = int64(sym.Got) - 8 + r.Add = int64(s.Got) - 8 - plt.Attr |= ld.AttrReachable + plt.Attr |= sym.AttrReachable plt.Size += 4 - ld.Symgrow(plt, plt.Size) + plt.Grow(plt.Size) return r } -func addpltsym(ctxt *ld.Link, s *ld.Symbol) { +func addpltsym(ctxt *ld.Link, s *sym.Symbol) { if s.Plt >= 0 { return } ld.Adddynsym(ctxt, s) - if ld.Iself { + if ctxt.IsELF { plt := ctxt.Syms.Lookup(".plt", 0) got := ctxt.Syms.Lookup(".got.plt", 0) rel := ctxt.Syms.Lookup(".rel.plt", 0) @@ -693,7 +685,7 @@ func addpltsym(ctxt *ld.Link, s *ld.Symbol) { // In theory, all GOT should point to the first PLT entry, // Linux/ARM's dynamic linker will do that for us, but FreeBSD/ARM's // dynamic linker won't, so we'd better do it ourselves. 
- ld.Addaddrplus(ctxt, got, plt, 0) + got.AddAddrPlus(ctxt.Arch, plt, 0) // .plt entry, this depends on the .got entry s.Plt = int32(plt.Size) @@ -703,15 +695,15 @@ func addpltsym(ctxt *ld.Link, s *ld.Symbol) { addpltreloc(ctxt, plt, got, s, objabi.R_PLT2) // ldr pc, [lr, #0xZZZ]! // rel - ld.Addaddrplus(ctxt, rel, got, int64(s.Got)) + rel.AddAddrPlus(ctxt.Arch, got, int64(s.Got)) - ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_ARM_JUMP_SLOT)) + rel.AddUint32(ctxt.Arch, ld.ELF32_R_INFO(uint32(s.Dynid), uint32(elf.R_ARM_JUMP_SLOT))) } else { ld.Errorf(s, "addpltsym: unsupported binary format") } } -func addgotsyminternal(ctxt *ld.Link, s *ld.Symbol) { +func addgotsyminternal(ctxt *ld.Link, s *sym.Symbol) { if s.Got >= 0 { return } @@ -719,15 +711,15 @@ func addgotsyminternal(ctxt *ld.Link, s *ld.Symbol) { got := ctxt.Syms.Lookup(".got", 0) s.Got = int32(got.Size) - ld.Addaddrplus(ctxt, got, s, 0) + got.AddAddrPlus(ctxt.Arch, s, 0) - if ld.Iself { + if ctxt.IsELF { } else { ld.Errorf(s, "addgotsyminternal: unsupported binary format") } } -func addgotsym(ctxt *ld.Link, s *ld.Symbol) { +func addgotsym(ctxt *ld.Link, s *sym.Symbol) { if s.Got >= 0 { return } @@ -735,12 +727,12 @@ func addgotsym(ctxt *ld.Link, s *ld.Symbol) { ld.Adddynsym(ctxt, s) got := ctxt.Syms.Lookup(".got", 0) s.Got = int32(got.Size) - ld.Adduint32(ctxt, got, 0) + got.AddUint32(ctxt.Arch, 0) - if ld.Iself { + if ctxt.IsELF { rel := ctxt.Syms.Lookup(".rel", 0) - ld.Addaddrplus(ctxt, rel, got, int64(s.Got)) - ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_ARM_GLOB_DAT)) + rel.AddAddrPlus(ctxt.Arch, got, int64(s.Got)) + rel.AddUint32(ctxt.Arch, ld.ELF32_R_INFO(uint32(s.Dynid), uint32(elf.R_ARM_GLOB_DAT))) } else { ld.Errorf(s, "addgotsym: unsupported binary format") } @@ -751,15 +743,15 @@ func asmb(ctxt *ld.Link) { ctxt.Logf("%5.2f asmb\n", ld.Cputime()) } - if ld.Iself { + if ctxt.IsELF { ld.Asmbelfsetup() } sect := ld.Segtext.Sections[0] - ld.Cseek(int64(sect.Vaddr - 
ld.Segtext.Vaddr + ld.Segtext.Fileoff)) + ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) ld.Codeblk(ctxt, int64(sect.Vaddr), int64(sect.Length)) for _, sect = range ld.Segtext.Sections[1:] { - ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) + ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length)) } @@ -767,14 +759,14 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f rodatblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segrodata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segrodata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen)) } if ld.Segrelrodata.Filelen > 0 { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f relrodatblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segrelrodata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segrelrodata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segrelrodata.Vaddr), int64(ld.Segrelrodata.Filelen)) } @@ -782,14 +774,14 @@ func asmb(ctxt *ld.Link) { ctxt.Logf("%5.2f datblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segdata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segdata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen)) - ld.Cseek(int64(ld.Segdwarf.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segdwarf.Fileoff)) ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen)) machlink := uint32(0) - if ld.Headtype == objabi.Hdarwin { + if ctxt.HeadType == objabi.Hdarwin { machlink = uint32(ld.Domacholink(ctxt)) } @@ -803,9 +795,9 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f sym\n", ld.Cputime()) } - switch ld.Headtype { + switch ctxt.HeadType { default: - if ld.Iself { + if ctxt.IsELF { symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen) symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound))) } @@ -817,38 +809,35 @@ func asmb(ctxt *ld.Link) { symo = uint32(ld.Segdwarf.Fileoff + uint64(ld.Rnd(int64(ld.Segdwarf.Filelen), 
int64(*ld.FlagRound))) + uint64(machlink)) } - ld.Cseek(int64(symo)) - switch ld.Headtype { + ctxt.Out.SeekSet(int64(symo)) + switch ctxt.HeadType { default: - if ld.Iself { + if ctxt.IsELF { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f elfsym\n", ld.Cputime()) } ld.Asmelfsym(ctxt) - ld.Cflush() - ld.Cwrite(ld.Elfstrdat) + ctxt.Out.Flush() + ctxt.Out.Write(ld.Elfstrdat) - if ld.Linkmode == ld.LinkExternal { + if ctxt.LinkMode == ld.LinkExternal { ld.Elfemitreloc(ctxt) } } case objabi.Hplan9: ld.Asmplan9sym(ctxt) - ld.Cflush() + ctxt.Out.Flush() sym := ctxt.Syms.Lookup("pclntab", 0) if sym != nil { ld.Lcsize = int32(len(sym.P)) - for i := 0; int32(i) < ld.Lcsize; i++ { - ld.Cput(sym.P[i]) - } - - ld.Cflush() + ctxt.Out.Write(sym.P) + ctxt.Out.Flush() } case objabi.Hdarwin: - if ld.Linkmode == ld.LinkExternal { + if ctxt.LinkMode == ld.LinkExternal { ld.Machoemitreloc(ctxt) } } @@ -857,18 +846,18 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f header\n", ld.Cputime()) } - ld.Cseek(0) - switch ld.Headtype { + ctxt.Out.SeekSet(0) + switch ctxt.HeadType { default: case objabi.Hplan9: /* plan 9 */ - ld.Lputb(0x647) /* magic */ - ld.Lputb(uint32(ld.Segtext.Filelen)) /* sizes */ - ld.Lputb(uint32(ld.Segdata.Filelen)) - ld.Lputb(uint32(ld.Segdata.Length - ld.Segdata.Filelen)) - ld.Lputb(uint32(ld.Symsize)) /* nsyms */ - ld.Lputb(uint32(ld.Entryvalue(ctxt))) /* va of entry */ - ld.Lputb(0) - ld.Lputb(uint32(ld.Lcsize)) + ctxt.Out.Write32b(0x647) /* magic */ + ctxt.Out.Write32b(uint32(ld.Segtext.Filelen)) /* sizes */ + ctxt.Out.Write32b(uint32(ld.Segdata.Filelen)) + ctxt.Out.Write32b(uint32(ld.Segdata.Length - ld.Segdata.Filelen)) + ctxt.Out.Write32b(uint32(ld.Symsize)) /* nsyms */ + ctxt.Out.Write32b(uint32(ld.Entryvalue(ctxt))) /* va of entry */ + ctxt.Out.Write32b(0) + ctxt.Out.Write32b(uint32(ld.Lcsize)) case objabi.Hlinux, objabi.Hfreebsd, @@ -881,7 +870,7 @@ func asmb(ctxt *ld.Link) { ld.Asmbmacho(ctxt) } - ld.Cflush() + ctxt.Out.Flush() if 
*ld.FlagC { fmt.Printf("textsize=%d\n", ld.Segtext.Filelen) fmt.Printf("datsize=%d\n", ld.Segdata.Filelen) diff --git a/src/cmd/link/internal/arm/obj.go b/src/cmd/link/internal/arm/obj.go index 2975e5d45a9..da16f923458 100644 --- a/src/cmd/link/internal/arm/obj.go +++ b/src/cmd/link/internal/arm/obj.go @@ -37,44 +37,42 @@ import ( "fmt" ) -func Init() { - ld.SysArch = sys.ArchARM +func Init() (*sys.Arch, ld.Arch) { + arch := sys.ArchARM - ld.Thearch.Funcalign = funcAlign - ld.Thearch.Maxalign = maxAlign - ld.Thearch.Minalign = minAlign - ld.Thearch.Dwarfregsp = dwarfRegSP - ld.Thearch.Dwarfreglr = dwarfRegLR + theArch := ld.Arch{ + Funcalign: funcAlign, + Maxalign: maxAlign, + Minalign: minAlign, + Dwarfregsp: dwarfRegSP, + Dwarfreglr: dwarfRegLR, - ld.Thearch.Adddynrel = adddynrel - ld.Thearch.Archinit = archinit - ld.Thearch.Archreloc = archreloc - ld.Thearch.Archrelocvariant = archrelocvariant - ld.Thearch.Trampoline = trampoline - ld.Thearch.Asmb = asmb - ld.Thearch.Elfreloc1 = elfreloc1 - ld.Thearch.Elfsetupplt = elfsetupplt - ld.Thearch.Gentext = gentext - ld.Thearch.Machoreloc1 = machoreloc1 - ld.Thearch.Lput = ld.Lputl - ld.Thearch.Wput = ld.Wputl - ld.Thearch.Vput = ld.Vputl - ld.Thearch.Append16 = ld.Append16l - ld.Thearch.Append32 = ld.Append32l - ld.Thearch.Append64 = ld.Append64l + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Trampoline: trampoline, + Asmb: asmb, + Elfreloc1: elfreloc1, + Elfsetupplt: elfsetupplt, + Gentext: gentext, + Machoreloc1: machoreloc1, - ld.Thearch.Linuxdynld = "/lib/ld-linux.so.3" // 2 for OABI, 3 for EABI - ld.Thearch.Freebsddynld = "/usr/libexec/ld-elf.so.1" - ld.Thearch.Openbsddynld = "/usr/libexec/ld.so" - ld.Thearch.Netbsddynld = "/libexec/ld.elf_so" - ld.Thearch.Dragonflydynld = "XXX" - ld.Thearch.Solarisdynld = "XXX" + Linuxdynld: "/lib/ld-linux.so.3", // 2 for OABI, 3 for EABI + Freebsddynld: "/usr/libexec/ld-elf.so.1", + Openbsddynld: 
"/usr/libexec/ld.so", + Netbsddynld: "/libexec/ld.elf_so", + Dragonflydynld: "XXX", + Solarisdynld: "XXX", + } + + return arch, theArch } func archinit(ctxt *ld.Link) { - switch ld.Headtype { + switch ctxt.HeadType { default: - ld.Exitf("unknown -H option: %v", ld.Headtype) + ld.Exitf("unknown -H option: %v", ctxt.HeadType) case objabi.Hplan9: /* plan 9 */ ld.HEADR = 32 @@ -123,7 +121,6 @@ func archinit(ctxt *ld.Link) { case objabi.Hdarwin: /* apple MACH */ *ld.FlagW = true // disable DWARF generation - ld.Machoinit() ld.HEADR = ld.INITIAL_MACHO_HEADR if *ld.FlagTextAddr == -1 { *ld.FlagTextAddr = 4096 + int64(ld.HEADR) diff --git a/src/cmd/link/internal/arm64/asm.go b/src/cmd/link/internal/arm64/asm.go index 92a87f99f7d..d8245590bea 100644 --- a/src/cmd/link/internal/arm64/asm.go +++ b/src/cmd/link/internal/arm64/asm.go @@ -32,7 +32,10 @@ package arm64 import ( "cmd/internal/objabi" + "cmd/internal/sys" "cmd/link/internal/ld" + "cmd/link/internal/sym" + "debug/elf" "encoding/binary" "fmt" "log" @@ -43,18 +46,18 @@ func gentext(ctxt *ld.Link) { return } addmoduledata := ctxt.Syms.Lookup("runtime.addmoduledata", 0) - if addmoduledata.Type == ld.STEXT { + if addmoduledata.Type == sym.STEXT { // we're linking a module containing the runtime -> no need for // an init function return } - addmoduledata.Attr |= ld.AttrReachable + addmoduledata.Attr |= sym.AttrReachable initfunc := ctxt.Syms.Lookup("go.link.addmoduledata", 0) - initfunc.Type = ld.STEXT - initfunc.Attr |= ld.AttrLocal - initfunc.Attr |= ld.AttrReachable + initfunc.Type = sym.STEXT + initfunc.Attr |= sym.AttrLocal + initfunc.Attr |= sym.AttrReachable o := func(op uint32) { - ld.Adduint32(ctxt, initfunc, op) + initfunc.AddUint32(ctxt.Arch, op) } // 0000000000000000 : // 0: 90000000 adrp x0, 0 @@ -63,7 +66,7 @@ func gentext(ctxt *ld.Link) { // 4: R_AARCH64_ADD_ABS_LO12_NC local.moduledata o(0x90000000) o(0x91000000) - rel := ld.Addrel(initfunc) + rel := initfunc.AddRel() rel.Off = 0 rel.Siz = 8 rel.Sym = 
ctxt.Moduledata @@ -72,7 +75,7 @@ func gentext(ctxt *ld.Link) { // 8: 14000000 bl 0 // 8: R_AARCH64_CALL26 runtime.addmoduledata o(0x14000000) - rel = ld.Addrel(initfunc) + rel = initfunc.AddRel() rel.Off = 8 rel.Siz = 4 rel.Sym = ctxt.Syms.Lookup("runtime.addmoduledata", 0) @@ -80,67 +83,61 @@ func gentext(ctxt *ld.Link) { ctxt.Textp = append(ctxt.Textp, initfunc) initarray_entry := ctxt.Syms.Lookup("go.link.addmoduledatainit", 0) - initarray_entry.Attr |= ld.AttrReachable - initarray_entry.Attr |= ld.AttrLocal - initarray_entry.Type = ld.SINITARR - ld.Addaddr(ctxt, initarray_entry, initfunc) + initarray_entry.Attr |= sym.AttrReachable + initarray_entry.Attr |= sym.AttrLocal + initarray_entry.Type = sym.SINITARR + initarray_entry.AddAddr(ctxt.Arch, initfunc) } -func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { +func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { log.Fatalf("adddynrel not implemented") return false } -func elfreloc1(ctxt *ld.Link, r *ld.Reloc, sectoff int64) int { - ld.Thearch.Vput(uint64(sectoff)) +func elfreloc1(ctxt *ld.Link, r *sym.Reloc, sectoff int64) bool { + ctxt.Out.Write64(uint64(sectoff)) elfsym := r.Xsym.ElfsymForReloc() switch r.Type { default: - return -1 - + return false case objabi.R_ADDR: switch r.Siz { case 4: - ld.Thearch.Vput(ld.R_AARCH64_ABS32 | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_AARCH64_ABS32) | uint64(elfsym)<<32) case 8: - ld.Thearch.Vput(ld.R_AARCH64_ABS64 | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_AARCH64_ABS64) | uint64(elfsym)<<32) default: - return -1 + return false } - case objabi.R_ADDRARM64: // two relocations: R_AARCH64_ADR_PREL_PG_HI21 and R_AARCH64_ADD_ABS_LO12_NC - ld.Thearch.Vput(ld.R_AARCH64_ADR_PREL_PG_HI21 | uint64(elfsym)<<32) - ld.Thearch.Vput(uint64(r.Xadd)) - ld.Thearch.Vput(uint64(sectoff + 4)) - ld.Thearch.Vput(ld.R_AARCH64_ADD_ABS_LO12_NC | uint64(elfsym)<<32) - + ctxt.Out.Write64(uint64(elf.R_AARCH64_ADR_PREL_PG_HI21) | uint64(elfsym)<<32) + 
ctxt.Out.Write64(uint64(r.Xadd)) + ctxt.Out.Write64(uint64(sectoff + 4)) + ctxt.Out.Write64(uint64(elf.R_AARCH64_ADD_ABS_LO12_NC) | uint64(elfsym)<<32) case objabi.R_ARM64_TLS_LE: - ld.Thearch.Vput(ld.R_AARCH64_TLSLE_MOVW_TPREL_G0 | uint64(elfsym)<<32) - + ctxt.Out.Write64(uint64(elf.R_AARCH64_TLSLE_MOVW_TPREL_G0) | uint64(elfsym)<<32) case objabi.R_ARM64_TLS_IE: - ld.Thearch.Vput(ld.R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 | uint64(elfsym)<<32) - ld.Thearch.Vput(uint64(r.Xadd)) - ld.Thearch.Vput(uint64(sectoff + 4)) - ld.Thearch.Vput(ld.R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC | uint64(elfsym)<<32) - + ctxt.Out.Write64(uint64(elf.R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(r.Xadd)) + ctxt.Out.Write64(uint64(sectoff + 4)) + ctxt.Out.Write64(uint64(elf.R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) | uint64(elfsym)<<32) case objabi.R_ARM64_GOTPCREL: - ld.Thearch.Vput(ld.R_AARCH64_ADR_GOT_PAGE | uint64(elfsym)<<32) - ld.Thearch.Vput(uint64(r.Xadd)) - ld.Thearch.Vput(uint64(sectoff + 4)) - ld.Thearch.Vput(ld.R_AARCH64_LD64_GOT_LO12_NC | uint64(elfsym)<<32) - + ctxt.Out.Write64(uint64(elf.R_AARCH64_ADR_GOT_PAGE) | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(r.Xadd)) + ctxt.Out.Write64(uint64(sectoff + 4)) + ctxt.Out.Write64(uint64(elf.R_AARCH64_LD64_GOT_LO12_NC) | uint64(elfsym)<<32) case objabi.R_CALLARM64: if r.Siz != 4 { - return -1 + return false } - ld.Thearch.Vput(ld.R_AARCH64_CALL26 | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_AARCH64_CALL26) | uint64(elfsym)<<32) } - ld.Thearch.Vput(uint64(r.Xadd)) + ctxt.Out.Write64(uint64(r.Xadd)) - return 0 + return true } func elfsetupplt(ctxt *ld.Link) { @@ -148,7 +145,7 @@ func elfsetupplt(ctxt *ld.Link) { return } -func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { +func machoreloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, sectoff int64) bool { var v uint32 rs := r.Xsym @@ -156,10 +153,10 @@ func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) 
int { // ld64 has a bug handling MACHO_ARM64_RELOC_UNSIGNED with !extern relocation. // see cmd/internal/ld/data.go for details. The workaround is that don't use !extern // UNSIGNED relocation at all. - if rs.Type == ld.SHOSTOBJ || r.Type == objabi.R_CALLARM64 || r.Type == objabi.R_ADDRARM64 || r.Type == objabi.R_ADDR { + if rs.Type == sym.SHOSTOBJ || r.Type == objabi.R_CALLARM64 || r.Type == objabi.R_ADDRARM64 || r.Type == objabi.R_ADDR { if rs.Dynid < 0 { - ld.Errorf(s, "reloc %d to non-macho symbol %s type=%d", r.Type, rs.Name, rs.Type) - return -1 + ld.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", r.Type, sym.RelocName(arch, r.Type), rs.Name, rs.Type, rs.Type) + return false } v = uint32(rs.Dynid) @@ -167,18 +164,16 @@ func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { } else { v = uint32(rs.Sect.Extnum) if v == 0 { - ld.Errorf(s, "reloc %d to symbol %s in non-macho section %s type=%d", r.Type, rs.Name, rs.Sect.Name, rs.Type) - return -1 + ld.Errorf(s, "reloc %d (%s) to symbol %s in non-macho section %s type=%d (%s)", r.Type, sym.RelocName(arch, r.Type), rs.Name, rs.Sect.Name, rs.Type, rs.Type) + return false } } switch r.Type { default: - return -1 - + return false case objabi.R_ADDR: v |= ld.MACHO_ARM64_RELOC_UNSIGNED << 28 - case objabi.R_CALLARM64: if r.Xadd != 0 { ld.Errorf(s, "ld64 doesn't allow BR26 reloc with non-zero addend: %s+%d", rs.Name, r.Xadd) @@ -186,20 +181,19 @@ func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { v |= 1 << 24 // pc-relative bit v |= ld.MACHO_ARM64_RELOC_BRANCH26 << 28 - case objabi.R_ADDRARM64: r.Siz = 4 // Two relocation entries: MACHO_ARM64_RELOC_PAGEOFF12 MACHO_ARM64_RELOC_PAGE21 // if r.Xadd is non-zero, add two MACHO_ARM64_RELOC_ADDEND. 
if r.Xadd != 0 { - ld.Thearch.Lput(uint32(sectoff + 4)) - ld.Thearch.Lput((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff)) + out.Write32(uint32(sectoff + 4)) + out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff)) } - ld.Thearch.Lput(uint32(sectoff + 4)) - ld.Thearch.Lput(v | (ld.MACHO_ARM64_RELOC_PAGEOFF12 << 28) | (2 << 25)) + out.Write32(uint32(sectoff + 4)) + out.Write32(v | (ld.MACHO_ARM64_RELOC_PAGEOFF12 << 28) | (2 << 25)) if r.Xadd != 0 { - ld.Thearch.Lput(uint32(sectoff)) - ld.Thearch.Lput((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff)) + out.Write32(uint32(sectoff)) + out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff)) } v |= 1 << 24 // pc-relative bit v |= ld.MACHO_ARM64_RELOC_PAGE21 << 28 @@ -207,32 +201,27 @@ func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { switch r.Siz { default: - return -1 - + return false case 1: v |= 0 << 25 - case 2: v |= 1 << 25 - case 4: v |= 2 << 25 - case 8: v |= 3 << 25 } - ld.Thearch.Lput(uint32(sectoff)) - ld.Thearch.Lput(v) - return 0 + out.Write32(uint32(sectoff)) + out.Write32(v) + return true } -func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { - if ld.Linkmode == ld.LinkExternal { +func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { + if ctxt.LinkMode == ld.LinkExternal { switch r.Type { default: - return -1 - + return false case objabi.R_ARM64_GOTPCREL: var o1, o2 uint32 if ctxt.Arch.ByteOrder == binary.BigEndian { @@ -250,7 +239,7 @@ func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { // (https://sourceware.org/bugzilla/show_bug.cgi?id=18270). So // we convert the adrp; ld64 + R_ARM64_GOTPCREL into adrp; // add + R_ADDRARM64. 
- if !(r.Sym.Version != 0 || (r.Sym.Type&ld.SHIDDEN != 0) || r.Sym.Attr.Local()) && r.Sym.Type == ld.STEXT && ctxt.DynlinkingGo() { + if !(r.Sym.Version != 0 || r.Sym.Attr.VisibilityHidden() || r.Sym.Attr.Local()) && r.Sym.Type == sym.STEXT && ctxt.DynlinkingGo() { if o2&0xffc00000 != 0xf9400000 { ld.Errorf(s, "R_ARM64_GOTPCREL against unexpected instruction %x", o2) } @@ -263,9 +252,8 @@ func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { *val = int64(o2)<<32 | int64(o1) } fallthrough - case objabi.R_ADDRARM64: - r.Done = 0 + r.Done = false // set up addend for eventual relocation via outer symbol. rs := r.Sym @@ -275,7 +263,7 @@ func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { rs = rs.Outer } - if rs.Type != ld.SHOSTOBJ && rs.Type != ld.SDYNIMPORT && rs.Sect == nil { + if rs.Type != sym.SHOSTOBJ && rs.Type != sym.SDYNIMPORT && rs.Sect == nil { ld.Errorf(s, "missing section for %s", rs.Name) } r.Xsym = rs @@ -285,7 +273,7 @@ func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { // the BR26 relocation should be fully resolved at link time. // That is the reason why the next if block is disabled. When the bug in ld64 // is fixed, we can enable this block and also enable duff's device in cmd/7g. 
- if false && ld.Headtype == objabi.Hdarwin { + if false && ctxt.HeadType == objabi.Hdarwin { var o0, o1 uint32 if ctxt.Arch.ByteOrder == binary.BigEndian { @@ -312,27 +300,24 @@ func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { } } - return 0 - + return true case objabi.R_CALLARM64, objabi.R_ARM64_TLS_LE, objabi.R_ARM64_TLS_IE: - r.Done = 0 + r.Done = false r.Xsym = r.Sym r.Xadd = r.Add - return 0 + return true } } switch r.Type { case objabi.R_CONST: *val = r.Add - return 0 - + return true case objabi.R_GOTOFF: *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)) - return 0 - + return true case objabi.R_ADDRARM64: t := ld.Symaddr(r.Sym) + r.Add - ((s.Value + int64(r.Off)) &^ 0xfff) if t >= 1<<32 || t < -1<<32 { @@ -358,35 +343,33 @@ func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { } else { *val = int64(o1)<<32 | int64(o0) } - return 0 - + return true case objabi.R_ARM64_TLS_LE: - r.Done = 0 - if ld.Headtype != objabi.Hlinux { - ld.Errorf(s, "TLS reloc on unsupported OS %v", ld.Headtype) + r.Done = false + if ctxt.HeadType != objabi.Hlinux { + ld.Errorf(s, "TLS reloc on unsupported OS %v", ctxt.HeadType) } // The TCB is two pointers. This is not documented anywhere, but is // de facto part of the ABI. 
- v := r.Sym.Value + int64(2*ld.SysArch.PtrSize) + v := r.Sym.Value + int64(2*ctxt.Arch.PtrSize) if v < 0 || v >= 32678 { ld.Errorf(s, "TLS offset out of range %d", v) } *val |= v << 5 - return 0 - + return true case objabi.R_CALLARM64: t := (ld.Symaddr(r.Sym) + r.Add) - (s.Value + int64(r.Off)) if t >= 1<<27 || t < -1<<27 { ld.Errorf(s, "program too large, call relocation distance = %d", t) } *val |= (t >> 2) & 0x03ffffff - return 0 + return true } - return -1 + return false } -func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 { +func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 { log.Fatalf("unexpected relocation variant") return -1 } @@ -396,15 +379,15 @@ func asmb(ctxt *ld.Link) { ctxt.Logf("%5.2f asmb\n", ld.Cputime()) } - if ld.Iself { + if ctxt.IsELF { ld.Asmbelfsetup() } sect := ld.Segtext.Sections[0] - ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) + ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) ld.Codeblk(ctxt, int64(sect.Vaddr), int64(sect.Length)) for _, sect = range ld.Segtext.Sections[1:] { - ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) + ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length)) } @@ -412,14 +395,14 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f rodatblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segrodata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segrodata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen)) } if ld.Segrelrodata.Filelen > 0 { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f relrodatblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segrelrodata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segrelrodata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segrelrodata.Vaddr), int64(ld.Segrelrodata.Filelen)) } @@ -427,14 +410,14 @@ func asmb(ctxt *ld.Link) { ctxt.Logf("%5.2f datblk\n", ld.Cputime()) } - 
ld.Cseek(int64(ld.Segdata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segdata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen)) - ld.Cseek(int64(ld.Segdwarf.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segdwarf.Fileoff)) ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen)) machlink := uint32(0) - if ld.Headtype == objabi.Hdarwin { + if ctxt.HeadType == objabi.Hdarwin { machlink = uint32(ld.Domacholink(ctxt)) } @@ -448,9 +431,9 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f sym\n", ld.Cputime()) } - switch ld.Headtype { + switch ctxt.HeadType { default: - if ld.Iself { + if ctxt.IsELF { symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen) symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound))) } @@ -462,38 +445,35 @@ func asmb(ctxt *ld.Link) { symo = uint32(ld.Segdwarf.Fileoff + uint64(ld.Rnd(int64(ld.Segdwarf.Filelen), int64(*ld.FlagRound))) + uint64(machlink)) } - ld.Cseek(int64(symo)) - switch ld.Headtype { + ctxt.Out.SeekSet(int64(symo)) + switch ctxt.HeadType { default: - if ld.Iself { + if ctxt.IsELF { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f elfsym\n", ld.Cputime()) } ld.Asmelfsym(ctxt) - ld.Cflush() - ld.Cwrite(ld.Elfstrdat) + ctxt.Out.Flush() + ctxt.Out.Write(ld.Elfstrdat) - if ld.Linkmode == ld.LinkExternal { + if ctxt.LinkMode == ld.LinkExternal { ld.Elfemitreloc(ctxt) } } case objabi.Hplan9: ld.Asmplan9sym(ctxt) - ld.Cflush() + ctxt.Out.Flush() sym := ctxt.Syms.Lookup("pclntab", 0) if sym != nil { ld.Lcsize = int32(len(sym.P)) - for i := 0; int32(i) < ld.Lcsize; i++ { - ld.Cput(sym.P[i]) - } - - ld.Cflush() + ctxt.Out.Write(sym.P) + ctxt.Out.Flush() } case objabi.Hdarwin: - if ld.Linkmode == ld.LinkExternal { + if ctxt.LinkMode == ld.LinkExternal { ld.Machoemitreloc(ctxt) } } @@ -502,18 +482,18 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f header\n", ld.Cputime()) } - ld.Cseek(0) - switch ld.Headtype { + ctxt.Out.SeekSet(0) + switch ctxt.HeadType { default: case 
objabi.Hplan9: /* plan 9 */ - ld.Thearch.Lput(0x647) /* magic */ - ld.Thearch.Lput(uint32(ld.Segtext.Filelen)) /* sizes */ - ld.Thearch.Lput(uint32(ld.Segdata.Filelen)) - ld.Thearch.Lput(uint32(ld.Segdata.Length - ld.Segdata.Filelen)) - ld.Thearch.Lput(uint32(ld.Symsize)) /* nsyms */ - ld.Thearch.Lput(uint32(ld.Entryvalue(ctxt))) /* va of entry */ - ld.Thearch.Lput(0) - ld.Thearch.Lput(uint32(ld.Lcsize)) + ctxt.Out.Write32(0x647) /* magic */ + ctxt.Out.Write32(uint32(ld.Segtext.Filelen)) /* sizes */ + ctxt.Out.Write32(uint32(ld.Segdata.Filelen)) + ctxt.Out.Write32(uint32(ld.Segdata.Length - ld.Segdata.Filelen)) + ctxt.Out.Write32(uint32(ld.Symsize)) /* nsyms */ + ctxt.Out.Write32(uint32(ld.Entryvalue(ctxt))) /* va of entry */ + ctxt.Out.Write32(0) + ctxt.Out.Write32(uint32(ld.Lcsize)) case objabi.Hlinux, objabi.Hfreebsd, @@ -526,7 +506,7 @@ func asmb(ctxt *ld.Link) { ld.Asmbmacho(ctxt) } - ld.Cflush() + ctxt.Out.Flush() if *ld.FlagC { fmt.Printf("textsize=%d\n", ld.Segtext.Filelen) fmt.Printf("datsize=%d\n", ld.Segdata.Filelen) diff --git a/src/cmd/link/internal/arm64/obj.go b/src/cmd/link/internal/arm64/obj.go index dce9beb1509..6b386ad7379 100644 --- a/src/cmd/link/internal/arm64/obj.go +++ b/src/cmd/link/internal/arm64/obj.go @@ -37,44 +37,42 @@ import ( "fmt" ) -func Init() { - ld.SysArch = sys.ArchARM64 +func Init() (*sys.Arch, ld.Arch) { + arch := sys.ArchARM64 - ld.Thearch.Funcalign = funcAlign - ld.Thearch.Maxalign = maxAlign - ld.Thearch.Minalign = minAlign - ld.Thearch.Dwarfregsp = dwarfRegSP - ld.Thearch.Dwarfreglr = dwarfRegLR + theArch := ld.Arch{ + Funcalign: funcAlign, + Maxalign: maxAlign, + Minalign: minAlign, + Dwarfregsp: dwarfRegSP, + Dwarfreglr: dwarfRegLR, - ld.Thearch.Adddynrel = adddynrel - ld.Thearch.Archinit = archinit - ld.Thearch.Archreloc = archreloc - ld.Thearch.Archrelocvariant = archrelocvariant - ld.Thearch.Asmb = asmb - ld.Thearch.Elfreloc1 = elfreloc1 - ld.Thearch.Elfsetupplt = elfsetupplt - ld.Thearch.Gentext = gentext - 
ld.Thearch.Machoreloc1 = machoreloc1 - ld.Thearch.Lput = ld.Lputl - ld.Thearch.Wput = ld.Wputl - ld.Thearch.Vput = ld.Vputl - ld.Thearch.Append16 = ld.Append16l - ld.Thearch.Append32 = ld.Append32l - ld.Thearch.Append64 = ld.Append64l + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Asmb: asmb, + Elfreloc1: elfreloc1, + Elfsetupplt: elfsetupplt, + Gentext: gentext, + Machoreloc1: machoreloc1, - ld.Thearch.Linuxdynld = "/lib/ld-linux-aarch64.so.1" + Linuxdynld: "/lib/ld-linux-aarch64.so.1", - ld.Thearch.Freebsddynld = "XXX" - ld.Thearch.Openbsddynld = "XXX" - ld.Thearch.Netbsddynld = "XXX" - ld.Thearch.Dragonflydynld = "XXX" - ld.Thearch.Solarisdynld = "XXX" + Freebsddynld: "XXX", + Openbsddynld: "XXX", + Netbsddynld: "XXX", + Dragonflydynld: "XXX", + Solarisdynld: "XXX", + } + + return arch, theArch } func archinit(ctxt *ld.Link) { - switch ld.Headtype { + switch ctxt.HeadType { default: - ld.Exitf("unknown -H option: %v", ld.Headtype) + ld.Exitf("unknown -H option: %v", ctxt.HeadType) case objabi.Hplan9: /* plan 9 */ ld.HEADR = 32 @@ -104,7 +102,6 @@ func archinit(ctxt *ld.Link) { case objabi.Hdarwin: /* apple MACH */ *ld.FlagW = true // disable DWARF generation - ld.Machoinit() ld.HEADR = ld.INITIAL_MACHO_HEADR if *ld.FlagTextAddr == -1 { *ld.FlagTextAddr = 4096 + int64(ld.HEADR) diff --git a/src/cmd/link/internal/ld/ar.go b/src/cmd/link/internal/ld/ar.go index 8827b76aedf..4b513041a37 100644 --- a/src/cmd/link/internal/ld/ar.go +++ b/src/cmd/link/internal/ld/ar.go @@ -33,6 +33,7 @@ package ld import ( "cmd/internal/bio" "cmd/internal/objabi" + "cmd/link/internal/sym" "encoding/binary" "fmt" "io" @@ -82,6 +83,10 @@ func hostArchive(ctxt *Link, name string) { Exitf("file %s too short", name) } + if string(magbuf[:]) != ARMAG { + Exitf("%s is not an archive file", name) + } + var arhdr ArHdr l := nextar(f, f.Offset(), &arhdr) if l <= 0 { @@ -101,7 +106,7 @@ func hostArchive(ctxt *Link, name string) { var 
load []uint64 for _, s := range ctxt.Syms.Allsym { for _, r := range s.R { - if r.Sym != nil && r.Sym.Type&SMASK == SXREF { + if r.Sym != nil && r.Sym.Type == sym.SXREF { if off := armap[r.Sym.Name]; off != 0 && !loaded[off] { load = append(load, off) loaded[off] = true @@ -118,7 +123,7 @@ func hostArchive(ctxt *Link, name string) { pname := fmt.Sprintf("%s(%s)", name, arhdr.name) l = atolwhex(arhdr.size) - libgcc := Library{Pkg: "libgcc"} + libgcc := sym.Library{Pkg: "libgcc"} h := ldobj(ctxt, f, &libgcc, l, pname, name, ArchiveObj) f.Seek(h.off, 0) h.ld(ctxt, f, h.pkg, h.length, h.pn) diff --git a/src/cmd/link/internal/ld/config.go b/src/cmd/link/internal/ld/config.go index 342351b4bca..cc95392d772 100644 --- a/src/cmd/link/internal/ld/config.go +++ b/src/cmd/link/internal/ld/config.go @@ -11,11 +11,6 @@ import ( "log" ) -var ( - Linkmode LinkMode - Buildmode BuildMode -) - // A BuildMode indicates the sort of object we are building. // // Possible build modes are the same as those for the -buildmode flag @@ -23,13 +18,13 @@ var ( type BuildMode uint8 const ( - BuildmodeUnset BuildMode = iota - BuildmodeExe - BuildmodePIE - BuildmodeCArchive - BuildmodeCShared - BuildmodeShared - BuildmodePlugin + BuildModeUnset BuildMode = iota + BuildModeExe + BuildModePIE + BuildModeCArchive + BuildModeCShared + BuildModeShared + BuildModePlugin ) func (mode *BuildMode) Set(s string) error { @@ -40,14 +35,20 @@ func (mode *BuildMode) Set(s string) error { default: return fmt.Errorf("invalid buildmode: %q", s) case "exe": - *mode = BuildmodeExe + *mode = BuildModeExe case "pie": switch objabi.GOOS { case "android", "linux": + case "darwin": + switch objabi.GOARCH { + case "amd64": + default: + return badmode() + } default: return badmode() } - *mode = BuildmodePIE + *mode = BuildModePIE case "c-archive": switch objabi.GOOS { case "darwin", "linux": @@ -60,14 +61,14 @@ func (mode *BuildMode) Set(s string) error { default: return badmode() } - *mode = BuildmodeCArchive + *mode = 
BuildModeCArchive case "c-shared": switch objabi.GOARCH { - case "386", "amd64", "arm", "arm64": + case "386", "amd64", "arm", "arm64", "ppc64le", "s390x": default: return badmode() } - *mode = BuildmodeCShared + *mode = BuildModeCShared case "shared": switch objabi.GOOS { case "linux": @@ -79,12 +80,12 @@ func (mode *BuildMode) Set(s string) error { default: return badmode() } - *mode = BuildmodeShared + *mode = BuildModeShared case "plugin": switch objabi.GOOS { case "linux": switch objabi.GOARCH { - case "386", "amd64", "arm", "arm64", "s390x": + case "386", "amd64", "arm", "arm64", "s390x", "ppc64le": default: return badmode() } @@ -97,26 +98,26 @@ func (mode *BuildMode) Set(s string) error { default: return badmode() } - *mode = BuildmodePlugin + *mode = BuildModePlugin } return nil } func (mode *BuildMode) String() string { switch *mode { - case BuildmodeUnset: + case BuildModeUnset: return "" // avoid showing a default in usage message - case BuildmodeExe: + case BuildModeExe: return "exe" - case BuildmodePIE: + case BuildModePIE: return "pie" - case BuildmodeCArchive: + case BuildModeCArchive: return "c-archive" - case BuildmodeCShared: + case BuildModeCShared: return "c-shared" - case BuildmodeShared: + case BuildModeShared: return "shared" - case BuildmodePlugin: + case BuildModePlugin: return "plugin" } return fmt.Sprintf("BuildMode(%d)", uint8(*mode)) @@ -172,7 +173,7 @@ func mustLinkExternal(ctxt *Link) (res bool, reason string) { case "android": return true, "android" case "darwin": - if SysArch.InFamily(sys.ARM, sys.ARM64) { + if ctxt.Arch.InFamily(sys.ARM, sys.ARM64) { return true, "iOS" } } @@ -184,42 +185,43 @@ func mustLinkExternal(ctxt *Link) (res bool, reason string) { // Internally linking cgo is incomplete on some architectures. 
// https://golang.org/issue/10373 // https://golang.org/issue/14449 - if iscgo && SysArch.InFamily(sys.ARM64, sys.MIPS64, sys.MIPS) { + // https://golang.org/issue/21961 + if iscgo && ctxt.Arch.InFamily(sys.ARM64, sys.MIPS64, sys.MIPS, sys.PPC64) { return true, objabi.GOARCH + " does not support internal cgo" } // Some build modes require work the internal linker cannot do (yet). - switch Buildmode { - case BuildmodeCArchive: + switch ctxt.BuildMode { + case BuildModeCArchive: return true, "buildmode=c-archive" - case BuildmodeCShared: + case BuildModeCShared: return true, "buildmode=c-shared" - case BuildmodePIE: + case BuildModePIE: switch objabi.GOOS + "/" + objabi.GOARCH { case "linux/amd64": default: // Internal linking does not support TLS_IE. return true, "buildmode=pie" } - case BuildmodePlugin: + case BuildModePlugin: return true, "buildmode=plugin" - case BuildmodeShared: + case BuildModeShared: return true, "buildmode=shared" } - if *FlagLinkshared { + if ctxt.linkShared { return true, "dynamically linking with a shared library" } return false, "" } -// determineLinkMode sets Linkmode. +// determineLinkMode sets ctxt.LinkMode. // // It is called after flags are processed and inputs are processed, -// so the Linkmode variable has an initial value from the -linkmode +// so the ctxt.LinkMode variable has an initial value from the -linkmode // flag and the iscgo externalobj variables are set. func determineLinkMode(ctxt *Link) { - switch Linkmode { + switch ctxt.LinkMode { case LinkAuto: // The environment variable GO_EXTLINK_ENABLED controls the // default value of -linkmode. 
If it is not set when the @@ -230,18 +232,18 @@ func determineLinkMode(ctxt *Link) { if needed, reason := mustLinkExternal(ctxt); needed { Exitf("internal linking requested via GO_EXTLINK_ENABLED, but external linking required: %s", reason) } - Linkmode = LinkInternal + ctxt.LinkMode = LinkInternal case "1": - Linkmode = LinkExternal + ctxt.LinkMode = LinkExternal default: if needed, _ := mustLinkExternal(ctxt); needed { - Linkmode = LinkExternal + ctxt.LinkMode = LinkExternal } else if iscgo && externalobj { - Linkmode = LinkExternal - } else if Buildmode == BuildmodePIE { - Linkmode = LinkExternal // https://golang.org/issue/18968 + ctxt.LinkMode = LinkExternal + } else if ctxt.BuildMode == BuildModePIE { + ctxt.LinkMode = LinkExternal // https://golang.org/issue/18968 } else { - Linkmode = LinkInternal + ctxt.LinkMode = LinkInternal } } case LinkInternal: diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index 3c92e263009..e0541c435bd 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ -35,6 +35,7 @@ import ( "cmd/internal/gcprog" "cmd/internal/objabi" "cmd/internal/sys" + "cmd/link/internal/sym" "fmt" "log" "os" @@ -44,280 +45,6 @@ import ( "sync" ) -func Symgrow(s *Symbol, siz int64) { - if int64(int(siz)) != siz { - log.Fatalf("symgrow size %d too long", siz) - } - if int64(len(s.P)) >= siz { - return - } - if cap(s.P) < int(siz) { - p := make([]byte, 2*(siz+1)) - s.P = append(p[:0], s.P...) 
- } - s.P = s.P[:siz] -} - -func Addrel(s *Symbol) *Reloc { - s.R = append(s.R, Reloc{}) - return &s.R[len(s.R)-1] -} - -func setuintxx(ctxt *Link, s *Symbol, off int64, v uint64, wid int64) int64 { - if s.Type == 0 { - s.Type = SDATA - } - s.Attr |= AttrReachable - if s.Size < off+wid { - s.Size = off + wid - Symgrow(s, s.Size) - } - - switch wid { - case 1: - s.P[off] = uint8(v) - case 2: - ctxt.Arch.ByteOrder.PutUint16(s.P[off:], uint16(v)) - case 4: - ctxt.Arch.ByteOrder.PutUint32(s.P[off:], uint32(v)) - case 8: - ctxt.Arch.ByteOrder.PutUint64(s.P[off:], v) - } - - return off + wid -} - -func Addbytes(s *Symbol, bytes []byte) int64 { - if s.Type == 0 { - s.Type = SDATA - } - s.Attr |= AttrReachable - s.P = append(s.P, bytes...) - s.Size = int64(len(s.P)) - - return s.Size -} - -func adduintxx(ctxt *Link, s *Symbol, v uint64, wid int) int64 { - off := s.Size - setuintxx(ctxt, s, off, v, int64(wid)) - return off -} - -func Adduint8(ctxt *Link, s *Symbol, v uint8) int64 { - off := s.Size - if s.Type == 0 { - s.Type = SDATA - } - s.Attr |= AttrReachable - s.Size++ - s.P = append(s.P, v) - - return off -} - -func Adduint16(ctxt *Link, s *Symbol, v uint16) int64 { - return adduintxx(ctxt, s, uint64(v), 2) -} - -func Adduint32(ctxt *Link, s *Symbol, v uint32) int64 { - return adduintxx(ctxt, s, uint64(v), 4) -} - -func Adduint64(ctxt *Link, s *Symbol, v uint64) int64 { - return adduintxx(ctxt, s, v, 8) -} - -func adduint(ctxt *Link, s *Symbol, v uint64) int64 { - return adduintxx(ctxt, s, v, SysArch.PtrSize) -} - -func setuint8(ctxt *Link, s *Symbol, r int64, v uint8) int64 { - return setuintxx(ctxt, s, r, uint64(v), 1) -} - -func setuint32(ctxt *Link, s *Symbol, r int64, v uint32) int64 { - return setuintxx(ctxt, s, r, uint64(v), 4) -} - -func setuint(ctxt *Link, s *Symbol, r int64, v uint64) int64 { - return setuintxx(ctxt, s, r, v, int64(SysArch.PtrSize)) -} - -func Addaddrplus(ctxt *Link, s *Symbol, t *Symbol, add int64) int64 { - if s.Type == 0 { - s.Type = SDATA 
- } - s.Attr |= AttrReachable - i := s.Size - s.Size += int64(ctxt.Arch.PtrSize) - Symgrow(s, s.Size) - r := Addrel(s) - r.Sym = t - r.Off = int32(i) - r.Siz = uint8(ctxt.Arch.PtrSize) - r.Type = objabi.R_ADDR - r.Add = add - return i + int64(r.Siz) -} - -func Addpcrelplus(ctxt *Link, s *Symbol, t *Symbol, add int64) int64 { - if s.Type == 0 { - s.Type = SDATA - } - s.Attr |= AttrReachable - i := s.Size - s.Size += 4 - Symgrow(s, s.Size) - r := Addrel(s) - r.Sym = t - r.Off = int32(i) - r.Add = add - r.Type = objabi.R_PCREL - r.Siz = 4 - if SysArch.Family == sys.S390X { - r.Variant = RV_390_DBL - } - return i + int64(r.Siz) -} - -func Addaddr(ctxt *Link, s *Symbol, t *Symbol) int64 { - return Addaddrplus(ctxt, s, t, 0) -} - -func setaddrplus(ctxt *Link, s *Symbol, off int64, t *Symbol, add int64) int64 { - if s.Type == 0 { - s.Type = SDATA - } - s.Attr |= AttrReachable - if off+int64(ctxt.Arch.PtrSize) > s.Size { - s.Size = off + int64(ctxt.Arch.PtrSize) - Symgrow(s, s.Size) - } - - r := Addrel(s) - r.Sym = t - r.Off = int32(off) - r.Siz = uint8(ctxt.Arch.PtrSize) - r.Type = objabi.R_ADDR - r.Add = add - return off + int64(r.Siz) -} - -func setaddr(ctxt *Link, s *Symbol, off int64, t *Symbol) int64 { - return setaddrplus(ctxt, s, off, t, 0) -} - -func addsize(ctxt *Link, s *Symbol, t *Symbol) int64 { - if s.Type == 0 { - s.Type = SDATA - } - s.Attr |= AttrReachable - i := s.Size - s.Size += int64(ctxt.Arch.PtrSize) - Symgrow(s, s.Size) - r := Addrel(s) - r.Sym = t - r.Off = int32(i) - r.Siz = uint8(ctxt.Arch.PtrSize) - r.Type = objabi.R_SIZE - return i + int64(r.Siz) -} - -func addaddrplus4(ctxt *Link, s *Symbol, t *Symbol, add int64) int64 { - if s.Type == 0 { - s.Type = SDATA - } - s.Attr |= AttrReachable - i := s.Size - s.Size += 4 - Symgrow(s, s.Size) - r := Addrel(s) - r.Sym = t - r.Off = int32(i) - r.Siz = 4 - r.Type = objabi.R_ADDR - r.Add = add - return i + int64(r.Siz) -} - -/* - * divide-and-conquer list-link (by Sub) sort of Symbol* by Value. 
- * Used for sub-symbols when loading host objects (see e.g. ldelf.go). - */ - -func listsort(l *Symbol) *Symbol { - if l == nil || l.Sub == nil { - return l - } - - l1 := l - l2 := l - for { - l2 = l2.Sub - if l2 == nil { - break - } - l2 = l2.Sub - if l2 == nil { - break - } - l1 = l1.Sub - } - - l2 = l1.Sub - l1.Sub = nil - l1 = listsort(l) - l2 = listsort(l2) - - /* set up lead element */ - if l1.Value < l2.Value { - l = l1 - l1 = l1.Sub - } else { - l = l2 - l2 = l2.Sub - } - - le := l - - for { - if l1 == nil { - for l2 != nil { - le.Sub = l2 - le = l2 - l2 = l2.Sub - } - - le.Sub = nil - break - } - - if l2 == nil { - for l1 != nil { - le.Sub = l1 - le = l1 - l1 = l1.Sub - } - - break - } - - if l1.Value < l2.Value { - le.Sub = l1 - le = l1 - l1 = l1.Sub - } else { - le.Sub = l2 - le = l2 - l2 = l2.Sub - } - } - - le.Sub = nil - return l -} - // isRuntimeDepPkg returns whether pkg is the runtime package or its dependency func isRuntimeDepPkg(pkg string) bool { switch pkg { @@ -331,7 +58,7 @@ func isRuntimeDepPkg(pkg string) bool { // Estimate the max size needed to hold any new trampolines created for this function. This // is used to determine when the section can be split if it becomes too large, to ensure that // the trampolines are in the same section as the function that uses them. -func maxSizeTrampolinesPPC64(s *Symbol, isTramp bool) uint64 { +func maxSizeTrampolinesPPC64(s *sym.Symbol, isTramp bool) uint64 { // If Thearch.Trampoline is nil, then trampoline support is not available on this arch. // A trampoline does not need any dependent trampolines. if Thearch.Trampoline == nil || isTramp { @@ -353,7 +80,7 @@ func maxSizeTrampolinesPPC64(s *Symbol, isTramp bool) uint64 { // ARM, PPC64 & PPC64LE support trampoline insertion for internal and external linking // On PPC64 & PPC64LE the text sections might be split but will still insert trampolines // where necessary. 
-func trampoline(ctxt *Link, s *Symbol) { +func trampoline(ctxt *Link, s *sym.Symbol) { if Thearch.Trampoline == nil { return // no need or no support of trampolines on this arch } @@ -363,7 +90,7 @@ func trampoline(ctxt *Link, s *Symbol) { if !r.Type.IsDirectJump() { continue } - if Symaddr(r.Sym) == 0 && r.Sym.Type != SDYNIMPORT { + if Symaddr(r.Sym) == 0 && r.Sym.Type != sym.SDYNIMPORT { if r.Sym.File != s.File { if !isRuntimeDepPkg(s.File) || !isRuntimeDepPkg(r.Sym.File) { Errorf(s, "unresolved inter-package jump to %s(%s)", r.Sym, r.Sym.File) @@ -380,21 +107,16 @@ func trampoline(ctxt *Link, s *Symbol) { } // resolve relocations in s. -func relocsym(ctxt *Link, s *Symbol) { - var r *Reloc - var rs *Symbol - var i16 int16 - var off int32 - var siz int32 - var fl int32 - var o int64 - +func relocsym(ctxt *Link, s *sym.Symbol) { for ri := int32(0); ri < int32(len(s.R)); ri++ { - r = &s.R[ri] - - r.Done = 1 - off = r.Off - siz = int32(r.Siz) + r := &s.R[ri] + if r.Done { + // Relocation already processed by an earlier phase. + continue + } + r.Done = true + off := r.Off + siz := int32(r.Siz) if off < 0 || off+siz > int32(len(s.P)) { rname := "" if r.Sym != nil { @@ -404,12 +126,12 @@ func relocsym(ctxt *Link, s *Symbol) { continue } - if r.Sym != nil && (r.Sym.Type&(SMASK|SHIDDEN) == 0 || r.Sym.Type&SMASK == SXREF) { + if r.Sym != nil && ((r.Sym.Type == 0 && !r.Sym.Attr.VisibilityHidden()) || r.Sym.Type == sym.SXREF) { // When putting the runtime but not main into a shared library // these symbols are undefined and that's OK. - if Buildmode == BuildmodeShared { + if ctxt.BuildMode == BuildModeShared { if r.Sym.Name == "main.main" || r.Sym.Name == "main.init" { - r.Sym.Type = SDYNIMPORT + r.Sym.Type = sym.SDYNIMPORT } else if strings.HasPrefix(r.Sym.Name, "go.info.") { // Skip go.info symbols. They are only needed to communicate // DWARF info between the compiler and linker. 
@@ -427,29 +149,36 @@ func relocsym(ctxt *Link, s *Symbol) { if r.Siz == 0 { // informational relocation - no work to do continue } + if r.Type == objabi.R_DWARFFILEREF { + // These should have been processed previously during + // line table writing. + Errorf(s, "orphan R_DWARFFILEREF reloc to %v", r.Sym.Name) + continue + } // We need to be able to reference dynimport symbols when linking against // shared libraries, and Solaris needs it always - if Headtype != objabi.Hsolaris && r.Sym != nil && r.Sym.Type == SDYNIMPORT && !ctxt.DynlinkingGo() { - if !(SysArch.Family == sys.PPC64 && Linkmode == LinkExternal && r.Sym.Name == ".TOC.") { - Errorf(s, "unhandled relocation for %s (type %d rtype %d)", r.Sym.Name, r.Sym.Type, r.Type) + if ctxt.HeadType != objabi.Hsolaris && r.Sym != nil && r.Sym.Type == sym.SDYNIMPORT && !ctxt.DynlinkingGo() && !r.Sym.Attr.SubSymbol() { + if !(ctxt.Arch.Family == sys.PPC64 && ctxt.LinkMode == LinkExternal && r.Sym.Name == ".TOC.") { + Errorf(s, "unhandled relocation for %s (type %d (%s) rtype %d (%s))", r.Sym.Name, r.Sym.Type, r.Sym.Type, r.Type, sym.RelocName(ctxt.Arch, r.Type)) } } - if r.Sym != nil && r.Sym.Type != STLSBSS && r.Type != objabi.R_WEAKADDROFF && !r.Sym.Attr.Reachable() { + if r.Sym != nil && r.Sym.Type != sym.STLSBSS && r.Type != objabi.R_WEAKADDROFF && !r.Sym.Attr.Reachable() { Errorf(s, "unreachable sym in relocation: %s", r.Sym.Name) } // TODO(mundaym): remove this special case - see issue 14218. 
- if SysArch.Family == sys.S390X { + if ctxt.Arch.Family == sys.S390X { switch r.Type { case objabi.R_PCRELDBL: r.Type = objabi.R_PCREL - r.Variant = RV_390_DBL + r.Variant = sym.RV_390_DBL case objabi.R_CALL: - r.Variant = RV_390_DBL + r.Variant = sym.RV_390_DBL } } + var o int64 switch r.Type { default: switch siz { @@ -464,28 +193,27 @@ func relocsym(ctxt *Link, s *Symbol) { case 8: o = int64(ctxt.Arch.ByteOrder.Uint64(s.P[off:])) } - if Thearch.Archreloc(ctxt, r, s, &o) < 0 { - Errorf(s, "unknown reloc to %v: %v", r.Sym.Name, r.Type) + if !Thearch.Archreloc(ctxt, r, s, &o) { + Errorf(s, "unknown reloc to %v: %d (%s)", r.Sym.Name, r.Type, sym.RelocName(ctxt.Arch, r.Type)) } - case objabi.R_TLS_LE: - isAndroidX86 := objabi.GOOS == "android" && (SysArch.InFamily(sys.AMD64, sys.I386)) + isAndroidX86 := objabi.GOOS == "android" && (ctxt.Arch.InFamily(sys.AMD64, sys.I386)) - if Linkmode == LinkExternal && Iself && !isAndroidX86 { - r.Done = 0 + if ctxt.LinkMode == LinkExternal && ctxt.IsELF && !isAndroidX86 { + r.Done = false if r.Sym == nil { r.Sym = ctxt.Tlsg } r.Xsym = r.Sym r.Xadd = r.Add o = 0 - if SysArch.Family != sys.AMD64 { + if ctxt.Arch.Family != sys.AMD64 { o = r.Add } break } - if Iself && SysArch.Family == sys.ARM { + if ctxt.IsELF && ctxt.Arch.Family == sys.ARM { // On ELF ARM, the thread pointer is 8 bytes before // the start of the thread-local data block, so add 8 // to the actual TLS offset (r->sym->value). @@ -494,51 +222,49 @@ func relocsym(ctxt *Link, s *Symbol) { // related to the fact that our own TLS storage happens // to take up 8 bytes. 
o = 8 + r.Sym.Value - } else if Iself || Headtype == objabi.Hplan9 || Headtype == objabi.Hdarwin || isAndroidX86 { + } else if ctxt.IsELF || ctxt.HeadType == objabi.Hplan9 || ctxt.HeadType == objabi.Hdarwin || isAndroidX86 { o = int64(ctxt.Tlsoffset) + r.Add - } else if Headtype == objabi.Hwindows { + } else if ctxt.HeadType == objabi.Hwindows { o = r.Add } else { - log.Fatalf("unexpected R_TLS_LE relocation for %v", Headtype) + log.Fatalf("unexpected R_TLS_LE relocation for %v", ctxt.HeadType) } - case objabi.R_TLS_IE: - isAndroidX86 := objabi.GOOS == "android" && (SysArch.InFamily(sys.AMD64, sys.I386)) + isAndroidX86 := objabi.GOOS == "android" && (ctxt.Arch.InFamily(sys.AMD64, sys.I386)) - if Linkmode == LinkExternal && Iself && !isAndroidX86 { - r.Done = 0 + if ctxt.LinkMode == LinkExternal && ctxt.IsELF && !isAndroidX86 { + r.Done = false if r.Sym == nil { r.Sym = ctxt.Tlsg } r.Xsym = r.Sym r.Xadd = r.Add o = 0 - if SysArch.Family != sys.AMD64 { + if ctxt.Arch.Family != sys.AMD64 { o = r.Add } break } - if Buildmode == BuildmodePIE && Iself { + if ctxt.BuildMode == BuildModePIE && ctxt.IsELF { // We are linking the final executable, so we // can optimize any TLS IE relocation to LE. if Thearch.TLSIEtoLE == nil { - log.Fatalf("internal linking of TLS IE not supported on %v", SysArch.Family) + log.Fatalf("internal linking of TLS IE not supported on %v", ctxt.Arch.Family) } Thearch.TLSIEtoLE(s, int(off), int(r.Siz)) o = int64(ctxt.Tlsoffset) - // TODO: o += r.Add when SysArch.Family != sys.AMD64? + // TODO: o += r.Add when ctxt.Arch.Family != sys.AMD64? // Why do we treat r.Add differently on AMD64? // Is the external linker using Xadd at all? } else { log.Fatalf("cannot handle R_TLS_IE (sym %s) when linking internally", s.Name) } - case objabi.R_ADDR: - if Linkmode == LinkExternal && r.Sym.Type != SCONST { - r.Done = 0 + if ctxt.LinkMode == LinkExternal && r.Sym.Type != sym.SCONST { + r.Done = false // set up addend for eventual relocation via outer symbol. 
- rs = r.Sym + rs := r.Sym r.Xadd = r.Add for rs.Outer != nil { @@ -546,34 +272,34 @@ func relocsym(ctxt *Link, s *Symbol) { rs = rs.Outer } - if rs.Type != SHOSTOBJ && rs.Type != SDYNIMPORT && rs.Sect == nil { + if rs.Type != sym.SHOSTOBJ && rs.Type != sym.SDYNIMPORT && rs.Sect == nil { Errorf(s, "missing section for relocation target %s", rs.Name) } r.Xsym = rs o = r.Xadd - if Iself { - if SysArch.Family == sys.AMD64 { + if ctxt.IsELF { + if ctxt.Arch.Family == sys.AMD64 { o = 0 } - } else if Headtype == objabi.Hdarwin { + } else if ctxt.HeadType == objabi.Hdarwin { // ld64 for arm64 has a bug where if the address pointed to by o exists in the // symbol table (dynid >= 0), or is inside a symbol that exists in the symbol // table, then it will add o twice into the relocated value. // The workaround is that on arm64 don't ever add symaddr to o and always use // extern relocation by requiring rs->dynid >= 0. - if rs.Type != SHOSTOBJ { - if SysArch.Family == sys.ARM64 && rs.Dynid < 0 { + if rs.Type != sym.SHOSTOBJ { + if ctxt.Arch.Family == sys.ARM64 && rs.Dynid < 0 { Errorf(s, "R_ADDR reloc to %s+%d is not supported on darwin/arm64", rs.Name, o) } - if SysArch.Family != sys.ARM64 { + if ctxt.Arch.Family != sys.ARM64 { o += Symaddr(rs) } } - } else if Headtype == objabi.Hwindows { + } else if ctxt.HeadType == objabi.Hwindows { // nothing to do } else { - Errorf(s, "unhandled pcrel relocation to %s on %v", rs.Name, Headtype) + Errorf(s, "unhandled pcrel relocation to %s on %v", rs.Name, ctxt.HeadType) } break @@ -586,47 +312,46 @@ func relocsym(ctxt *Link, s *Symbol) { // fail at runtime. See https://golang.org/issue/7980. // Instead of special casing only amd64, we treat this as an error on all // 64-bit architectures so as to be future-proof. 
- if int32(o) < 0 && SysArch.PtrSize > 4 && siz == 4 { + if int32(o) < 0 && ctxt.Arch.PtrSize > 4 && siz == 4 { Errorf(s, "non-pc-relative relocation address for %s is too big: %#x (%#x + %#x)", r.Sym.Name, uint64(o), Symaddr(r.Sym), r.Add) errorexit() } - - case objabi.R_DWARFREF: - var sectName string - var vaddr int64 - switch { - case r.Sym.Sect != nil: - sectName = r.Sym.Sect.Name - vaddr = int64(r.Sym.Sect.Vaddr) - case r.Sym.Type == SDWARFRANGE: - sectName = ".debug_ranges" - default: + case objabi.R_DWARFSECREF: + if r.Sym.Sect == nil { Errorf(s, "missing DWARF section for relocation target %s", r.Sym.Name) } - if Linkmode == LinkExternal { - r.Done = 0 + if ctxt.LinkMode == LinkExternal { + r.Done = false + + // On most platforms, the external linker needs to adjust DWARF references + // as it combines DWARF sections. However, on Darwin, dsymutil does the + // DWARF linking, and it understands how to follow section offsets. + // Leaving in the relocation records confuses it (see + // https://golang.org/issue/22068) so drop them for Darwin. + if ctxt.HeadType == objabi.Hdarwin { + r.Done = true + } + // PE code emits IMAGE_REL_I386_SECREL and IMAGE_REL_AMD64_SECREL - // for R_DWARFREF relocations, while R_ADDR is replaced with + // for R_DWARFSECREF relocations, while R_ADDR is replaced with // IMAGE_REL_I386_DIR32, IMAGE_REL_AMD64_ADDR64 and IMAGE_REL_AMD64_ADDR32. - // Do not replace R_DWARFREF with R_ADDR for windows - + // Do not replace R_DWARFSECREF with R_ADDR for windows - // let PE code emit correct relocations. 
- if Headtype != objabi.Hwindows { + if ctxt.HeadType != objabi.Hwindows { r.Type = objabi.R_ADDR } - r.Xsym = ctxt.Syms.ROLookup(sectName, 0) - r.Xadd = r.Add + Symaddr(r.Sym) - vaddr + r.Xsym = ctxt.Syms.ROLookup(r.Sym.Sect.Name, 0) + r.Xadd = r.Add + Symaddr(r.Sym) - int64(r.Sym.Sect.Vaddr) o = r.Xadd - rs = r.Xsym - if Iself && SysArch.Family == sys.AMD64 { + if ctxt.IsELF && ctxt.Arch.Family == sys.AMD64 { o = 0 } break } - o = Symaddr(r.Sym) + r.Add - vaddr - + o = Symaddr(r.Sym) + r.Add - int64(r.Sym.Sect.Vaddr) case objabi.R_WEAKADDROFF: if !r.Sym.Attr.Reachable() { continue @@ -642,10 +367,15 @@ func relocsym(ctxt *Link, s *Symbol) { o = Symaddr(r.Sym) - int64(r.Sym.Sect.Vaddr) + r.Add } + case objabi.R_ADDRCUOFF: + // debug_range and debug_loc elements use this relocation type to get an + // offset from the start of the compile unit. + o = Symaddr(r.Sym) + r.Add - Symaddr(r.Sym.Lib.Textp[0]) + // r->sym can be null when CALL $(constant) is transformed from absolute PC to relative PC call. case objabi.R_GOTPCREL: - if ctxt.DynlinkingGo() && Headtype == objabi.Hdarwin && r.Sym != nil && r.Sym.Type != SCONST { - r.Done = 0 + if ctxt.DynlinkingGo() && ctxt.HeadType == objabi.Hdarwin && r.Sym != nil && r.Sym.Type != sym.SCONST { + r.Done = false r.Xadd = r.Add r.Xadd -= int64(r.Siz) // relative to address after the relocated chunk r.Xsym = r.Sym @@ -656,11 +386,11 @@ func relocsym(ctxt *Link, s *Symbol) { } fallthrough case objabi.R_CALL, objabi.R_PCREL: - if Linkmode == LinkExternal && r.Sym != nil && r.Sym.Type != SCONST && (r.Sym.Sect != s.Sect || r.Type == objabi.R_GOTPCREL) { - r.Done = 0 + if ctxt.LinkMode == LinkExternal && r.Sym != nil && r.Sym.Type != sym.SCONST && (r.Sym.Sect != s.Sect || r.Type == objabi.R_GOTPCREL) { + r.Done = false // set up addend for eventual relocation via outer symbol. 
- rs = r.Sym + rs := r.Sym r.Xadd = r.Add for rs.Outer != nil { @@ -669,34 +399,34 @@ func relocsym(ctxt *Link, s *Symbol) { } r.Xadd -= int64(r.Siz) // relative to address after the relocated chunk - if rs.Type != SHOSTOBJ && rs.Type != SDYNIMPORT && rs.Sect == nil { + if rs.Type != sym.SHOSTOBJ && rs.Type != sym.SDYNIMPORT && rs.Sect == nil { Errorf(s, "missing section for relocation target %s", rs.Name) } r.Xsym = rs o = r.Xadd - if Iself { - if SysArch.Family == sys.AMD64 { + if ctxt.IsELF { + if ctxt.Arch.Family == sys.AMD64 { o = 0 } - } else if Headtype == objabi.Hdarwin { + } else if ctxt.HeadType == objabi.Hdarwin { if r.Type == objabi.R_CALL { - if rs.Type != SHOSTOBJ { + if rs.Type != sym.SHOSTOBJ { o += int64(uint64(Symaddr(rs)) - rs.Sect.Vaddr) } o -= int64(r.Off) // relative to section offset, not symbol - } else if SysArch.Family == sys.ARM { + } else if ctxt.Arch.Family == sys.ARM { // see ../arm/asm.go:/machoreloc1 o += Symaddr(rs) - int64(s.Value) - int64(r.Off) } else { o += int64(r.Siz) } - } else if Headtype == objabi.Hwindows && SysArch.Family == sys.AMD64 { // only amd64 needs PCREL + } else if ctxt.HeadType == objabi.Hwindows && ctxt.Arch.Family == sys.AMD64 { // only amd64 needs PCREL // PE/COFF's PC32 relocation uses the address after the relocated // bytes as the base. Compensate by skewing the addend. 
o += int64(r.Siz) } else { - Errorf(s, "unhandled pcrel relocation to %s on %v", rs.Name, Headtype) + Errorf(s, "unhandled pcrel relocation to %s on %v", rs.Name, ctxt.HeadType) } break @@ -708,12 +438,11 @@ func relocsym(ctxt *Link, s *Symbol) { } o += r.Add - (s.Value + int64(r.Off) + int64(r.Siz)) - case objabi.R_SIZE: o = r.Sym.Size + r.Add } - if r.Variant != RV_NONE { + if r.Variant != sym.RV_NONE { o = Thearch.Archrelocvariant(ctxt, r, s, o) } @@ -722,7 +451,7 @@ func relocsym(ctxt *Link, s *Symbol) { if r.Sym != nil { nam = r.Sym.Name } - fmt.Printf("relocate %s %#x (%#x+%#x, size %d) => %s %#x +%#x [type %d/%d, %x]\n", s.Name, s.Value+int64(off), s.Value, r.Off, r.Siz, nam, Symaddr(r.Sym), r.Add, r.Type, r.Variant, o) + fmt.Printf("relocate %s %#x (%#x+%#x, size %d) => %s %#x +%#x [type %d (%s)/%d, %x]\n", s.Name, s.Value+int64(off), s.Value, r.Off, r.Siz, nam, Symaddr(r.Sym), r.Add, r.Type, sym.RelocName(ctxt.Arch, r.Type), r.Variant, o) } switch siz { default: @@ -732,14 +461,12 @@ func relocsym(ctxt *Link, s *Symbol) { // TODO(rsc): Remove. 
case 1: s.P[off] = byte(int8(o)) - case 2: if o != int64(int16(o)) { Errorf(s, "relocation address for %s is too big: %#x", r.Sym.Name, o) } - i16 = int16(o) + i16 := int16(o) ctxt.Arch.ByteOrder.PutUint16(s.P[off:], uint16(i16)) - case 4: if r.Type == objabi.R_PCREL || r.Type == objabi.R_CALL { if o != int64(int32(o)) { @@ -751,9 +478,8 @@ func relocsym(ctxt *Link, s *Symbol) { } } - fl = int32(o) + fl := int32(o) ctxt.Arch.ByteOrder.PutUint32(s.P[off:], uint32(fl)) - case 8: ctxt.Arch.ByteOrder.PutUint64(s.P[off:], uint64(o)) } @@ -768,99 +494,104 @@ func (ctxt *Link) reloc() { for _, s := range ctxt.Textp { relocsym(ctxt, s) } - for _, sym := range datap { - relocsym(ctxt, sym) + for _, s := range datap { + relocsym(ctxt, s) } for _, s := range dwarfp { relocsym(ctxt, s) } } -func dynrelocsym(ctxt *Link, s *Symbol) { - if Headtype == objabi.Hwindows && Linkmode != LinkExternal { - rel := ctxt.Syms.Lookup(".rel", 0) - if s == rel { - return +func windynrelocsym(ctxt *Link, s *sym.Symbol) { + rel := ctxt.Syms.Lookup(".rel", 0) + if s == rel { + return + } + for ri := 0; ri < len(s.R); ri++ { + r := &s.R[ri] + targ := r.Sym + if targ == nil { + continue } - for ri := 0; ri < len(s.R); ri++ { - r := &s.R[ri] - targ := r.Sym - if targ == nil { + if !targ.Attr.Reachable() { + if r.Type == objabi.R_WEAKADDROFF { continue } - if !targ.Attr.Reachable() { - if r.Type == objabi.R_WEAKADDROFF { - continue - } - Errorf(s, "dynamic relocation to unreachable symbol %s", targ.Name) - } - if r.Sym.Plt == -2 && r.Sym.Got != -2 { // make dynimport JMP table for PE object files. 
- targ.Plt = int32(rel.Size) - r.Sym = rel - r.Add = int64(targ.Plt) - - // jmp *addr - if SysArch.Family == sys.I386 { - Adduint8(ctxt, rel, 0xff) - Adduint8(ctxt, rel, 0x25) - Addaddr(ctxt, rel, targ) - Adduint8(ctxt, rel, 0x90) - Adduint8(ctxt, rel, 0x90) - } else { - Adduint8(ctxt, rel, 0xff) - Adduint8(ctxt, rel, 0x24) - Adduint8(ctxt, rel, 0x25) - addaddrplus4(ctxt, rel, targ, 0) - Adduint8(ctxt, rel, 0x90) - } - } else if r.Sym.Plt >= 0 { - r.Sym = rel - r.Add = int64(targ.Plt) - } + Errorf(s, "dynamic relocation to unreachable symbol %s", targ.Name) } + if r.Sym.Plt == -2 && r.Sym.Got != -2 { // make dynimport JMP table for PE object files. + targ.Plt = int32(rel.Size) + r.Sym = rel + r.Add = int64(targ.Plt) + // jmp *addr + if ctxt.Arch.Family == sys.I386 { + rel.AddUint8(0xff) + rel.AddUint8(0x25) + rel.AddAddr(ctxt.Arch, targ) + rel.AddUint8(0x90) + rel.AddUint8(0x90) + } else { + rel.AddUint8(0xff) + rel.AddUint8(0x24) + rel.AddUint8(0x25) + rel.AddAddrPlus4(targ, 0) + rel.AddUint8(0x90) + } + } else if r.Sym.Plt >= 0 { + r.Sym = rel + r.Add = int64(targ.Plt) + } + } +} + +func dynrelocsym(ctxt *Link, s *sym.Symbol) { + if ctxt.HeadType == objabi.Hwindows { + if ctxt.LinkMode == LinkInternal { + windynrelocsym(ctxt, s) + } return } for ri := 0; ri < len(s.R); ri++ { r := &s.R[ri] - if Buildmode == BuildmodePIE && Linkmode == LinkInternal { + if ctxt.BuildMode == BuildModePIE && ctxt.LinkMode == LinkInternal { // It's expected that some relocations will be done // later by relocsym (R_TLS_LE, R_ADDROFF), so // don't worry if Adddynrel returns false. 
Thearch.Adddynrel(ctxt, s, r) continue } - if r.Sym != nil && r.Sym.Type == SDYNIMPORT || r.Type >= 256 { + if r.Sym != nil && r.Sym.Type == sym.SDYNIMPORT || r.Type >= 256 { if r.Sym != nil && !r.Sym.Attr.Reachable() { Errorf(s, "dynamic relocation to unreachable symbol %s", r.Sym.Name) } if !Thearch.Adddynrel(ctxt, s, r) { - Errorf(s, "unsupported dynamic relocation for symbol %s (type=%d stype=%d)", r.Sym.Name, r.Type, r.Sym.Type) + Errorf(s, "unsupported dynamic relocation for symbol %s (type=%d (%s) stype=%d (%s))", r.Sym.Name, r.Type, sym.RelocName(ctxt.Arch, r.Type), r.Sym.Type, r.Sym.Type) } } } } -func dynreloc(ctxt *Link, data *[SXREF][]*Symbol) { +func dynreloc(ctxt *Link, data *[sym.SXREF][]*sym.Symbol) { // -d suppresses dynamic loader format, so we may as well not // compute these sections or mark their symbols as reachable. - if *FlagD && Headtype != objabi.Hwindows { + if *FlagD && ctxt.HeadType != objabi.Hwindows { return } if ctxt.Debugvlog != 0 { - ctxt.Logf("%5.2f reloc\n", Cputime()) + ctxt.Logf("%5.2f dynreloc\n", Cputime()) } for _, s := range ctxt.Textp { dynrelocsym(ctxt, s) } for _, syms := range data { - for _, sym := range syms { - dynrelocsym(ctxt, sym) + for _, s := range syms { + dynrelocsym(ctxt, s) } } - if Iself { + if ctxt.IsELF { elfdynhash(ctxt) } } @@ -870,7 +601,7 @@ func Codeblk(ctxt *Link, addr int64, size int64) { } func CodeblkPad(ctxt *Link, addr int64, size int64, pad []byte) { if *flagA { - ctxt.Logf("codeblk [%#x,%#x) at offset %#x\n", addr, addr+size, coutbuf.Offset()) + ctxt.Logf("codeblk [%#x,%#x) at offset %#x\n", addr, addr+size, ctxt.Out.Offset()) } blk(ctxt, ctxt.Textp, addr, size, pad) @@ -881,11 +612,11 @@ func CodeblkPad(ctxt *Link, addr int64, size int64, pad []byte) { } syms := ctxt.Textp - for i, sym := range syms { - if !sym.Attr.Reachable() { + for i, s := range syms { + if !s.Attr.Reachable() { continue } - if sym.Value >= addr { + if s.Value >= addr { syms = syms[i:] break } @@ -893,24 +624,24 @@ func 
CodeblkPad(ctxt *Link, addr int64, size int64, pad []byte) { eaddr := addr + size var q []byte - for _, sym := range syms { - if !sym.Attr.Reachable() { + for _, s := range syms { + if !s.Attr.Reachable() { continue } - if sym.Value >= eaddr { + if s.Value >= eaddr { break } - if addr < sym.Value { + if addr < s.Value { ctxt.Logf("%-20s %.8x|", "_", uint64(addr)) - for ; addr < sym.Value; addr++ { + for ; addr < s.Value; addr++ { ctxt.Logf(" %.2x", 0) } ctxt.Logf("\n") } - ctxt.Logf("%.6x\t%-20s\n", uint64(addr), sym.Name) - q = sym.P + ctxt.Logf("%.6x\t%-20s\n", uint64(addr), s.Name) + q = s.P for len(q) >= 16 { ctxt.Logf("%.6x\t% x\n", uint64(addr), q[:16]) @@ -932,9 +663,9 @@ func CodeblkPad(ctxt *Link, addr int64, size int64, pad []byte) { } } -func blk(ctxt *Link, syms []*Symbol, addr, size int64, pad []byte) { +func blk(ctxt *Link, syms []*sym.Symbol, addr, size int64, pad []byte) { for i, s := range syms { - if s.Type&SSUB == 0 && s.Value >= addr { + if !s.Attr.SubSymbol() && s.Value >= addr { syms = syms[i:] break } @@ -942,7 +673,7 @@ func blk(ctxt *Link, syms []*Symbol, addr, size int64, pad []byte) { eaddr := addr + size for _, s := range syms { - if s.Type&SSUB != 0 { + if s.Attr.SubSymbol() { continue } if s.Value >= eaddr { @@ -953,13 +684,13 @@ func blk(ctxt *Link, syms []*Symbol, addr, size int64, pad []byte) { errorexit() } if addr < s.Value { - strnputPad("", int(s.Value-addr), pad) + ctxt.Out.WriteStringPad("", int(s.Value-addr), pad) addr = s.Value } - Cwrite(s.P) + ctxt.Out.Write(s.P) addr += int64(len(s.P)) if addr < s.Value+s.Size { - strnputPad("", int(s.Value+s.Size-addr), pad) + ctxt.Out.WriteStringPad("", int(s.Value+s.Size-addr), pad) addr = s.Value + s.Size } if addr != s.Value+s.Size { @@ -972,14 +703,14 @@ func blk(ctxt *Link, syms []*Symbol, addr, size int64, pad []byte) { } if addr < eaddr { - strnputPad("", int(eaddr-addr), pad) + ctxt.Out.WriteStringPad("", int(eaddr-addr), pad) } - Cflush() + ctxt.Out.Flush() } func Datblk(ctxt 
*Link, addr int64, size int64) { if *flagA { - ctxt.Logf("datblk [%#x,%#x) at offset %#x\n", addr, addr+size, coutbuf.Offset()) + ctxt.Logf("datblk [%#x,%#x) at offset %#x\n", addr, addr+size, ctxt.Out.Offset()) } blk(ctxt, datap, addr, size, zeros[:]) @@ -1021,7 +752,7 @@ func Datblk(ctxt *Link, addr int64, size int64) { } ctxt.Logf("\n") - if Linkmode != LinkExternal { + if ctxt.LinkMode != LinkExternal { continue } for _, r := range sym.R { @@ -1050,7 +781,7 @@ func Datblk(ctxt *Link, addr int64, size int64) { func Dwarfblk(ctxt *Link, addr int64, size int64) { if *flagA { - ctxt.Logf("dwarfblk [%#x,%#x) at offset %#x\n", addr, addr+size, coutbuf.Offset()) + ctxt.Logf("dwarfblk [%#x,%#x) at offset %#x\n", addr, addr+size, ctxt.Out.Offset()) } blk(ctxt, dwarfp, addr, size, zeros[:]) @@ -1058,32 +789,7 @@ func Dwarfblk(ctxt *Link, addr int64, size int64) { var zeros [512]byte -// strnput writes the first n bytes of s. -// If n is larger than len(s), -// it is padded with NUL bytes. -func strnput(s string, n int) { - strnputPad(s, n, zeros[:]) -} - -// strnput writes the first n bytes of s. -// If n is larger than len(s), -// it is padded with the bytes in pad (repeated as needed). 
-func strnputPad(s string, n int, pad []byte) { - if len(s) >= n { - Cwritestring(s[:n]) - } else { - Cwritestring(s) - n -= len(s) - for n > len(pad) { - Cwrite(pad) - n -= len(pad) - - } - Cwrite(pad[:n]) - } -} - -var strdata []*Symbol +var strdata []*sym.Symbol func addstrdata1(ctxt *Link, arg string) { eq := strings.Index(arg, "=") @@ -1091,7 +797,12 @@ func addstrdata1(ctxt *Link, arg string) { if eq < 0 || dot < 0 { Exitf("-X flag requires argument of the form importpath.name=value") } - addstrdata(ctxt, objabi.PathToPrefix(arg[:dot])+arg[dot:eq], arg[eq+1:]) + pkg := arg[:dot] + if ctxt.BuildMode == BuildModePlugin && pkg == "main" { + pkg = *flagPluginPath + } + pkg = objabi.PathToPrefix(pkg) + addstrdata(ctxt, pkg+arg[dot:eq], arg[eq+1:]) } func addstrdata(ctxt *Link, name string, value string) { @@ -1099,28 +810,28 @@ func addstrdata(ctxt *Link, name string, value string) { sp := ctxt.Syms.Lookup(p, 0) Addstring(sp, value) - sp.Type = SRODATA + sp.Type = sym.SRODATA s := ctxt.Syms.Lookup(name, 0) s.Size = 0 - s.Attr |= AttrDuplicateOK + s.Attr |= sym.AttrDuplicateOK reachable := s.Attr.Reachable() - Addaddr(ctxt, s, sp) - adduintxx(ctxt, s, uint64(len(value)), SysArch.PtrSize) + s.AddAddr(ctxt.Arch, sp) + s.AddUint(ctxt.Arch, uint64(len(value))) // addstring, addaddr, etc., mark the symbols as reachable. // In this case that is not necessarily true, so stick to what // we know before entering this function. 
- s.Attr.Set(AttrReachable, reachable) + s.Attr.Set(sym.AttrReachable, reachable) strdata = append(strdata, s) - sp.Attr.Set(AttrReachable, reachable) + sp.Attr.Set(sym.AttrReachable, reachable) } func (ctxt *Link) checkstrdata() { for _, s := range strdata { - if s.Type == STEXT { + if s.Type == sym.STEXT { Errorf(s, "cannot use -X with text symbol") } else if s.Gotype != nil && s.Gotype.Name != "type.string" { Errorf(s, "cannot use -X with non-string symbol") @@ -1128,11 +839,11 @@ func (ctxt *Link) checkstrdata() { } } -func Addstring(s *Symbol, str string) int64 { +func Addstring(s *sym.Symbol, str string) int64 { if s.Type == 0 { - s.Type = SNOPTRDATA + s.Type = sym.SNOPTRDATA } - s.Attr |= AttrReachable + s.Attr |= sym.AttrReachable r := s.Size if s.Name == ".shstrtab" { elfsetstring(s, str, int(r)) @@ -1145,47 +856,44 @@ func Addstring(s *Symbol, str string) int64 { // addgostring adds str, as a Go string value, to s. symname is the name of the // symbol used to define the string data and must be unique per linked object. 
-func addgostring(ctxt *Link, s *Symbol, symname, str string) { - sym := ctxt.Syms.Lookup(symname, 0) - if sym.Type != Sxxx { +func addgostring(ctxt *Link, s *sym.Symbol, symname, str string) { + sdata := ctxt.Syms.Lookup(symname, 0) + if sdata.Type != sym.Sxxx { Errorf(s, "duplicate symname in addgostring: %s", symname) } - sym.Attr |= AttrReachable - sym.Attr |= AttrLocal - sym.Type = SRODATA - sym.Size = int64(len(str)) - sym.P = []byte(str) - Addaddr(ctxt, s, sym) - adduint(ctxt, s, uint64(len(str))) + sdata.Attr |= sym.AttrReachable + sdata.Attr |= sym.AttrLocal + sdata.Type = sym.SRODATA + sdata.Size = int64(len(str)) + sdata.P = []byte(str) + s.AddAddr(ctxt.Arch, sdata) + s.AddUint(ctxt.Arch, uint64(len(str))) } -func addinitarrdata(ctxt *Link, s *Symbol) { +func addinitarrdata(ctxt *Link, s *sym.Symbol) { p := s.Name + ".ptr" sp := ctxt.Syms.Lookup(p, 0) - sp.Type = SINITARR + sp.Type = sym.SINITARR sp.Size = 0 - sp.Attr |= AttrDuplicateOK - Addaddr(ctxt, sp, s) + sp.Attr |= sym.AttrDuplicateOK + sp.AddAddr(ctxt.Arch, s) } func dosymtype(ctxt *Link) { - switch Buildmode { - case BuildmodeCArchive, BuildmodeCShared: + switch ctxt.BuildMode { + case BuildModeCArchive, BuildModeCShared: for _, s := range ctxt.Syms.Allsym { // Create a new entry in the .init_array section that points to the // library initializer function. - switch Buildmode { - case BuildmodeCArchive, BuildmodeCShared: - if s.Name == *flagEntrySymbol { - addinitarrdata(ctxt, s) - } + if s.Name == *flagEntrySymbol { + addinitarrdata(ctxt, s) } } } } // symalign returns the required alignment for the given symbol s. 
-func symalign(s *Symbol) int32 { +func symalign(s *sym.Symbol) int32 { min := int32(Thearch.Minalign) if s.Align >= min { return s.Align @@ -1204,7 +912,7 @@ func symalign(s *Symbol) int32 { return align } -func aligndatsize(datsize int64, s *Symbol) int64 { +func aligndatsize(datsize int64, s *sym.Symbol) int64 { return Rnd(datsize, int64(symalign(s))) } @@ -1212,7 +920,7 @@ const debugGCProg = false type GCProg struct { ctxt *Link - sym *Symbol + sym *sym.Symbol w gcprog.Writer } @@ -1228,21 +936,21 @@ func (p *GCProg) Init(ctxt *Link, name string) { func (p *GCProg) writeByte(ctxt *Link) func(x byte) { return func(x byte) { - Adduint8(ctxt, p.sym, x) + p.sym.AddUint8(x) } } func (p *GCProg) End(size int64) { - p.w.ZeroUntil(size / int64(SysArch.PtrSize)) + p.w.ZeroUntil(size / int64(p.ctxt.Arch.PtrSize)) p.w.End() if debugGCProg { fmt.Fprintf(os.Stderr, "ld: end GCProg\n") } } -func (p *GCProg) AddSym(s *Symbol) { +func (p *GCProg) AddSym(s *sym.Symbol) { typ := s.Gotype - // Things without pointers should be in SNOPTRDATA or SNOPTRBSS; + // Things without pointers should be in sym.SNOPTRDATA or sym.SNOPTRBSS; // everything we see should have pointers and should therefore have a type. if typ == nil { switch s.Name { @@ -1256,14 +964,14 @@ func (p *GCProg) AddSym(s *Symbol) { return } - ptrsize := int64(SysArch.PtrSize) + ptrsize := int64(p.ctxt.Arch.PtrSize) nptr := decodetypePtrdata(p.ctxt.Arch, typ) / ptrsize if debugGCProg { fmt.Fprintf(os.Stderr, "gcprog sym: %s at %d (ptr=%d+%d)\n", s.Name, s.Value, s.Value/ptrsize, nptr) } - if decodetypeUsegcprog(typ) == 0 { + if decodetypeUsegcprog(p.ctxt.Arch, typ) == 0 { // Copy pointers from mask into program. mask := decodetypeGcmask(p.ctxt, typ) for i := int64(0); i < nptr; i++ { @@ -1280,12 +988,12 @@ func (p *GCProg) AddSym(s *Symbol) { p.w.Append(prog[4:], nptr) } -// dataSortKey is used to sort a slice of data symbol *Symbol pointers. +// dataSortKey is used to sort a slice of data symbol *sym.Symbol pointers. 
// The sort keys are kept inline to improve cache behavior while sorting. type dataSortKey struct { size int64 name string - sym *Symbol + sym *sym.Symbol } type bySizeAndName []dataSortKey @@ -1300,24 +1008,26 @@ func (d bySizeAndName) Less(i, j int) bool { return s1.name < s2.name } -const cutoff int64 = 2e9 // 2 GB (or so; looks better in errors than 2^31) +// cutoff is the maximum data section size permitted by the linker +// (see issue #9862). +const cutoff = 2e9 // 2 GB (or so; looks better in errors than 2^31) -func checkdatsize(ctxt *Link, datsize int64, symn SymKind) { +func checkdatsize(ctxt *Link, datsize int64, symn sym.SymKind) { if datsize > cutoff { - Errorf(nil, "too much data in section %v (over %d bytes)", symn, cutoff) + Errorf(nil, "too much data in section %v (over %v bytes)", symn, cutoff) } } // datap is a collection of reachable data symbols in address order. // Generated by dodata. -var datap []*Symbol +var datap []*sym.Symbol func (ctxt *Link) dodata() { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f dodata\n", Cputime()) } - if ctxt.DynlinkingGo() && Headtype == objabi.Hdarwin { + if ctxt.DynlinkingGo() && ctxt.HeadType == objabi.Hdarwin { // The values in moduledata are filled out by relocations // pointing to the addresses of these special symbols. // Typically these symbols have no size and are not laid @@ -1337,33 +1047,33 @@ func (ctxt *Link) dodata() { // as normal symbols, and give them a little size. 
bss := ctxt.Syms.Lookup("runtime.bss", 0) bss.Size = 8 - bss.Attr.Set(AttrSpecial, false) + bss.Attr.Set(sym.AttrSpecial, false) - ctxt.Syms.Lookup("runtime.ebss", 0).Attr.Set(AttrSpecial, false) + ctxt.Syms.Lookup("runtime.ebss", 0).Attr.Set(sym.AttrSpecial, false) data := ctxt.Syms.Lookup("runtime.data", 0) data.Size = 8 - data.Attr.Set(AttrSpecial, false) + data.Attr.Set(sym.AttrSpecial, false) - ctxt.Syms.Lookup("runtime.edata", 0).Attr.Set(AttrSpecial, false) + ctxt.Syms.Lookup("runtime.edata", 0).Attr.Set(sym.AttrSpecial, false) types := ctxt.Syms.Lookup("runtime.types", 0) - types.Type = STYPE + types.Type = sym.STYPE types.Size = 8 - types.Attr.Set(AttrSpecial, false) + types.Attr.Set(sym.AttrSpecial, false) etypes := ctxt.Syms.Lookup("runtime.etypes", 0) - etypes.Type = SFUNCTAB - etypes.Attr.Set(AttrSpecial, false) + etypes.Type = sym.SFUNCTAB + etypes.Attr.Set(sym.AttrSpecial, false) } // Collect data symbols by type into data. - var data [SXREF][]*Symbol + var data [sym.SXREF][]*sym.Symbol for _, s := range ctxt.Syms.Allsym { - if !s.Attr.Reachable() || s.Attr.Special() { + if !s.Attr.Reachable() || s.Attr.Special() || s.Attr.SubSymbol() { continue } - if s.Type <= STEXT || s.Type >= SXREF { + if s.Type <= sym.STEXT || s.Type >= sym.SXREF { continue } data[s.Type] = append(data[s.Type], s) @@ -1375,25 +1085,25 @@ func (ctxt *Link) dodata() { // symbol, which is itself data. // // On darwin, we need the symbol table numbers for dynreloc. - if Headtype == objabi.Hdarwin { + if ctxt.HeadType == objabi.Hdarwin { machosymorder(ctxt) } dynreloc(ctxt, &data) - if UseRelro() { + if ctxt.UseRelro() { // "read only" data with relocations needs to go in its own section // when building a shared library. We do this by boosting objects of // type SXXX with relocations to type SXXXRELRO. 
- for _, symnro := range readOnly { - symnrelro := relROMap[symnro] + for _, symnro := range sym.ReadOnly { + symnrelro := sym.RelROMap[symnro] - ro := []*Symbol{} + ro := []*sym.Symbol{} relro := data[symnrelro] for _, s := range data[symnro] { isRelro := len(s.R) > 0 switch s.Type { - case STYPE, STYPERELRO, SGOFUNCRELRO: + case sym.STYPE, sym.STYPERELRO, sym.SGOFUNCRELRO: // Symbols are not sorted yet, so it is possible // that an Outer symbol has been changed to a // relro Type before it reaches here. @@ -1427,10 +1137,10 @@ func (ctxt *Link) dodata() { } // Sort symbols. - var dataMaxAlign [SXREF]int32 + var dataMaxAlign [sym.SXREF]int32 var wg sync.WaitGroup for symn := range data { - symn := SymKind(symn) + symn := sym.SymKind(symn) wg.Add(1) go func() { data[symn], dataMaxAlign[symn] = dodataSect(ctxt, symn, data[symn]) @@ -1446,20 +1156,20 @@ func (ctxt *Link) dodata() { datsize := int64(0) // Writable data sections that do not need any specialized handling. - writable := []SymKind{ - SELFSECT, - SMACHO, - SMACHOGOT, - SWINDOWS, + writable := []sym.SymKind{ + sym.SELFSECT, + sym.SMACHO, + sym.SMACHOGOT, + sym.SWINDOWS, } for _, symn := range writable { for _, s := range data[symn] { - sect := addsection(&Segdata, s.Name, 06) + sect := addsection(ctxt.Arch, &Segdata, s.Name, 06) sect.Align = symalign(s) datsize = Rnd(datsize, int64(sect.Align)) sect.Vaddr = uint64(datsize) s.Sect = sect - s.Type = SDATA + s.Type = sym.SDATA s.Value = int64(uint64(datsize) - sect.Vaddr) datsize += s.Size sect.Length = uint64(datsize) - sect.Vaddr @@ -1468,16 +1178,16 @@ func (ctxt *Link) dodata() { } // .got (and .toc on ppc64) - if len(data[SELFGOT]) > 0 { - sect := addsection(&Segdata, ".got", 06) - sect.Align = dataMaxAlign[SELFGOT] + if len(data[sym.SELFGOT]) > 0 { + sect := addsection(ctxt.Arch, &Segdata, ".got", 06) + sect.Align = dataMaxAlign[sym.SELFGOT] datsize = Rnd(datsize, int64(sect.Align)) sect.Vaddr = uint64(datsize) - var toc *Symbol - for _, s := range 
data[SELFGOT] { + var toc *sym.Symbol + for _, s := range data[sym.SELFGOT] { datsize = aligndatsize(datsize, s) s.Sect = sect - s.Type = SDATA + s.Type = sym.SDATA s.Value = int64(uint64(datsize) - sect.Vaddr) // Resolve .TOC. symbol for this object file (ppc64) @@ -1493,98 +1203,98 @@ func (ctxt *Link) dodata() { datsize += s.Size } - checkdatsize(ctxt, datsize, SELFGOT) + checkdatsize(ctxt, datsize, sym.SELFGOT) sect.Length = uint64(datsize) - sect.Vaddr } /* pointer-free data */ - sect := addsection(&Segdata, ".noptrdata", 06) - sect.Align = dataMaxAlign[SNOPTRDATA] + sect := addsection(ctxt.Arch, &Segdata, ".noptrdata", 06) + sect.Align = dataMaxAlign[sym.SNOPTRDATA] datsize = Rnd(datsize, int64(sect.Align)) sect.Vaddr = uint64(datsize) ctxt.Syms.Lookup("runtime.noptrdata", 0).Sect = sect ctxt.Syms.Lookup("runtime.enoptrdata", 0).Sect = sect - for _, s := range data[SNOPTRDATA] { + for _, s := range data[sym.SNOPTRDATA] { datsize = aligndatsize(datsize, s) s.Sect = sect - s.Type = SDATA + s.Type = sym.SDATA s.Value = int64(uint64(datsize) - sect.Vaddr) datsize += s.Size } - checkdatsize(ctxt, datsize, SNOPTRDATA) + checkdatsize(ctxt, datsize, sym.SNOPTRDATA) sect.Length = uint64(datsize) - sect.Vaddr - hasinitarr := *FlagLinkshared + hasinitarr := ctxt.linkShared /* shared library initializer */ - switch Buildmode { - case BuildmodeCArchive, BuildmodeCShared, BuildmodeShared, BuildmodePlugin: + switch ctxt.BuildMode { + case BuildModeCArchive, BuildModeCShared, BuildModeShared, BuildModePlugin: hasinitarr = true } if hasinitarr { - sect := addsection(&Segdata, ".init_array", 06) - sect.Align = dataMaxAlign[SINITARR] + sect := addsection(ctxt.Arch, &Segdata, ".init_array", 06) + sect.Align = dataMaxAlign[sym.SINITARR] datsize = Rnd(datsize, int64(sect.Align)) sect.Vaddr = uint64(datsize) - for _, s := range data[SINITARR] { + for _, s := range data[sym.SINITARR] { datsize = aligndatsize(datsize, s) s.Sect = sect s.Value = int64(uint64(datsize) - sect.Vaddr) 
datsize += s.Size } sect.Length = uint64(datsize) - sect.Vaddr - checkdatsize(ctxt, datsize, SINITARR) + checkdatsize(ctxt, datsize, sym.SINITARR) } /* data */ - sect = addsection(&Segdata, ".data", 06) - sect.Align = dataMaxAlign[SDATA] + sect = addsection(ctxt.Arch, &Segdata, ".data", 06) + sect.Align = dataMaxAlign[sym.SDATA] datsize = Rnd(datsize, int64(sect.Align)) sect.Vaddr = uint64(datsize) ctxt.Syms.Lookup("runtime.data", 0).Sect = sect ctxt.Syms.Lookup("runtime.edata", 0).Sect = sect var gc GCProg gc.Init(ctxt, "runtime.gcdata") - for _, s := range data[SDATA] { + for _, s := range data[sym.SDATA] { s.Sect = sect - s.Type = SDATA + s.Type = sym.SDATA datsize = aligndatsize(datsize, s) s.Value = int64(uint64(datsize) - sect.Vaddr) gc.AddSym(s) datsize += s.Size } - checkdatsize(ctxt, datsize, SDATA) + checkdatsize(ctxt, datsize, sym.SDATA) sect.Length = uint64(datsize) - sect.Vaddr gc.End(int64(sect.Length)) /* bss */ - sect = addsection(&Segdata, ".bss", 06) - sect.Align = dataMaxAlign[SBSS] + sect = addsection(ctxt.Arch, &Segdata, ".bss", 06) + sect.Align = dataMaxAlign[sym.SBSS] datsize = Rnd(datsize, int64(sect.Align)) sect.Vaddr = uint64(datsize) ctxt.Syms.Lookup("runtime.bss", 0).Sect = sect ctxt.Syms.Lookup("runtime.ebss", 0).Sect = sect gc = GCProg{} gc.Init(ctxt, "runtime.gcbss") - for _, s := range data[SBSS] { + for _, s := range data[sym.SBSS] { s.Sect = sect datsize = aligndatsize(datsize, s) s.Value = int64(uint64(datsize) - sect.Vaddr) gc.AddSym(s) datsize += s.Size } - checkdatsize(ctxt, datsize, SBSS) + checkdatsize(ctxt, datsize, sym.SBSS) sect.Length = uint64(datsize) - sect.Vaddr gc.End(int64(sect.Length)) /* pointer-free bss */ - sect = addsection(&Segdata, ".noptrbss", 06) - sect.Align = dataMaxAlign[SNOPTRBSS] + sect = addsection(ctxt.Arch, &Segdata, ".noptrbss", 06) + sect.Align = dataMaxAlign[sym.SNOPTRBSS] datsize = Rnd(datsize, int64(sect.Align)) sect.Vaddr = uint64(datsize) ctxt.Syms.Lookup("runtime.noptrbss", 0).Sect = sect 
ctxt.Syms.Lookup("runtime.enoptrbss", 0).Sect = sect - for _, s := range data[SNOPTRBSS] { + for _, s := range data[sym.SNOPTRBSS] { datsize = aligndatsize(datsize, s) s.Sect = sect s.Value = int64(uint64(datsize) - sect.Vaddr) @@ -1593,24 +1303,24 @@ func (ctxt *Link) dodata() { sect.Length = uint64(datsize) - sect.Vaddr ctxt.Syms.Lookup("runtime.end", 0).Sect = sect - checkdatsize(ctxt, datsize, SNOPTRBSS) + checkdatsize(ctxt, datsize, sym.SNOPTRBSS) - if len(data[STLSBSS]) > 0 { - var sect *Section - if Iself && (Linkmode == LinkExternal || !*FlagD) { - sect = addsection(&Segdata, ".tbss", 06) - sect.Align = int32(SysArch.PtrSize) + if len(data[sym.STLSBSS]) > 0 { + var sect *sym.Section + if ctxt.IsELF && (ctxt.LinkMode == LinkExternal || !*FlagD) { + sect = addsection(ctxt.Arch, &Segdata, ".tbss", 06) + sect.Align = int32(ctxt.Arch.PtrSize) sect.Vaddr = 0 } datsize = 0 - for _, s := range data[STLSBSS] { + for _, s := range data[sym.STLSBSS] { datsize = aligndatsize(datsize, s) s.Sect = sect s.Value = datsize datsize += s.Size } - checkdatsize(ctxt, datsize, STLSBSS) + checkdatsize(ctxt, datsize, sym.STLSBSS) if sect != nil { sect.Length = uint64(datsize) @@ -1627,8 +1337,8 @@ func (ctxt *Link) dodata() { * since it's not our decision; that code expects the sections in * segtext. 
*/ - var segro *Segment - if Iself && Linkmode == LinkInternal { + var segro *sym.Segment + if ctxt.IsELF && ctxt.LinkMode == LinkInternal { segro = &Segrodata } else { segro = &Segtext @@ -1637,44 +1347,44 @@ func (ctxt *Link) dodata() { datsize = 0 /* read-only executable ELF, Mach-O sections */ - if len(data[STEXT]) != 0 { - Errorf(nil, "dodata found an STEXT symbol: %s", data[STEXT][0].Name) + if len(data[sym.STEXT]) != 0 { + Errorf(nil, "dodata found an sym.STEXT symbol: %s", data[sym.STEXT][0].Name) } - for _, s := range data[SELFRXSECT] { - sect := addsection(&Segtext, s.Name, 04) + for _, s := range data[sym.SELFRXSECT] { + sect := addsection(ctxt.Arch, &Segtext, s.Name, 04) sect.Align = symalign(s) datsize = Rnd(datsize, int64(sect.Align)) sect.Vaddr = uint64(datsize) s.Sect = sect - s.Type = SRODATA + s.Type = sym.SRODATA s.Value = int64(uint64(datsize) - sect.Vaddr) datsize += s.Size sect.Length = uint64(datsize) - sect.Vaddr - checkdatsize(ctxt, datsize, SELFRXSECT) + checkdatsize(ctxt, datsize, sym.SELFRXSECT) } /* read-only data */ - sect = addsection(segro, ".rodata", 04) + sect = addsection(ctxt.Arch, segro, ".rodata", 04) sect.Vaddr = 0 ctxt.Syms.Lookup("runtime.rodata", 0).Sect = sect ctxt.Syms.Lookup("runtime.erodata", 0).Sect = sect - if !UseRelro() { + if !ctxt.UseRelro() { ctxt.Syms.Lookup("runtime.types", 0).Sect = sect ctxt.Syms.Lookup("runtime.etypes", 0).Sect = sect } - for _, symn := range readOnly { + for _, symn := range sym.ReadOnly { align := dataMaxAlign[symn] if sect.Align < align { sect.Align = align } } datsize = Rnd(datsize, int64(sect.Align)) - for _, symn := range readOnly { + for _, symn := range sym.ReadOnly { for _, s := range data[symn] { datsize = aligndatsize(datsize, s) s.Sect = sect - s.Type = SRODATA + s.Type = sym.SRODATA s.Value = int64(uint64(datsize) - sect.Vaddr) datsize += s.Size } @@ -1683,31 +1393,31 @@ func (ctxt *Link) dodata() { sect.Length = uint64(datsize) - sect.Vaddr /* read-only ELF, Mach-O sections */ 
- for _, s := range data[SELFROSECT] { - sect = addsection(segro, s.Name, 04) + for _, s := range data[sym.SELFROSECT] { + sect = addsection(ctxt.Arch, segro, s.Name, 04) sect.Align = symalign(s) datsize = Rnd(datsize, int64(sect.Align)) sect.Vaddr = uint64(datsize) s.Sect = sect - s.Type = SRODATA + s.Type = sym.SRODATA s.Value = int64(uint64(datsize) - sect.Vaddr) datsize += s.Size sect.Length = uint64(datsize) - sect.Vaddr } - checkdatsize(ctxt, datsize, SELFROSECT) + checkdatsize(ctxt, datsize, sym.SELFROSECT) - for _, s := range data[SMACHOPLT] { - sect = addsection(segro, s.Name, 04) + for _, s := range data[sym.SMACHOPLT] { + sect = addsection(ctxt.Arch, segro, s.Name, 04) sect.Align = symalign(s) datsize = Rnd(datsize, int64(sect.Align)) sect.Vaddr = uint64(datsize) s.Sect = sect - s.Type = SRODATA + s.Type = sym.SRODATA s.Value = int64(uint64(datsize) - sect.Vaddr) datsize += s.Size sect.Length = uint64(datsize) - sect.Vaddr } - checkdatsize(ctxt, datsize, SMACHOPLT) + checkdatsize(ctxt, datsize, sym.SMACHOPLT) // There is some data that are conceptually read-only but are written to by // relocations. On GNU systems, we can arrange for the dynamic linker to @@ -1719,14 +1429,14 @@ func (ctxt *Link) dodata() { // situation. // TODO(mwhudson): It would make sense to do this more widely, but it makes // the system linker segfault on darwin. 
- addrelrosection := func(suffix string) *Section { - return addsection(segro, suffix, 04) + addrelrosection := func(suffix string) *sym.Section { + return addsection(ctxt.Arch, segro, suffix, 04) } - if UseRelro() { - addrelrosection = func(suffix string) *Section { + if ctxt.UseRelro() { + addrelrosection = func(suffix string) *sym.Section { seg := &Segrelrodata - if Linkmode == LinkExternal { + if ctxt.LinkMode == LinkExternal { // Using a separate segment with an external // linker results in some programs moving // their data sections unexpectedly, which @@ -1735,7 +1445,7 @@ func (ctxt *Link) dodata() { // sort out a rel.ro segment. seg = &Segrodata } - return addsection(seg, ".data.rel.ro"+suffix, 06) + return addsection(ctxt.Arch, seg, ".data.rel.ro"+suffix, 06) } /* data only written by relocations */ sect = addrelrosection("") @@ -1743,23 +1453,23 @@ func (ctxt *Link) dodata() { sect.Vaddr = 0 ctxt.Syms.Lookup("runtime.types", 0).Sect = sect ctxt.Syms.Lookup("runtime.etypes", 0).Sect = sect - for _, symnro := range readOnly { - symn := relROMap[symnro] + for _, symnro := range sym.ReadOnly { + symn := sym.RelROMap[symnro] align := dataMaxAlign[symn] if sect.Align < align { sect.Align = align } } datsize = Rnd(datsize, int64(sect.Align)) - for _, symnro := range readOnly { - symn := relROMap[symnro] + for _, symnro := range sym.ReadOnly { + symn := sym.RelROMap[symnro] for _, s := range data[symn] { datsize = aligndatsize(datsize, s) if s.Outer != nil && s.Outer.Sect != nil && s.Outer.Sect != sect { Errorf(s, "s.Outer (%s) in different section from s, %s != %s", s.Outer.Name, s.Outer.Sect.Name, sect.Name) } s.Sect = sect - s.Type = SRODATA + s.Type = sym.SRODATA s.Value = int64(uint64(datsize) - sect.Vaddr) datsize += s.Size } @@ -1771,65 +1481,65 @@ func (ctxt *Link) dodata() { /* typelink */ sect = addrelrosection(".typelink") - sect.Align = dataMaxAlign[STYPELINK] + sect.Align = dataMaxAlign[sym.STYPELINK] datsize = Rnd(datsize, int64(sect.Align)) 
sect.Vaddr = uint64(datsize) typelink := ctxt.Syms.Lookup("runtime.typelink", 0) typelink.Sect = sect - typelink.Type = SRODATA + typelink.Type = sym.SRODATA datsize += typelink.Size - checkdatsize(ctxt, datsize, STYPELINK) + checkdatsize(ctxt, datsize, sym.STYPELINK) sect.Length = uint64(datsize) - sect.Vaddr /* itablink */ sect = addrelrosection(".itablink") - sect.Align = dataMaxAlign[SITABLINK] + sect.Align = dataMaxAlign[sym.SITABLINK] datsize = Rnd(datsize, int64(sect.Align)) sect.Vaddr = uint64(datsize) ctxt.Syms.Lookup("runtime.itablink", 0).Sect = sect ctxt.Syms.Lookup("runtime.eitablink", 0).Sect = sect - for _, s := range data[SITABLINK] { + for _, s := range data[sym.SITABLINK] { datsize = aligndatsize(datsize, s) s.Sect = sect - s.Type = SRODATA + s.Type = sym.SRODATA s.Value = int64(uint64(datsize) - sect.Vaddr) datsize += s.Size } - checkdatsize(ctxt, datsize, SITABLINK) + checkdatsize(ctxt, datsize, sym.SITABLINK) sect.Length = uint64(datsize) - sect.Vaddr /* gosymtab */ sect = addrelrosection(".gosymtab") - sect.Align = dataMaxAlign[SSYMTAB] + sect.Align = dataMaxAlign[sym.SSYMTAB] datsize = Rnd(datsize, int64(sect.Align)) sect.Vaddr = uint64(datsize) ctxt.Syms.Lookup("runtime.symtab", 0).Sect = sect ctxt.Syms.Lookup("runtime.esymtab", 0).Sect = sect - for _, s := range data[SSYMTAB] { + for _, s := range data[sym.SSYMTAB] { datsize = aligndatsize(datsize, s) s.Sect = sect - s.Type = SRODATA + s.Type = sym.SRODATA s.Value = int64(uint64(datsize) - sect.Vaddr) datsize += s.Size } - checkdatsize(ctxt, datsize, SSYMTAB) + checkdatsize(ctxt, datsize, sym.SSYMTAB) sect.Length = uint64(datsize) - sect.Vaddr /* gopclntab */ sect = addrelrosection(".gopclntab") - sect.Align = dataMaxAlign[SPCLNTAB] + sect.Align = dataMaxAlign[sym.SPCLNTAB] datsize = Rnd(datsize, int64(sect.Align)) sect.Vaddr = uint64(datsize) ctxt.Syms.Lookup("runtime.pclntab", 0).Sect = sect ctxt.Syms.Lookup("runtime.epclntab", 0).Sect = sect - for _, s := range data[SPCLNTAB] { + for _, 
s := range data[sym.SPCLNTAB] { datsize = aligndatsize(datsize, s) s.Sect = sect - s.Type = SRODATA + s.Type = sym.SRODATA s.Value = int64(uint64(datsize) - sect.Vaddr) datsize += s.Size } - checkdatsize(ctxt, datsize, SRODATA) + checkdatsize(ctxt, datsize, sym.SRODATA) sect.Length = uint64(datsize) - sect.Vaddr // 6g uses 4-byte relocation offsets, so the entire segment must fit in 32 bits. @@ -1837,53 +1547,61 @@ func (ctxt *Link) dodata() { Errorf(nil, "read-only data segment too large: %d", datsize) } - for symn := SELFRXSECT; symn < SXREF; symn++ { + for symn := sym.SELFRXSECT; symn < sym.SXREF; symn++ { datap = append(datap, data[symn]...) } dwarfgeneratedebugsyms(ctxt) - var s *Symbol var i int - for i, s = range dwarfp { - if s.Type != SDWARFSECT { + for ; i < len(dwarfp); i++ { + s := dwarfp[i] + if s.Type != sym.SDWARFSECT { break } - sect = addsection(&Segdwarf, s.Name, 04) + sect = addsection(ctxt.Arch, &Segdwarf, s.Name, 04) sect.Align = 1 datsize = Rnd(datsize, int64(sect.Align)) sect.Vaddr = uint64(datsize) s.Sect = sect - s.Type = SRODATA + s.Type = sym.SRODATA s.Value = int64(uint64(datsize) - sect.Vaddr) datsize += s.Size sect.Length = uint64(datsize) - sect.Vaddr } - checkdatsize(ctxt, datsize, SDWARFSECT) + checkdatsize(ctxt, datsize, sym.SDWARFSECT) + + for i < len(dwarfp) { + curType := dwarfp[i].Type + var sect *sym.Section + switch curType { + case sym.SDWARFINFO: + sect = addsection(ctxt.Arch, &Segdwarf, ".debug_info", 04) + case sym.SDWARFRANGE: + sect = addsection(ctxt.Arch, &Segdwarf, ".debug_ranges", 04) + case sym.SDWARFLOC: + sect = addsection(ctxt.Arch, &Segdwarf, ".debug_loc", 04) + default: + Errorf(dwarfp[i], "unknown DWARF section %v", curType) + } - if i < len(dwarfp) { - sect = addsection(&Segdwarf, ".debug_info", 04) sect.Align = 1 datsize = Rnd(datsize, int64(sect.Align)) sect.Vaddr = uint64(datsize) - for _, s := range dwarfp[i:] { - // Syms can (incorrectly) appear twice on the list. Ignore repeats. 
- // See golang.org/issue/21566. - if s.Type == SRODATA { - continue - } - if s.Type != SDWARFINFO { + for ; i < len(dwarfp); i++ { + s := dwarfp[i] + if s.Type != curType { break } s.Sect = sect - s.Type = SRODATA + s.Type = sym.SRODATA s.Value = int64(uint64(datsize) - sect.Vaddr) - s.Attr |= AttrLocal + s.Attr |= sym.AttrLocal datsize += s.Size } sect.Length = uint64(datsize) - sect.Vaddr - checkdatsize(ctxt, datsize, SDWARFINFO) + checkdatsize(ctxt, datsize, curType) } /* number the sections */ @@ -1911,11 +1629,11 @@ func (ctxt *Link) dodata() { } } -func dodataSect(ctxt *Link, symn SymKind, syms []*Symbol) (result []*Symbol, maxAlign int32) { - if Headtype == objabi.Hdarwin { +func dodataSect(ctxt *Link, symn sym.SymKind, syms []*sym.Symbol) (result []*sym.Symbol, maxAlign int32) { + if ctxt.HeadType == objabi.Hdarwin { // Some symbols may no longer belong in syms // due to movement in machosymorder. - newSyms := make([]*Symbol, 0, len(syms)) + newSyms := make([]*sym.Symbol, 0, len(syms)) for _, s := range syms { if s.Type == symn { newSyms = append(newSyms, s) @@ -1924,13 +1642,13 @@ func dodataSect(ctxt *Link, symn SymKind, syms []*Symbol) (result []*Symbol, max syms = newSyms } - var head, tail *Symbol + var head, tail *sym.Symbol symsSort := make([]dataSortKey, 0, len(syms)) for _, s := range syms { if s.Attr.OnList() { log.Fatalf("symbol %s listed multiple times", s.Name) } - s.Attr |= AttrOnList + s.Attr |= sym.AttrOnList switch { case s.Size < int64(len(s.P)): Errorf(s, "initialize bounds (%d < %d)", s.Size, len(s.P)) @@ -1943,7 +1661,7 @@ func dodataSect(ctxt *Link, symn SymKind, syms []*Symbol) (result []*Symbol, max // If the usually-special section-marker symbols are being laid // out as regular symbols, put them either at the beginning or // end of their section. 
- if ctxt.DynlinkingGo() && Headtype == objabi.Hdarwin { + if ctxt.DynlinkingGo() && ctxt.HeadType == objabi.Hdarwin { switch s.Name { case "runtime.text", "runtime.bss", "runtime.data", "runtime.types": head = s @@ -1961,9 +1679,9 @@ func dodataSect(ctxt *Link, symn SymKind, syms []*Symbol) (result []*Symbol, max } switch s.Type { - case SELFGOT: + case sym.SELFGOT: // For ppc64, we want to interleave the .got and .toc sections - // from input files. Both are type SELFGOT, so in that case + // from input files. Both are type sym.SELFGOT, so in that case // we skip size comparison and fall through to the name // comparison (conveniently, .got sorts before .toc). key.size = 0 @@ -1990,7 +1708,7 @@ func dodataSect(ctxt *Link, symn SymKind, syms []*Symbol) (result []*Symbol, max syms[len(syms)-1] = tail } - if Iself && symn == SELFROSECT { + if ctxt.IsELF && symn == sym.SELFROSECT { // Make .rela and .rela.plt contiguous, the ELF ABI requires this // and Solaris actually cares. reli, plti := -1, -1 @@ -2018,8 +1736,8 @@ func dodataSect(ctxt *Link, symn SymKind, syms []*Symbol) (result []*Symbol, max // Setting the alignment explicitly prevents // symalign from basing it on the size and // getting it wrong. - rel.Align = int32(SysArch.RegSize) - plt.Align = int32(SysArch.RegSize) + rel.Align = int32(ctxt.Arch.RegSize) + plt.Align = int32(ctxt.Arch.RegSize) } } @@ -2032,27 +1750,27 @@ func dodataSect(ctxt *Link, symn SymKind, syms []*Symbol) (result []*Symbol, max // at the very beginning of the text segment. // This ``header'' is read by cmd/go. func (ctxt *Link) textbuildid() { - if Iself || Buildmode == BuildmodePlugin || *flagBuildid == "" { + if ctxt.IsELF || ctxt.BuildMode == BuildModePlugin || *flagBuildid == "" { return } - sym := ctxt.Syms.Lookup("go.buildid", 0) - sym.Attr |= AttrReachable + s := ctxt.Syms.Lookup("go.buildid", 0) + s.Attr |= sym.AttrReachable // The \xff is invalid UTF-8, meant to make it less likely // to find one of these accidentally. 
data := "\xff Go build ID: " + strconv.Quote(*flagBuildid) + "\n \xff" - sym.Type = STEXT - sym.P = []byte(data) - sym.Size = int64(len(sym.P)) + s.Type = sym.STEXT + s.P = []byte(data) + s.Size = int64(len(s.P)) ctxt.Textp = append(ctxt.Textp, nil) copy(ctxt.Textp[1:], ctxt.Textp) - ctxt.Textp[0] = sym + ctxt.Textp[0] = s } // assign addresses to text func (ctxt *Link) textaddress() { - addsection(&Segtext, ".text", 05) + addsection(ctxt.Arch, &Segtext, ".text", 05) // Assign PCs in text segment. // Could parallelize, by assigning to text @@ -2064,7 +1782,7 @@ func (ctxt *Link) textaddress() { text := ctxt.Syms.Lookup("runtime.text", 0) text.Sect = sect - if ctxt.DynlinkingGo() && Headtype == objabi.Hdarwin { + if ctxt.DynlinkingGo() && ctxt.HeadType == objabi.Hdarwin { etext := ctxt.Syms.Lookup("runtime.etext", 0) etext.Sect = sect @@ -2077,10 +1795,10 @@ func (ctxt *Link) textaddress() { n := 1 sect.Vaddr = va ntramps := 0 - for _, sym := range ctxt.Textp { - sect, n, va = assignAddress(ctxt, sect, n, sym, va, false) + for _, s := range ctxt.Textp { + sect, n, va = assignAddress(ctxt, sect, n, s, va, false) - trampoline(ctxt, sym) // resolve jumps, may add trampolines if jump too far + trampoline(ctxt, s) // resolve jumps, may add trampolines if jump too far // lay down trampolines after each function for ; ntramps < len(ctxt.tramps); ntramps++ { @@ -2094,13 +1812,13 @@ func (ctxt *Link) textaddress() { // merge tramps into Textp, keeping Textp in address order if ntramps != 0 { - newtextp := make([]*Symbol, 0, len(ctxt.Textp)+ntramps) + newtextp := make([]*sym.Symbol, 0, len(ctxt.Textp)+ntramps) i := 0 - for _, sym := range ctxt.Textp { - for ; i < ntramps && ctxt.tramps[i].Value < sym.Value; i++ { + for _, s := range ctxt.Textp { + for ; i < ntramps && ctxt.tramps[i].Value < s.Value; i++ { newtextp = append(newtextp, ctxt.tramps[i]) } - newtextp = append(newtextp, sym) + newtextp = append(newtextp, s) } newtextp = append(newtextp, ctxt.tramps[i:ntramps]...) 
@@ -2111,24 +1829,24 @@ func (ctxt *Link) textaddress() { // assigns address for a text symbol, returns (possibly new) section, its number, and the address // Note: once we have trampoline insertion support for external linking, this function // will not need to create new text sections, and so no need to return sect and n. -func assignAddress(ctxt *Link, sect *Section, n int, sym *Symbol, va uint64, isTramp bool) (*Section, int, uint64) { - sym.Sect = sect - if sym.Type&SSUB != 0 { +func assignAddress(ctxt *Link, sect *sym.Section, n int, s *sym.Symbol, va uint64, isTramp bool) (*sym.Section, int, uint64) { + s.Sect = sect + if s.Attr.SubSymbol() { return sect, n, va } - if sym.Align != 0 { - va = uint64(Rnd(int64(va), int64(sym.Align))) + if s.Align != 0 { + va = uint64(Rnd(int64(va), int64(s.Align))) } else { va = uint64(Rnd(int64(va), int64(Funcalign))) } - sym.Value = 0 - for sub := sym; sub != nil; sub = sub.Sub { + s.Value = 0 + for sub := s; sub != nil; sub = sub.Sub { sub.Value += int64(va) } funcsize := uint64(MINFUNC) // spacing required for findfunctab - if sym.Size > MINFUNC { - funcsize = uint64(sym.Size) + if s.Size > MINFUNC { + funcsize = uint64(s.Size) } // On ppc64x a text section should not be larger than 2^26 bytes due to the size of @@ -2140,15 +1858,15 @@ func assignAddress(ctxt *Link, sect *Section, n int, sym *Symbol, va uint64, isT // Only break at outermost syms. 
- if SysArch.InFamily(sys.PPC64) && sym.Outer == nil && Iself && Linkmode == LinkExternal && va-sect.Vaddr+funcsize+maxSizeTrampolinesPPC64(sym, isTramp) > 0x1c00000 { + if ctxt.Arch.InFamily(sys.PPC64) && s.Outer == nil && ctxt.IsELF && ctxt.LinkMode == LinkExternal && va-sect.Vaddr+funcsize+maxSizeTrampolinesPPC64(s, isTramp) > 0x1c00000 { // Set the length for the previous text section sect.Length = va - sect.Vaddr // Create new section, set the starting Vaddr - sect = addsection(&Segtext, ".text", 05) + sect = addsection(ctxt.Arch, &Segtext, ".text", 05) sect.Vaddr = va - sym.Sect = sect + s.Sect = sect // Create a symbol for the start of the secondary text sections ctxt.Syms.Lookup(fmt.Sprintf("runtime.text.%d", n), 0).Sect = sect @@ -2173,7 +1891,7 @@ func (ctxt *Link) address() { Segtext.Length = va - uint64(*FlagTextAddr) Segtext.Filelen = Segtext.Length - if Headtype == objabi.Hnacl { + if ctxt.HeadType == objabi.Hnacl { va += 32 // room for the "halt sled" } @@ -2230,23 +1948,22 @@ func (ctxt *Link) address() { Segdata.Vaddr = va Segdata.Fileoff = va - Segtext.Vaddr + Segtext.Fileoff Segdata.Filelen = 0 - if Headtype == objabi.Hwindows { + if ctxt.HeadType == objabi.Hwindows { Segdata.Fileoff = Segtext.Fileoff + uint64(Rnd(int64(Segtext.Length), PEFILEALIGN)) } - if Headtype == objabi.Hplan9 { + if ctxt.HeadType == objabi.Hplan9 { Segdata.Fileoff = Segtext.Fileoff + Segtext.Filelen } - var data *Section - var noptr *Section - var bss *Section - var noptrbss *Section - var vlen int64 + var data *sym.Section + var noptr *sym.Section + var bss *sym.Section + var noptrbss *sym.Section for i, s := range Segdata.Sections { - if Iself && s.Name == ".tbss" { + if ctxt.IsELF && s.Name == ".tbss" { continue } - vlen = int64(s.Length) - if i+1 < len(Segdata.Sections) && !(Iself && Segdata.Sections[i+1].Name == ".tbss") { + vlen := int64(s.Length) + if i+1 < len(Segdata.Sections) && !(ctxt.IsELF && Segdata.Sections[i+1].Name == ".tbss") { vlen = 
int64(Segdata.Sections[i+1].Vaddr - s.Vaddr) } s.Vaddr = va @@ -2273,17 +1990,17 @@ func (ctxt *Link) address() { Segdwarf.Vaddr = va Segdwarf.Fileoff = Segdata.Fileoff + uint64(Rnd(int64(Segdata.Filelen), int64(*FlagRound))) Segdwarf.Filelen = 0 - if Headtype == objabi.Hwindows { + if ctxt.HeadType == objabi.Hwindows { Segdwarf.Fileoff = Segdata.Fileoff + uint64(Rnd(int64(Segdata.Filelen), int64(PEFILEALIGN))) } for i, s := range Segdwarf.Sections { - vlen = int64(s.Length) + vlen := int64(s.Length) if i+1 < len(Segdwarf.Sections) { vlen = int64(Segdwarf.Sections[i+1].Vaddr - s.Vaddr) } s.Vaddr = va va += uint64(vlen) - if Headtype == objabi.Hwindows { + if ctxt.HeadType == objabi.Hwindows { va = uint64(Rnd(int64(va), PEFILEALIGN)) } Segdwarf.Length = va - Segdwarf.Vaddr @@ -2316,75 +2033,74 @@ func (ctxt *Link) address() { } } - for _, sym := range dwarfp { - if sym.Sect != nil { - sym.Value += int64(sym.Sect.Vaddr) + for _, s := range dwarfp { + if s.Sect != nil { + s.Value += int64(s.Sect.Vaddr) } - for sub := sym.Sub; sub != nil; sub = sub.Sub { - sub.Value += sym.Value + for sub := s.Sub; sub != nil; sub = sub.Sub { + sub.Value += s.Value } } - if Buildmode == BuildmodeShared { + if ctxt.BuildMode == BuildModeShared { s := ctxt.Syms.Lookup("go.link.abihashbytes", 0) sectSym := ctxt.Syms.Lookup(".note.go.abihash", 0) s.Sect = sectSym.Sect s.Value = int64(sectSym.Sect.Vaddr + 16) } - ctxt.xdefine("runtime.text", STEXT, int64(text.Vaddr)) - ctxt.xdefine("runtime.etext", STEXT, int64(lasttext.Vaddr+lasttext.Length)) + ctxt.xdefine("runtime.text", sym.STEXT, int64(text.Vaddr)) + ctxt.xdefine("runtime.etext", sym.STEXT, int64(lasttext.Vaddr+lasttext.Length)) // If there are multiple text sections, create runtime.text.n for // their section Vaddr, using n for index n := 1 for _, sect := range Segtext.Sections[1:] { - if sect.Name == ".text" { - symname := fmt.Sprintf("runtime.text.%d", n) - ctxt.xdefine(symname, STEXT, int64(sect.Vaddr)) - n++ - } else { + if 
sect.Name != ".text" { break } + symname := fmt.Sprintf("runtime.text.%d", n) + ctxt.xdefine(symname, sym.STEXT, int64(sect.Vaddr)) + n++ } - ctxt.xdefine("runtime.rodata", SRODATA, int64(rodata.Vaddr)) - ctxt.xdefine("runtime.erodata", SRODATA, int64(rodata.Vaddr+rodata.Length)) - ctxt.xdefine("runtime.types", SRODATA, int64(types.Vaddr)) - ctxt.xdefine("runtime.etypes", SRODATA, int64(types.Vaddr+types.Length)) - ctxt.xdefine("runtime.itablink", SRODATA, int64(itablink.Vaddr)) - ctxt.xdefine("runtime.eitablink", SRODATA, int64(itablink.Vaddr+itablink.Length)) + ctxt.xdefine("runtime.rodata", sym.SRODATA, int64(rodata.Vaddr)) + ctxt.xdefine("runtime.erodata", sym.SRODATA, int64(rodata.Vaddr+rodata.Length)) + ctxt.xdefine("runtime.types", sym.SRODATA, int64(types.Vaddr)) + ctxt.xdefine("runtime.etypes", sym.SRODATA, int64(types.Vaddr+types.Length)) + ctxt.xdefine("runtime.itablink", sym.SRODATA, int64(itablink.Vaddr)) + ctxt.xdefine("runtime.eitablink", sym.SRODATA, int64(itablink.Vaddr+itablink.Length)) - sym := ctxt.Syms.Lookup("runtime.gcdata", 0) - sym.Attr |= AttrLocal - ctxt.xdefine("runtime.egcdata", SRODATA, Symaddr(sym)+sym.Size) - ctxt.Syms.Lookup("runtime.egcdata", 0).Sect = sym.Sect + s := ctxt.Syms.Lookup("runtime.gcdata", 0) + s.Attr |= sym.AttrLocal + ctxt.xdefine("runtime.egcdata", sym.SRODATA, Symaddr(s)+s.Size) + ctxt.Syms.Lookup("runtime.egcdata", 0).Sect = s.Sect - sym = ctxt.Syms.Lookup("runtime.gcbss", 0) - sym.Attr |= AttrLocal - ctxt.xdefine("runtime.egcbss", SRODATA, Symaddr(sym)+sym.Size) - ctxt.Syms.Lookup("runtime.egcbss", 0).Sect = sym.Sect + s = ctxt.Syms.Lookup("runtime.gcbss", 0) + s.Attr |= sym.AttrLocal + ctxt.xdefine("runtime.egcbss", sym.SRODATA, Symaddr(s)+s.Size) + ctxt.Syms.Lookup("runtime.egcbss", 0).Sect = s.Sect - ctxt.xdefine("runtime.symtab", SRODATA, int64(symtab.Vaddr)) - ctxt.xdefine("runtime.esymtab", SRODATA, int64(symtab.Vaddr+symtab.Length)) - ctxt.xdefine("runtime.pclntab", SRODATA, int64(pclntab.Vaddr)) - 
ctxt.xdefine("runtime.epclntab", SRODATA, int64(pclntab.Vaddr+pclntab.Length)) - ctxt.xdefine("runtime.noptrdata", SNOPTRDATA, int64(noptr.Vaddr)) - ctxt.xdefine("runtime.enoptrdata", SNOPTRDATA, int64(noptr.Vaddr+noptr.Length)) - ctxt.xdefine("runtime.bss", SBSS, int64(bss.Vaddr)) - ctxt.xdefine("runtime.ebss", SBSS, int64(bss.Vaddr+bss.Length)) - ctxt.xdefine("runtime.data", SDATA, int64(data.Vaddr)) - ctxt.xdefine("runtime.edata", SDATA, int64(data.Vaddr+data.Length)) - ctxt.xdefine("runtime.noptrbss", SNOPTRBSS, int64(noptrbss.Vaddr)) - ctxt.xdefine("runtime.enoptrbss", SNOPTRBSS, int64(noptrbss.Vaddr+noptrbss.Length)) - ctxt.xdefine("runtime.end", SBSS, int64(Segdata.Vaddr+Segdata.Length)) + ctxt.xdefine("runtime.symtab", sym.SRODATA, int64(symtab.Vaddr)) + ctxt.xdefine("runtime.esymtab", sym.SRODATA, int64(symtab.Vaddr+symtab.Length)) + ctxt.xdefine("runtime.pclntab", sym.SRODATA, int64(pclntab.Vaddr)) + ctxt.xdefine("runtime.epclntab", sym.SRODATA, int64(pclntab.Vaddr+pclntab.Length)) + ctxt.xdefine("runtime.noptrdata", sym.SNOPTRDATA, int64(noptr.Vaddr)) + ctxt.xdefine("runtime.enoptrdata", sym.SNOPTRDATA, int64(noptr.Vaddr+noptr.Length)) + ctxt.xdefine("runtime.bss", sym.SBSS, int64(bss.Vaddr)) + ctxt.xdefine("runtime.ebss", sym.SBSS, int64(bss.Vaddr+bss.Length)) + ctxt.xdefine("runtime.data", sym.SDATA, int64(data.Vaddr)) + ctxt.xdefine("runtime.edata", sym.SDATA, int64(data.Vaddr+data.Length)) + ctxt.xdefine("runtime.noptrbss", sym.SNOPTRBSS, int64(noptrbss.Vaddr)) + ctxt.xdefine("runtime.enoptrbss", sym.SNOPTRBSS, int64(noptrbss.Vaddr+noptrbss.Length)) + ctxt.xdefine("runtime.end", sym.SBSS, int64(Segdata.Vaddr+Segdata.Length)) } // add a trampoline with symbol s (to be laid down after the current function) -func (ctxt *Link) AddTramp(s *Symbol) { - s.Type = STEXT - s.Attr |= AttrReachable - s.Attr |= AttrOnList +func (ctxt *Link) AddTramp(s *sym.Symbol) { + s.Type = sym.STEXT + s.Attr |= sym.AttrReachable + s.Attr |= sym.AttrOnList ctxt.tramps = 
append(ctxt.tramps, s) if *FlagDebugTramp > 0 && ctxt.Debugvlog > 0 { ctxt.Logf("trampoline %s inserted\n", s) diff --git a/src/cmd/link/internal/ld/deadcode.go b/src/cmd/link/internal/ld/deadcode.go index dbb96fb77fa..c5c48e5c854 100644 --- a/src/cmd/link/internal/ld/deadcode.go +++ b/src/cmd/link/internal/ld/deadcode.go @@ -7,6 +7,7 @@ package ld import ( "cmd/internal/objabi" "cmd/internal/sys" + "cmd/link/internal/sym" "fmt" "strings" "unicode" @@ -107,20 +108,27 @@ func deadcode(ctxt *Link) { } } - if Buildmode != BuildmodeShared { + if ctxt.BuildMode != BuildModeShared { // Keep a itablink if the symbol it points at is being kept. - // (When BuildmodeShared, always keep itablinks.) + // (When BuildModeShared, always keep itablinks.) for _, s := range ctxt.Syms.Allsym { if strings.HasPrefix(s.Name, "go.itablink.") { - s.Attr.Set(AttrReachable, len(s.R) == 1 && s.R[0].Sym.Attr.Reachable()) + s.Attr.Set(sym.AttrReachable, len(s.R) == 1 && s.R[0].Sym.Attr.Reachable()) } } } + for _, lib := range ctxt.Library { + lib.Textp = lib.Textp[:0] + } + // Remove dead text but keep file information (z symbols). - textp := make([]*Symbol, 0, len(ctxt.Textp)) + textp := make([]*sym.Symbol, 0, len(ctxt.Textp)) for _, s := range ctxt.Textp { if s.Attr.Reachable() { + if s.Lib != nil { + s.Lib.Textp = append(s.Lib.Textp, s) + } textp = append(textp, s) } } @@ -132,11 +140,11 @@ func deadcode(ctxt *Link) { // the reflect.method struct: mtyp, ifn, and tfn. 
type methodref struct { m methodsig - src *Symbol // receiver type symbol - r [3]*Reloc // R_METHODOFF relocations to fields of runtime.method + src *sym.Symbol // receiver type symbol + r [3]*sym.Reloc // R_METHODOFF relocations to fields of runtime.method } -func (m methodref) ifn() *Symbol { return m.r[1].Sym } +func (m methodref) ifn() *sym.Symbol { return m.r[1].Sym } func (m methodref) isExported() bool { for _, r := range m.m { @@ -148,13 +156,13 @@ func (m methodref) isExported() bool { // deadcodepass holds state for the deadcode flood fill. type deadcodepass struct { ctxt *Link - markQueue []*Symbol // symbols to flood fill in next pass + markQueue []*sym.Symbol // symbols to flood fill in next pass ifaceMethod map[methodsig]bool // methods declared in reached interfaces markableMethods []methodref // methods of reached types reflectMethod bool } -func (d *deadcodepass) cleanupReloc(r *Reloc) { +func (d *deadcodepass) cleanupReloc(r *sym.Reloc) { if r.Sym.Attr.Reachable() { r.Type = objabi.R_ADDROFF } else { @@ -167,7 +175,7 @@ func (d *deadcodepass) cleanupReloc(r *Reloc) { } // mark appends a symbol to the mark queue for flood filling. 
-func (d *deadcodepass) mark(s, parent *Symbol) { +func (d *deadcodepass) mark(s, parent *sym.Symbol) { if s == nil || s.Attr.Reachable() { return } @@ -181,7 +189,7 @@ func (d *deadcodepass) mark(s, parent *Symbol) { } fmt.Printf("%s -> %s\n", p, s.Name) } - s.Attr |= AttrReachable + s.Attr |= sym.AttrReachable s.Reachparent = parent d.markQueue = append(d.markQueue, s) } @@ -199,34 +207,45 @@ func (d *deadcodepass) markMethod(m methodref) { func (d *deadcodepass) init() { var names []string - if SysArch.Family == sys.ARM { + if d.ctxt.Arch.Family == sys.ARM { // mark some functions that are only referenced after linker code editing names = append(names, "runtime.read_tls_fallback") } - if Buildmode == BuildmodeShared { + if d.ctxt.BuildMode == BuildModeShared { // Mark all symbols defined in this library as reachable when // building a shared library. for _, s := range d.ctxt.Syms.Allsym { - if s.Type != 0 && s.Type != SDYNIMPORT { + if s.Type != 0 && s.Type != sym.SDYNIMPORT { d.mark(s, nil) } } } else { // In a normal binary, start at main.main and the init // functions and mark what is reachable from there. - names = append(names, *flagEntrySymbol) - if *FlagLinkshared && (Buildmode == BuildmodeExe || Buildmode == BuildmodePIE) { - names = append(names, "main.main", "main.init") - } else if Buildmode == BuildmodePlugin { - names = append(names, *flagPluginPath+".init", *flagPluginPath+".main", "go.plugin.tabs") - // We don't keep the go.plugin.exports symbol, - // but we do keep the symbols it refers to. - exports := d.ctxt.Syms.ROLookup("go.plugin.exports", 0) - if exports != nil { - for _, r := range exports.R { - d.mark(r.Sym, nil) + if d.ctxt.linkShared && (d.ctxt.BuildMode == BuildModeExe || d.ctxt.BuildMode == BuildModePIE) { + names = append(names, "main.main", "main.init") + } else { + // The external linker refers main symbol directly. 
+ if d.ctxt.LinkMode == LinkExternal && (d.ctxt.BuildMode == BuildModeExe || d.ctxt.BuildMode == BuildModePIE) { + if d.ctxt.HeadType == objabi.Hwindows && d.ctxt.Arch.Family == sys.I386 { + *flagEntrySymbol = "_main" + } else { + *flagEntrySymbol = "main" + } + } + names = append(names, *flagEntrySymbol) + if d.ctxt.BuildMode == BuildModePlugin { + names = append(names, objabi.PathToPrefix(*flagPluginPath)+".init", objabi.PathToPrefix(*flagPluginPath)+".main", "go.plugin.tabs") + + // We don't keep the go.plugin.exports symbol, + // but we do keep the symbols it refers to. + exports := d.ctxt.Syms.ROLookup("go.plugin.exports", 0) + if exports != nil { + for _, r := range exports.R { + d.mark(r.Sym, nil) + } } } } @@ -240,13 +259,13 @@ func (d *deadcodepass) init() { } } -// flood flood fills symbols reachable from the markQueue symbols. +// flood fills symbols reachable from the markQueue symbols. // As it goes, it collects methodref and interface method declarations. func (d *deadcodepass) flood() { for len(d.markQueue) > 0 { s := d.markQueue[0] d.markQueue = d.markQueue[1:] - if s.Type == STEXT { + if s.Type == sym.STEXT { if d.ctxt.Debugvlog > 1 { d.ctxt.Logf("marktext %s\n", s.Name) } @@ -264,7 +283,7 @@ func (d *deadcodepass) flood() { // later will give a better error than deadcode. 
continue } - if decodetypeKind(s)&kindMask == kindInterface { + if decodetypeKind(d.ctxt.Arch, s)&kindMask == kindInterface { for _, sig := range decodeIfaceMethods(d.ctxt.Arch, s) { if d.ctxt.Debugvlog > 1 { d.ctxt.Logf("reached iface method: %s\n", sig) diff --git a/src/cmd/link/internal/ld/decodesym.go b/src/cmd/link/internal/ld/decodesym.go index eba8ee30824..3afb38948f5 100644 --- a/src/cmd/link/internal/ld/decodesym.go +++ b/src/cmd/link/internal/ld/decodesym.go @@ -8,6 +8,7 @@ import ( "bytes" "cmd/internal/objabi" "cmd/internal/sys" + "cmd/link/internal/sym" "debug/elf" "fmt" ) @@ -28,7 +29,7 @@ const ( tflagExtraStar = 1 << 1 ) -func decodeReloc(s *Symbol, off int32) *Reloc { +func decodeReloc(s *sym.Symbol, off int32) *sym.Reloc { for i := range s.R { if s.R[i].Off == off { return &s.R[i] @@ -37,7 +38,7 @@ func decodeReloc(s *Symbol, off int32) *Reloc { return nil } -func decodeRelocSym(s *Symbol, off int32) *Symbol { +func decodeRelocSym(s *sym.Symbol, off int32) *sym.Symbol { r := decodeReloc(s, off) if r == nil { return nil @@ -59,33 +60,33 @@ func decodeInuxi(arch *sys.Arch, p []byte, sz int) uint64 { } } -func commonsize() int { return 4*SysArch.PtrSize + 8 + 8 } // runtime._type -func structfieldSize() int { return 3 * SysArch.PtrSize } // runtime.structfield -func uncommonSize() int { return 4 + 2 + 2 + 4 + 4 } // runtime.uncommontype +func commonsize(arch *sys.Arch) int { return 4*arch.PtrSize + 8 + 8 } // runtime._type +func structfieldSize(arch *sys.Arch) int { return 3 * arch.PtrSize } // runtime.structfield +func uncommonSize() int { return 4 + 2 + 2 + 4 + 4 } // runtime.uncommontype // Type.commonType.kind -func decodetypeKind(s *Symbol) uint8 { - return s.P[2*SysArch.PtrSize+7] & objabi.KindMask // 0x13 / 0x1f +func decodetypeKind(arch *sys.Arch, s *sym.Symbol) uint8 { + return s.P[2*arch.PtrSize+7] & objabi.KindMask // 0x13 / 0x1f } // Type.commonType.kind -func decodetypeUsegcprog(s *Symbol) uint8 { - return s.P[2*SysArch.PtrSize+7] & 
objabi.KindGCProg // 0x13 / 0x1f +func decodetypeUsegcprog(arch *sys.Arch, s *sym.Symbol) uint8 { + return s.P[2*arch.PtrSize+7] & objabi.KindGCProg // 0x13 / 0x1f } // Type.commonType.size -func decodetypeSize(arch *sys.Arch, s *Symbol) int64 { - return int64(decodeInuxi(arch, s.P, SysArch.PtrSize)) // 0x8 / 0x10 +func decodetypeSize(arch *sys.Arch, s *sym.Symbol) int64 { + return int64(decodeInuxi(arch, s.P, arch.PtrSize)) // 0x8 / 0x10 } // Type.commonType.ptrdata -func decodetypePtrdata(arch *sys.Arch, s *Symbol) int64 { - return int64(decodeInuxi(arch, s.P[SysArch.PtrSize:], SysArch.PtrSize)) // 0x8 / 0x10 +func decodetypePtrdata(arch *sys.Arch, s *sym.Symbol) int64 { + return int64(decodeInuxi(arch, s.P[arch.PtrSize:], arch.PtrSize)) // 0x8 / 0x10 } // Type.commonType.tflag -func decodetypeHasUncommon(s *Symbol) bool { - return s.P[2*SysArch.PtrSize+4]&tflagUncommon != 0 +func decodetypeHasUncommon(arch *sys.Arch, s *sym.Symbol) bool { + return s.P[2*arch.PtrSize+4]&tflagUncommon != 0 } // Find the elf.Section of a given shared library that contains a given address. 
@@ -103,8 +104,8 @@ func findShlibSection(ctxt *Link, path string, addr uint64) *elf.Section { } // Type.commonType.gc -func decodetypeGcprog(ctxt *Link, s *Symbol) []byte { - if s.Type == SDYNIMPORT { +func decodetypeGcprog(ctxt *Link, s *sym.Symbol) []byte { + if s.Type == sym.SDYNIMPORT { addr := decodetypeGcprogShlib(ctxt, s) sect := findShlibSection(ctxt, s.File, addr) if sect != nil { @@ -119,11 +120,11 @@ func decodetypeGcprog(ctxt *Link, s *Symbol) []byte { Exitf("cannot find gcprog for %s", s.Name) return nil } - return decodeRelocSym(s, 2*int32(SysArch.PtrSize)+8+1*int32(SysArch.PtrSize)).P + return decodeRelocSym(s, 2*int32(ctxt.Arch.PtrSize)+8+1*int32(ctxt.Arch.PtrSize)).P } -func decodetypeGcprogShlib(ctxt *Link, s *Symbol) uint64 { - if SysArch.Family == sys.ARM64 { +func decodetypeGcprogShlib(ctxt *Link, s *sym.Symbol) uint64 { + if ctxt.Arch.Family == sys.ARM64 { for _, shlib := range ctxt.Shlibs { if shlib.Path == s.File { return shlib.gcdataAddresses[s] @@ -131,108 +132,108 @@ func decodetypeGcprogShlib(ctxt *Link, s *Symbol) uint64 { } return 0 } - return decodeInuxi(ctxt.Arch, s.P[2*int32(SysArch.PtrSize)+8+1*int32(SysArch.PtrSize):], SysArch.PtrSize) + return decodeInuxi(ctxt.Arch, s.P[2*int32(ctxt.Arch.PtrSize)+8+1*int32(ctxt.Arch.PtrSize):], ctxt.Arch.PtrSize) } -func decodetypeGcmask(ctxt *Link, s *Symbol) []byte { - if s.Type == SDYNIMPORT { +func decodetypeGcmask(ctxt *Link, s *sym.Symbol) []byte { + if s.Type == sym.SDYNIMPORT { addr := decodetypeGcprogShlib(ctxt, s) ptrdata := decodetypePtrdata(ctxt.Arch, s) sect := findShlibSection(ctxt, s.File, addr) if sect != nil { - r := make([]byte, ptrdata/int64(SysArch.PtrSize)) + r := make([]byte, ptrdata/int64(ctxt.Arch.PtrSize)) sect.ReadAt(r, int64(addr-sect.Addr)) return r } Exitf("cannot find gcmask for %s", s.Name) return nil } - mask := decodeRelocSym(s, 2*int32(SysArch.PtrSize)+8+1*int32(SysArch.PtrSize)) + mask := decodeRelocSym(s, 
2*int32(ctxt.Arch.PtrSize)+8+1*int32(ctxt.Arch.PtrSize)) return mask.P } // Type.ArrayType.elem and Type.SliceType.Elem -func decodetypeArrayElem(s *Symbol) *Symbol { - return decodeRelocSym(s, int32(commonsize())) // 0x1c / 0x30 +func decodetypeArrayElem(arch *sys.Arch, s *sym.Symbol) *sym.Symbol { + return decodeRelocSym(s, int32(commonsize(arch))) // 0x1c / 0x30 } -func decodetypeArrayLen(arch *sys.Arch, s *Symbol) int64 { - return int64(decodeInuxi(arch, s.P[commonsize()+2*SysArch.PtrSize:], SysArch.PtrSize)) +func decodetypeArrayLen(arch *sys.Arch, s *sym.Symbol) int64 { + return int64(decodeInuxi(arch, s.P[commonsize(arch)+2*arch.PtrSize:], arch.PtrSize)) } // Type.PtrType.elem -func decodetypePtrElem(s *Symbol) *Symbol { - return decodeRelocSym(s, int32(commonsize())) // 0x1c / 0x30 +func decodetypePtrElem(arch *sys.Arch, s *sym.Symbol) *sym.Symbol { + return decodeRelocSym(s, int32(commonsize(arch))) // 0x1c / 0x30 } // Type.MapType.key, elem -func decodetypeMapKey(s *Symbol) *Symbol { - return decodeRelocSym(s, int32(commonsize())) // 0x1c / 0x30 +func decodetypeMapKey(arch *sys.Arch, s *sym.Symbol) *sym.Symbol { + return decodeRelocSym(s, int32(commonsize(arch))) // 0x1c / 0x30 } -func decodetypeMapValue(s *Symbol) *Symbol { - return decodeRelocSym(s, int32(commonsize())+int32(SysArch.PtrSize)) // 0x20 / 0x38 +func decodetypeMapValue(arch *sys.Arch, s *sym.Symbol) *sym.Symbol { + return decodeRelocSym(s, int32(commonsize(arch))+int32(arch.PtrSize)) // 0x20 / 0x38 } // Type.ChanType.elem -func decodetypeChanElem(s *Symbol) *Symbol { - return decodeRelocSym(s, int32(commonsize())) // 0x1c / 0x30 +func decodetypeChanElem(arch *sys.Arch, s *sym.Symbol) *sym.Symbol { + return decodeRelocSym(s, int32(commonsize(arch))) // 0x1c / 0x30 } // Type.FuncType.dotdotdot -func decodetypeFuncDotdotdot(arch *sys.Arch, s *Symbol) bool { - return uint16(decodeInuxi(arch, s.P[commonsize()+2:], 2))&(1<<15) != 0 +func decodetypeFuncDotdotdot(arch *sys.Arch, s *sym.Symbol) bool 
{ + return uint16(decodeInuxi(arch, s.P[commonsize(arch)+2:], 2))&(1<<15) != 0 } // Type.FuncType.inCount -func decodetypeFuncInCount(arch *sys.Arch, s *Symbol) int { - return int(decodeInuxi(arch, s.P[commonsize():], 2)) +func decodetypeFuncInCount(arch *sys.Arch, s *sym.Symbol) int { + return int(decodeInuxi(arch, s.P[commonsize(arch):], 2)) } -func decodetypeFuncOutCount(arch *sys.Arch, s *Symbol) int { - return int(uint16(decodeInuxi(arch, s.P[commonsize()+2:], 2)) & (1<<15 - 1)) +func decodetypeFuncOutCount(arch *sys.Arch, s *sym.Symbol) int { + return int(uint16(decodeInuxi(arch, s.P[commonsize(arch)+2:], 2)) & (1<<15 - 1)) } -func decodetypeFuncInType(s *Symbol, i int) *Symbol { - uadd := commonsize() + 4 - if SysArch.PtrSize == 8 { +func decodetypeFuncInType(arch *sys.Arch, s *sym.Symbol, i int) *sym.Symbol { + uadd := commonsize(arch) + 4 + if arch.PtrSize == 8 { uadd += 4 } - if decodetypeHasUncommon(s) { + if decodetypeHasUncommon(arch, s) { uadd += uncommonSize() } - return decodeRelocSym(s, int32(uadd+i*SysArch.PtrSize)) + return decodeRelocSym(s, int32(uadd+i*arch.PtrSize)) } -func decodetypeFuncOutType(arch *sys.Arch, s *Symbol, i int) *Symbol { - return decodetypeFuncInType(s, i+decodetypeFuncInCount(arch, s)) +func decodetypeFuncOutType(arch *sys.Arch, s *sym.Symbol, i int) *sym.Symbol { + return decodetypeFuncInType(arch, s, i+decodetypeFuncInCount(arch, s)) } // Type.StructType.fields.Slice::length -func decodetypeStructFieldCount(arch *sys.Arch, s *Symbol) int { - return int(decodeInuxi(arch, s.P[commonsize()+2*SysArch.PtrSize:], SysArch.PtrSize)) +func decodetypeStructFieldCount(arch *sys.Arch, s *sym.Symbol) int { + return int(decodeInuxi(arch, s.P[commonsize(arch)+2*arch.PtrSize:], arch.PtrSize)) } -func decodetypeStructFieldArrayOff(s *Symbol, i int) int { - off := commonsize() + 4*SysArch.PtrSize - if decodetypeHasUncommon(s) { +func decodetypeStructFieldArrayOff(arch *sys.Arch, s *sym.Symbol, i int) int { + off := commonsize(arch) + 
4*arch.PtrSize + if decodetypeHasUncommon(arch, s) { off += uncommonSize() } - off += i * structfieldSize() + off += i * structfieldSize(arch) return off } // decodetypeStr returns the contents of an rtype's str field (a nameOff). -func decodetypeStr(s *Symbol) string { - str := decodetypeName(s, 4*SysArch.PtrSize+8) - if s.P[2*SysArch.PtrSize+4]&tflagExtraStar != 0 { +func decodetypeStr(arch *sys.Arch, s *sym.Symbol) string { + str := decodetypeName(s, 4*arch.PtrSize+8) + if s.P[2*arch.PtrSize+4]&tflagExtraStar != 0 { return str[1:] } return str } // decodetypeName decodes the name from a reflect.name. -func decodetypeName(s *Symbol, off int) string { +func decodetypeName(s *sym.Symbol, off int) string { r := decodeReloc(s, int32(off)) if r == nil { return "" @@ -243,28 +244,28 @@ func decodetypeName(s *Symbol, off int) string { return string(data[3 : 3+namelen]) } -func decodetypeStructFieldName(s *Symbol, i int) string { - off := decodetypeStructFieldArrayOff(s, i) +func decodetypeStructFieldName(arch *sys.Arch, s *sym.Symbol, i int) string { + off := decodetypeStructFieldArrayOff(arch, s, i) return decodetypeName(s, off) } -func decodetypeStructFieldType(s *Symbol, i int) *Symbol { - off := decodetypeStructFieldArrayOff(s, i) - return decodeRelocSym(s, int32(off+SysArch.PtrSize)) +func decodetypeStructFieldType(arch *sys.Arch, s *sym.Symbol, i int) *sym.Symbol { + off := decodetypeStructFieldArrayOff(arch, s, i) + return decodeRelocSym(s, int32(off+arch.PtrSize)) } -func decodetypeStructFieldOffs(arch *sys.Arch, s *Symbol, i int) int64 { +func decodetypeStructFieldOffs(arch *sys.Arch, s *sym.Symbol, i int) int64 { return decodetypeStructFieldOffsAnon(arch, s, i) >> 1 } -func decodetypeStructFieldOffsAnon(arch *sys.Arch, s *Symbol, i int) int64 { - off := decodetypeStructFieldArrayOff(s, i) - return int64(decodeInuxi(arch, s.P[off+2*SysArch.PtrSize:], SysArch.PtrSize)) +func decodetypeStructFieldOffsAnon(arch *sys.Arch, s *sym.Symbol, i int) int64 { + off := 
decodetypeStructFieldArrayOff(arch, s, i) + return int64(decodeInuxi(arch, s.P[off+2*arch.PtrSize:], arch.PtrSize)) } // InterfaceType.methods.length -func decodetypeIfaceMethodCount(arch *sys.Arch, s *Symbol) int64 { - return int64(decodeInuxi(arch, s.P[commonsize()+2*SysArch.PtrSize:], SysArch.PtrSize)) +func decodetypeIfaceMethodCount(arch *sys.Arch, s *sym.Symbol) int64 { + return int64(decodeInuxi(arch, s.P[commonsize(arch)+2*arch.PtrSize:], arch.PtrSize)) } // methodsig is a fully qualified typed method signature, like @@ -290,7 +291,7 @@ const ( // the function type. // // Conveniently this is the layout of both runtime.method and runtime.imethod. -func decodeMethodSig(arch *sys.Arch, s *Symbol, off, size, count int) []methodsig { +func decodeMethodSig(arch *sys.Arch, s *sym.Symbol, off, size, count int) []methodsig { var buf bytes.Buffer var methods []methodsig for i := 0; i < count; i++ { @@ -303,7 +304,7 @@ func decodeMethodSig(arch *sys.Arch, s *Symbol, off, size, count int) []methodsi if i > 0 { buf.WriteString(", ") } - buf.WriteString(decodetypeFuncInType(mtypSym, i).Name) + buf.WriteString(decodetypeFuncInType(arch, mtypSym, i).Name) } buf.WriteString(") (") outCount := decodetypeFuncOutCount(arch, mtypSym) @@ -322,11 +323,11 @@ func decodeMethodSig(arch *sys.Arch, s *Symbol, off, size, count int) []methodsi return methods } -func decodeIfaceMethods(arch *sys.Arch, s *Symbol) []methodsig { - if decodetypeKind(s)&kindMask != kindInterface { +func decodeIfaceMethods(arch *sys.Arch, s *sym.Symbol) []methodsig { + if decodetypeKind(arch, s)&kindMask != kindInterface { panic(fmt.Sprintf("symbol %q is not an interface", s.Name)) } - r := decodeReloc(s, int32(commonsize()+SysArch.PtrSize)) + r := decodeReloc(s, int32(commonsize(arch)+arch.PtrSize)) if r == nil { return nil } @@ -339,28 +340,28 @@ func decodeIfaceMethods(arch *sys.Arch, s *Symbol) []methodsig { return decodeMethodSig(arch, s, off, sizeofIMethod, numMethods) } -func decodetypeMethods(arch 
*sys.Arch, s *Symbol) []methodsig { - if !decodetypeHasUncommon(s) { +func decodetypeMethods(arch *sys.Arch, s *sym.Symbol) []methodsig { + if !decodetypeHasUncommon(arch, s) { panic(fmt.Sprintf("no methods on %q", s.Name)) } - off := commonsize() // reflect.rtype - switch decodetypeKind(s) & kindMask { + off := commonsize(arch) // reflect.rtype + switch decodetypeKind(arch, s) & kindMask { case kindStruct: // reflect.structType - off += 4 * SysArch.PtrSize + off += 4 * arch.PtrSize case kindPtr: // reflect.ptrType - off += SysArch.PtrSize + off += arch.PtrSize case kindFunc: // reflect.funcType - off += SysArch.PtrSize // 4 bytes, pointer aligned + off += arch.PtrSize // 4 bytes, pointer aligned case kindSlice: // reflect.sliceType - off += SysArch.PtrSize + off += arch.PtrSize case kindArray: // reflect.arrayType - off += 3 * SysArch.PtrSize + off += 3 * arch.PtrSize case kindChan: // reflect.chanType - off += 2 * SysArch.PtrSize + off += 2 * arch.PtrSize case kindMap: // reflect.mapType - off += 4*SysArch.PtrSize + 8 + off += 4*arch.PtrSize + 8 case kindInterface: // reflect.interfaceType - off += 3 * SysArch.PtrSize + off += 3 * arch.PtrSize default: // just Sizeof(rtype) } diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go index ba8ace54c81..4642bdbe7a8 100644 --- a/src/cmd/link/internal/ld/dwarf.go +++ b/src/cmd/link/internal/ld/dwarf.go @@ -16,9 +16,10 @@ package ld import ( "cmd/internal/dwarf" "cmd/internal/objabi" + "cmd/internal/sys" + "cmd/link/internal/sym" "fmt" "log" - "os" "strings" ) @@ -27,66 +28,81 @@ type dwctxt struct { } func (c dwctxt) PtrSize() int { - return SysArch.PtrSize + return c.linkctxt.Arch.PtrSize } func (c dwctxt) AddInt(s dwarf.Sym, size int, i int64) { - ls := s.(*Symbol) - adduintxx(c.linkctxt, ls, uint64(i), size) + ls := s.(*sym.Symbol) + ls.AddUintXX(c.linkctxt.Arch, uint64(i), size) } func (c dwctxt) AddBytes(s dwarf.Sym, b []byte) { - ls := s.(*Symbol) - Addbytes(ls, b) + ls := 
s.(*sym.Symbol) + ls.AddBytes(b) } func (c dwctxt) AddString(s dwarf.Sym, v string) { - Addstring(s.(*Symbol), v) -} -func (c dwctxt) SymValue(s dwarf.Sym) int64 { - return s.(*Symbol).Value + Addstring(s.(*sym.Symbol), v) } func (c dwctxt) AddAddress(s dwarf.Sym, data interface{}, value int64) { if value != 0 { - value -= (data.(*Symbol)).Value + value -= (data.(*sym.Symbol)).Value } - Addaddrplus(c.linkctxt, s.(*Symbol), data.(*Symbol), value) + s.(*sym.Symbol).AddAddrPlus(c.linkctxt.Arch, data.(*sym.Symbol), value) +} + +func (c dwctxt) AddCURelativeAddress(s dwarf.Sym, data interface{}, value int64) { + if value != 0 { + value -= (data.(*sym.Symbol)).Value + } + s.(*sym.Symbol).AddCURelativeAddrPlus(c.linkctxt.Arch, data.(*sym.Symbol), value) } func (c dwctxt) AddSectionOffset(s dwarf.Sym, size int, t interface{}, ofs int64) { - ls := s.(*Symbol) + ls := s.(*sym.Symbol) switch size { default: Errorf(ls, "invalid size %d in adddwarfref\n", size) fallthrough - case SysArch.PtrSize: - Addaddr(c.linkctxt, ls, t.(*Symbol)) + case c.linkctxt.Arch.PtrSize: + ls.AddAddr(c.linkctxt.Arch, t.(*sym.Symbol)) case 4: - addaddrplus4(c.linkctxt, ls, t.(*Symbol), 0) + ls.AddAddrPlus4(t.(*sym.Symbol), 0) } r := &ls.R[len(ls.R)-1] - r.Type = objabi.R_DWARFREF + r.Type = objabi.R_DWARFSECREF r.Add = ofs } -/* - * Offsets and sizes of the debug_* sections in the cout file. - */ -var abbrevsym *Symbol -var arangessec *Symbol -var framesec *Symbol -var infosec *Symbol -var linesec *Symbol -var rangesec *Symbol +func (c dwctxt) Logf(format string, args ...interface{}) { + c.linkctxt.Logf(format, args...) +} + +// At the moment these interfaces are only used in the compiler. 
+ +func (c dwctxt) AddFileRef(s dwarf.Sym, f interface{}) { + panic("should be used only in the compiler") +} + +func (c dwctxt) CurrentOffset(s dwarf.Sym) int64 { + panic("should be used only in the compiler") +} + +func (c dwctxt) RecordDclReference(s dwarf.Sym, t dwarf.Sym, dclIdx int, inlIndex int) { + panic("should be used only in the compiler") +} + +func (c dwctxt) RecordChildDieOffsets(s dwarf.Sym, vars []*dwarf.Var, offsets []int32) { + panic("should be used only in the compiler") +} var gdbscript string -var dwarfp []*Symbol +var dwarfp []*sym.Symbol -func writeabbrev(ctxt *Link, syms []*Symbol) []*Symbol { +func writeabbrev(ctxt *Link) *sym.Symbol { s := ctxt.Syms.Lookup(".debug_abbrev", 0) - s.Type = SDWARFSECT - abbrevsym = s - Addbytes(s, dwarf.GetAbbrev()) - return append(syms, s) + s.Type = sym.SDWARFSECT + s.AddBytes(dwarf.GetAbbrev()) + return s } /* @@ -134,8 +150,10 @@ func getattr(die *dwarf.DWDie, attr uint16) *dwarf.DWAttr { return nil } -// Every DIE has at least a AT_name attribute (but it will only be -// written out if it is listed in the abbrev). +// Every DIE manufactured by the linker has at least an AT_name +// attribute (but it will only be written out if it is listed in the abbrev). +// The compiler does create nameless DWARF DIEs (ex: concrete subprogram +// instance). func newdie(ctxt *Link, parent *dwarf.DWDie, abbrev int, name string, version int) *dwarf.DWDie { die := new(dwarf.DWDie) die.Abbrev = abbrev @@ -146,10 +164,14 @@ func newdie(ctxt *Link, parent *dwarf.DWDie, abbrev int, name string, version in if name != "" && (abbrev <= dwarf.DW_ABRV_VARIABLE || abbrev >= dwarf.DW_ABRV_NULLTYPE) { if abbrev != dwarf.DW_ABRV_VARIABLE || version == 0 { - sym := ctxt.Syms.Lookup(dwarf.InfoPrefix+name, version) - sym.Attr |= AttrNotInSymbolTable - sym.Type = SDWARFINFO - die.Sym = sym + if abbrev == dwarf.DW_ABRV_COMPUNIT { + // Avoid collisions with "real" symbol names. + name = ".pkg." 
+ name + } + s := ctxt.Syms.Lookup(dwarf.InfoPrefix+name, version) + s.Attr |= sym.AttrNotInSymbolTable + s.Type = sym.SDWARFINFO + die.Sym = s } } @@ -172,7 +194,7 @@ func walktypedef(die *dwarf.DWDie) *dwarf.DWDie { return die } -func walksymtypedef(ctxt *Link, s *Symbol) *Symbol { +func walksymtypedef(ctxt *Link, s *sym.Symbol) *sym.Symbol { if t := ctxt.Syms.ROLookup(s.Name+"..def", int(s.Version)); t != nil { return t } @@ -197,18 +219,18 @@ func findchild(die *dwarf.DWDie, name string) *dwarf.DWDie { // Used to avoid string allocation when looking up dwarf symbols var prefixBuf = []byte(dwarf.InfoPrefix) -func find(ctxt *Link, name string) *Symbol { +func find(ctxt *Link, name string) *sym.Symbol { n := append(prefixBuf, name...) // The string allocation below is optimized away because it is only used in a map lookup. s := ctxt.Syms.ROLookup(string(n), 0) prefixBuf = n[:len(dwarf.InfoPrefix)] - if s != nil && s.Type == SDWARFINFO { + if s != nil && s.Type == sym.SDWARFINFO { return s } return nil } -func mustFind(ctxt *Link, name string) *Symbol { +func mustFind(ctxt *Link, name string) *sym.Symbol { r := find(ctxt, name) if r == nil { Exitf("dwarf find: cannot find %s", name) @@ -216,46 +238,46 @@ func mustFind(ctxt *Link, name string) *Symbol { return r } -func adddwarfref(ctxt *Link, s *Symbol, t *Symbol, size int) int64 { +func adddwarfref(ctxt *Link, s *sym.Symbol, t *sym.Symbol, size int) int64 { var result int64 switch size { default: Errorf(s, "invalid size %d in adddwarfref\n", size) fallthrough - case SysArch.PtrSize: - result = Addaddr(ctxt, s, t) + case ctxt.Arch.PtrSize: + result = s.AddAddr(ctxt.Arch, t) case 4: - result = addaddrplus4(ctxt, s, t, 0) + result = s.AddAddrPlus4(t, 0) } r := &s.R[len(s.R)-1] - r.Type = objabi.R_DWARFREF + r.Type = objabi.R_DWARFSECREF return result } -func newrefattr(die *dwarf.DWDie, attr uint16, ref *Symbol) *dwarf.DWAttr { +func newrefattr(die *dwarf.DWDie, attr uint16, ref *sym.Symbol) *dwarf.DWAttr { if ref == 
nil { return nil } return newattr(die, attr, dwarf.DW_CLS_REFERENCE, 0, ref) } -func putdies(linkctxt *Link, ctxt dwarf.Context, syms []*Symbol, die *dwarf.DWDie) []*Symbol { +func putdies(linkctxt *Link, ctxt dwarf.Context, syms []*sym.Symbol, die *dwarf.DWDie) []*sym.Symbol { for ; die != nil; die = die.Link { syms = putdie(linkctxt, ctxt, syms, die) } - Adduint8(linkctxt, syms[len(syms)-1], 0) + syms[len(syms)-1].AddUint8(0) return syms } -func dtolsym(s dwarf.Sym) *Symbol { +func dtolsym(s dwarf.Sym) *sym.Symbol { if s == nil { return nil } - return s.(*Symbol) + return s.(*sym.Symbol) } -func putdie(linkctxt *Link, ctxt dwarf.Context, syms []*Symbol, die *dwarf.DWDie) []*Symbol { +func putdie(linkctxt *Link, ctxt dwarf.Context, syms []*sym.Symbol, die *dwarf.DWDie) []*sym.Symbol { s := dtolsym(die.Sym) if s == nil { s = syms[len(syms)-1] @@ -263,7 +285,7 @@ func putdie(linkctxt *Link, ctxt dwarf.Context, syms []*Symbol, die *dwarf.DWDie if s.Attr.OnList() { log.Fatalf("symbol %s listed multiple times", s.Name) } - s.Attr |= AttrOnList + s.Attr |= sym.AttrOnList syms = append(syms, s) } dwarf.Uleb128put(ctxt, s, int64(die.Abbrev)) @@ -278,7 +300,7 @@ func reverselist(list **dwarf.DWDie) { curr := *list var prev *dwarf.DWDie for curr != nil { - var next *dwarf.DWDie = curr.Link + next := curr.Link curr.Link = prev prev = curr curr = next @@ -297,21 +319,18 @@ func reversetree(list **dwarf.DWDie) { } func newmemberoffsetattr(die *dwarf.DWDie, offs int32) { - var block [20]byte - b := append(block[:0], dwarf.DW_OP_plus_uconst) - b = dwarf.AppendUleb128(b, uint64(offs)) - newattr(die, dwarf.DW_AT_data_member_location, dwarf.DW_CLS_BLOCK, int64(len(b)), b) + newattr(die, dwarf.DW_AT_data_member_location, dwarf.DW_CLS_CONSTANT, int64(offs), nil) } // GDB doesn't like FORM_addr for AT_location, so emit a // location expression that evals to a const. 
-func newabslocexprattr(die *dwarf.DWDie, addr int64, sym *Symbol) { +func newabslocexprattr(die *dwarf.DWDie, addr int64, sym *sym.Symbol) { newattr(die, dwarf.DW_AT_location, dwarf.DW_CLS_ADDRESS, addr, sym) // below } // Lookup predefined types -func lookupOrDiag(ctxt *Link, n string) *Symbol { +func lookupOrDiag(ctxt *Link, n string) *sym.Symbol { s := ctxt.Syms.ROLookup(n, 0) if s == nil || s.Size == 0 { Exitf("dwarf: missing type: %s", n) @@ -338,10 +357,10 @@ func dotypedef(ctxt *Link, parent *dwarf.DWDie, name string, def *dwarf.DWDie) { Errorf(nil, "dwarf: bad def in dotypedef") } - sym := ctxt.Syms.Lookup(dtolsym(def.Sym).Name+"..def", 0) - sym.Attr |= AttrNotInSymbolTable - sym.Type = SDWARFINFO - def.Sym = sym + s := ctxt.Syms.Lookup(dtolsym(def.Sym).Name+"..def", 0) + s.Attr |= sym.AttrNotInSymbolTable + s.Type = sym.SDWARFINFO + def.Sym = s // The typedef entry must be created after the def, // so that future lookups will find the typedef instead @@ -349,11 +368,11 @@ func dotypedef(ctxt *Link, parent *dwarf.DWDie, name string, def *dwarf.DWDie) { // circular definition loops, so that gdb can understand them. die := newdie(ctxt, parent, dwarf.DW_ABRV_TYPEDECL, name, 0) - newrefattr(die, dwarf.DW_AT_type, sym) + newrefattr(die, dwarf.DW_AT_type, s) } // Define gotype, for composite ones recurse into constituents. 
-func defgotype(ctxt *Link, gotype *Symbol) *Symbol { +func defgotype(ctxt *Link, gotype *sym.Symbol) *sym.Symbol { if gotype == nil { return mustFind(ctxt, "") } @@ -371,12 +390,12 @@ func defgotype(ctxt *Link, gotype *Symbol) *Symbol { return sdie } - return newtype(ctxt, gotype).Sym.(*Symbol) + return newtype(ctxt, gotype).Sym.(*sym.Symbol) } -func newtype(ctxt *Link, gotype *Symbol) *dwarf.DWDie { +func newtype(ctxt *Link, gotype *sym.Symbol) *dwarf.DWDie { name := gotype.Name[5:] // could also decode from Type.string - kind := decodetypeKind(gotype) + kind := decodetypeKind(ctxt.Arch, gotype) bytesize := decodetypeSize(ctxt.Arch, gotype) var die *dwarf.DWDie @@ -421,7 +440,7 @@ func newtype(ctxt *Link, gotype *Symbol) *dwarf.DWDie { die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_ARRAYTYPE, name, 0) dotypedef(ctxt, &dwtypes, name, die) newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) - s := decodetypeArrayElem(gotype) + s := decodetypeArrayElem(ctxt.Arch, gotype) newrefattr(die, dwarf.DW_AT_type, defgotype(ctxt, s)) fld := newdie(ctxt, die, dwarf.DW_ABRV_ARRAYRANGE, "range", 0) @@ -433,7 +452,7 @@ func newtype(ctxt *Link, gotype *Symbol) *dwarf.DWDie { case objabi.KindChan: die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_CHANTYPE, name, 0) newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) - s := decodetypeChanElem(gotype) + s := decodetypeChanElem(ctxt.Arch, gotype) newrefattr(die, dwarf.DW_AT_go_elem, defgotype(ctxt, s)) // Save elem type for synthesizechantypes. We could synthesize here // but that would change the order of DIEs we output. 
@@ -441,13 +460,14 @@ func newtype(ctxt *Link, gotype *Symbol) *dwarf.DWDie { case objabi.KindFunc: die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_FUNCTYPE, name, 0) + newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) dotypedef(ctxt, &dwtypes, name, die) newrefattr(die, dwarf.DW_AT_type, mustFind(ctxt, "void")) nfields := decodetypeFuncInCount(ctxt.Arch, gotype) var fld *dwarf.DWDie - var s *Symbol + var s *sym.Symbol for i := 0; i < nfields; i++ { - s = decodetypeFuncInType(gotype, i) + s = decodetypeFuncInType(ctxt.Arch, gotype, i) fld = newdie(ctxt, die, dwarf.DW_ABRV_FUNCTYPEPARAM, s.Name[5:], 0) newrefattr(fld, dwarf.DW_AT_type, defgotype(ctxt, s)) } @@ -467,7 +487,7 @@ func newtype(ctxt *Link, gotype *Symbol) *dwarf.DWDie { dotypedef(ctxt, &dwtypes, name, die) newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) nfields := int(decodetypeIfaceMethodCount(ctxt.Arch, gotype)) - var s *Symbol + var s *sym.Symbol if nfields == 0 { s = lookupOrDiag(ctxt, "type.runtime.eface") } else { @@ -477,9 +497,9 @@ func newtype(ctxt *Link, gotype *Symbol) *dwarf.DWDie { case objabi.KindMap: die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_MAPTYPE, name, 0) - s := decodetypeMapKey(gotype) + s := decodetypeMapKey(ctxt.Arch, gotype) newrefattr(die, dwarf.DW_AT_go_key, defgotype(ctxt, s)) - s = decodetypeMapValue(gotype) + s = decodetypeMapValue(ctxt.Arch, gotype) newrefattr(die, dwarf.DW_AT_go_elem, defgotype(ctxt, s)) // Save gotype for use in synthesizemaptypes. We could synthesize here, // but that would change the order of the DIEs. 
@@ -488,14 +508,14 @@ func newtype(ctxt *Link, gotype *Symbol) *dwarf.DWDie { case objabi.KindPtr: die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_PTRTYPE, name, 0) dotypedef(ctxt, &dwtypes, name, die) - s := decodetypePtrElem(gotype) + s := decodetypePtrElem(ctxt.Arch, gotype) newrefattr(die, dwarf.DW_AT_type, defgotype(ctxt, s)) case objabi.KindSlice: die = newdie(ctxt, &dwtypes, dwarf.DW_ABRV_SLICETYPE, name, 0) dotypedef(ctxt, &dwtypes, name, die) newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) - s := decodetypeArrayElem(gotype) + s := decodetypeArrayElem(ctxt.Arch, gotype) elem := defgotype(ctxt, s) newrefattr(die, dwarf.DW_AT_go_elem, elem) @@ -509,8 +529,8 @@ func newtype(ctxt *Link, gotype *Symbol) *dwarf.DWDie { newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, bytesize, 0) nfields := decodetypeStructFieldCount(ctxt.Arch, gotype) for i := 0; i < nfields; i++ { - f := decodetypeStructFieldName(gotype, i) - s := decodetypeStructFieldType(gotype, i) + f := decodetypeStructFieldName(ctxt.Arch, gotype, i) + s := decodetypeStructFieldType(ctxt.Arch, gotype, i) if f == "" { f = s.Name[5:] // skip "type." } @@ -541,12 +561,12 @@ func newtype(ctxt *Link, gotype *Symbol) *dwarf.DWDie { return die } -func nameFromDIESym(dwtype *Symbol) string { +func nameFromDIESym(dwtype *sym.Symbol) string { return strings.TrimSuffix(dwtype.Name[len(dwarf.InfoPrefix):], "..def") } // Find or construct *T given T. 
-func defptrto(ctxt *Link, dwtype *Symbol) *Symbol { +func defptrto(ctxt *Link, dwtype *sym.Symbol) *sym.Symbol { ptrname := "*" + nameFromDIESym(dwtype) die := find(ctxt, ptrname) if die == nil { @@ -582,7 +602,7 @@ func copychildren(ctxt *Link, dst *dwarf.DWDie, src *dwarf.DWDie) { // Search children (assumed to have TAG_member) for the one named // field and set its AT_type to dwtype -func substitutetype(structdie *dwarf.DWDie, field string, dwtype *Symbol) { +func substitutetype(structdie *dwarf.DWDie, field string, dwtype *sym.Symbol) { child := findchild(structdie, field) if child == nil { Exitf("dwarf substitutetype: %s does not have member %s", @@ -632,7 +652,7 @@ func synthesizeslicetypes(ctxt *Link, die *dwarf.DWDie) { continue } copychildren(ctxt, die, prototype) - elem := getattr(die, dwarf.DW_AT_go_elem).Data.(*Symbol) + elem := getattr(die, dwarf.DW_AT_go_elem).Data.(*sym.Symbol) substitutetype(die, "array", defptrto(ctxt, elem)) } } @@ -656,11 +676,11 @@ const ( BucketSize = 8 ) -func mkinternaltype(ctxt *Link, abbrev int, typename, keyname, valname string, f func(*dwarf.DWDie)) *Symbol { +func mkinternaltype(ctxt *Link, abbrev int, typename, keyname, valname string, f func(*dwarf.DWDie)) *sym.Symbol { name := mkinternaltypename(typename, keyname, valname) symname := dwarf.InfoPrefix + name s := ctxt.Syms.ROLookup(symname, 0) - if s != nil && s.Type == SDWARFINFO { + if s != nil && s.Type == sym.SDWARFINFO { return s } die := newdie(ctxt, &dwtypes, abbrev, name, 0) @@ -680,20 +700,20 @@ func synthesizemaptypes(ctxt *Link, die *dwarf.DWDie) { if die.Abbrev != dwarf.DW_ABRV_MAPTYPE { continue } - gotype := getattr(die, dwarf.DW_AT_type).Data.(*Symbol) - keytype := decodetypeMapKey(gotype) - valtype := decodetypeMapValue(gotype) + gotype := getattr(die, dwarf.DW_AT_type).Data.(*sym.Symbol) + keytype := decodetypeMapKey(ctxt.Arch, gotype) + valtype := decodetypeMapValue(ctxt.Arch, gotype) keysize, valsize := decodetypeSize(ctxt.Arch, keytype), 
decodetypeSize(ctxt.Arch, valtype) keytype, valtype = walksymtypedef(ctxt, defgotype(ctxt, keytype)), walksymtypedef(ctxt, defgotype(ctxt, valtype)) // compute size info like hashmap.c does. indirectKey, indirectVal := false, false if keysize > MaxKeySize { - keysize = int64(SysArch.PtrSize) + keysize = int64(ctxt.Arch.PtrSize) indirectKey = true } if valsize > MaxValSize { - valsize = int64(SysArch.PtrSize) + valsize = int64(ctxt.Arch.PtrSize) indirectVal = true } @@ -740,13 +760,13 @@ func synthesizemaptypes(ctxt *Link, die *dwarf.DWDie) { fld = newdie(ctxt, dwhb, dwarf.DW_ABRV_STRUCTFIELD, "overflow", 0) newrefattr(fld, dwarf.DW_AT_type, defptrto(ctxt, dtolsym(dwhb.Sym))) newmemberoffsetattr(fld, BucketSize+BucketSize*(int32(keysize)+int32(valsize))) - if SysArch.RegSize > SysArch.PtrSize { + if ctxt.Arch.RegSize > ctxt.Arch.PtrSize { fld = newdie(ctxt, dwhb, dwarf.DW_ABRV_STRUCTFIELD, "pad", 0) newrefattr(fld, dwarf.DW_AT_type, mustFind(ctxt, "uintptr")) - newmemberoffsetattr(fld, BucketSize+BucketSize*(int32(keysize)+int32(valsize))+int32(SysArch.PtrSize)) + newmemberoffsetattr(fld, BucketSize+BucketSize*(int32(keysize)+int32(valsize))+int32(ctxt.Arch.PtrSize)) } - newattr(dwhb, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, BucketSize+BucketSize*keysize+BucketSize*valsize+int64(SysArch.RegSize), 0) + newattr(dwhb, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, BucketSize+BucketSize*keysize+BucketSize*valsize+int64(ctxt.Arch.RegSize), 0) }) // Construct hash @@ -776,21 +796,15 @@ func synthesizechantypes(ctxt *Link, die *dwarf.DWDie) { if die.Abbrev != dwarf.DW_ABRV_CHANTYPE { continue } - elemgotype := getattr(die, dwarf.DW_AT_type).Data.(*Symbol) - elemsize := decodetypeSize(ctxt.Arch, elemgotype) + elemgotype := getattr(die, dwarf.DW_AT_type).Data.(*sym.Symbol) elemname := elemgotype.Name[5:] elemtype := walksymtypedef(ctxt, defgotype(ctxt, elemgotype)) // sudog dwss := mkinternaltype(ctxt, dwarf.DW_ABRV_STRUCTTYPE, "sudog", elemname, "", func(dws 
*dwarf.DWDie) { copychildren(ctxt, dws, sudog) - substitutetype(dws, "elem", elemtype) - if elemsize > 8 { - elemsize -= 8 - } else { - elemsize = 0 - } - newattr(dws, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, int64(sudogsize)+elemsize, nil) + substitutetype(dws, "elem", defptrto(ctxt, elemtype)) + newattr(dws, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, int64(sudogsize), nil) }) // waitq @@ -815,35 +829,35 @@ func synthesizechantypes(ctxt *Link, die *dwarf.DWDie) { } // For use with pass.c::genasmsym -func defdwsymb(ctxt *Link, sym *Symbol, s string, t SymbolType, v int64, gotype *Symbol) { - if strings.HasPrefix(s, "go.string.") { +func defdwsymb(ctxt *Link, s *sym.Symbol, str string, t SymbolType, v int64, gotype *sym.Symbol) { + if strings.HasPrefix(str, "go.string.") { return } - if strings.HasPrefix(s, "runtime.gcbits.") { + if strings.HasPrefix(str, "runtime.gcbits.") { return } - if strings.HasPrefix(s, "type.") && s != "type.*" && !strings.HasPrefix(s, "type..") { - defgotype(ctxt, sym) + if strings.HasPrefix(str, "type.") && str != "type.*" && !strings.HasPrefix(str, "type..") { + defgotype(ctxt, s) return } var dv *dwarf.DWDie - var dt *Symbol + var dt *sym.Symbol switch t { default: return case DataSym, BSSSym: - dv = newdie(ctxt, &dwglobals, dwarf.DW_ABRV_VARIABLE, s, int(sym.Version)) - newabslocexprattr(dv, v, sym) - if sym.Version == 0 { + dv = newdie(ctxt, &dwglobals, dwarf.DW_ABRV_VARIABLE, str, int(s.Version)) + newabslocexprattr(dv, v, s) + if s.Version == 0 { newattr(dv, dwarf.DW_AT_external, dwarf.DW_CLS_FLAG, 1, 0) } fallthrough - case AutoSym, ParamSym: + case AutoSym, ParamSym, DeletedAutoSym: dt = defgotype(ctxt, gotype) } @@ -852,6 +866,53 @@ func defdwsymb(ctxt *Link, sym *Symbol, s string, t SymbolType, v int64, gotype } } +// compilationUnit is per-compilation unit (equivalently, per-package) +// debug-related data. 
+type compilationUnit struct { + lib *sym.Library + consts *sym.Symbol // Package constants DIEs + pcs []dwarf.Range // PC ranges, relative to textp[0] + dwinfo *dwarf.DWDie // CU root DIE + funcDIEs []*sym.Symbol // Function DIE subtrees + absFnDIEs []*sym.Symbol // Abstract function DIE subtrees +} + +// getCompilationUnits divides the symbols in ctxt.Textp by package. +func getCompilationUnits(ctxt *Link) []*compilationUnit { + units := []*compilationUnit{} + index := make(map[*sym.Library]*compilationUnit) + var prevUnit *compilationUnit + for _, s := range ctxt.Textp { + if s.FuncInfo == nil { + continue + } + unit := index[s.Lib] + if unit == nil { + unit = &compilationUnit{lib: s.Lib} + if s := ctxt.Syms.ROLookup(dwarf.ConstInfoPrefix+s.Lib.Pkg, 0); s != nil { + importInfoSymbol(ctxt, s) + unit.consts = s + } + units = append(units, unit) + index[s.Lib] = unit + } + + // Update PC ranges. + // + // We don't simply compare the end of the previous + // symbol with the start of the next because there's + // often a little padding between them. Instead, we + // only create boundaries between symbols from + // different units. + if prevUnit != unit { + unit.pcs = append(unit.pcs, dwarf.Range{Start: s.Value - unit.lib.Textp[0].Value}) + prevUnit = unit + } + unit.pcs[len(unit.pcs)-1].End = s.Value - unit.lib.Textp[0].Value + s.Size + } + return units +} + func movetomodule(parent *dwarf.DWDie) { die := dwroot.Child.Child if die == nil { @@ -864,15 +925,18 @@ func movetomodule(parent *dwarf.DWDie) { die.Link = parent.Child } -// If the pcln table contains runtime/runtime.go, use that to set gdbscript path. -func finddebugruntimepath(s *Symbol) { +// If the pcln table contains runtime/proc.go, use that to set gdbscript path. 
+func finddebugruntimepath(s *sym.Symbol) { if gdbscript != "" { return } for i := range s.FuncInfo.File { f := s.FuncInfo.File[i] - if i := strings.Index(f.Name, "runtime/debug.go"); i >= 0 { + // We can't use something that may be dead-code + // eliminated from a binary here. proc.go contains + // main and the scheduler, so it's not going anywhere. + if i := strings.Index(f.Name, "runtime/proc.go"); i >= 0 { gdbscript = f.Name[:i] + "runtime/runtime-gdb.py" break } @@ -890,7 +954,7 @@ const ( OPCODE_BASE = 10 ) -func putpclcdelta(linkctxt *Link, ctxt dwarf.Context, s *Symbol, deltaPC uint64, deltaLC int64) { +func putpclcdelta(linkctxt *Link, ctxt dwarf.Context, s *sym.Symbol, deltaPC uint64, deltaLC int64) { // Choose a special opcode that minimizes the number of bytes needed to // encode the remaining PC delta and LC delta. var opcode int64 @@ -960,24 +1024,24 @@ func putpclcdelta(linkctxt *Link, ctxt dwarf.Context, s *Symbol, deltaPC uint64, if opcode < OPCODE_BASE { panic(fmt.Sprintf("produced invalid special opcode %d", opcode)) } - Adduint8(linkctxt, s, dwarf.DW_LNS_const_add_pc) + s.AddUint8(dwarf.DW_LNS_const_add_pc) } else if (1<<14) <= deltaPC && deltaPC < (1<<16) { - Adduint8(linkctxt, s, dwarf.DW_LNS_fixed_advance_pc) - Adduint16(linkctxt, s, uint16(deltaPC)) + s.AddUint8(dwarf.DW_LNS_fixed_advance_pc) + s.AddUint16(linkctxt.Arch, uint16(deltaPC)) } else { - Adduint8(linkctxt, s, dwarf.DW_LNS_advance_pc) + s.AddUint8(dwarf.DW_LNS_advance_pc) dwarf.Uleb128put(ctxt, s, int64(deltaPC)) } } // Encode deltaLC. if deltaLC != 0 { - Adduint8(linkctxt, s, dwarf.DW_LNS_advance_line) + s.AddUint8(dwarf.DW_LNS_advance_line) dwarf.Sleb128put(ctxt, s, deltaLC) } // Output the special opcode. 
- Adduint8(linkctxt, s, uint8(opcode)) + s.AddUint8(uint8(opcode)) } /* @@ -985,127 +1049,198 @@ func putpclcdelta(linkctxt *Link, ctxt dwarf.Context, s *Symbol, deltaPC uint64, */ func getCompilationDir() string { - if dir, err := os.Getwd(); err == nil { - return dir - } - return "/" + // OSX requires this be set to something, but it's not easy to choose + // a value. Linking takes place in a temporary directory, so there's + // no point including it here. Paths in the file table are usually + // absolute, in which case debuggers will ignore this value. -trimpath + // produces relative paths, but we don't know where they start, so + // all we can do here is try not to make things worse. + return "." } -func writelines(ctxt *Link, syms []*Symbol) ([]*Symbol, []*Symbol) { - var dwarfctxt dwarf.Context = dwctxt{ctxt} - if linesec == nil { - linesec = ctxt.Syms.Lookup(".debug_line", 0) +func importInfoSymbol(ctxt *Link, dsym *sym.Symbol) { + dsym.Attr |= sym.AttrNotInSymbolTable | sym.AttrReachable + dsym.Type = sym.SDWARFINFO + for _, r := range dsym.R { + if r.Type == objabi.R_DWARFSECREF && r.Sym.Size == 0 { + if ctxt.BuildMode == BuildModeShared { + // These type symbols may not be present in BuildModeShared. Skip. + continue + } + n := nameFromDIESym(r.Sym) + defgotype(ctxt, ctxt.Syms.Lookup("type."+n, 0)) + } } - linesec.Type = SDWARFSECT - linesec.R = linesec.R[:0] +} - ls := linesec - syms = append(syms, ls) - var funcs []*Symbol +// For the specified function, collect symbols corresponding to any +// "abstract" subprogram DIEs referenced. The first case of interest +// is a concrete subprogram DIE, which will refer to its corresponding +// abstract subprogram DIE, and then there can be references from a +// non-abstract subprogram DIE to the abstract subprogram DIEs for any +// functions inlined into this one. 
+// +// A given abstract subprogram DIE can be referenced in numerous +// places (even within the same DIE), so it is important to make sure +// it gets imported and added to the absfuncs lists only once. + +func collectAbstractFunctions(ctxt *Link, fn *sym.Symbol, dsym *sym.Symbol, absfuncs []*sym.Symbol) []*sym.Symbol { + + var newabsfns []*sym.Symbol + + // Walk the relocations on the primary subprogram DIE and look for + // references to abstract funcs. + for _, reloc := range dsym.R { + candsym := reloc.Sym + if reloc.Type != objabi.R_DWARFSECREF { + continue + } + if !strings.HasPrefix(candsym.Name, dwarf.InfoPrefix) { + continue + } + if !strings.HasSuffix(candsym.Name, dwarf.AbstractFuncSuffix) { + continue + } + if candsym.Attr.OnList() { + continue + } + candsym.Attr |= sym.AttrOnList + newabsfns = append(newabsfns, candsym) + } + + // Import any new symbols that have turned up. + for _, absdsym := range newabsfns { + importInfoSymbol(ctxt, absdsym) + absfuncs = append(absfuncs, absdsym) + } + + return absfuncs +} + +func writelines(ctxt *Link, lib *sym.Library, textp []*sym.Symbol, ls *sym.Symbol) (dwinfo *dwarf.DWDie, funcs []*sym.Symbol, absfuncs []*sym.Symbol) { + + var dwarfctxt dwarf.Context = dwctxt{ctxt} unitstart := int64(-1) headerstart := int64(-1) headerend := int64(-1) - epc := int64(0) - var epcs *Symbol - var dwinfo *dwarf.DWDie lang := dwarf.DW_LANG_Go - s := ctxt.Textp[0] - if ctxt.DynlinkingGo() && Headtype == objabi.Hdarwin { - s = ctxt.Textp[1] // skip runtime.text - } - - dwinfo = newdie(ctxt, &dwroot, dwarf.DW_ABRV_COMPUNIT, "go", 0) + dwinfo = newdie(ctxt, &dwroot, dwarf.DW_ABRV_COMPUNIT, lib.Pkg, 0) newattr(dwinfo, dwarf.DW_AT_language, dwarf.DW_CLS_CONSTANT, int64(lang), 0) - newattr(dwinfo, dwarf.DW_AT_stmt_list, dwarf.DW_CLS_PTR, 0, linesec) - newattr(dwinfo, dwarf.DW_AT_low_pc, dwarf.DW_CLS_ADDRESS, s.Value, s) + newattr(dwinfo, dwarf.DW_AT_stmt_list, dwarf.DW_CLS_PTR, ls.Size, ls) // OS X linker requires compilation dir or 
absolute path in comp unit name to output debug info. compDir := getCompilationDir() + // TODO: Make this be the actual compilation directory, not + // the linker directory. If we move CU construction into the + // compiler, this should happen naturally. newattr(dwinfo, dwarf.DW_AT_comp_dir, dwarf.DW_CLS_STRING, int64(len(compDir)), compDir) + producerExtra := ctxt.Syms.Lookup(dwarf.CUInfoPrefix+"producer."+lib.Pkg, 0) producer := "Go cmd/compile " + objabi.Version + if len(producerExtra.P) > 0 { + // We put a semicolon before the flags to clearly + // separate them from the version, which can be long + // and have lots of weird things in it in development + // versions. We promise not to put a semicolon in the + // version, so it should be safe for readers to scan + // forward to the semicolon. + producer += "; " + string(producerExtra.P) + } newattr(dwinfo, dwarf.DW_AT_producer, dwarf.DW_CLS_STRING, int64(len(producer)), producer) // Write .debug_line Line Number Program Header (sec 6.2.4) // Fields marked with (*) must be changed for 64-bit dwarf unitLengthOffset := ls.Size - Adduint32(ctxt, ls, 0) // unit_length (*), filled in at end. + ls.AddUint32(ctxt.Arch, 0) // unit_length (*), filled in at end. unitstart = ls.Size - Adduint16(ctxt, ls, 2) // dwarf version (appendix F) + ls.AddUint16(ctxt.Arch, 2) // dwarf version (appendix F) headerLengthOffset := ls.Size - Adduint32(ctxt, ls, 0) // header_length (*), filled in at end. + ls.AddUint32(ctxt.Arch, 0) // header_length (*), filled in at end. 
headerstart = ls.Size // cpos == unitstart + 4 + 2 + 4 - Adduint8(ctxt, ls, 1) // minimum_instruction_length - Adduint8(ctxt, ls, 1) // default_is_stmt - Adduint8(ctxt, ls, LINE_BASE&0xFF) // line_base - Adduint8(ctxt, ls, LINE_RANGE) // line_range - Adduint8(ctxt, ls, OPCODE_BASE) // opcode_base - Adduint8(ctxt, ls, 0) // standard_opcode_lengths[1] - Adduint8(ctxt, ls, 1) // standard_opcode_lengths[2] - Adduint8(ctxt, ls, 1) // standard_opcode_lengths[3] - Adduint8(ctxt, ls, 1) // standard_opcode_lengths[4] - Adduint8(ctxt, ls, 1) // standard_opcode_lengths[5] - Adduint8(ctxt, ls, 0) // standard_opcode_lengths[6] - Adduint8(ctxt, ls, 0) // standard_opcode_lengths[7] - Adduint8(ctxt, ls, 0) // standard_opcode_lengths[8] - Adduint8(ctxt, ls, 1) // standard_opcode_lengths[9] - Adduint8(ctxt, ls, 0) // include_directories (empty) + ls.AddUint8(1) // minimum_instruction_length + ls.AddUint8(1) // default_is_stmt + ls.AddUint8(LINE_BASE & 0xFF) // line_base + ls.AddUint8(LINE_RANGE) // line_range + ls.AddUint8(OPCODE_BASE) // opcode_base + ls.AddUint8(0) // standard_opcode_lengths[1] + ls.AddUint8(1) // standard_opcode_lengths[2] + ls.AddUint8(1) // standard_opcode_lengths[3] + ls.AddUint8(1) // standard_opcode_lengths[4] + ls.AddUint8(1) // standard_opcode_lengths[5] + ls.AddUint8(0) // standard_opcode_lengths[6] + ls.AddUint8(0) // standard_opcode_lengths[7] + ls.AddUint8(0) // standard_opcode_lengths[8] + ls.AddUint8(1) // standard_opcode_lengths[9] + ls.AddUint8(0) // include_directories (empty) - for _, f := range ctxt.Filesyms { - Addstring(ls, f.Name) - Adduint8(ctxt, ls, 0) - Adduint8(ctxt, ls, 0) - Adduint8(ctxt, ls, 0) + // Create the file table. fileNums maps from global file + // indexes (created by numberfile) to CU-local indexes. + fileNums := make(map[int]int) + for _, s := range textp { + for _, f := range s.FuncInfo.File { + if _, ok := fileNums[int(f.Value)]; ok { + continue + } + // File indexes are 1-based. 
+ fileNums[int(f.Value)] = len(fileNums) + 1 + Addstring(ls, f.Name) + ls.AddUint8(0) + ls.AddUint8(0) + ls.AddUint8(0) + } + + // Look up the .debug_info sym for the function. We do this + // now so that we can walk the sym's relocations to discover + // files that aren't mentioned in S.FuncInfo.File (for + // example, files mentioned only in an inlined subroutine). + dsym := ctxt.Syms.Lookup(dwarf.InfoPrefix+s.Name, int(s.Version)) + importInfoSymbol(ctxt, dsym) + for ri := 0; ri < len(dsym.R); ri++ { + r := &dsym.R[ri] + if r.Type != objabi.R_DWARFFILEREF { + continue + } + _, ok := fileNums[int(r.Sym.Value)] + if !ok { + fileNums[int(r.Sym.Value)] = len(fileNums) + 1 + Addstring(ls, r.Sym.Name) + ls.AddUint8(0) + ls.AddUint8(0) + ls.AddUint8(0) + } + } } // 4 zeros: the string termination + 3 fields. - Adduint8(ctxt, ls, 0) + ls.AddUint8(0) // terminate file_names. headerend = ls.Size - Adduint8(ctxt, ls, 0) // start extended opcode - dwarf.Uleb128put(dwarfctxt, ls, 1+int64(SysArch.PtrSize)) - Adduint8(ctxt, ls, dwarf.DW_LNE_set_address) + ls.AddUint8(0) // start extended opcode + dwarf.Uleb128put(dwarfctxt, ls, 1+int64(ctxt.Arch.PtrSize)) + ls.AddUint8(dwarf.DW_LNE_set_address) + s := textp[0] pc := s.Value line := 1 file := 1 - Addaddr(ctxt, ls, s) + ls.AddAddr(ctxt.Arch, s) var pcfile Pciter var pcline Pciter - for _, s := range ctxt.Textp { - - epc = s.Value + s.Size - epcs = s - + for _, s := range textp { dsym := ctxt.Syms.Lookup(dwarf.InfoPrefix+s.Name, int(s.Version)) - dsym.Attr |= AttrNotInSymbolTable | AttrReachable - dsym.Type = SDWARFINFO - for _, r := range dsym.R { - if r.Type == objabi.R_DWARFREF && r.Sym.Size == 0 { - if Buildmode == BuildmodeShared { - // These type symbols may not be present in BuildmodeShared. Skip. 
- continue - } - n := nameFromDIESym(r.Sym) - defgotype(ctxt, ctxt.Syms.Lookup("type."+n, 0)) - } - } funcs = append(funcs, dsym) - - if s.FuncInfo == nil { - continue - } + absfuncs = collectAbstractFunctions(ctxt, s, dsym, absfuncs) finddebugruntimepath(s) pciterinit(ctxt, &pcfile, &s.FuncInfo.Pcfile) pciterinit(ctxt, &pcline, &s.FuncInfo.Pcline) - epc = pc + epc := pc for pcfile.done == 0 && pcline.done == 0 { if epc-s.Value >= int64(pcfile.nextpc) { pciternext(&pcfile) @@ -1118,8 +1253,12 @@ func writelines(ctxt *Link, syms []*Symbol) ([]*Symbol, []*Symbol) { } if int32(file) != pcfile.value { - Adduint8(ctxt, ls, dwarf.DW_LNS_set_file) - dwarf.Uleb128put(dwarfctxt, ls, int64(pcfile.value)) + ls.AddUint8(dwarf.DW_LNS_set_file) + idx, ok := fileNums[int(pcfile.value)] + if !ok { + Exitf("pcln table file missing from DWARF line table") + } + dwarf.Uleb128put(dwarfctxt, ls, int64(idx)) file = int(pcfile.value) } @@ -1136,16 +1275,63 @@ func writelines(ctxt *Link, syms []*Symbol) ([]*Symbol, []*Symbol) { } } - Adduint8(ctxt, ls, 0) // start extended opcode + ls.AddUint8(0) // start extended opcode dwarf.Uleb128put(dwarfctxt, ls, 1) - Adduint8(ctxt, ls, dwarf.DW_LNE_end_sequence) + ls.AddUint8(dwarf.DW_LNE_end_sequence) - newattr(dwinfo, dwarf.DW_AT_high_pc, dwarf.DW_CLS_ADDRESS, epc+1, epcs) + ls.SetUint32(ctxt.Arch, unitLengthOffset, uint32(ls.Size-unitstart)) + ls.SetUint32(ctxt.Arch, headerLengthOffset, uint32(headerend-headerstart)) - setuint32(ctxt, ls, unitLengthOffset, uint32(ls.Size-unitstart)) - setuint32(ctxt, ls, headerLengthOffset, uint32(headerend-headerstart)) + // Apply any R_DWARFFILEREF relocations, since we now know the + // line table file indices for this compilation unit. Note that + // this loop visits only subprogram DIEs: if the compiler is + // changed to generate DW_AT_decl_file attributes for other + // DIE flavors (ex: variables) then those DIEs would need to + // be included below. 
+ missing := make(map[int]interface{}) + for fidx := 0; fidx < len(funcs); fidx++ { + f := funcs[fidx] + for ri := 0; ri < len(f.R); ri++ { + r := &f.R[ri] + if r.Type != objabi.R_DWARFFILEREF { + continue + } + // Mark relocation as applied (signal to relocsym) + r.Done = true + idx, ok := fileNums[int(r.Sym.Value)] + if ok { + if int(int32(idx)) != idx { + Errorf(f, "bad R_DWARFFILEREF relocation: file index overflow") + } + if r.Siz != 4 { + Errorf(f, "bad R_DWARFFILEREF relocation: has size %d, expected 4", r.Siz) + } + if r.Off < 0 || r.Off+4 > int32(len(f.P)) { + Errorf(f, "bad R_DWARFFILEREF relocation offset %d + 4 would write past length %d", r.Off, len(s.P)) + continue + } + ctxt.Arch.ByteOrder.PutUint32(f.P[r.Off:r.Off+4], uint32(idx)) + } else { + _, found := missing[int(r.Sym.Value)] + if !found { + Errorf(f, "R_DWARFFILEREF relocation file missing: %v idx %d", r.Sym, r.Sym.Value) + missing[int(r.Sym.Value)] = nil + } + } + } + } - return syms, funcs + return dwinfo, funcs, absfuncs +} + +// writepcranges generates the DW_AT_ranges table for compilation unit cu. +func writepcranges(ctxt *Link, cu *dwarf.DWDie, base *sym.Symbol, pcs []dwarf.Range, ranges *sym.Symbol) { + var dwarfctxt dwarf.Context = dwctxt{ctxt} + + // Create PC ranges for this CU. + newattr(cu, dwarf.DW_AT_ranges, dwarf.DW_CLS_PTR, ranges.Size, ranges) + newattr(cu, dwarf.DW_AT_low_pc, dwarf.DW_CLS_ADDRESS, base.Value, base) + dwarf.PutRanges(dwarfctxt, ranges, nil, pcs) } /* @@ -1156,7 +1342,7 @@ const ( ) // appendPCDeltaCFA appends per-PC CFA deltas to b and returns the final slice. 
-func appendPCDeltaCFA(b []byte, deltapc, cfa int64) []byte { +func appendPCDeltaCFA(arch *sys.Arch, b []byte, deltapc, cfa int64) []byte { b = append(b, dwarf.DW_CFA_def_cfa_offset_sf) b = dwarf.AppendSleb128(b, cfa/dataAlignmentFactor) @@ -1167,23 +1353,19 @@ func appendPCDeltaCFA(b []byte, deltapc, cfa int64) []byte { b = append(b, dwarf.DW_CFA_advance_loc1) b = append(b, uint8(deltapc)) case deltapc < 0x10000: - b = append(b, dwarf.DW_CFA_advance_loc2) - b = Thearch.Append16(b, uint16(deltapc)) + b = append(b, dwarf.DW_CFA_advance_loc2, 0, 0) + arch.ByteOrder.PutUint16(b[len(b)-2:], uint16(deltapc)) default: - b = append(b, dwarf.DW_CFA_advance_loc4) - b = Thearch.Append32(b, uint32(deltapc)) + b = append(b, dwarf.DW_CFA_advance_loc4, 0, 0, 0, 0) + arch.ByteOrder.PutUint32(b[len(b)-4:], uint32(deltapc)) } return b } -func writeframes(ctxt *Link, syms []*Symbol) []*Symbol { +func writeframes(ctxt *Link, syms []*sym.Symbol) []*sym.Symbol { var dwarfctxt dwarf.Context = dwctxt{ctxt} - if framesec == nil { - framesec = ctxt.Syms.Lookup(".debug_frame", 0) - } - framesec.Type = SDWARFSECT - framesec.R = framesec.R[:0] - fs := framesec + fs := ctxt.Syms.Lookup(".debug_frame", 0) + fs.Type = sym.SDWARFSECT syms = append(syms, fs) // Emit the CIE, Section 6.4.1 @@ -1191,31 +1373,31 @@ func writeframes(ctxt *Link, syms []*Symbol) []*Symbol { if haslinkregister(ctxt) { cieReserve = 32 } - Adduint32(ctxt, fs, cieReserve) // initial length, must be multiple of thearch.ptrsize - Adduint32(ctxt, fs, 0xffffffff) // cid. - Adduint8(ctxt, fs, 3) // dwarf version (appendix F) - Adduint8(ctxt, fs, 0) // augmentation "" + fs.AddUint32(ctxt.Arch, cieReserve) // initial length, must be multiple of thearch.ptrsize + fs.AddUint32(ctxt.Arch, 0xffffffff) // cid. 
+ fs.AddUint8(3) // dwarf version (appendix F) + fs.AddUint8(0) // augmentation "" dwarf.Uleb128put(dwarfctxt, fs, 1) // code_alignment_factor dwarf.Sleb128put(dwarfctxt, fs, dataAlignmentFactor) // all CFI offset calculations include multiplication with this factor dwarf.Uleb128put(dwarfctxt, fs, int64(Thearch.Dwarfreglr)) // return_address_register - Adduint8(ctxt, fs, dwarf.DW_CFA_def_cfa) // Set the current frame address.. + fs.AddUint8(dwarf.DW_CFA_def_cfa) // Set the current frame address.. dwarf.Uleb128put(dwarfctxt, fs, int64(Thearch.Dwarfregsp)) // ...to use the value in the platform's SP register (defined in l.go)... if haslinkregister(ctxt) { dwarf.Uleb128put(dwarfctxt, fs, int64(0)) // ...plus a 0 offset. - Adduint8(ctxt, fs, dwarf.DW_CFA_same_value) // The platform's link register is unchanged during the prologue. + fs.AddUint8(dwarf.DW_CFA_same_value) // The platform's link register is unchanged during the prologue. dwarf.Uleb128put(dwarfctxt, fs, int64(Thearch.Dwarfreglr)) - Adduint8(ctxt, fs, dwarf.DW_CFA_val_offset) // The previous value... + fs.AddUint8(dwarf.DW_CFA_val_offset) // The previous value... dwarf.Uleb128put(dwarfctxt, fs, int64(Thearch.Dwarfregsp)) // ...of the platform's SP register... dwarf.Uleb128put(dwarfctxt, fs, int64(0)) // ...is CFA+0. } else { - dwarf.Uleb128put(dwarfctxt, fs, int64(SysArch.PtrSize)) // ...plus the word size (because the call instruction implicitly adds one word to the frame). + dwarf.Uleb128put(dwarfctxt, fs, int64(ctxt.Arch.PtrSize)) // ...plus the word size (because the call instruction implicitly adds one word to the frame). - Adduint8(ctxt, fs, dwarf.DW_CFA_offset_extended) // The previous value... - dwarf.Uleb128put(dwarfctxt, fs, int64(Thearch.Dwarfreglr)) // ...of the return address... - dwarf.Uleb128put(dwarfctxt, fs, int64(-SysArch.PtrSize)/dataAlignmentFactor) // ...is saved at [CFA - (PtrSize/4)]. + fs.AddUint8(dwarf.DW_CFA_offset_extended) // The previous value... 
+ dwarf.Uleb128put(dwarfctxt, fs, int64(Thearch.Dwarfreglr)) // ...of the return address... + dwarf.Uleb128put(dwarfctxt, fs, int64(-ctxt.Arch.PtrSize)/dataAlignmentFactor) // ...is saved at [CFA - (PtrSize/4)]. } // 4 is to exclude the length field. @@ -1225,7 +1407,7 @@ func writeframes(ctxt *Link, syms []*Symbol) []*Symbol { Exitf("dwarf: cieReserve too small by %d bytes.", -pad) } - Addbytes(fs, zeros[:pad]) + fs.AddBytes(zeros[:pad]) var deltaBuf []byte var pcsp Pciter @@ -1265,12 +1447,12 @@ func writeframes(ctxt *Link, syms []*Symbol) []*Symbol { deltaBuf = append(deltaBuf, dwarf.DW_CFA_same_value) deltaBuf = dwarf.AppendUleb128(deltaBuf, uint64(Thearch.Dwarfreglr)) } - deltaBuf = appendPCDeltaCFA(deltaBuf, int64(nextpc)-int64(pcsp.pc), int64(pcsp.value)) + deltaBuf = appendPCDeltaCFA(ctxt.Arch, deltaBuf, int64(nextpc)-int64(pcsp.pc), int64(pcsp.value)) } else { - deltaBuf = appendPCDeltaCFA(deltaBuf, int64(nextpc)-int64(pcsp.pc), int64(SysArch.PtrSize)+int64(pcsp.value)) + deltaBuf = appendPCDeltaCFA(ctxt.Arch, deltaBuf, int64(nextpc)-int64(pcsp.pc), int64(ctxt.Arch.PtrSize)+int64(pcsp.value)) } } - pad := int(Rnd(int64(len(deltaBuf)), int64(SysArch.PtrSize))) - len(deltaBuf) + pad := int(Rnd(int64(len(deltaBuf)), int64(ctxt.Arch.PtrSize))) - len(deltaBuf) deltaBuf = append(deltaBuf, zeros[:pad]...) // Emit the FDE header, Section 6.4.1. 
@@ -1278,42 +1460,28 @@ func writeframes(ctxt *Link, syms []*Symbol) []*Symbol { // 4 bytes: Pointer to the CIE above, at offset 0 // ptrsize: initial location // ptrsize: address range - Adduint32(ctxt, fs, uint32(4+2*SysArch.PtrSize+len(deltaBuf))) // length (excludes itself) - if Linkmode == LinkExternal { - adddwarfref(ctxt, fs, framesec, 4) + fs.AddUint32(ctxt.Arch, uint32(4+2*ctxt.Arch.PtrSize+len(deltaBuf))) // length (excludes itself) + if ctxt.LinkMode == LinkExternal { + adddwarfref(ctxt, fs, fs, 4) } else { - Adduint32(ctxt, fs, 0) // CIE offset + fs.AddUint32(ctxt.Arch, 0) // CIE offset } - Addaddr(ctxt, fs, s) - adduintxx(ctxt, fs, uint64(s.Size), SysArch.PtrSize) // address range - Addbytes(fs, deltaBuf) + fs.AddAddr(ctxt.Arch, s) + fs.AddUintXX(ctxt.Arch, uint64(s.Size), ctxt.Arch.PtrSize) // address range + fs.AddBytes(deltaBuf) } return syms } -func writeranges(ctxt *Link, syms []*Symbol) []*Symbol { - if rangesec == nil { - rangesec = ctxt.Syms.Lookup(".debug_ranges", 0) - } - rangesec.Type = SDWARFSECT - rangesec.Attr |= AttrReachable - rangesec.R = rangesec.R[:0] - +func writeranges(ctxt *Link, syms []*sym.Symbol) []*sym.Symbol { for _, s := range ctxt.Textp { - rangeSym := ctxt.Syms.Lookup(dwarf.RangePrefix+s.Name, int(s.Version)) - rangeSym.Attr |= AttrReachable - rangeSym.Type = SDWARFRANGE - rangeSym.Value = rangesec.Size - rangesec.P = append(rangesec.P, rangeSym.P...) 
- for _, r := range rangeSym.R { - r.Off += int32(rangesec.Size) - rangesec.R = append(rangesec.R, r) + rangeSym := ctxt.Syms.ROLookup(dwarf.RangePrefix+s.Name, int(s.Version)) + if rangeSym == nil || rangeSym.Size == 0 { + continue } - rangesec.Size += rangeSym.Size - } - if rangesec.Size > 0 { - // PE does not like empty sections - syms = append(syms, rangesec) + rangeSym.Attr |= sym.AttrReachable | sym.AttrNotInSymbolTable + rangeSym.Type = sym.SDWARFRANGE + syms = append(syms, rangeSym) } return syms } @@ -1325,43 +1493,43 @@ const ( COMPUNITHEADERSIZE = 4 + 2 + 4 + 1 ) -func writeinfo(ctxt *Link, syms []*Symbol, funcs []*Symbol) []*Symbol { - if infosec == nil { - infosec = ctxt.Syms.Lookup(".debug_info", 0) - } - infosec.R = infosec.R[:0] - infosec.Type = SDWARFINFO - infosec.Attr |= AttrReachable +func writeinfo(ctxt *Link, syms []*sym.Symbol, units []*compilationUnit, abbrevsym *sym.Symbol) []*sym.Symbol { + infosec := ctxt.Syms.Lookup(".debug_info", 0) + infosec.Type = sym.SDWARFINFO + infosec.Attr |= sym.AttrReachable syms = append(syms, infosec) - if arangessec == nil { - arangessec = ctxt.Syms.Lookup(".dwarfaranges", 0) - } - arangessec.R = arangessec.R[:0] - var dwarfctxt dwarf.Context = dwctxt{ctxt} + // Re-index per-package information by its CU die. + unitByDIE := make(map[*dwarf.DWDie]*compilationUnit) + for _, u := range units { + unitByDIE[u.dwinfo] = u + } + for compunit := dwroot.Child; compunit != nil; compunit = compunit.Link { s := dtolsym(compunit.Sym) + u := unitByDIE[compunit] // Write .debug_info Compilation Unit Header (sec 7.5.1) // Fields marked with (*) must be changed for 64-bit dwarf // This must match COMPUNITHEADERSIZE above. - Adduint32(ctxt, s, 0) // unit_length (*), will be filled in later. - Adduint16(ctxt, s, 4) // dwarf version (appendix F) + s.AddUint32(ctxt.Arch, 0) // unit_length (*), will be filled in later. 
+ s.AddUint16(ctxt.Arch, 4) // dwarf version (appendix F) // debug_abbrev_offset (*) adddwarfref(ctxt, s, abbrevsym, 4) - Adduint8(ctxt, s, uint8(SysArch.PtrSize)) // address_size + s.AddUint8(uint8(ctxt.Arch.PtrSize)) // address_size dwarf.Uleb128put(dwarfctxt, s, int64(compunit.Abbrev)) dwarf.PutAttrs(dwarfctxt, s, compunit.Abbrev, compunit.Attr) - cu := []*Symbol{s} - if funcs != nil { - cu = append(cu, funcs...) - funcs = nil + cu := []*sym.Symbol{s} + cu = append(cu, u.absFnDIEs...) + cu = append(cu, u.funcDIEs...) + if u.consts != nil { + cu = append(cu, u.consts) } cu = putdies(ctxt, dwarfctxt, cu, compunit.Child) var cusize int64 @@ -1369,7 +1537,9 @@ func writeinfo(ctxt *Link, syms []*Symbol, funcs []*Symbol) []*Symbol { cusize += child.Size } cusize -= 4 // exclude the length field. - setuint32(ctxt, s, 0, uint32(cusize)) + s.SetUint32(ctxt.Arch, 0, uint32(cusize)) + // Leave a breadcrumb for writepub. This does not + // appear in the DWARF output. newattr(compunit, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, cusize, 0) syms = append(syms, cu...) } @@ -1394,9 +1564,9 @@ func ispubtype(die *dwarf.DWDie) bool { return die.Abbrev >= dwarf.DW_ABRV_NULLTYPE } -func writepub(ctxt *Link, sname string, ispub func(*dwarf.DWDie) bool, syms []*Symbol) []*Symbol { +func writepub(ctxt *Link, sname string, ispub func(*dwarf.DWDie) bool, syms []*sym.Symbol) []*sym.Symbol { s := ctxt.Syms.Lookup(sname, 0) - s.Type = SDWARFSECT + s.Type = sym.SDWARFSECT syms = append(syms, s) for compunit := dwroot.Child; compunit != nil; compunit = compunit.Link { @@ -1404,10 +1574,10 @@ func writepub(ctxt *Link, sname string, ispub func(*dwarf.DWDie) bool, syms []*S culength := uint32(getattr(compunit, dwarf.DW_AT_byte_size).Value) + 4 // Write .debug_pubnames/types Header (sec 6.1.1) - Adduint32(ctxt, s, 0) // unit_length (*), will be filled in later. - Adduint16(ctxt, s, 2) // dwarf version (appendix F) + s.AddUint32(ctxt.Arch, 0) // unit_length (*), will be filled in later. 
+ s.AddUint16(ctxt.Arch, 2) // dwarf version (appendix F) adddwarfref(ctxt, s, dtolsym(compunit.Sym), 4) // debug_info_offset (of the Comp unit Header) - Adduint32(ctxt, s, culength) // debug_info_length + s.AddUint32(ctxt.Arch, culength) // debug_info_length for die := compunit.Child; die != nil; die = die.Link { if !ispub(die) { @@ -1422,62 +1592,16 @@ func writepub(ctxt *Link, sname string, ispub func(*dwarf.DWDie) bool, syms []*S Addstring(s, name) } - Adduint32(ctxt, s, 0) + s.AddUint32(ctxt.Arch, 0) - setuint32(ctxt, s, sectionstart, uint32(s.Size-sectionstart)-4) // exclude the length field. + s.SetUint32(ctxt.Arch, sectionstart, uint32(s.Size-sectionstart)-4) // exclude the length field. } return syms } -/* - * emit .debug_aranges. _info must have been written before, - * because we need die->offs of dwarf.DW_globals. - */ -func writearanges(ctxt *Link, syms []*Symbol) []*Symbol { - s := ctxt.Syms.Lookup(".debug_aranges", 0) - s.Type = SDWARFSECT - // The first tuple is aligned to a multiple of the size of a single tuple - // (twice the size of an address) - headersize := int(Rnd(4+2+4+1+1, int64(SysArch.PtrSize*2))) // don't count unit_length field itself - - for compunit := dwroot.Child; compunit != nil; compunit = compunit.Link { - b := getattr(compunit, dwarf.DW_AT_low_pc) - if b == nil { - continue - } - e := getattr(compunit, dwarf.DW_AT_high_pc) - if e == nil { - continue - } - - // Write .debug_aranges Header + entry (sec 6.1.2) - unitlength := uint32(headersize) + 4*uint32(SysArch.PtrSize) - 4 - Adduint32(ctxt, s, unitlength) // unit_length (*) - Adduint16(ctxt, s, 2) // dwarf version (appendix F) - - adddwarfref(ctxt, s, dtolsym(compunit.Sym), 4) - - Adduint8(ctxt, s, uint8(SysArch.PtrSize)) // address_size - Adduint8(ctxt, s, 0) // segment_size - padding := headersize - (4 + 2 + 4 + 1 + 1) - for i := 0; i < padding; i++ { - Adduint8(ctxt, s, 0) - } - - Addaddrplus(ctxt, s, b.Data.(*Symbol), b.Value-(b.Data.(*Symbol)).Value) - adduintxx(ctxt, s, 
uint64(e.Value-b.Value), SysArch.PtrSize) - adduintxx(ctxt, s, 0, SysArch.PtrSize) - adduintxx(ctxt, s, 0, SysArch.PtrSize) - } - if s.Size > 0 { - syms = append(syms, s) - } - return syms -} - -func writegdbscript(ctxt *Link, syms []*Symbol) []*Symbol { - if Linkmode == LinkExternal && Headtype == objabi.Hwindows && Buildmode == BuildmodeCArchive { +func writegdbscript(ctxt *Link, syms []*sym.Symbol) []*sym.Symbol { + if ctxt.LinkMode == LinkExternal && ctxt.HeadType == objabi.Hwindows && ctxt.BuildMode == BuildModeCArchive { // gcc on Windows places .debug_gdb_scripts in the wrong location, which // causes the program not to run. See https://golang.org/issue/20183 // Non c-archives can avoid this issue via a linker script @@ -1489,9 +1613,9 @@ func writegdbscript(ctxt *Link, syms []*Symbol) []*Symbol { if gdbscript != "" { s := ctxt.Syms.Lookup(".debug_gdb_scripts", 0) - s.Type = SDWARFSECT + s.Type = sym.SDWARFSECT syms = append(syms, s) - Adduint8(ctxt, s, 1) // magic 1 byte? + s.AddUint8(1) // magic 1 byte? 
Addstring(s, gdbscript) } @@ -1513,18 +1637,18 @@ func dwarfgeneratedebugsyms(ctxt *Link) { if *FlagW { // disable dwarf return } - if *FlagS && Headtype != objabi.Hdarwin { + if *FlagS && ctxt.HeadType != objabi.Hdarwin { return } - if Headtype == objabi.Hplan9 { + if ctxt.HeadType == objabi.Hplan9 { return } - if Linkmode == LinkExternal { + if ctxt.LinkMode == LinkExternal { switch { - case Iself: - case Headtype == objabi.Hdarwin: - case Headtype == objabi.Hwindows: + case ctxt.IsELF: + case ctxt.HeadType == objabi.Hdarwin: + case ctxt.HeadType == objabi.Hwindows: default: return } @@ -1545,7 +1669,7 @@ func dwarfgeneratedebugsyms(ctxt *Link) { die := newdie(ctxt, &dwtypes, dwarf.DW_ABRV_BASETYPE, "uintptr", 0) // needed for array size newattr(die, dwarf.DW_AT_encoding, dwarf.DW_CLS_CONSTANT, dwarf.DW_ATE_unsigned, 0) - newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, int64(SysArch.PtrSize), 0) + newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, int64(ctxt.Arch.PtrSize), 0) newattr(die, dwarf.DW_AT_go_kind, dwarf.DW_CLS_CONSTANT, objabi.KindUintptr, 0) // Prototypes needed for type synthesis. @@ -1577,16 +1701,31 @@ func dwarfgeneratedebugsyms(ctxt *Link) { genasmsym(ctxt, defdwsymb) - syms := writeabbrev(ctxt, nil) - syms, funcs := writelines(ctxt, syms) - syms = writeframes(ctxt, syms) - syms = writeranges(ctxt, syms) + abbrev := writeabbrev(ctxt) + syms := []*sym.Symbol{abbrev} + + units := getCompilationUnits(ctxt) + + // Write per-package line and range tables and start their CU DIEs. 
+ debugLine := ctxt.Syms.Lookup(".debug_line", 0) + debugLine.Type = sym.SDWARFSECT + debugRanges := ctxt.Syms.Lookup(".debug_ranges", 0) + debugRanges.Type = sym.SDWARFRANGE + debugRanges.Attr |= sym.AttrReachable + syms = append(syms, debugLine) + for _, u := range units { + u.dwinfo, u.funcDIEs, u.absFnDIEs = writelines(ctxt, u.lib, u.lib.Textp, debugLine) + writepcranges(ctxt, u.dwinfo, u.lib.Textp[0], u.pcs, debugRanges) + } synthesizestringtypes(ctxt, dwtypes.Child) synthesizeslicetypes(ctxt, dwtypes.Child) synthesizemaptypes(ctxt, dwtypes.Child) synthesizechantypes(ctxt, dwtypes.Child) + // newdie adds DIEs to the *beginning* of the parent's DIE list. + // Now that we're done creating DIEs, reverse the trees so DIEs + // appear in the order they were created. reversetree(&dwroot.Child) reversetree(&dwtypes.Child) reversetree(&dwglobals.Child) @@ -1594,38 +1733,68 @@ func dwarfgeneratedebugsyms(ctxt *Link) { movetomodule(&dwtypes) movetomodule(&dwglobals) - // Need to reorder symbols so SDWARFINFO is after all SDWARFSECT + // Need to reorder symbols so sym.SDWARFINFO is after all sym.SDWARFSECT // (but we need to generate dies before writepub) - infosyms := writeinfo(ctxt, nil, funcs) + infosyms := writeinfo(ctxt, nil, units, abbrev) + syms = writeframes(ctxt, syms) syms = writepub(ctxt, ".debug_pubnames", ispubname, syms) syms = writepub(ctxt, ".debug_pubtypes", ispubtype, syms) - syms = writearanges(ctxt, syms) syms = writegdbscript(ctxt, syms) + // Now we're done writing SDWARFSECT symbols, so we can write + // other SDWARF* symbols. syms = append(syms, infosyms...) 
+ syms = collectlocs(ctxt, syms, units) + syms = append(syms, debugRanges) + syms = writeranges(ctxt, syms) dwarfp = syms } +func collectlocs(ctxt *Link, syms []*sym.Symbol, units []*compilationUnit) []*sym.Symbol { + empty := true + for _, u := range units { + for _, fn := range u.funcDIEs { + for _, reloc := range fn.R { + if reloc.Type == objabi.R_DWARFSECREF && strings.HasPrefix(reloc.Sym.Name, dwarf.LocPrefix) { + reloc.Sym.Attr |= sym.AttrReachable | sym.AttrNotInSymbolTable + syms = append(syms, reloc.Sym) + empty = false + // One location list entry per function, but many relocations to it. Don't duplicate. + break + } + } + } + } + // Don't emit .debug_loc if it's empty -- it makes the ARM linker mad. + if !empty { + locsym := ctxt.Syms.Lookup(".debug_loc", 0) + locsym.Type = sym.SDWARFLOC + locsym.Attr |= sym.AttrReachable + syms = append(syms, locsym) + } + return syms +} + /* * Elf. */ -func dwarfaddshstrings(ctxt *Link, shstrtab *Symbol) { +func dwarfaddshstrings(ctxt *Link, shstrtab *sym.Symbol) { if *FlagW { // disable dwarf return } Addstring(shstrtab, ".debug_abbrev") - Addstring(shstrtab, ".debug_aranges") Addstring(shstrtab, ".debug_frame") Addstring(shstrtab, ".debug_info") + Addstring(shstrtab, ".debug_loc") Addstring(shstrtab, ".debug_line") Addstring(shstrtab, ".debug_pubnames") Addstring(shstrtab, ".debug_pubtypes") Addstring(shstrtab, ".debug_gdb_scripts") Addstring(shstrtab, ".debug_ranges") - if Linkmode == LinkExternal { + if ctxt.LinkMode == LinkExternal { Addstring(shstrtab, elfRelType+".debug_info") - Addstring(shstrtab, elfRelType+".debug_aranges") + Addstring(shstrtab, elfRelType+".debug_loc") Addstring(shstrtab, elfRelType+".debug_line") Addstring(shstrtab, elfRelType+".debug_frame") Addstring(shstrtab, elfRelType+".debug_pubnames") @@ -1640,35 +1809,23 @@ func dwarfaddelfsectionsyms(ctxt *Link) { if *FlagW { // disable dwarf return } - if Linkmode != LinkExternal { + if ctxt.LinkMode != LinkExternal { return } - sym := 
ctxt.Syms.Lookup(".debug_info", 0) - putelfsectionsym(sym, sym.Sect.Elfsect.shnum) - sym = ctxt.Syms.Lookup(".debug_abbrev", 0) - putelfsectionsym(sym, sym.Sect.Elfsect.shnum) - sym = ctxt.Syms.Lookup(".debug_line", 0) - putelfsectionsym(sym, sym.Sect.Elfsect.shnum) - sym = ctxt.Syms.Lookup(".debug_frame", 0) - putelfsectionsym(sym, sym.Sect.Elfsect.shnum) - sym = ctxt.Syms.Lookup(".debug_ranges", 0) - if sym.Sect != nil { - putelfsectionsym(sym, sym.Sect.Elfsect.shnum) - } -} - -/* - * Windows PE - */ -func dwarfaddpeheaders(ctxt *Link) { - if *FlagW { // disable dwarf - return - } - for _, sect := range Segdwarf.Sections { - h := newPEDWARFSection(ctxt, sect.Name, int64(sect.Length)) - fileoff := sect.Vaddr - Segdwarf.Vaddr + Segdwarf.Fileoff - if uint64(h.PointerToRawData) != fileoff { - Exitf("%s.PointerToRawData = %#x, want %#x", sect.Name, h.PointerToRawData, fileoff) - } + s := ctxt.Syms.Lookup(".debug_info", 0) + putelfsectionsym(ctxt.Out, s, s.Sect.Elfsect.(*ElfShdr).shnum) + s = ctxt.Syms.Lookup(".debug_abbrev", 0) + putelfsectionsym(ctxt.Out, s, s.Sect.Elfsect.(*ElfShdr).shnum) + s = ctxt.Syms.Lookup(".debug_line", 0) + putelfsectionsym(ctxt.Out, s, s.Sect.Elfsect.(*ElfShdr).shnum) + s = ctxt.Syms.Lookup(".debug_frame", 0) + putelfsectionsym(ctxt.Out, s, s.Sect.Elfsect.(*ElfShdr).shnum) + s = ctxt.Syms.Lookup(".debug_loc", 0) + if s.Sect != nil { + putelfsectionsym(ctxt.Out, s, s.Sect.Elfsect.(*ElfShdr).shnum) + } + s = ctxt.Syms.Lookup(".debug_ranges", 0) + if s.Sect != nil { + putelfsectionsym(ctxt.Out, s, s.Sect.Elfsect.(*ElfShdr).shnum) } } diff --git a/src/cmd/link/internal/ld/dwarf_test.go b/src/cmd/link/internal/ld/dwarf_test.go index 4e7413f7394..0bd5133f483 100644 --- a/src/cmd/link/internal/ld/dwarf_test.go +++ b/src/cmd/link/internal/ld/dwarf_test.go @@ -7,6 +7,8 @@ package ld import ( objfilepkg "cmd/internal/objfile" // renamed to avoid conflict with objfile function "debug/dwarf" + "errors" + "fmt" "internal/testenv" "io/ioutil" "os" @@ 
-30,7 +32,7 @@ func TestRuntimeTypeDIEs(t *testing.T) { } defer os.RemoveAll(dir) - f := gobuild(t, dir, `package main; func main() { }`) + f := gobuild(t, dir, `package main; func main() { }`, false) defer f.Close() dwarf, err := f.DWARF() @@ -75,7 +77,7 @@ func findTypes(t *testing.T, dw *dwarf.Data, want map[string]bool) (found map[st return } -func gobuild(t *testing.T, dir string, testfile string) *objfilepkg.File { +func gobuild(t *testing.T, dir string, testfile string, opt bool) *objfilepkg.File { src := filepath.Join(dir, "test.go") dst := filepath.Join(dir, "out") @@ -83,7 +85,11 @@ func gobuild(t *testing.T, dir string, testfile string) *objfilepkg.File { t.Fatal(err) } - cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", dst, src) + gcflags := "-gcflags=-N -l" + if opt { + gcflags = "-gcflags=-l=4" + } + cmd := exec.Command(testenv.GoToolPath(t), "build", gcflags, "-o", dst, src) if b, err := cmd.CombinedOutput(); err != nil { t.Logf("build: %s\n", b) t.Fatalf("build error: %v", err) @@ -136,7 +142,7 @@ func main() { } defer os.RemoveAll(dir) - f := gobuild(t, dir, prog) + f := gobuild(t, dir, prog, false) defer f.Close() @@ -192,3 +198,410 @@ func findMembers(rdr *dwarf.Reader) (map[string]bool, error) { } return memberEmbedded, nil } + +func TestSizes(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping on plan9; no DWARF symbol table in executables") + } + + // DWARF sizes should never be -1. 
+ // See issue #21097 + const prog = ` +package main +var x func() +var y [4]func() +func main() { + x = nil + y[0] = nil +} +` + dir, err := ioutil.TempDir("", "TestSizes") + if err != nil { + t.Fatalf("could not create directory: %v", err) + } + defer os.RemoveAll(dir) + f := gobuild(t, dir, prog, false) + defer f.Close() + d, err := f.DWARF() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + rdr := d.Reader() + for entry, err := rdr.Next(); entry != nil; entry, err = rdr.Next() { + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + switch entry.Tag { + case dwarf.TagArrayType, dwarf.TagPointerType, dwarf.TagStructType, dwarf.TagBaseType, dwarf.TagSubroutineType, dwarf.TagTypedef: + default: + continue + } + typ, err := d.Type(entry.Offset) + if err != nil { + t.Fatalf("can't read type: %v", err) + } + if typ.Size() < 0 { + t.Errorf("subzero size %s %s %T", typ, entry.Tag, typ) + } + } +} + +func TestFieldOverlap(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping on plan9; no DWARF symbol table in executables") + } + + // This test grew out of issue 21094, where specific sudog DWARF types + // had elem fields set to values instead of pointers. 
+ const prog = ` +package main + +var c chan string + +func main() { + c <- "foo" +} +` + dir, err := ioutil.TempDir("", "TestFieldOverlap") + if err != nil { + t.Fatalf("could not create directory: %v", err) + } + defer os.RemoveAll(dir) + + f := gobuild(t, dir, prog, false) + defer f.Close() + + d, err := f.DWARF() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + rdr := d.Reader() + for entry, err := rdr.Next(); entry != nil; entry, err = rdr.Next() { + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + if entry.Tag != dwarf.TagStructType { + continue + } + typ, err := d.Type(entry.Offset) + if err != nil { + t.Fatalf("can't read type: %v", err) + } + s := typ.(*dwarf.StructType) + for i := 0; i < len(s.Field); i++ { + end := s.Field[i].ByteOffset + s.Field[i].Type.Size() + var limit int64 + if i == len(s.Field)-1 { + limit = s.Size() + } else { + limit = s.Field[i+1].ByteOffset + } + if end > limit { + name := entry.Val(dwarf.AttrName).(string) + t.Fatalf("field %s.%s overlaps next field", name, s.Field[i].Name) + } + } + } +} + +func TestVarDeclCoordsAndSubrogramDeclFile(t *testing.T) { + testenv.MustHaveGoBuild(t) + + if runtime.GOOS == "plan9" { + t.Skip("skipping on plan9; no DWARF symbol table in executables") + } + + const prog = ` +package main + +func main() { + var i int + i = i +} +` + dir, err := ioutil.TempDir("", "TestVarDeclCoords") + if err != nil { + t.Fatalf("could not create directory: %v", err) + } + defer os.RemoveAll(dir) + + f := gobuild(t, dir, prog, false) + + d, err := f.DWARF() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + rdr := d.Reader() + ex := examiner{} + if err := ex.populate(rdr); err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + // Locate the main.main DIE + mains := ex.Named("main.main") + if len(mains) == 0 { + t.Fatalf("unable to locate DIE for main.main") + } + if len(mains) != 1 { + t.Fatalf("more than one main.main DIE") + } + maindie := mains[0] + + 
// Vet the main.main DIE + if maindie.Tag != dwarf.TagSubprogram { + t.Fatalf("unexpected tag %v on main.main DIE", maindie.Tag) + } + + // Walk main's children and select variable "i". + mainIdx := ex.idxFromOffset(maindie.Offset) + childDies := ex.Children(mainIdx) + var iEntry *dwarf.Entry + for _, child := range childDies { + if child.Tag == dwarf.TagVariable && child.Val(dwarf.AttrName).(string) == "i" { + iEntry = child + break + } + } + if iEntry == nil { + t.Fatalf("didn't find DW_TAG_variable for i in main.main") + } + + // Verify line/file attributes. + line := iEntry.Val(dwarf.AttrDeclLine) + if line == nil || line.(int64) != 5 { + t.Errorf("DW_AT_decl_line for i is %v, want 5", line) + } + + file := maindie.Val(dwarf.AttrDeclFile) + if file == nil || file.(int64) != 1 { + t.Errorf("DW_AT_decl_file for main is %v, want 1", file) + } +} + +// Helper class for supporting queries on DIEs within a DWARF .debug_info +// section. Invoke the populate() method below passing in a dwarf.Reader, +// which will read in all DIEs and keep track of parent/child +// relationships. Queries can then be made to ask for DIEs by name or +// by offset. This will hopefully reduce boilerplate for future test +// writing. + +type examiner struct { + dies []*dwarf.Entry + idxByOffset map[dwarf.Offset]int + kids map[int][]int + byname map[string][]int +} + +// Populate the examiner using the DIEs read from rdr. 
+func (ex *examiner) populate(rdr *dwarf.Reader) error { + ex.idxByOffset = make(map[dwarf.Offset]int) + ex.kids = make(map[int][]int) + ex.byname = make(map[string][]int) + var nesting []int + for entry, err := rdr.Next(); entry != nil; entry, err = rdr.Next() { + if err != nil { + return err + } + if entry.Tag == 0 { + // terminator + if len(nesting) == 0 { + return errors.New("nesting stack underflow") + } + nesting = nesting[:len(nesting)-1] + continue + } + idx := len(ex.dies) + ex.dies = append(ex.dies, entry) + if _, found := ex.idxByOffset[entry.Offset]; found { + return errors.New("DIE clash on offset") + } + ex.idxByOffset[entry.Offset] = idx + if name, ok := entry.Val(dwarf.AttrName).(string); ok { + ex.byname[name] = append(ex.byname[name], idx) + } + if len(nesting) > 0 { + parent := nesting[len(nesting)-1] + ex.kids[parent] = append(ex.kids[parent], idx) + } + if entry.Children { + nesting = append(nesting, idx) + } + } + if len(nesting) > 0 { + return errors.New("unterminated child sequence") + } + return nil +} + +func indent(ilevel int) { + for i := 0; i < ilevel; i++ { + fmt.Printf(" ") + } +} + +// For debugging new tests +func (ex *examiner) dumpEntry(idx int, dumpKids bool, ilevel int) error { + if idx >= len(ex.dies) { + msg := fmt.Sprintf("bad DIE %d: index out of range\n", idx) + return errors.New(msg) + } + entry := ex.dies[idx] + indent(ilevel) + fmt.Printf("%d: %v\n", idx, entry.Tag) + for _, f := range entry.Field { + indent(ilevel) + fmt.Printf("at=%v val=%v\n", f.Attr, f.Val) + } + if dumpKids { + ksl := ex.kids[idx] + for _, k := range ksl { + ex.dumpEntry(k, true, ilevel+2) + } + } + return nil +} + +// Given a DIE offset, return the previously read dwarf.Entry, or nil +func (ex *examiner) entryFromOffset(off dwarf.Offset) *dwarf.Entry { + if idx, found := ex.idxByOffset[off]; found && idx != -1 { + return ex.entryFromIdx(idx) + } + return nil +} + +// Return the ID that that examiner uses to refer to the DIE at offset off +func (ex 
*examiner) idxFromOffset(off dwarf.Offset) int { + if idx, found := ex.idxByOffset[off]; found { + return idx + } + return -1 +} + +// Return the dwarf.Entry pointer for the DIE with id 'idx' +func (ex *examiner) entryFromIdx(idx int) *dwarf.Entry { + if idx >= len(ex.dies) { + return nil + } + return ex.dies[idx] +} + +// Returns a list of child entries for a die with ID 'idx' +func (ex *examiner) Children(idx int) []*dwarf.Entry { + sl := ex.kids[idx] + ret := make([]*dwarf.Entry, len(sl)) + for i, k := range sl { + ret[i] = ex.entryFromIdx(k) + } + return ret +} + +// Return a list of all DIEs with name 'name'. When searching for DIEs +// by name, keep in mind that the returned results will include child +// DIEs such as params/variables. For example, asking for all DIEs named +// "p" for even a small program will give you 400-500 entries. +func (ex *examiner) Named(name string) []*dwarf.Entry { + sl := ex.byname[name] + ret := make([]*dwarf.Entry, len(sl)) + for i, k := range sl { + ret[i] = ex.entryFromIdx(k) + } + return ret +} + +func TestInlinedRoutineRecords(t *testing.T) { + testenv.MustHaveGoBuild(t) + + if runtime.GOOS == "plan9" { + t.Skip("skipping on plan9; no DWARF symbol table in executables") + } + + const prog = ` +package main + +var G int + +func cand(x, y int) int { + return (x + y) ^ (y - x) +} + +func main() { + x := cand(G*G,G|7%G) + G = x +} +` + dir, err := ioutil.TempDir("", "TestInlinedRoutineRecords") + if err != nil { + t.Fatalf("could not create directory: %v", err) + } + defer os.RemoveAll(dir) + + // Note: this is a regular go build here, without "-l -N". The + // test is intended to verify DWARF that is only generated when the + // inliner is active. 
+ f := gobuild(t, dir, prog, true) + + d, err := f.DWARF() + if err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + // The inlined subroutines we expect to visit + expectedInl := []string{"main.cand"} + + rdr := d.Reader() + ex := examiner{} + if err := ex.populate(rdr); err != nil { + t.Fatalf("error reading DWARF: %v", err) + } + + // Locate the main.main DIE + mains := ex.Named("main.main") + if len(mains) == 0 { + t.Fatalf("unable to locate DIE for main.main") + } + if len(mains) != 1 { + t.Fatalf("more than one main.main DIE") + } + maindie := mains[0] + + // Vet the main.main DIE + if maindie.Tag != dwarf.TagSubprogram { + t.Fatalf("unexpected tag %v on main.main DIE", maindie.Tag) + } + + // Walk main's children and pick out the inlined subroutines + mainIdx := ex.idxFromOffset(maindie.Offset) + childDies := ex.Children(mainIdx) + exCount := 0 + for _, child := range childDies { + if child.Tag == dwarf.TagInlinedSubroutine { + // Found an inlined subroutine, locate abstract origin. + ooff, originOK := child.Val(dwarf.AttrAbstractOrigin).(dwarf.Offset) + if !originOK { + t.Fatalf("no abstract origin attr for inlined subroutine at offset %v", child.Offset) + } + originDIE := ex.entryFromOffset(ooff) + if originDIE == nil { + t.Fatalf("can't locate origin DIE at off %v", ooff) + } + + if exCount >= len(expectedInl) { + t.Fatalf("too many inlined subroutines found in main.main") + } + + // Name should check out. 
+ expected := expectedInl[exCount] + if name, ok := originDIE.Val(dwarf.AttrName).(string); ok { + if name != expected { + t.Fatalf("expected inlined routine %s got %s", name, expected) + } + } + exCount++ + } + } + if exCount != len(expectedInl) { + t.Fatalf("not enough inlined subroutines found in main.main") + } +} diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go index 0fc947fec29..d56a2359d38 100644 --- a/src/cmd/link/internal/ld/elf.go +++ b/src/cmd/link/internal/ld/elf.go @@ -7,6 +7,7 @@ package ld import ( "cmd/internal/objabi" "cmd/internal/sys" + "cmd/link/internal/sym" "crypto/sha1" "encoding/binary" "encoding/hex" @@ -329,438 +330,6 @@ const ( * Relocation types. */ const ( - R_X86_64_NONE = 0 - R_X86_64_64 = 1 - R_X86_64_PC32 = 2 - R_X86_64_GOT32 = 3 - R_X86_64_PLT32 = 4 - R_X86_64_COPY = 5 - R_X86_64_GLOB_DAT = 6 - R_X86_64_JMP_SLOT = 7 - R_X86_64_RELATIVE = 8 - R_X86_64_GOTPCREL = 9 - R_X86_64_32 = 10 - R_X86_64_32S = 11 - R_X86_64_16 = 12 - R_X86_64_PC16 = 13 - R_X86_64_8 = 14 - R_X86_64_PC8 = 15 - R_X86_64_DTPMOD64 = 16 - R_X86_64_DTPOFF64 = 17 - R_X86_64_TPOFF64 = 18 - R_X86_64_TLSGD = 19 - R_X86_64_TLSLD = 20 - R_X86_64_DTPOFF32 = 21 - R_X86_64_GOTTPOFF = 22 - R_X86_64_TPOFF32 = 23 - R_X86_64_PC64 = 24 - R_X86_64_GOTOFF64 = 25 - R_X86_64_GOTPC32 = 26 - R_X86_64_GOT64 = 27 - R_X86_64_GOTPCREL64 = 28 - R_X86_64_GOTPC64 = 29 - R_X86_64_GOTPLT64 = 30 - R_X86_64_PLTOFF64 = 31 - R_X86_64_SIZE32 = 32 - R_X86_64_SIZE64 = 33 - R_X86_64_GOTPC32_TLSDEC = 34 - R_X86_64_TLSDESC_CALL = 35 - R_X86_64_TLSDESC = 36 - R_X86_64_IRELATIVE = 37 - R_X86_64_PC32_BND = 40 - R_X86_64_GOTPCRELX = 41 - R_X86_64_REX_GOTPCRELX = 42 - - R_AARCH64_ABS64 = 257 - R_AARCH64_ABS32 = 258 - R_AARCH64_CALL26 = 283 - R_AARCH64_ADR_PREL_PG_HI21 = 275 - R_AARCH64_ADD_ABS_LO12_NC = 277 - R_AARCH64_LDST8_ABS_LO12_NC = 278 - R_AARCH64_LDST16_ABS_LO12_NC = 284 - R_AARCH64_LDST32_ABS_LO12_NC = 285 - R_AARCH64_LDST64_ABS_LO12_NC = 286 - R_AARCH64_ADR_GOT_PAGE = 
311 - R_AARCH64_LD64_GOT_LO12_NC = 312 - R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 = 541 - R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC = 542 - R_AARCH64_TLSLE_MOVW_TPREL_G0 = 547 - - R_ALPHA_NONE = 0 - R_ALPHA_REFLONG = 1 - R_ALPHA_REFQUAD = 2 - R_ALPHA_GPREL32 = 3 - R_ALPHA_LITERAL = 4 - R_ALPHA_LITUSE = 5 - R_ALPHA_GPDISP = 6 - R_ALPHA_BRADDR = 7 - R_ALPHA_HINT = 8 - R_ALPHA_SREL16 = 9 - R_ALPHA_SREL32 = 10 - R_ALPHA_SREL64 = 11 - R_ALPHA_OP_PUSH = 12 - R_ALPHA_OP_STORE = 13 - R_ALPHA_OP_PSUB = 14 - R_ALPHA_OP_PRSHIFT = 15 - R_ALPHA_GPVALUE = 16 - R_ALPHA_GPRELHIGH = 17 - R_ALPHA_GPRELLOW = 18 - R_ALPHA_IMMED_GP_16 = 19 - R_ALPHA_IMMED_GP_HI32 = 20 - R_ALPHA_IMMED_SCN_HI32 = 21 - R_ALPHA_IMMED_BR_HI32 = 22 - R_ALPHA_IMMED_LO32 = 23 - R_ALPHA_COPY = 24 - R_ALPHA_GLOB_DAT = 25 - R_ALPHA_JMP_SLOT = 26 - R_ALPHA_RELATIVE = 27 - - R_ARM_NONE = 0 - R_ARM_PC24 = 1 - R_ARM_ABS32 = 2 - R_ARM_REL32 = 3 - R_ARM_PC13 = 4 - R_ARM_ABS16 = 5 - R_ARM_ABS12 = 6 - R_ARM_THM_ABS5 = 7 - R_ARM_ABS8 = 8 - R_ARM_SBREL32 = 9 - R_ARM_THM_PC22 = 10 - R_ARM_THM_PC8 = 11 - R_ARM_AMP_VCALL9 = 12 - R_ARM_SWI24 = 13 - R_ARM_THM_SWI8 = 14 - R_ARM_XPC25 = 15 - R_ARM_THM_XPC22 = 16 - R_ARM_COPY = 20 - R_ARM_GLOB_DAT = 21 - R_ARM_JUMP_SLOT = 22 - R_ARM_RELATIVE = 23 - R_ARM_GOTOFF = 24 - R_ARM_GOTPC = 25 - R_ARM_GOT32 = 26 - R_ARM_PLT32 = 27 - R_ARM_CALL = 28 - R_ARM_JUMP24 = 29 - R_ARM_V4BX = 40 - R_ARM_GOT_PREL = 96 - R_ARM_GNU_VTENTRY = 100 - R_ARM_GNU_VTINHERIT = 101 - R_ARM_TLS_IE32 = 107 - R_ARM_TLS_LE32 = 108 - R_ARM_RSBREL32 = 250 - R_ARM_THM_RPC22 = 251 - R_ARM_RREL32 = 252 - R_ARM_RABS32 = 253 - R_ARM_RPC24 = 254 - R_ARM_RBASE = 255 - - R_386_NONE = 0 - R_386_32 = 1 - R_386_PC32 = 2 - R_386_GOT32 = 3 - R_386_PLT32 = 4 - R_386_COPY = 5 - R_386_GLOB_DAT = 6 - R_386_JMP_SLOT = 7 - R_386_RELATIVE = 8 - R_386_GOTOFF = 9 - R_386_GOTPC = 10 - R_386_TLS_TPOFF = 14 - R_386_TLS_IE = 15 - R_386_TLS_GOTIE = 16 - R_386_TLS_LE = 17 - R_386_TLS_GD = 18 - R_386_TLS_LDM = 19 - R_386_TLS_GD_32 = 24 - 
R_386_TLS_GD_PUSH = 25 - R_386_TLS_GD_CALL = 26 - R_386_TLS_GD_POP = 27 - R_386_TLS_LDM_32 = 28 - R_386_TLS_LDM_PUSH = 29 - R_386_TLS_LDM_CALL = 30 - R_386_TLS_LDM_POP = 31 - R_386_TLS_LDO_32 = 32 - R_386_TLS_IE_32 = 33 - R_386_TLS_LE_32 = 34 - R_386_TLS_DTPMOD32 = 35 - R_386_TLS_DTPOFF32 = 36 - R_386_TLS_TPOFF32 = 37 - R_386_TLS_GOTDESC = 39 - R_386_TLS_DESC_CALL = 40 - R_386_TLS_DESC = 41 - R_386_IRELATIVE = 42 - R_386_GOT32X = 43 - - R_MIPS_NONE = 0 - R_MIPS_16 = 1 - R_MIPS_32 = 2 - R_MIPS_REL32 = 3 - R_MIPS_26 = 4 - R_MIPS_HI16 = 5 - R_MIPS_LO16 = 6 - R_MIPS_GPREL16 = 7 - R_MIPS_LITERAL = 8 - R_MIPS_GOT16 = 9 - R_MIPS_PC16 = 10 - R_MIPS_CALL16 = 11 - R_MIPS_GPREL32 = 12 - R_MIPS_SHIFT5 = 16 - R_MIPS_SHIFT6 = 17 - R_MIPS_64 = 18 - R_MIPS_GOT_DISP = 19 - R_MIPS_GOT_PAGE = 20 - R_MIPS_GOT_OFST = 21 - R_MIPS_GOT_HI16 = 22 - R_MIPS_GOT_LO16 = 23 - R_MIPS_SUB = 24 - R_MIPS_INSERT_A = 25 - R_MIPS_INSERT_B = 26 - R_MIPS_DELETE = 27 - R_MIPS_HIGHER = 28 - R_MIPS_HIGHEST = 29 - R_MIPS_CALL_HI16 = 30 - R_MIPS_CALL_LO16 = 31 - R_MIPS_SCN_DISP = 32 - R_MIPS_REL16 = 33 - R_MIPS_ADD_IMMEDIATE = 34 - R_MIPS_PJUMP = 35 - R_MIPS_RELGOT = 36 - R_MIPS_JALR = 37 - R_MIPS_TLS_DTPMOD32 = 38 - R_MIPS_TLS_DTPREL32 = 39 - R_MIPS_TLS_DTPMOD64 = 40 - R_MIPS_TLS_DTPREL64 = 41 - R_MIPS_TLS_GD = 42 - R_MIPS_TLS_LDM = 43 - R_MIPS_TLS_DTPREL_HI16 = 44 - R_MIPS_TLS_DTPREL_LO16 = 45 - R_MIPS_TLS_GOTTPREL = 46 - R_MIPS_TLS_TPREL32 = 47 - R_MIPS_TLS_TPREL64 = 48 - R_MIPS_TLS_TPREL_HI16 = 49 - R_MIPS_TLS_TPREL_LO16 = 50 - - R_PPC_NONE = 0 - R_PPC_ADDR32 = 1 - R_PPC_ADDR24 = 2 - R_PPC_ADDR16 = 3 - R_PPC_ADDR16_LO = 4 - R_PPC_ADDR16_HI = 5 - R_PPC_ADDR16_HA = 6 - R_PPC_ADDR14 = 7 - R_PPC_ADDR14_BRTAKEN = 8 - R_PPC_ADDR14_BRNTAKEN = 9 - R_PPC_REL24 = 10 - R_PPC_REL14 = 11 - R_PPC_REL14_BRTAKEN = 12 - R_PPC_REL14_BRNTAKEN = 13 - R_PPC_GOT16 = 14 - R_PPC_GOT16_LO = 15 - R_PPC_GOT16_HI = 16 - R_PPC_GOT16_HA = 17 - R_PPC_PLTREL24 = 18 - R_PPC_COPY = 19 - R_PPC_GLOB_DAT = 20 - R_PPC_JMP_SLOT = 21 - 
R_PPC_RELATIVE = 22 - R_PPC_LOCAL24PC = 23 - R_PPC_UADDR32 = 24 - R_PPC_UADDR16 = 25 - R_PPC_REL32 = 26 - R_PPC_PLT32 = 27 - R_PPC_PLTREL32 = 28 - R_PPC_PLT16_LO = 29 - R_PPC_PLT16_HI = 30 - R_PPC_PLT16_HA = 31 - R_PPC_SDAREL16 = 32 - R_PPC_SECTOFF = 33 - R_PPC_SECTOFF_LO = 34 - R_PPC_SECTOFF_HI = 35 - R_PPC_SECTOFF_HA = 36 - R_PPC_TLS = 67 - R_PPC_DTPMOD32 = 68 - R_PPC_TPREL16 = 69 - R_PPC_TPREL16_LO = 70 - R_PPC_TPREL16_HI = 71 - R_PPC_TPREL16_HA = 72 - R_PPC_TPREL32 = 73 - R_PPC_DTPREL16 = 74 - R_PPC_DTPREL16_LO = 75 - R_PPC_DTPREL16_HI = 76 - R_PPC_DTPREL16_HA = 77 - R_PPC_DTPREL32 = 78 - R_PPC_GOT_TLSGD16 = 79 - R_PPC_GOT_TLSGD16_LO = 80 - R_PPC_GOT_TLSGD16_HI = 81 - R_PPC_GOT_TLSGD16_HA = 82 - R_PPC_GOT_TLSLD16 = 83 - R_PPC_GOT_TLSLD16_LO = 84 - R_PPC_GOT_TLSLD16_HI = 85 - R_PPC_GOT_TLSLD16_HA = 86 - R_PPC_GOT_TPREL16 = 87 - R_PPC_GOT_TPREL16_LO = 88 - R_PPC_GOT_TPREL16_HI = 89 - R_PPC_GOT_TPREL16_HA = 90 - R_PPC_EMB_NADDR32 = 101 - R_PPC_EMB_NADDR16 = 102 - R_PPC_EMB_NADDR16_LO = 103 - R_PPC_EMB_NADDR16_HI = 104 - R_PPC_EMB_NADDR16_HA = 105 - R_PPC_EMB_SDAI16 = 106 - R_PPC_EMB_SDA2I16 = 107 - R_PPC_EMB_SDA2REL = 108 - R_PPC_EMB_SDA21 = 109 - R_PPC_EMB_MRKREF = 110 - R_PPC_EMB_RELSEC16 = 111 - R_PPC_EMB_RELST_LO = 112 - R_PPC_EMB_RELST_HI = 113 - R_PPC_EMB_RELST_HA = 114 - R_PPC_EMB_BIT_FLD = 115 - R_PPC_EMB_RELSDA = 116 - - R_PPC64_ADDR32 = R_PPC_ADDR32 - R_PPC64_ADDR16_LO = R_PPC_ADDR16_LO - R_PPC64_ADDR16_HA = R_PPC_ADDR16_HA - R_PPC64_REL24 = R_PPC_REL24 - R_PPC64_GOT16_HA = R_PPC_GOT16_HA - R_PPC64_JMP_SLOT = R_PPC_JMP_SLOT - R_PPC64_TPREL16 = R_PPC_TPREL16 - R_PPC64_ADDR64 = 38 - R_PPC64_TOC16 = 47 - R_PPC64_TOC16_LO = 48 - R_PPC64_TOC16_HI = 49 - R_PPC64_TOC16_HA = 50 - R_PPC64_ADDR16_LO_DS = 57 - R_PPC64_GOT16_LO_DS = 59 - R_PPC64_TOC16_DS = 63 - R_PPC64_TOC16_LO_DS = 64 - R_PPC64_TLS = 67 - R_PPC64_GOT_TPREL16_LO_DS = 88 - R_PPC64_GOT_TPREL16_HA = 90 - R_PPC64_REL16_LO = 250 - R_PPC64_REL16_HI = 251 - R_PPC64_REL16_HA = 252 - - R_SPARC_NONE = 0 - 
R_SPARC_8 = 1 - R_SPARC_16 = 2 - R_SPARC_32 = 3 - R_SPARC_DISP8 = 4 - R_SPARC_DISP16 = 5 - R_SPARC_DISP32 = 6 - R_SPARC_WDISP30 = 7 - R_SPARC_WDISP22 = 8 - R_SPARC_HI22 = 9 - R_SPARC_22 = 10 - R_SPARC_13 = 11 - R_SPARC_LO10 = 12 - R_SPARC_GOT10 = 13 - R_SPARC_GOT13 = 14 - R_SPARC_GOT22 = 15 - R_SPARC_PC10 = 16 - R_SPARC_PC22 = 17 - R_SPARC_WPLT30 = 18 - R_SPARC_COPY = 19 - R_SPARC_GLOB_DAT = 20 - R_SPARC_JMP_SLOT = 21 - R_SPARC_RELATIVE = 22 - R_SPARC_UA32 = 23 - R_SPARC_PLT32 = 24 - R_SPARC_HIPLT22 = 25 - R_SPARC_LOPLT10 = 26 - R_SPARC_PCPLT32 = 27 - R_SPARC_PCPLT22 = 28 - R_SPARC_PCPLT10 = 29 - R_SPARC_10 = 30 - R_SPARC_11 = 31 - R_SPARC_64 = 32 - R_SPARC_OLO10 = 33 - R_SPARC_HH22 = 34 - R_SPARC_HM10 = 35 - R_SPARC_LM22 = 36 - R_SPARC_PC_HH22 = 37 - R_SPARC_PC_HM10 = 38 - R_SPARC_PC_LM22 = 39 - R_SPARC_WDISP16 = 40 - R_SPARC_WDISP19 = 41 - R_SPARC_GLOB_JMP = 42 - R_SPARC_7 = 43 - R_SPARC_5 = 44 - R_SPARC_6 = 45 - R_SPARC_DISP64 = 46 - R_SPARC_PLT64 = 47 - R_SPARC_HIX22 = 48 - R_SPARC_LOX10 = 49 - R_SPARC_H44 = 50 - R_SPARC_M44 = 51 - R_SPARC_L44 = 52 - R_SPARC_REGISTER = 53 - R_SPARC_UA64 = 54 - R_SPARC_UA16 = 55 - - R_390_NONE = 0 - R_390_8 = 1 - R_390_12 = 2 - R_390_16 = 3 - R_390_32 = 4 - R_390_PC32 = 5 - R_390_GOT12 = 6 - R_390_GOT32 = 7 - R_390_PLT32 = 8 - R_390_COPY = 9 - R_390_GLOB_DAT = 10 - R_390_JMP_SLOT = 11 - R_390_RELATIVE = 12 - R_390_GOTOFF = 13 - R_390_GOTPC = 14 - R_390_GOT16 = 15 - R_390_PC16 = 16 - R_390_PC16DBL = 17 - R_390_PLT16DBL = 18 - R_390_PC32DBL = 19 - R_390_PLT32DBL = 20 - R_390_GOTPCDBL = 21 - R_390_64 = 22 - R_390_PC64 = 23 - R_390_GOT64 = 24 - R_390_PLT64 = 25 - R_390_GOTENT = 26 - R_390_GOTOFF16 = 27 - R_390_GOTOFF64 = 28 - R_390_GOTPLT12 = 29 - R_390_GOTPLT16 = 30 - R_390_GOTPLT32 = 31 - R_390_GOTPLT64 = 32 - R_390_GOTPLTENT = 33 - R_390_GOTPLTOFF16 = 34 - R_390_GOTPLTOFF32 = 35 - R_390_GOTPLTOFF64 = 36 - R_390_TLS_LOAD = 37 - R_390_TLS_GDCALL = 38 - R_390_TLS_LDCALL = 39 - R_390_TLS_GD32 = 40 - R_390_TLS_GD64 = 41 - 
R_390_TLS_GOTIE12 = 42 - R_390_TLS_GOTIE32 = 43 - R_390_TLS_GOTIE64 = 44 - R_390_TLS_LDM32 = 45 - R_390_TLS_LDM64 = 46 - R_390_TLS_IE32 = 47 - R_390_TLS_IE64 = 48 - R_390_TLS_IEENT = 49 - R_390_TLS_LE32 = 50 - R_390_TLS_LE64 = 51 - R_390_TLS_LDO32 = 52 - R_390_TLS_LDO64 = 53 - R_390_TLS_DTPMOD = 54 - R_390_TLS_DTPOFF = 55 - R_390_TLS_TPOFF = 56 - R_390_20 = 57 - R_390_GOT20 = 58 - R_390_GOTPLT20 = 59 - R_390_TLS_GOTIE20 = 60 - ARM_MAGIC_TRAMP_NUMBER = 0x5c000003 ) @@ -809,7 +378,6 @@ type ElfShdr struct { addralign uint64 entsize uint64 shnum int - secsym *Symbol } /* @@ -885,9 +453,7 @@ const ( ) var ( - Iself bool - - Nelfsym int = 1 + Nelfsym = 1 elf64 bool // Either ".rel" or ".rela" depending on which type of relocation the @@ -917,15 +483,15 @@ var buildinfo []byte we write section and prog headers. */ func Elfinit(ctxt *Link) { - Iself = true + ctxt.IsELF = true - if SysArch.InFamily(sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.S390X) { + if ctxt.Arch.InFamily(sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.S390X) { elfRelType = ".rela" } else { elfRelType = ".rel" } - switch SysArch.Family { + switch ctxt.Arch.Family { // 64-bit architectures case sys.PPC64, sys.S390X: if ctxt.Arch.ByteOrder == binary.BigEndian { @@ -935,7 +501,7 @@ func Elfinit(ctxt *Link) { } fallthrough case sys.AMD64, sys.ARM64, sys.MIPS64: - if SysArch.Family == sys.MIPS64 { + if ctxt.Arch.Family == sys.MIPS64 { ehdr.flags = 0x20000004 /* MIPS 3 CPIC */ } elf64 = true @@ -948,9 +514,9 @@ func Elfinit(ctxt *Link) { // 32-bit architectures case sys.ARM, sys.MIPS: - if SysArch.Family == sys.ARM { + if ctxt.Arch.Family == sys.ARM { // we use EABI on linux/arm, freebsd/arm, netbsd/arm. 
- if Headtype == objabi.Hlinux || Headtype == objabi.Hfreebsd || Headtype == objabi.Hnetbsd { + if ctxt.HeadType == objabi.Hlinux || ctxt.HeadType == objabi.Hfreebsd || ctxt.HeadType == objabi.Hnetbsd { // We set a value here that makes no indication of which // float ABI the object uses, because this is information // used by the dynamic linker to compare executables and @@ -961,7 +527,7 @@ func Elfinit(ctxt *Link) { // appropriate. ehdr.flags = 0x5000002 // has entry point, Version5 EABI } - } else if SysArch.Family == sys.MIPS { + } else if ctxt.Arch.Family == sys.MIPS { ehdr.flags = 0x50001004 /* MIPS 32 CPIC O32*/ } fallthrough @@ -990,77 +556,77 @@ func fixElfPhdr(e *ElfPhdr) { e.memsz += uint64(frag) } -func elf64phdr(e *ElfPhdr) { +func elf64phdr(out *OutBuf, e *ElfPhdr) { if e.type_ == PT_LOAD { fixElfPhdr(e) } - Thearch.Lput(e.type_) - Thearch.Lput(e.flags) - Thearch.Vput(e.off) - Thearch.Vput(e.vaddr) - Thearch.Vput(e.paddr) - Thearch.Vput(e.filesz) - Thearch.Vput(e.memsz) - Thearch.Vput(e.align) + out.Write32(e.type_) + out.Write32(e.flags) + out.Write64(e.off) + out.Write64(e.vaddr) + out.Write64(e.paddr) + out.Write64(e.filesz) + out.Write64(e.memsz) + out.Write64(e.align) } -func elf32phdr(e *ElfPhdr) { +func elf32phdr(out *OutBuf, e *ElfPhdr) { if e.type_ == PT_LOAD { fixElfPhdr(e) } - Thearch.Lput(e.type_) - Thearch.Lput(uint32(e.off)) - Thearch.Lput(uint32(e.vaddr)) - Thearch.Lput(uint32(e.paddr)) - Thearch.Lput(uint32(e.filesz)) - Thearch.Lput(uint32(e.memsz)) - Thearch.Lput(e.flags) - Thearch.Lput(uint32(e.align)) + out.Write32(e.type_) + out.Write32(uint32(e.off)) + out.Write32(uint32(e.vaddr)) + out.Write32(uint32(e.paddr)) + out.Write32(uint32(e.filesz)) + out.Write32(uint32(e.memsz)) + out.Write32(e.flags) + out.Write32(uint32(e.align)) } -func elf64shdr(e *ElfShdr) { - Thearch.Lput(e.name) - Thearch.Lput(e.type_) - Thearch.Vput(e.flags) - Thearch.Vput(e.addr) - Thearch.Vput(e.off) - Thearch.Vput(e.size) - Thearch.Lput(e.link) - 
Thearch.Lput(e.info) - Thearch.Vput(e.addralign) - Thearch.Vput(e.entsize) +func elf64shdr(out *OutBuf, e *ElfShdr) { + out.Write32(e.name) + out.Write32(e.type_) + out.Write64(e.flags) + out.Write64(e.addr) + out.Write64(e.off) + out.Write64(e.size) + out.Write32(e.link) + out.Write32(e.info) + out.Write64(e.addralign) + out.Write64(e.entsize) } -func elf32shdr(e *ElfShdr) { - Thearch.Lput(e.name) - Thearch.Lput(e.type_) - Thearch.Lput(uint32(e.flags)) - Thearch.Lput(uint32(e.addr)) - Thearch.Lput(uint32(e.off)) - Thearch.Lput(uint32(e.size)) - Thearch.Lput(e.link) - Thearch.Lput(e.info) - Thearch.Lput(uint32(e.addralign)) - Thearch.Lput(uint32(e.entsize)) +func elf32shdr(out *OutBuf, e *ElfShdr) { + out.Write32(e.name) + out.Write32(e.type_) + out.Write32(uint32(e.flags)) + out.Write32(uint32(e.addr)) + out.Write32(uint32(e.off)) + out.Write32(uint32(e.size)) + out.Write32(e.link) + out.Write32(e.info) + out.Write32(uint32(e.addralign)) + out.Write32(uint32(e.entsize)) } -func elfwriteshdrs() uint32 { +func elfwriteshdrs(out *OutBuf) uint32 { if elf64 { for i := 0; i < int(ehdr.shnum); i++ { - elf64shdr(shdr[i]) + elf64shdr(out, shdr[i]) } return uint32(ehdr.shnum) * ELF64SHDRSIZE } for i := 0; i < int(ehdr.shnum); i++ { - elf32shdr(shdr[i]) + elf32shdr(out, shdr[i]) } return uint32(ehdr.shnum) * ELF32SHDRSIZE } -func elfsetstring(s *Symbol, str string, off int) { +func elfsetstring(s *sym.Symbol, str string, off int) { if nelfstr >= len(elfstr) { Errorf(s, "too many elf strings") errorexit() @@ -1071,16 +637,16 @@ func elfsetstring(s *Symbol, str string, off int) { nelfstr++ } -func elfwritephdrs() uint32 { +func elfwritephdrs(out *OutBuf) uint32 { if elf64 { for i := 0; i < int(ehdr.phnum); i++ { - elf64phdr(phdr[i]) + elf64phdr(out, phdr[i]) } return uint32(ehdr.phnum) * ELF64PHDRSIZE } for i := 0; i < int(ehdr.phnum); i++ { - elf32phdr(phdr[i]) + elf32phdr(out, phdr[i]) } return uint32(ehdr.phnum) * ELF32PHDRSIZE } @@ -1119,51 +685,47 @@ func getElfEhdr() 
*ElfEhdr { return &ehdr } -func elf64writehdr() uint32 { - for i := 0; i < EI_NIDENT; i++ { - Cput(ehdr.ident[i]) - } - Thearch.Wput(ehdr.type_) - Thearch.Wput(ehdr.machine) - Thearch.Lput(ehdr.version) - Thearch.Vput(ehdr.entry) - Thearch.Vput(ehdr.phoff) - Thearch.Vput(ehdr.shoff) - Thearch.Lput(ehdr.flags) - Thearch.Wput(ehdr.ehsize) - Thearch.Wput(ehdr.phentsize) - Thearch.Wput(ehdr.phnum) - Thearch.Wput(ehdr.shentsize) - Thearch.Wput(ehdr.shnum) - Thearch.Wput(ehdr.shstrndx) +func elf64writehdr(out *OutBuf) uint32 { + out.Write(ehdr.ident[:]) + out.Write16(ehdr.type_) + out.Write16(ehdr.machine) + out.Write32(ehdr.version) + out.Write64(ehdr.entry) + out.Write64(ehdr.phoff) + out.Write64(ehdr.shoff) + out.Write32(ehdr.flags) + out.Write16(ehdr.ehsize) + out.Write16(ehdr.phentsize) + out.Write16(ehdr.phnum) + out.Write16(ehdr.shentsize) + out.Write16(ehdr.shnum) + out.Write16(ehdr.shstrndx) return ELF64HDRSIZE } -func elf32writehdr() uint32 { - for i := 0; i < EI_NIDENT; i++ { - Cput(ehdr.ident[i]) - } - Thearch.Wput(ehdr.type_) - Thearch.Wput(ehdr.machine) - Thearch.Lput(ehdr.version) - Thearch.Lput(uint32(ehdr.entry)) - Thearch.Lput(uint32(ehdr.phoff)) - Thearch.Lput(uint32(ehdr.shoff)) - Thearch.Lput(ehdr.flags) - Thearch.Wput(ehdr.ehsize) - Thearch.Wput(ehdr.phentsize) - Thearch.Wput(ehdr.phnum) - Thearch.Wput(ehdr.shentsize) - Thearch.Wput(ehdr.shnum) - Thearch.Wput(ehdr.shstrndx) +func elf32writehdr(out *OutBuf) uint32 { + out.Write(ehdr.ident[:]) + out.Write16(ehdr.type_) + out.Write16(ehdr.machine) + out.Write32(ehdr.version) + out.Write32(uint32(ehdr.entry)) + out.Write32(uint32(ehdr.phoff)) + out.Write32(uint32(ehdr.shoff)) + out.Write32(ehdr.flags) + out.Write16(ehdr.ehsize) + out.Write16(ehdr.phentsize) + out.Write16(ehdr.phnum) + out.Write16(ehdr.shentsize) + out.Write16(ehdr.shnum) + out.Write16(ehdr.shstrndx) return ELF32HDRSIZE } -func elfwritehdr() uint32 { +func elfwritehdr(out *OutBuf) uint32 { if elf64 { - return elf64writehdr() + return 
elf64writehdr(out) } - return elf32writehdr() + return elf32writehdr(out) } /* Taken directly from the definition document for ELF64 */ @@ -1179,36 +741,36 @@ func elfhash(name string) uint32 { return h } -func Elfwritedynent(ctxt *Link, s *Symbol, tag int, val uint64) { +func Elfwritedynent(ctxt *Link, s *sym.Symbol, tag int, val uint64) { if elf64 { - Adduint64(ctxt, s, uint64(tag)) - Adduint64(ctxt, s, val) + s.AddUint64(ctxt.Arch, uint64(tag)) + s.AddUint64(ctxt.Arch, val) } else { - Adduint32(ctxt, s, uint32(tag)) - Adduint32(ctxt, s, uint32(val)) + s.AddUint32(ctxt.Arch, uint32(tag)) + s.AddUint32(ctxt.Arch, uint32(val)) } } -func elfwritedynentsym(ctxt *Link, s *Symbol, tag int, t *Symbol) { +func elfwritedynentsym(ctxt *Link, s *sym.Symbol, tag int, t *sym.Symbol) { Elfwritedynentsymplus(ctxt, s, tag, t, 0) } -func Elfwritedynentsymplus(ctxt *Link, s *Symbol, tag int, t *Symbol, add int64) { +func Elfwritedynentsymplus(ctxt *Link, s *sym.Symbol, tag int, t *sym.Symbol, add int64) { if elf64 { - Adduint64(ctxt, s, uint64(tag)) + s.AddUint64(ctxt.Arch, uint64(tag)) } else { - Adduint32(ctxt, s, uint32(tag)) + s.AddUint32(ctxt.Arch, uint32(tag)) } - Addaddrplus(ctxt, s, t, add) + s.AddAddrPlus(ctxt.Arch, t, add) } -func elfwritedynentsymsize(ctxt *Link, s *Symbol, tag int, t *Symbol) { +func elfwritedynentsymsize(ctxt *Link, s *sym.Symbol, tag int, t *sym.Symbol) { if elf64 { - Adduint64(ctxt, s, uint64(tag)) + s.AddUint64(ctxt.Arch, uint64(tag)) } else { - Adduint32(ctxt, s, uint32(tag)) + s.AddUint32(ctxt.Arch, uint32(tag)) } - addsize(ctxt, s, t) + s.AddSize(ctxt.Arch, t) } func elfinterp(sh *ElfShdr, startva uint64, resoff uint64, p string) int { @@ -1221,11 +783,11 @@ func elfinterp(sh *ElfShdr, startva uint64, resoff uint64, p string) int { return n } -func elfwriteinterp() int { +func elfwriteinterp(out *OutBuf) int { sh := elfshname(".interp") - Cseek(int64(sh.off)) - coutbuf.WriteString(interp) - Cput(0) + out.SeekSet(int64(sh.off)) + 
out.WriteString(interp) + out.Write8(0) return int(sh.size) } @@ -1244,15 +806,15 @@ func elfnote(sh *ElfShdr, startva uint64, resoff uint64, sz int, alloc bool) int return int(n) } -func elfwritenotehdr(str string, namesz uint32, descsz uint32, tag uint32) *ElfShdr { +func elfwritenotehdr(out *OutBuf, str string, namesz uint32, descsz uint32, tag uint32) *ElfShdr { sh := elfshname(str) // Write Elf_Note header. - Cseek(int64(sh.off)) + out.SeekSet(int64(sh.off)) - Thearch.Lput(namesz) - Thearch.Lput(descsz) - Thearch.Lput(tag) + out.Write32(namesz) + out.Write32(descsz) + out.Write32(tag) return sh } @@ -1272,19 +834,18 @@ func elfnetbsdsig(sh *ElfShdr, startva uint64, resoff uint64) int { return elfnote(sh, startva, resoff, n, true) } -func elfwritenetbsdsig() int { +func elfwritenetbsdsig(out *OutBuf) int { // Write Elf_Note header. - sh := elfwritenotehdr(".note.netbsd.ident", ELF_NOTE_NETBSD_NAMESZ, ELF_NOTE_NETBSD_DESCSZ, ELF_NOTE_NETBSD_TAG) + sh := elfwritenotehdr(out, ".note.netbsd.ident", ELF_NOTE_NETBSD_NAMESZ, ELF_NOTE_NETBSD_DESCSZ, ELF_NOTE_NETBSD_TAG) if sh == nil { return 0 } // Followed by NetBSD string and version. - Cwrite(ELF_NOTE_NETBSD_NAME) - Cput(0) - - Thearch.Lput(ELF_NOTE_NETBSD_VERSION) + out.Write(ELF_NOTE_NETBSD_NAME) + out.Write8(0) + out.Write32(ELF_NOTE_NETBSD_VERSION) return int(sh.size) } @@ -1304,18 +865,18 @@ func elfopenbsdsig(sh *ElfShdr, startva uint64, resoff uint64) int { return elfnote(sh, startva, resoff, n, true) } -func elfwriteopenbsdsig() int { +func elfwriteopenbsdsig(out *OutBuf) int { // Write Elf_Note header. - sh := elfwritenotehdr(".note.openbsd.ident", ELF_NOTE_OPENBSD_NAMESZ, ELF_NOTE_OPENBSD_DESCSZ, ELF_NOTE_OPENBSD_TAG) + sh := elfwritenotehdr(out, ".note.openbsd.ident", ELF_NOTE_OPENBSD_NAMESZ, ELF_NOTE_OPENBSD_DESCSZ, ELF_NOTE_OPENBSD_TAG) if sh == nil { return 0 } // Followed by OpenBSD string and version. 
- Cwrite(ELF_NOTE_OPENBSD_NAME) + out.Write(ELF_NOTE_OPENBSD_NAME) - Thearch.Lput(ELF_NOTE_OPENBSD_VERSION) + out.Write32(ELF_NOTE_OPENBSD_VERSION) return int(sh.size) } @@ -1365,30 +926,30 @@ func elfgobuildid(sh *ElfShdr, startva uint64, resoff uint64) int { return elfnote(sh, startva, resoff, n, true) } -func elfwritebuildinfo() int { - sh := elfwritenotehdr(".note.gnu.build-id", ELF_NOTE_BUILDINFO_NAMESZ, uint32(len(buildinfo)), ELF_NOTE_BUILDINFO_TAG) +func elfwritebuildinfo(out *OutBuf) int { + sh := elfwritenotehdr(out, ".note.gnu.build-id", ELF_NOTE_BUILDINFO_NAMESZ, uint32(len(buildinfo)), ELF_NOTE_BUILDINFO_TAG) if sh == nil { return 0 } - Cwrite(ELF_NOTE_BUILDINFO_NAME) - Cwrite(buildinfo) + out.Write(ELF_NOTE_BUILDINFO_NAME) + out.Write(buildinfo) var zero = make([]byte, 4) - Cwrite(zero[:int(Rnd(int64(len(buildinfo)), 4)-int64(len(buildinfo)))]) + out.Write(zero[:int(Rnd(int64(len(buildinfo)), 4)-int64(len(buildinfo)))]) return int(sh.size) } -func elfwritegobuildid() int { - sh := elfwritenotehdr(".note.go.buildid", uint32(len(ELF_NOTE_GO_NAME)), uint32(len(*flagBuildid)), ELF_NOTE_GOBUILDID_TAG) +func elfwritegobuildid(out *OutBuf) int { + sh := elfwritenotehdr(out, ".note.go.buildid", uint32(len(ELF_NOTE_GO_NAME)), uint32(len(*flagBuildid)), ELF_NOTE_GOBUILDID_TAG) if sh == nil { return 0 } - Cwrite(ELF_NOTE_GO_NAME) - Cwrite([]byte(*flagBuildid)) + out.Write(ELF_NOTE_GO_NAME) + out.Write([]byte(*flagBuildid)) var zero = make([]byte, 4) - Cwrite(zero[:int(Rnd(int64(len(*flagBuildid)), 4)-int64(len(*flagBuildid)))]) + out.Write(zero[:int(Rnd(int64(len(*flagBuildid)), 4)-int64(len(*flagBuildid)))]) return int(sh.size) } @@ -1445,14 +1006,14 @@ havelib: } func elfdynhash(ctxt *Link) { - if !Iself { + if !ctxt.IsELF { return } nsym := Nelfsym s := ctxt.Syms.Lookup(".hash", 0) - s.Type = SELFROSECT - s.Attr |= AttrReachable + s.Type = sym.SELFROSECT + s.Attr |= sym.AttrReachable i := nsym nbucket := 1 @@ -1466,7 +1027,6 @@ func elfdynhash(ctxt *Link) { 
chain := make([]uint32, nsym) buckets := make([]uint32, nbucket) - var b int for _, sy := range ctxt.Syms.Allsym { if sy.Dynid <= 0 { continue @@ -1479,29 +1039,29 @@ func elfdynhash(ctxt *Link) { name := sy.Extname hc := elfhash(name) - b = int(hc % uint32(nbucket)) + b := hc % uint32(nbucket) chain[sy.Dynid] = buckets[b] buckets[b] = uint32(sy.Dynid) } // s390x (ELF64) hash table entries are 8 bytes - if SysArch.Family == sys.S390X { - Adduint64(ctxt, s, uint64(nbucket)) - Adduint64(ctxt, s, uint64(nsym)) + if ctxt.Arch.Family == sys.S390X { + s.AddUint64(ctxt.Arch, uint64(nbucket)) + s.AddUint64(ctxt.Arch, uint64(nsym)) for i := 0; i < nbucket; i++ { - Adduint64(ctxt, s, uint64(buckets[i])) + s.AddUint64(ctxt.Arch, uint64(buckets[i])) } for i := 0; i < nsym; i++ { - Adduint64(ctxt, s, uint64(chain[i])) + s.AddUint64(ctxt.Arch, uint64(chain[i])) } } else { - Adduint32(ctxt, s, uint32(nbucket)) - Adduint32(ctxt, s, uint32(nsym)) + s.AddUint32(ctxt.Arch, uint32(nbucket)) + s.AddUint32(ctxt.Arch, uint32(nsym)) for i := 0; i < nbucket; i++ { - Adduint32(ctxt, s, buckets[i]) + s.AddUint32(ctxt.Arch, buckets[i]) } for i := 0; i < nsym; i++ { - Adduint32(ctxt, s, chain[i]) + s.AddUint32(ctxt.Arch, chain[i]) } } @@ -1511,39 +1071,37 @@ func elfdynhash(ctxt *Link) { s = ctxt.Syms.Lookup(".gnu.version_r", 0) i = 2 nfile := 0 - var j int - var x *Elfaux for l := needlib; l != nil; l = l.next { nfile++ // header - Adduint16(ctxt, s, 1) // table version - j = 0 - for x = l.aux; x != nil; x = x.next { + s.AddUint16(ctxt.Arch, 1) // table version + j := 0 + for x := l.aux; x != nil; x = x.next { j++ } - Adduint16(ctxt, s, uint16(j)) // aux count - Adduint32(ctxt, s, uint32(Addstring(dynstr, l.file))) // file string offset - Adduint32(ctxt, s, 16) // offset from header to first aux + s.AddUint16(ctxt.Arch, uint16(j)) // aux count + s.AddUint32(ctxt.Arch, uint32(Addstring(dynstr, l.file))) // file string offset + s.AddUint32(ctxt.Arch, 16) // offset from header to first aux if 
l.next != nil { - Adduint32(ctxt, s, 16+uint32(j)*16) // offset from this header to next + s.AddUint32(ctxt.Arch, 16+uint32(j)*16) // offset from this header to next } else { - Adduint32(ctxt, s, 0) + s.AddUint32(ctxt.Arch, 0) } - for x = l.aux; x != nil; x = x.next { + for x := l.aux; x != nil; x = x.next { x.num = i i++ // aux struct - Adduint32(ctxt, s, elfhash(x.vers)) // hash - Adduint16(ctxt, s, 0) // flags - Adduint16(ctxt, s, uint16(x.num)) // other - index we refer to this by - Adduint32(ctxt, s, uint32(Addstring(dynstr, x.vers))) // version string offset + s.AddUint32(ctxt.Arch, elfhash(x.vers)) // hash + s.AddUint16(ctxt.Arch, 0) // flags + s.AddUint16(ctxt.Arch, uint16(x.num)) // other - index we refer to this by + s.AddUint32(ctxt.Arch, uint32(Addstring(dynstr, x.vers))) // version string offset if x.next != nil { - Adduint32(ctxt, s, 16) // offset from this aux to next + s.AddUint32(ctxt.Arch, 16) // offset from this aux to next } else { - Adduint32(ctxt, s, 0) + s.AddUint32(ctxt.Arch, 0) } } } @@ -1553,11 +1111,11 @@ func elfdynhash(ctxt *Link) { for i := 0; i < nsym; i++ { if i == 0 { - Adduint16(ctxt, s, 0) // first entry - no symbol + s.AddUint16(ctxt.Arch, 0) // first entry - no symbol } else if need[i] == nil { - Adduint16(ctxt, s, 1) // global + s.AddUint16(ctxt.Arch, 1) // global } else { - Adduint16(ctxt, s, uint16(need[i].num)) + s.AddUint16(ctxt.Arch, uint16(need[i].num)) } } @@ -1583,7 +1141,7 @@ func elfdynhash(ctxt *Link) { Elfwritedynent(ctxt, s, DT_NULL, 0) } -func elfphload(seg *Segment) *ElfPhdr { +func elfphload(seg *sym.Segment) *ElfPhdr { ph := newElfPhdr() ph.type_ = PT_LOAD if seg.Rwx&4 != 0 { @@ -1605,7 +1163,7 @@ func elfphload(seg *Segment) *ElfPhdr { return ph } -func elfphrelro(seg *Segment) { +func elfphrelro(seg *sym.Segment) { ph := newElfPhdr() ph.type_ = PT_GNU_RELRO ph.vaddr = seg.Vaddr @@ -1617,24 +1175,19 @@ func elfphrelro(seg *Segment) { } func elfshname(name string) *ElfShdr { - var off int - var sh *ElfShdr - 
for i := 0; i < nelfstr; i++ { - if name == elfstr[i].s { - off = elfstr[i].off - for i = 0; i < int(ehdr.shnum); i++ { - sh = shdr[i] - if sh.name == uint32(off) { - return sh - } - } - - sh = newElfShdr(int64(off)) - return sh + if name != elfstr[i].s { + continue } + off := elfstr[i].off + for i = 0; i < int(ehdr.shnum); i++ { + sh := shdr[i] + if sh.name == uint32(off) { + return sh + } + } + return newElfShdr(int64(off)) } - Exitf("cannot find elf name %s", name) return nil } @@ -1642,14 +1195,10 @@ func elfshname(name string) *ElfShdr { // Create an ElfShdr for the section with name. // Create a duplicate if one already exists with that name func elfshnamedup(name string) *ElfShdr { - var off int - var sh *ElfShdr - for i := 0; i < nelfstr; i++ { if name == elfstr[i].s { - off = elfstr[i].off - sh = newElfShdr(int64(off)) - return sh + off := elfstr[i].off + return newElfShdr(int64(off)) } } @@ -1658,20 +1207,20 @@ func elfshnamedup(name string) *ElfShdr { return nil } -func elfshalloc(sect *Section) *ElfShdr { +func elfshalloc(sect *sym.Section) *ElfShdr { sh := elfshname(sect.Name) sect.Elfsect = sh return sh } -func elfshbits(sect *Section) *ElfShdr { +func elfshbits(linkmode LinkMode, sect *sym.Section) *ElfShdr { var sh *ElfShdr if sect.Name == ".text" { if sect.Elfsect == nil { sect.Elfsect = elfshnamedup(sect.Name) } - sh = sect.Elfsect + sh = sect.Elfsect.(*ElfShdr) } else { sh = elfshalloc(sect) } @@ -1679,7 +1228,7 @@ func elfshbits(sect *Section) *ElfShdr { // If this section has already been set up as a note, we assume type_ and // flags are already correct, but the other fields still need filling in. if sh.type_ == SHT_NOTE { - if Linkmode != LinkExternal { + if linkmode != LinkExternal { // TODO(mwhudson): the approach here will work OK when // linking internally for notes that we want to be included // in a loadable segment (e.g. 
the abihash note) but not for @@ -1718,7 +1267,7 @@ func elfshbits(sect *Section) *ElfShdr { sh.flags = 0 } - if Linkmode != LinkExternal { + if linkmode != LinkExternal { sh.addr = sect.Vaddr } sh.addralign = uint64(sect.Align) @@ -1730,7 +1279,7 @@ func elfshbits(sect *Section) *ElfShdr { return sh } -func elfshreloc(sect *Section) *ElfShdr { +func elfshreloc(arch *sys.Arch, sect *sym.Section) *ElfShdr { // If main section is SHT_NOBITS, nothing to relocate. // Also nothing to relocate in .shstrtab or notes. if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen { @@ -1739,7 +1288,7 @@ func elfshreloc(sect *Section) *ElfShdr { if sect.Name == ".shstrtab" || sect.Name == ".tbss" { return nil } - if sect.Elfsect.type_ == SHT_NOTE { + if sect.Elfsect.(*ElfShdr).type_ == SHT_NOTE { return nil } @@ -1755,25 +1304,25 @@ func elfshreloc(sect *Section) *ElfShdr { // its own .rela.text. if sect.Name == ".text" { - if sh.info != 0 && sh.info != uint32(sect.Elfsect.shnum) { + if sh.info != 0 && sh.info != uint32(sect.Elfsect.(*ElfShdr).shnum) { sh = elfshnamedup(elfRelType + sect.Name) } } sh.type_ = uint32(typ) - sh.entsize = uint64(SysArch.RegSize) * 2 + sh.entsize = uint64(arch.RegSize) * 2 if typ == SHT_RELA { - sh.entsize += uint64(SysArch.RegSize) + sh.entsize += uint64(arch.RegSize) } sh.link = uint32(elfshname(".symtab").shnum) - sh.info = uint32(sect.Elfsect.shnum) + sh.info = uint32(sect.Elfsect.(*ElfShdr).shnum) sh.off = sect.Reloff sh.size = sect.Rellen - sh.addralign = uint64(SysArch.RegSize) + sh.addralign = uint64(arch.RegSize) return sh } -func elfrelocsect(ctxt *Link, sect *Section, syms []*Symbol) { +func elfrelocsect(ctxt *Link, sect *sym.Section, syms []*sym.Symbol) { // If main section is SHT_NOBITS, nothing to relocate. // Also nothing to relocate in .shstrtab. 
if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen { @@ -1783,7 +1332,7 @@ func elfrelocsect(ctxt *Link, sect *Section, syms []*Symbol) { return } - sect.Reloff = uint64(coutbuf.Offset()) + sect.Reloff = uint64(ctxt.Out.Offset()) for i, s := range syms { if !s.Attr.Reachable() { continue @@ -1795,40 +1344,40 @@ func elfrelocsect(ctxt *Link, sect *Section, syms []*Symbol) { } eaddr := int32(sect.Vaddr + sect.Length) - for _, sym := range syms { - if !sym.Attr.Reachable() { + for _, s := range syms { + if !s.Attr.Reachable() { continue } - if sym.Value >= int64(eaddr) { + if s.Value >= int64(eaddr) { break } - for ri := 0; ri < len(sym.R); ri++ { - r := &sym.R[ri] - if r.Done != 0 { + for ri := 0; ri < len(s.R); ri++ { + r := &s.R[ri] + if r.Done { continue } if r.Xsym == nil { - Errorf(sym, "missing xsym in relocation") + Errorf(s, "missing xsym in relocation %#v %#v", r.Sym.Name, s) continue } if r.Xsym.ElfsymForReloc() == 0 { - Errorf(sym, "reloc %d to non-elf symbol %s (outer=%s) %d", r.Type, r.Sym.Name, r.Xsym.Name, r.Sym.Type) + Errorf(s, "reloc %d (%s) to non-elf symbol %s (outer=%s) %d (%s)", r.Type, sym.RelocName(ctxt.Arch, r.Type), r.Sym.Name, r.Xsym.Name, r.Sym.Type, r.Sym.Type) } if !r.Xsym.Attr.Reachable() { - Errorf(sym, "unreachable reloc %v target %v", r.Type, r.Xsym.Name) + Errorf(s, "unreachable reloc %d (%s) target %v", r.Type, sym.RelocName(ctxt.Arch, r.Type), r.Xsym.Name) } - if Thearch.Elfreloc1(ctxt, r, int64(uint64(sym.Value+int64(r.Off))-sect.Vaddr)) < 0 { - Errorf(sym, "unsupported obj reloc %d/%d to %s", r.Type, r.Siz, r.Sym.Name) + if !Thearch.Elfreloc1(ctxt, r, int64(uint64(s.Value+int64(r.Off))-sect.Vaddr)) { + Errorf(s, "unsupported obj reloc %d (%s)/%d to %s", r.Type, sym.RelocName(ctxt.Arch, r.Type), r.Siz, r.Sym.Name) } } } - sect.Rellen = uint64(coutbuf.Offset()) - sect.Reloff + sect.Rellen = uint64(ctxt.Out.Offset()) - sect.Reloff } func Elfemitreloc(ctxt *Link) { - for coutbuf.Offset()&7 != 0 { - Cput(0) + for ctxt.Out.Offset()&7 != 
0 { + ctxt.Out.Write8(0) } for _, sect := range Segtext.Sections { @@ -1855,14 +1404,14 @@ func Elfemitreloc(ctxt *Link) { func addgonote(ctxt *Link, sectionName string, tag uint32, desc []byte) { s := ctxt.Syms.Lookup(sectionName, 0) - s.Attr |= AttrReachable - s.Type = SELFROSECT + s.Attr |= sym.AttrReachable + s.Type = sym.SELFROSECT // namesz - Adduint32(ctxt, s, uint32(len(ELF_NOTE_GO_NAME))) + s.AddUint32(ctxt.Arch, uint32(len(ELF_NOTE_GO_NAME))) // descsz - Adduint32(ctxt, s, uint32(len(desc))) + s.AddUint32(ctxt.Arch, uint32(len(desc))) // tag - Adduint32(ctxt, s, tag) + s.AddUint32(ctxt.Arch, tag) // name + padding s.P = append(s.P, ELF_NOTE_GO_NAME...) for len(s.P)%4 != 0 { @@ -1874,18 +1423,19 @@ func addgonote(ctxt *Link, sectionName string, tag uint32, desc []byte) { s.P = append(s.P, 0) } s.Size = int64(len(s.P)) + s.Align = 4 } func (ctxt *Link) doelf() { - if !Iself { + if !ctxt.IsELF { return } /* predefine strings we need for section headers */ shstrtab := ctxt.Syms.Lookup(".shstrtab", 0) - shstrtab.Type = SELFROSECT - shstrtab.Attr |= AttrReachable + shstrtab.Type = sym.SELFROSECT + shstrtab.Attr |= sym.AttrReachable Addstring(shstrtab, "") Addstring(shstrtab, ".text") @@ -1897,13 +1447,13 @@ func (ctxt *Link) doelf() { // generate .tbss section for dynamic internal linker or external // linking, so that various binutils could correctly calculate // PT_TLS size. See https://golang.org/issue/5200. - if !*FlagD || Linkmode == LinkExternal { + if !*FlagD || ctxt.LinkMode == LinkExternal { Addstring(shstrtab, ".tbss") } - if Headtype == objabi.Hnetbsd { + if ctxt.HeadType == objabi.Hnetbsd { Addstring(shstrtab, ".note.netbsd.ident") } - if Headtype == objabi.Hopenbsd { + if ctxt.HeadType == objabi.Hopenbsd { Addstring(shstrtab, ".note.openbsd.ident") } if len(buildinfo) > 0 { @@ -1916,7 +1466,7 @@ func (ctxt *Link) doelf() { Addstring(shstrtab, ".rodata") // See the comment about data.rel.ro.FOO section names in data.go. 
relro_prefix := "" - if UseRelro() { + if ctxt.UseRelro() { Addstring(shstrtab, ".data.rel.ro") relro_prefix = ".data.rel.ro" } @@ -1925,7 +1475,7 @@ func (ctxt *Link) doelf() { Addstring(shstrtab, relro_prefix+".gosymtab") Addstring(shstrtab, relro_prefix+".gopclntab") - if Linkmode == LinkExternal { + if ctxt.LinkMode == LinkExternal { *FlagD = true Addstring(shstrtab, elfRelType+".text") @@ -1936,25 +1486,25 @@ func (ctxt *Link) doelf() { Addstring(shstrtab, elfRelType+relro_prefix+".gopclntab") Addstring(shstrtab, elfRelType+".noptrdata") Addstring(shstrtab, elfRelType+".data") - if UseRelro() { + if ctxt.UseRelro() { Addstring(shstrtab, elfRelType+".data.rel.ro") } // add a .note.GNU-stack section to mark the stack as non-executable Addstring(shstrtab, ".note.GNU-stack") - if Buildmode == BuildmodeShared { + if ctxt.BuildMode == BuildModeShared { Addstring(shstrtab, ".note.go.abihash") Addstring(shstrtab, ".note.go.pkg-list") Addstring(shstrtab, ".note.go.deps") } } - hasinitarr := *FlagLinkshared + hasinitarr := ctxt.linkShared /* shared library initializer */ - switch Buildmode { - case BuildmodeCArchive, BuildmodeCShared, BuildmodeShared, BuildmodePlugin: + switch ctxt.BuildMode { + case BuildModeCArchive, BuildModeCShared, BuildModeShared, BuildModePlugin: hasinitarr = true } @@ -1975,7 +1525,7 @@ func (ctxt *Link) doelf() { Addstring(shstrtab, ".interp") Addstring(shstrtab, ".hash") Addstring(shstrtab, ".got") - if SysArch.Family == sys.PPC64 { + if ctxt.Arch.Family == sys.PPC64 { Addstring(shstrtab, ".glink") } Addstring(shstrtab, ".got.plt") @@ -1992,8 +1542,8 @@ func (ctxt *Link) doelf() { /* dynamic symbol table - first entry all zeros */ s := ctxt.Syms.Lookup(".dynsym", 0) - s.Type = SELFROSECT - s.Attr |= AttrReachable + s.Type = sym.SELFROSECT + s.Attr |= sym.AttrReachable if elf64 { s.Size += ELF64SYMSIZE } else { @@ -2003,8 +1553,8 @@ func (ctxt *Link) doelf() { /* dynamic string table */ s = ctxt.Syms.Lookup(".dynstr", 0) - s.Type = SELFROSECT - 
s.Attr |= AttrReachable + s.Type = sym.SELFROSECT + s.Attr |= sym.AttrReachable if s.Size == 0 { Addstring(s, "") } @@ -2012,62 +1562,62 @@ func (ctxt *Link) doelf() { /* relocation table */ s = ctxt.Syms.Lookup(elfRelType, 0) - s.Attr |= AttrReachable - s.Type = SELFROSECT + s.Attr |= sym.AttrReachable + s.Type = sym.SELFROSECT /* global offset table */ s = ctxt.Syms.Lookup(".got", 0) - s.Attr |= AttrReachable - s.Type = SELFGOT // writable + s.Attr |= sym.AttrReachable + s.Type = sym.SELFGOT // writable /* ppc64 glink resolver */ - if SysArch.Family == sys.PPC64 { + if ctxt.Arch.Family == sys.PPC64 { s := ctxt.Syms.Lookup(".glink", 0) - s.Attr |= AttrReachable - s.Type = SELFRXSECT + s.Attr |= sym.AttrReachable + s.Type = sym.SELFRXSECT } /* hash */ s = ctxt.Syms.Lookup(".hash", 0) - s.Attr |= AttrReachable - s.Type = SELFROSECT + s.Attr |= sym.AttrReachable + s.Type = sym.SELFROSECT s = ctxt.Syms.Lookup(".got.plt", 0) - s.Attr |= AttrReachable - s.Type = SELFSECT // writable + s.Attr |= sym.AttrReachable + s.Type = sym.SELFSECT // writable s = ctxt.Syms.Lookup(".plt", 0) - s.Attr |= AttrReachable - if SysArch.Family == sys.PPC64 { + s.Attr |= sym.AttrReachable + if ctxt.Arch.Family == sys.PPC64 { // In the ppc64 ABI, .plt is a data section // written by the dynamic linker. 
- s.Type = SELFSECT + s.Type = sym.SELFSECT } else { - s.Type = SELFRXSECT + s.Type = sym.SELFRXSECT } Thearch.Elfsetupplt(ctxt) s = ctxt.Syms.Lookup(elfRelType+".plt", 0) - s.Attr |= AttrReachable - s.Type = SELFROSECT + s.Attr |= sym.AttrReachable + s.Type = sym.SELFROSECT s = ctxt.Syms.Lookup(".gnu.version", 0) - s.Attr |= AttrReachable - s.Type = SELFROSECT + s.Attr |= sym.AttrReachable + s.Type = sym.SELFROSECT s = ctxt.Syms.Lookup(".gnu.version_r", 0) - s.Attr |= AttrReachable - s.Type = SELFROSECT + s.Attr |= sym.AttrReachable + s.Type = sym.SELFROSECT /* define dynamic elf table */ s = ctxt.Syms.Lookup(".dynamic", 0) - s.Attr |= AttrReachable - s.Type = SELFSECT // writable + s.Attr |= sym.AttrReachable + s.Type = sym.SELFSECT // writable /* * .dynamic table @@ -2096,15 +1646,15 @@ func (ctxt *Link) doelf() { Elfwritedynent(ctxt, s, DT_RUNPATH, uint64(Addstring(dynstr, rpath.val))) } - if SysArch.Family == sys.PPC64 { + if ctxt.Arch.Family == sys.PPC64 { elfwritedynentsym(ctxt, s, DT_PLTGOT, ctxt.Syms.Lookup(".plt", 0)) - } else if SysArch.Family == sys.S390X { + } else if ctxt.Arch.Family == sys.S390X { elfwritedynentsym(ctxt, s, DT_PLTGOT, ctxt.Syms.Lookup(".got", 0)) } else { elfwritedynentsym(ctxt, s, DT_PLTGOT, ctxt.Syms.Lookup(".got.plt", 0)) } - if SysArch.Family == sys.PPC64 { + if ctxt.Arch.Family == sys.PPC64 { Elfwritedynent(ctxt, s, DT_PPC64_OPT, 0) } @@ -2115,20 +1665,20 @@ func (ctxt *Link) doelf() { Elfwritedynent(ctxt, s, DT_DEBUG, 0) } - if Buildmode == BuildmodeShared { + if ctxt.BuildMode == BuildModeShared { // The go.link.abihashbytes symbol will be pointed at the appropriate // part of the .note.go.abihash section in data.go:func address(). 
s := ctxt.Syms.Lookup("go.link.abihashbytes", 0) - s.Attr |= AttrLocal - s.Type = SRODATA - s.Attr |= AttrSpecial - s.Attr |= AttrReachable + s.Attr |= sym.AttrLocal + s.Type = sym.SRODATA + s.Attr |= sym.AttrSpecial + s.Attr |= sym.AttrReachable s.Size = int64(sha1.Size) sort.Sort(byPkg(ctxt.Library)) h := sha1.New() for _, l := range ctxt.Library { - io.WriteString(h, l.hash) + io.WriteString(h, l.Hash) } addgonote(ctxt, ".note.go.abihash", ELF_NOTE_GOABIHASH_TAG, h.Sum([]byte{})) addgonote(ctxt, ".note.go.pkg-list", ELF_NOTE_GOPKGLIST_TAG, pkglistfornote) @@ -2139,13 +1689,13 @@ func (ctxt *Link) doelf() { addgonote(ctxt, ".note.go.deps", ELF_NOTE_GODEPS_TAG, []byte(strings.Join(deplist, "\n"))) } - if Linkmode == LinkExternal && *flagBuildid != "" { + if ctxt.LinkMode == LinkExternal && *flagBuildid != "" { addgonote(ctxt, ".note.go.buildid", ELF_NOTE_GOBUILDID_TAG, []byte(*flagBuildid)) } } // Do not write DT_NULL. elfdynhash will finish it. -func shsym(sh *ElfShdr, s *Symbol) { +func shsym(sh *ElfShdr, s *sym.Symbol) { addr := Symaddr(s) if sh.flags&SHF_ALLOC != 0 { sh.addr = uint64(addr) @@ -2194,9 +1744,9 @@ func Asmbelfsetup() { func Asmbelf(ctxt *Link, symo int64) { eh := getElfEhdr() - switch SysArch.Family { + switch ctxt.Arch.Family { default: - Exitf("unknown architecture in asmbelf: %v", SysArch.Family) + Exitf("unknown architecture in asmbelf: %v", ctxt.Arch.Family) case sys.MIPS, sys.MIPS64: eh.machine = EM_MIPS case sys.ARM: @@ -2236,13 +1786,13 @@ func Asmbelf(ctxt *Link, symo int64) { var pph *ElfPhdr var pnote *ElfPhdr - if Linkmode == LinkExternal { + if ctxt.LinkMode == LinkExternal { /* skip program headers */ eh.phoff = 0 eh.phentsize = 0 - if Buildmode == BuildmodeShared { + if ctxt.BuildMode == BuildModeShared { sh := elfshname(".note.go.pkg-list") sh.type_ = SHT_NOTE sh = elfshname(".note.go.abihash") @@ -2276,7 +1826,7 @@ func Asmbelf(ctxt *Link, symo int64) { * segment boundaries downwards to include it. 
* Except on NaCl where it must not be loaded. */ - if Headtype != objabi.Hnacl { + if ctxt.HeadType != objabi.Hnacl { o := int64(Segtext.Vaddr - pph.vaddr) Segtext.Vaddr -= uint64(o) Segtext.Length += uint64(o) @@ -2293,7 +1843,7 @@ func Asmbelf(ctxt *Link, symo int64) { sh.flags = SHF_ALLOC sh.addralign = 1 if interpreter == "" { - switch Headtype { + switch ctxt.HeadType { case objabi.Hlinux: interpreter = Thearch.Linuxdynld @@ -2323,9 +1873,9 @@ func Asmbelf(ctxt *Link, symo int64) { } pnote = nil - if Headtype == objabi.Hnetbsd || Headtype == objabi.Hopenbsd { + if ctxt.HeadType == objabi.Hnetbsd || ctxt.HeadType == objabi.Hopenbsd { var sh *ElfShdr - switch Headtype { + switch ctxt.HeadType { case objabi.Hnetbsd: sh = elfshname(".note.netbsd.ident") resoff -= int64(elfnetbsdsig(sh, uint64(startva), uint64(resoff))) @@ -2386,7 +1936,7 @@ func Asmbelf(ctxt *Link, symo int64) { } else { sh.entsize = ELF32SYMSIZE } - sh.addralign = uint64(SysArch.RegSize) + sh.addralign = uint64(ctxt.Arch.RegSize) sh.link = uint32(elfshname(".dynstr").shnum) // sh->info = index of first non-local symbol (number of local symbols) @@ -2410,7 +1960,7 @@ func Asmbelf(ctxt *Link, symo int64) { sh = elfshname(".gnu.version_r") sh.type_ = SHT_GNU_VERNEED sh.flags = SHF_ALLOC - sh.addralign = uint64(SysArch.RegSize) + sh.addralign = uint64(ctxt.Arch.RegSize) sh.info = uint32(elfverneed) sh.link = uint32(elfshname(".dynstr").shnum) shsym(sh, ctxt.Syms.Lookup(".gnu.version_r", 0)) @@ -2421,7 +1971,7 @@ func Asmbelf(ctxt *Link, symo int64) { sh.type_ = SHT_RELA sh.flags = SHF_ALLOC sh.entsize = ELF64RELASIZE - sh.addralign = uint64(SysArch.RegSize) + sh.addralign = uint64(ctxt.Arch.RegSize) sh.link = uint32(elfshname(".dynsym").shnum) sh.info = uint32(elfshname(".plt").shnum) shsym(sh, ctxt.Syms.Lookup(".rela.plt", 0)) @@ -2485,15 +2035,15 @@ func Asmbelf(ctxt *Link, symo int64) { sh := elfshname(".got") sh.type_ = SHT_PROGBITS sh.flags = SHF_ALLOC + SHF_WRITE - sh.entsize = 
uint64(SysArch.RegSize) - sh.addralign = uint64(SysArch.RegSize) + sh.entsize = uint64(ctxt.Arch.RegSize) + sh.addralign = uint64(ctxt.Arch.RegSize) shsym(sh, ctxt.Syms.Lookup(".got", 0)) sh = elfshname(".got.plt") sh.type_ = SHT_PROGBITS sh.flags = SHF_ALLOC + SHF_WRITE - sh.entsize = uint64(SysArch.RegSize) - sh.addralign = uint64(SysArch.RegSize) + sh.entsize = uint64(ctxt.Arch.RegSize) + sh.addralign = uint64(ctxt.Arch.RegSize) shsym(sh, ctxt.Syms.Lookup(".got.plt", 0)) } @@ -2501,7 +2051,7 @@ func Asmbelf(ctxt *Link, symo int64) { sh.type_ = SHT_HASH sh.flags = SHF_ALLOC sh.entsize = 4 - sh.addralign = uint64(SysArch.RegSize) + sh.addralign = uint64(ctxt.Arch.RegSize) sh.link = uint32(elfshname(".dynsym").shnum) shsym(sh, ctxt.Syms.Lookup(".hash", 0)) @@ -2510,8 +2060,8 @@ func Asmbelf(ctxt *Link, symo int64) { sh.type_ = SHT_DYNAMIC sh.flags = SHF_ALLOC + SHF_WRITE - sh.entsize = 2 * uint64(SysArch.RegSize) - sh.addralign = uint64(SysArch.RegSize) + sh.entsize = 2 * uint64(ctxt.Arch.RegSize) + sh.addralign = uint64(ctxt.Arch.RegSize) sh.link = uint32(elfshname(".dynstr").shnum) shsym(sh, ctxt.Syms.Lookup(".dynamic", 0)) ph := newElfPhdr() @@ -2533,21 +2083,21 @@ func Asmbelf(ctxt *Link, symo int64) { ph.type_ = PT_TLS ph.flags = PF_R ph.memsz = tlssize - ph.align = uint64(SysArch.RegSize) + ph.align = uint64(ctxt.Arch.RegSize) } } - if Headtype == objabi.Hlinux { + if ctxt.HeadType == objabi.Hlinux { ph := newElfPhdr() ph.type_ = PT_GNU_STACK ph.flags = PF_W + PF_R - ph.align = uint64(SysArch.RegSize) + ph.align = uint64(ctxt.Arch.RegSize) ph = newElfPhdr() ph.type_ = PT_PAX_FLAGS ph.flags = 0x2a00 // mprotect, randexec, emutramp disabled - ph.align = uint64(SysArch.RegSize) - } else if Headtype == objabi.Hsolaris { + ph.align = uint64(ctxt.Arch.RegSize) + } else if ctxt.HeadType == objabi.Hsolaris { ph := newElfPhdr() ph.type_ = PT_SUNWSTACK ph.flags = PF_W + PF_R @@ -2567,40 +2117,37 @@ elfobj: } for _, sect := range Segtext.Sections { - elfshbits(sect) + 
elfshbits(ctxt.LinkMode, sect) } for _, sect := range Segrodata.Sections { - elfshbits(sect) + elfshbits(ctxt.LinkMode, sect) } for _, sect := range Segrelrodata.Sections { - elfshbits(sect) + elfshbits(ctxt.LinkMode, sect) } for _, sect := range Segdata.Sections { - elfshbits(sect) + elfshbits(ctxt.LinkMode, sect) } for _, sect := range Segdwarf.Sections { - elfshbits(sect) + elfshbits(ctxt.LinkMode, sect) } - if Linkmode == LinkExternal { + if ctxt.LinkMode == LinkExternal { for _, sect := range Segtext.Sections { - elfshreloc(sect) + elfshreloc(ctxt.Arch, sect) } for _, sect := range Segrodata.Sections { - elfshreloc(sect) + elfshreloc(ctxt.Arch, sect) } for _, sect := range Segrelrodata.Sections { - elfshreloc(sect) + elfshreloc(ctxt.Arch, sect) } for _, sect := range Segdata.Sections { - elfshreloc(sect) + elfshreloc(ctxt.Arch, sect) } for _, s := range dwarfp { - if len(s.R) > 0 || s.Type == SDWARFINFO { - elfshreloc(s.Sect) - } - if s.Type == SDWARFINFO { - break + if len(s.R) > 0 || s.Type == sym.SDWARFINFO || s.Type == sym.SDWARFLOC { + elfshreloc(ctxt.Arch, s.Sect) } } // add a .note.GNU-stack section to mark the stack as non-executable @@ -2616,8 +2163,8 @@ elfobj: sh.type_ = SHT_SYMTAB sh.off = uint64(symo) sh.size = uint64(Symsize) - sh.addralign = uint64(SysArch.RegSize) - sh.entsize = 8 + 2*uint64(SysArch.RegSize) + sh.addralign = uint64(ctxt.Arch.RegSize) + sh.entsize = 8 + 2*uint64(ctxt.Arch.RegSize) sh.link = uint32(elfshname(".strtab").shnum) sh.info = uint32(elfglobalsymndx) @@ -2634,13 +2181,13 @@ elfobj: eh.ident[EI_MAG1] = 'E' eh.ident[EI_MAG2] = 'L' eh.ident[EI_MAG3] = 'F' - if Headtype == objabi.Hfreebsd { + if ctxt.HeadType == objabi.Hfreebsd { eh.ident[EI_OSABI] = ELFOSABI_FREEBSD - } else if Headtype == objabi.Hnetbsd { + } else if ctxt.HeadType == objabi.Hnetbsd { eh.ident[EI_OSABI] = ELFOSABI_NETBSD - } else if Headtype == objabi.Hopenbsd { + } else if ctxt.HeadType == objabi.Hopenbsd { eh.ident[EI_OSABI] = ELFOSABI_OPENBSD - } else if 
Headtype == objabi.Hdragonfly { + } else if ctxt.HeadType == objabi.Hdragonfly { eh.ident[EI_OSABI] = ELFOSABI_NONE } if elf64 { @@ -2655,15 +2202,15 @@ elfobj: } eh.ident[EI_VERSION] = EV_CURRENT - if Linkmode == LinkExternal { + if ctxt.LinkMode == LinkExternal { eh.type_ = ET_REL - } else if Buildmode == BuildmodePIE { + } else if ctxt.BuildMode == BuildModePIE { eh.type_ = ET_DYN } else { eh.type_ = ET_EXEC } - if Linkmode != LinkExternal { + if ctxt.LinkMode != LinkExternal { eh.entry = uint64(Entryvalue(ctxt)) } @@ -2674,26 +2221,26 @@ elfobj: pph.memsz = pph.filesz } - Cseek(0) + ctxt.Out.SeekSet(0) a := int64(0) - a += int64(elfwritehdr()) - a += int64(elfwritephdrs()) - a += int64(elfwriteshdrs()) + a += int64(elfwritehdr(ctxt.Out)) + a += int64(elfwritephdrs(ctxt.Out)) + a += int64(elfwriteshdrs(ctxt.Out)) if !*FlagD { - a += int64(elfwriteinterp()) + a += int64(elfwriteinterp(ctxt.Out)) } - if Linkmode != LinkExternal { - if Headtype == objabi.Hnetbsd { - a += int64(elfwritenetbsdsig()) + if ctxt.LinkMode != LinkExternal { + if ctxt.HeadType == objabi.Hnetbsd { + a += int64(elfwritenetbsdsig(ctxt.Out)) } - if Headtype == objabi.Hopenbsd { - a += int64(elfwriteopenbsdsig()) + if ctxt.HeadType == objabi.Hopenbsd { + a += int64(elfwriteopenbsdsig(ctxt.Out)) } if len(buildinfo) > 0 { - a += int64(elfwritebuildinfo()) + a += int64(elfwritebuildinfo(ctxt.Out)) } if *flagBuildid != "" { - a += int64(elfwritegobuildid()) + a += int64(elfwritegobuildid(ctxt.Out)) } } @@ -2702,7 +2249,7 @@ elfobj: } } -func Elfadddynsym(ctxt *Link, s *Symbol) { +func elfadddynsym(ctxt *Link, s *sym.Symbol) { if elf64 { s.Dynid = int32(Nelfsym) Nelfsym++ @@ -2710,39 +2257,39 @@ func Elfadddynsym(ctxt *Link, s *Symbol) { d := ctxt.Syms.Lookup(".dynsym", 0) name := s.Extname - Adduint32(ctxt, d, uint32(Addstring(ctxt.Syms.Lookup(".dynstr", 0), name))) + d.AddUint32(ctxt.Arch, uint32(Addstring(ctxt.Syms.Lookup(".dynstr", 0), name))) /* type */ t := STB_GLOBAL << 4 - if 
s.Attr.CgoExport() && s.Type&SMASK == STEXT { + if s.Attr.CgoExport() && s.Type == sym.STEXT { t |= STT_FUNC } else { t |= STT_OBJECT } - Adduint8(ctxt, d, uint8(t)) + d.AddUint8(uint8(t)) /* reserved */ - Adduint8(ctxt, d, 0) + d.AddUint8(0) /* section where symbol is defined */ - if s.Type == SDYNIMPORT { - Adduint16(ctxt, d, SHN_UNDEF) + if s.Type == sym.SDYNIMPORT { + d.AddUint16(ctxt.Arch, SHN_UNDEF) } else { - Adduint16(ctxt, d, 1) + d.AddUint16(ctxt.Arch, 1) } /* value */ - if s.Type == SDYNIMPORT { - Adduint64(ctxt, d, 0) + if s.Type == sym.SDYNIMPORT { + d.AddUint64(ctxt.Arch, 0) } else { - Addaddr(ctxt, d, s) + d.AddAddr(ctxt.Arch, s) } /* size of object */ - Adduint64(ctxt, d, uint64(s.Size)) + d.AddUint64(ctxt.Arch, uint64(s.Size)) - if SysArch.Family == sys.AMD64 && !s.Attr.CgoExportDynamic() && s.Dynimplib != "" && !seenlib[s.Dynimplib] { + if ctxt.Arch.Family == sys.AMD64 && !s.Attr.CgoExportDynamic() && s.Dynimplib != "" && !seenlib[s.Dynimplib] { Elfwritedynent(ctxt, ctxt.Syms.Lookup(".dynamic", 0), DT_NEEDED, uint64(Addstring(ctxt.Syms.Lookup(".dynstr", 0), s.Dynimplib))) } } else { @@ -2754,37 +2301,37 @@ func Elfadddynsym(ctxt *Link, s *Symbol) { /* name */ name := s.Extname - Adduint32(ctxt, d, uint32(Addstring(ctxt.Syms.Lookup(".dynstr", 0), name))) + d.AddUint32(ctxt.Arch, uint32(Addstring(ctxt.Syms.Lookup(".dynstr", 0), name))) /* value */ - if s.Type == SDYNIMPORT { - Adduint32(ctxt, d, 0) + if s.Type == sym.SDYNIMPORT { + d.AddUint32(ctxt.Arch, 0) } else { - Addaddr(ctxt, d, s) + d.AddAddr(ctxt.Arch, s) } /* size of object */ - Adduint32(ctxt, d, uint32(s.Size)) + d.AddUint32(ctxt.Arch, uint32(s.Size)) /* type */ t := STB_GLOBAL << 4 // TODO(mwhudson): presumably the behavior should actually be the same on both arm and 386. 
- if SysArch.Family == sys.I386 && s.Attr.CgoExport() && s.Type&SMASK == STEXT { + if ctxt.Arch.Family == sys.I386 && s.Attr.CgoExport() && s.Type == sym.STEXT { t |= STT_FUNC - } else if SysArch.Family == sys.ARM && s.Attr.CgoExportDynamic() && s.Type&SMASK == STEXT { + } else if ctxt.Arch.Family == sys.ARM && s.Attr.CgoExportDynamic() && s.Type == sym.STEXT { t |= STT_FUNC } else { t |= STT_OBJECT } - Adduint8(ctxt, d, uint8(t)) - Adduint8(ctxt, d, 0) + d.AddUint8(uint8(t)) + d.AddUint8(0) /* shndx */ - if s.Type == SDYNIMPORT { - Adduint16(ctxt, d, SHN_UNDEF) + if s.Type == sym.SDYNIMPORT { + d.AddUint16(ctxt.Arch, SHN_UNDEF) } else { - Adduint16(ctxt, d, 1) + d.AddUint16(ctxt.Arch, 1) } } } diff --git a/src/cmd/link/internal/ld/go.go b/src/cmd/link/internal/ld/go.go index 2930a6b24f1..6cfbaebb73c 100644 --- a/src/cmd/link/internal/ld/go.go +++ b/src/cmd/link/internal/ld/go.go @@ -10,6 +10,7 @@ import ( "bytes" "cmd/internal/bio" "cmd/internal/objabi" + "cmd/link/internal/sym" "fmt" "io" "os" @@ -29,8 +30,6 @@ func expandpkg(t0 string, pkg string) string { // libmach, so that other linkers and ar can share. 
func ldpkg(ctxt *Link, f *bio.Reader, pkg string, length int64, filename string, whence int) { - var p0, p1 int - if *flagG { return } @@ -95,7 +94,8 @@ func ldpkg(ctxt *Link, f *bio.Reader, pkg string, length int64, filename string, } // look for cgo section - p0 = strings.Index(data, "\n$$ // cgo") + p0 := strings.Index(data, "\n$$ // cgo") + var p1 int if p0 >= 0 { p0 += p1 i := strings.IndexByte(data[p0+1:], '\n') @@ -128,11 +128,8 @@ func ldpkg(ctxt *Link, f *bio.Reader, pkg string, length int64, filename string, func loadcgo(ctxt *Link, file string, pkg string, p string) { var next string var q string - var f []string - var local string - var remote string var lib string - var s *Symbol + var s *sym.Symbol p0 := "" for ; p != ""; p = next { @@ -143,7 +140,7 @@ func loadcgo(ctxt *Link, file string, pkg string, p string) { } p0 = p // save for error message - f = tokenize(p) + f := tokenize(p) if len(f) == 0 { continue } @@ -153,8 +150,8 @@ func loadcgo(ctxt *Link, file string, pkg string, p string) { goto err } - local = f[1] - remote = local + local := f[1] + remote := local if len(f) > 2 { remote = f[2] } @@ -174,8 +171,8 @@ func loadcgo(ctxt *Link, file string, pkg string, p string) { // to force a link of foo.so. 
havedynamic = 1 - if Headtype == objabi.Hdarwin { - Machoadddynlib(lib) + if ctxt.HeadType == objabi.Hdarwin { + machoadddynlib(lib, ctxt.LinkMode) } else { dynlib = append(dynlib, lib) } @@ -188,14 +185,12 @@ func loadcgo(ctxt *Link, file string, pkg string, p string) { remote, q = remote[:i], remote[i+1:] } s = ctxt.Syms.Lookup(local, 0) - if local != f[1] { - } - if s.Type == 0 || s.Type == SXREF || s.Type == SHOSTOBJ { + if s.Type == 0 || s.Type == sym.SXREF || s.Type == sym.SHOSTOBJ { s.Dynimplib = lib s.Extname = remote s.Dynimpvers = q - if s.Type != SHOSTOBJ { - s.Type = SDYNIMPORT + if s.Type != sym.SHOSTOBJ { + s.Type = sym.SDYNIMPORT } havedynamic = 1 } @@ -207,9 +202,9 @@ func loadcgo(ctxt *Link, file string, pkg string, p string) { if len(f) != 2 { goto err } - local = f[1] + local := f[1] s = ctxt.Syms.Lookup(local, 0) - s.Type = SHOSTOBJ + s.Type = sym.SHOSTOBJ s.Size = 0 continue } @@ -218,7 +213,8 @@ func loadcgo(ctxt *Link, file string, pkg string, p string) { if len(f) < 2 || len(f) > 3 { goto err } - local = f[1] + local := f[1] + var remote string if len(f) > 2 { remote = f[2] } else { @@ -227,8 +223,8 @@ func loadcgo(ctxt *Link, file string, pkg string, p string) { local = expandpkg(local, pkg) s = ctxt.Syms.Lookup(local, 0) - switch Buildmode { - case BuildmodeCShared, BuildmodeCArchive, BuildmodePlugin: + switch ctxt.BuildMode { + case BuildModeCShared, BuildModeCArchive, BuildModePlugin: if s == ctxt.Syms.Lookup("main", 0) { continue } @@ -253,11 +249,9 @@ func loadcgo(ctxt *Link, file string, pkg string, p string) { } if f[0] == "cgo_export_static" { - s.Attr |= AttrCgoExportStatic + s.Attr |= sym.AttrCgoExportStatic } else { - s.Attr |= AttrCgoExportDynamic - } - if local != f[1] { + s.Attr |= sym.AttrCgoExportDynamic } continue } @@ -299,12 +293,12 @@ err: var seenlib = make(map[string]bool) func adddynlib(ctxt *Link, lib string) { - if seenlib[lib] || Linkmode == LinkExternal { + if seenlib[lib] || ctxt.LinkMode == LinkExternal { return 
} seenlib[lib] = true - if Iself { + if ctxt.IsELF { s := ctxt.Syms.Lookup(".dynstr", 0) if s.Size == 0 { Addstring(s, "") @@ -315,16 +309,16 @@ func adddynlib(ctxt *Link, lib string) { } } -func Adddynsym(ctxt *Link, s *Symbol) { - if s.Dynid >= 0 || Linkmode == LinkExternal { +func Adddynsym(ctxt *Link, s *sym.Symbol) { + if s.Dynid >= 0 || ctxt.LinkMode == LinkExternal { return } - if Iself { - Elfadddynsym(ctxt, s) - } else if Headtype == objabi.Hdarwin { + if ctxt.IsELF { + elfadddynsym(ctxt, s) + } else if ctxt.HeadType == objabi.Hdarwin { Errorf(s, "adddynsym: missed symbol (Extname=%s)", s.Extname) - } else if Headtype == objabi.Hwindows { + } else if ctxt.HeadType == objabi.Hwindows { // already taken care of } else { Errorf(s, "adddynsym: unsupported binary format") @@ -336,8 +330,8 @@ func fieldtrack(ctxt *Link) { var buf bytes.Buffer for _, s := range ctxt.Syms.Allsym { if strings.HasPrefix(s.Name, "go.track.") { - s.Attr |= AttrSpecial // do not lay out in data segment - s.Attr |= AttrNotInSymbolTable + s.Attr |= sym.AttrSpecial // do not lay out in data segment + s.Attr |= sym.AttrNotInSymbolTable if s.Attr.Reachable() { buf.WriteString(s.Name[9:]) for p := s.Reachparent; p != nil; p = p.Reachparent { @@ -347,7 +341,7 @@ func fieldtrack(ctxt *Link) { buf.WriteString("\n") } - s.Type = SCONST + s.Type = sym.SCONST s.Value = 0 } } @@ -360,11 +354,11 @@ func fieldtrack(ctxt *Link) { return } addstrdata(ctxt, *flagFieldTrack, buf.String()) - s.Type = SDATA + s.Type = sym.SDATA } func (ctxt *Link) addexport() { - if Headtype == objabi.Hdarwin { + if ctxt.HeadType == objabi.Hdarwin { return } diff --git a/src/cmd/link/internal/ld/ld.go b/src/cmd/link/internal/ld/ld.go index fc4137213f1..b260ef28c8e 100644 --- a/src/cmd/link/internal/ld/ld.go +++ b/src/cmd/link/internal/ld/ld.go @@ -32,6 +32,7 @@ package ld import ( + "cmd/link/internal/sym" "io/ioutil" "log" "os" @@ -86,39 +87,49 @@ func (ctxt *Link) readImportCfg(file string) { } } -func addlib(ctxt *Link, 
src string, obj string, pathname string) *Library { - name := path.Clean(pathname) - +func pkgname(lib string) string { + name := path.Clean(lib) // runtime.a -> runtime, runtime.6 -> runtime pkg := name if len(pkg) >= 2 && pkg[len(pkg)-2] == '.' { pkg = pkg[:len(pkg)-2] } + return pkg +} - // already loaded? - if l := ctxt.LibraryByPkg[pkg]; l != nil { - return l - } +func findlib(ctxt *Link, lib string) (string, bool) { + name := path.Clean(lib) var pname string isshlib := false - if *FlagLinkshared && ctxt.PackageShlib[name] != "" { + if ctxt.linkShared && ctxt.PackageShlib[name] != "" { pname = ctxt.PackageShlib[name] isshlib = true } else if ctxt.PackageFile != nil { pname = ctxt.PackageFile[name] if pname == "" { ctxt.Logf("cannot find package %s (using -importcfg)\n", name) - return nil + return "", false } } else { if filepath.IsAbs(name) { pname = name } else { + pkg := pkgname(lib) + // Add .a if needed; the new -importcfg modes + // do not put .a into the package name anymore. + // This only matters when people try to mix + // compiles using -importcfg with links not using -importcfg, + // such as when running quick things like + // 'go tool compile x.go && go tool link x.o' + // by hand against a standard library built using -importcfg. + if !strings.HasSuffix(name, ".a") && !strings.HasSuffix(name, ".o") { + name += ".a" + } // try dot, -L "libdir", and then goroot. for _, dir := range ctxt.Libdir { - if *FlagLinkshared { + if ctxt.linkShared { pname = dir + "/" + pkg + ".shlibname" if _, err := os.Stat(pname); err == nil { isshlib = true @@ -134,6 +145,19 @@ func addlib(ctxt *Link, src string, obj string, pathname string) *Library { pname = path.Clean(pname) } + return pname, isshlib +} + +func addlib(ctxt *Link, src string, obj string, lib string) *sym.Library { + pkg := pkgname(lib) + + // already loaded? 
+ if l := ctxt.LibraryByPkg[pkg]; l != nil { + return l + } + + pname, isshlib := findlib(ctxt, lib) + if ctxt.Debugvlog > 1 { ctxt.Logf("%5.2f addlib: %s %s pulls in %s isshlib %v\n", elapsed(), obj, src, pname, isshlib) } @@ -150,29 +174,33 @@ func addlib(ctxt *Link, src string, obj string, pathname string) *Library { * objref: object file referring to package * file: object file, e.g., /home/rsc/go/pkg/container/vector.a * pkg: package import path, e.g. container/vector + * shlib: path to shared library, or .shlibname file holding path */ -func addlibpath(ctxt *Link, srcref string, objref string, file string, pkg string, shlibnamefile string) *Library { +func addlibpath(ctxt *Link, srcref string, objref string, file string, pkg string, shlib string) *sym.Library { if l := ctxt.LibraryByPkg[pkg]; l != nil { return l } if ctxt.Debugvlog > 1 { - ctxt.Logf("%5.2f addlibpath: srcref: %s objref: %s file: %s pkg: %s shlibnamefile: %s\n", Cputime(), srcref, objref, file, pkg, shlibnamefile) + ctxt.Logf("%5.2f addlibpath: srcref: %s objref: %s file: %s pkg: %s shlib: %s\n", Cputime(), srcref, objref, file, pkg, shlib) } - l := &Library{} + l := &sym.Library{} ctxt.LibraryByPkg[pkg] = l ctxt.Library = append(ctxt.Library, l) l.Objref = objref l.Srcref = srcref l.File = file l.Pkg = pkg - if shlibnamefile != "" { - shlibbytes, err := ioutil.ReadFile(shlibnamefile) - if err != nil { - Errorf(nil, "cannot read %s: %v", shlibnamefile, err) + if shlib != "" { + if strings.HasSuffix(shlib, ".shlibname") { + data, err := ioutil.ReadFile(shlib) + if err != nil { + Errorf(nil, "cannot read %s: %v", shlib, err) + } + shlib = strings.TrimSpace(string(data)) } - l.Shlib = strings.TrimSpace(string(shlibbytes)) + l.Shlib = shlib } return l } diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index 59dc4e7acc5..f866a049367 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -36,8 +36,14 @@ import ( "cmd/internal/bio" 
"cmd/internal/objabi" "cmd/internal/sys" + "cmd/link/internal/loadelf" + "cmd/link/internal/loadmacho" + "cmd/link/internal/loadpe" + "cmd/link/internal/objfile" + "cmd/link/internal/sym" "crypto/sha1" "debug/elf" + "encoding/base64" "encoding/binary" "encoding/hex" "fmt" @@ -96,23 +102,17 @@ type Arch struct { Openbsddynld string Dragonflydynld string Solarisdynld string - Adddynrel func(*Link, *Symbol, *Reloc) bool + Adddynrel func(*Link, *sym.Symbol, *sym.Reloc) bool Archinit func(*Link) - Archreloc func(*Link, *Reloc, *Symbol, *int64) int - Archrelocvariant func(*Link, *Reloc, *Symbol, int64) int64 - Trampoline func(*Link, *Reloc, *Symbol) + Archreloc func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool + Archrelocvariant func(*Link, *sym.Reloc, *sym.Symbol, int64) int64 + Trampoline func(*Link, *sym.Reloc, *sym.Symbol) Asmb func(*Link) - Elfreloc1 func(*Link, *Reloc, int64) int + Elfreloc1 func(*Link, *sym.Reloc, int64) bool Elfsetupplt func(*Link) Gentext func(*Link) - Machoreloc1 func(*Symbol, *Reloc, int64) int - PEreloc1 func(*Symbol, *Reloc, int64) bool - Wput func(uint16) - Lput func(uint32) - Vput func(uint64) - Append16 func(b []byte, v uint16) []byte - Append32 func(b []byte, v uint32) []byte - Append64 func(b []byte, v uint64) []byte + Machoreloc1 func(*sys.Arch, *OutBuf, *sym.Symbol, *sym.Reloc, int64) bool + PEreloc1 func(*sys.Arch, *OutBuf, *sym.Symbol, *sym.Reloc, int64) bool // TLSIEtoLE converts a TLS Initial Executable relocation to // a TLS Local Executable relocation. @@ -120,7 +120,7 @@ type Arch struct { // This is possible when a TLS IE relocation refers to a local // symbol in an executable, which is typical when internally // linking PIE binaries. - TLSIEtoLE func(s *Symbol, off, size int) + TLSIEtoLE func(s *sym.Symbol, off, size int) } var ( @@ -131,61 +131,37 @@ var ( Symsize int32 ) -// Terrible but standard terminology. -// A segment describes a block of file to load into memory. 
-// A section further describes the pieces of that block for -// use in debuggers and such. - const ( MINFUNC = 16 // minimum size for a function ) -type Segment struct { - Rwx uint8 // permission as usual unix bits (5 = r-x etc) - Vaddr uint64 // virtual address - Length uint64 // length in memory - Fileoff uint64 // file offset - Filelen uint64 // length on disk - Sections []*Section -} - -type Section struct { - Rwx uint8 - Extnum int16 - Align int32 - Name string - Vaddr uint64 - Length uint64 - Seg *Segment - Elfsect *ElfShdr - Reloff uint64 - Rellen uint64 -} - // DynlinkingGo returns whether we are producing Go code that can live // in separate shared libraries linked together at runtime. func (ctxt *Link) DynlinkingGo() bool { if !ctxt.Loaded { panic("DynlinkingGo called before all symbols loaded") } - canUsePlugins := ctxt.Syms.ROLookup("plugin.Open", 0) != nil - return Buildmode == BuildmodeShared || *FlagLinkshared || Buildmode == BuildmodePlugin || canUsePlugins + return ctxt.BuildMode == BuildModeShared || ctxt.linkShared || ctxt.BuildMode == BuildModePlugin || ctxt.CanUsePlugins() +} + +// CanUsePlugins returns whether a plugins can be used +func (ctxt *Link) CanUsePlugins() bool { + return ctxt.Syms.ROLookup("plugin.Open", 0) != nil } // UseRelro returns whether to make use of "read only relocations" aka // relro. 
-func UseRelro() bool { - switch Buildmode { - case BuildmodeCArchive, BuildmodeCShared, BuildmodeShared, BuildmodePIE, BuildmodePlugin: - return Iself +func (ctxt *Link) UseRelro() bool { + switch ctxt.BuildMode { + case BuildModeCArchive, BuildModeCShared, BuildModeShared, BuildModePIE, BuildModePlugin: + return ctxt.IsELF default: - return *FlagLinkshared + return ctxt.linkShared } } var ( - SysArch *sys.Arch - dynexp []*Symbol + dynexp []*sym.Symbol dynlib []string ldflag []string havedynamic int @@ -194,20 +170,19 @@ var ( elfglobalsymndx int interpreter string - debug_s bool // backup old value of debug['s'] - HEADR int32 - Headtype objabi.HeadType + debug_s bool // backup old value of debug['s'] + HEADR int32 nerrors int liveness int64 ) var ( - Segtext Segment - Segrodata Segment - Segrelrodata Segment - Segdata Segment - Segdwarf Segment + Segtext sym.Segment + Segrodata sym.Segment + Segrelrodata sym.Segment + Segdata sym.Segment + Segdwarf sym.Segment ) /* whence for ldpkg */ @@ -217,32 +192,7 @@ const ( Pkgdef ) -// TODO(dfc) outBuf duplicates bio.Writer -type outBuf struct { - w *bufio.Writer - f *os.File - off int64 -} - -func (w *outBuf) Write(p []byte) (n int, err error) { - n, err = w.w.Write(p) - w.off += int64(n) - return n, err -} - -func (w *outBuf) WriteString(s string) (n int, err error) { - n, err = coutbuf.w.WriteString(s) - w.off += int64(n) - return n, err -} - -func (w *outBuf) Offset() int64 { - return w.off -} - -var coutbuf outBuf - -const pkgname = "__.PKGDEF" +const pkgdef = "__.PKGDEF" var ( // Set if we see an object compiled by the host compiler that is not @@ -294,48 +244,34 @@ func libinit(ctxt *Link) { Exitf("cannot create %s: %v", *flagOutfile, err) } - coutbuf.w = bufio.NewWriter(f) - coutbuf.f = f + ctxt.Out.w = bufio.NewWriter(f) + ctxt.Out.f = f if *flagEntrySymbol == "" { - switch Buildmode { - case BuildmodeCShared, BuildmodeCArchive: + switch ctxt.BuildMode { + case BuildModeCShared, BuildModeCArchive: *flagEntrySymbol 
= fmt.Sprintf("_rt0_%s_%s_lib", objabi.GOARCH, objabi.GOOS) - case BuildmodeExe, BuildmodePIE: + case BuildModeExe, BuildModePIE: *flagEntrySymbol = fmt.Sprintf("_rt0_%s_%s", objabi.GOARCH, objabi.GOOS) - case BuildmodeShared, BuildmodePlugin: + case BuildModeShared, BuildModePlugin: // No *flagEntrySymbol for -buildmode=shared and plugin default: - Errorf(nil, "unknown *flagEntrySymbol for buildmode %v", Buildmode) + Errorf(nil, "unknown *flagEntrySymbol for buildmode %v", ctxt.BuildMode) } } } func errorexit() { - if coutbuf.f != nil { - if nerrors != 0 { - Cflush() - } - // For rmtemp run at atexit time on Windows. - if err := coutbuf.f.Close(); err != nil { - Exitf("close: %v", err) - } - } - if nerrors != 0 { - if coutbuf.f != nil { - mayberemoveoutfile() - } Exit(2) } - Exit(0) } -func loadinternal(ctxt *Link, name string) *Library { - if *FlagLinkshared && ctxt.PackageShlib != nil { - if shlibname := ctxt.PackageShlib[name]; shlibname != "" { - return addlibpath(ctxt, "internal", "internal", "", name, shlibname) +func loadinternal(ctxt *Link, name string) *sym.Library { + if ctxt.linkShared && ctxt.PackageShlib != nil { + if shlib := ctxt.PackageShlib[name]; shlib != "" { + return addlibpath(ctxt, "internal", "internal", "", name, shlib) } } if ctxt.PackageFile != nil { @@ -347,7 +283,7 @@ func loadinternal(ctxt *Link, name string) *Library { } for i := 0; i < len(ctxt.Libdir); i++ { - if *FlagLinkshared { + if ctxt.linkShared { shlibname := filepath.Join(ctxt.Libdir[i], name+".shlibname") if ctxt.Debugvlog != 0 { ctxt.Logf("searching for %s.a in %s\n", name, shlibname) @@ -375,7 +311,7 @@ func (ctxt *Link) findLibPathCmd(cmd, libname string) string { if *flagExtld == "" { *flagExtld = "gcc" } - args := hostlinkArchArgs() + args := hostlinkArchArgs(ctxt.Arch) args = append(args, cmd) if ctxt.Debugvlog != 0 { ctxt.Logf("%s %v\n", *flagExtld, args) @@ -397,19 +333,19 @@ func (ctxt *Link) findLibPath(libname string) string { } func (ctxt *Link) loadlib() { - 
switch Buildmode { - case BuildmodeCShared, BuildmodePlugin: + switch ctxt.BuildMode { + case BuildModeCShared, BuildModePlugin: s := ctxt.Syms.Lookup("runtime.islibrary", 0) - s.Attr |= AttrDuplicateOK - Adduint8(ctxt, s, 1) - case BuildmodeCArchive: + s.Attr |= sym.AttrDuplicateOK + s.AddUint8(1) + case BuildModeCArchive: s := ctxt.Syms.Lookup("runtime.isarchive", 0) - s.Attr |= AttrDuplicateOK - Adduint8(ctxt, s, 1) + s.Attr |= sym.AttrDuplicateOK + s.AddUint8(1) } loadinternal(ctxt, "runtime") - if SysArch.Family == sys.ARM { + if ctxt.Arch.Family == sys.ARM { loadinternal(ctxt, "math") } if *flagRace { @@ -419,73 +355,73 @@ func (ctxt *Link) loadlib() { loadinternal(ctxt, "runtime/msan") } - var i int - for i = 0; i < len(ctxt.Library); i++ { - iscgo = iscgo || ctxt.Library[i].Pkg == "runtime/cgo" - if ctxt.Library[i].Shlib == "" { + // ctxt.Library grows during the loop, so not a range loop. + for i := 0; i < len(ctxt.Library); i++ { + lib := ctxt.Library[i] + if lib.Shlib == "" { if ctxt.Debugvlog > 1 { - ctxt.Logf("%5.2f autolib: %s (from %s)\n", Cputime(), ctxt.Library[i].File, ctxt.Library[i].Objref) + ctxt.Logf("%5.2f autolib: %s (from %s)\n", Cputime(), lib.File, lib.Objref) } - objfile(ctxt, ctxt.Library[i]) + loadobjfile(ctxt, lib) } } - for i = 0; i < len(ctxt.Library); i++ { - if ctxt.Library[i].Shlib != "" { + for _, lib := range ctxt.Library { + if lib.Shlib != "" { if ctxt.Debugvlog > 1 { - ctxt.Logf("%5.2f autolib: %s (from %s)\n", Cputime(), ctxt.Library[i].Shlib, ctxt.Library[i].Objref) + ctxt.Logf("%5.2f autolib: %s (from %s)\n", Cputime(), lib.Shlib, lib.Objref) } - ldshlibsyms(ctxt, ctxt.Library[i].Shlib) + ldshlibsyms(ctxt, lib.Shlib) } } + iscgo = ctxt.Syms.ROLookup("x_cgo_init", 0) != nil + // We now have enough information to determine the link mode. determineLinkMode(ctxt) - // Recalculate pe parameters now that we have Linkmode set. - if Headtype == objabi.Hwindows { + // Recalculate pe parameters now that we have ctxt.LinkMode set. 
+ if ctxt.HeadType == objabi.Hwindows { Peinit(ctxt) } - if Headtype == objabi.Hdarwin && Linkmode == LinkExternal { + if ctxt.HeadType == objabi.Hdarwin && ctxt.LinkMode == LinkExternal { *FlagTextAddr = 0 } - if Linkmode == LinkExternal && SysArch.Family == sys.PPC64 { + if ctxt.LinkMode == LinkExternal && ctxt.Arch.Family == sys.PPC64 { toc := ctxt.Syms.Lookup(".TOC.", 0) - toc.Type = SDYNIMPORT + toc.Type = sym.SDYNIMPORT } - if Linkmode == LinkExternal && !iscgo { + if ctxt.LinkMode == LinkExternal && !iscgo && ctxt.LibraryByPkg["runtime/cgo"] == nil { // This indicates a user requested -linkmode=external. // The startup code uses an import of runtime/cgo to decide // whether to initialize the TLS. So give it one. This could // be handled differently but it's an unusual case. - loadinternal(ctxt, "runtime/cgo") - - if i < len(ctxt.Library) { - if ctxt.Library[i].Shlib != "" { - ldshlibsyms(ctxt, ctxt.Library[i].Shlib) + if lib := loadinternal(ctxt, "runtime/cgo"); lib != nil { + if lib.Shlib != "" { + ldshlibsyms(ctxt, lib.Shlib) } else { - if Buildmode == BuildmodeShared || *FlagLinkshared { + if ctxt.BuildMode == BuildModeShared || ctxt.linkShared { Exitf("cannot implicitly include runtime/cgo in a shared library") } - objfile(ctxt, ctxt.Library[i]) + loadobjfile(ctxt, lib) } } } - if Linkmode == LinkInternal { + if ctxt.LinkMode == LinkInternal { // Drop all the cgo_import_static declarations. // Turns out we won't be needing them. for _, s := range ctxt.Syms.Allsym { - if s.Type == SHOSTOBJ { + if s.Type == sym.SHOSTOBJ { // If a symbol was marked both // cgo_import_static and cgo_import_dynamic, // then we want to make it cgo_import_dynamic // now. 
if s.Extname != "" && s.Dynimplib != "" && !s.Attr.CgoExport() { - s.Type = SDYNIMPORT + s.Type = sym.SDYNIMPORT } else { s.Type = 0 } @@ -498,22 +434,22 @@ func (ctxt *Link) loadlib() { // runtime.tlsg is used for external linking on platforms that do not define // a variable to hold g in assembly (currently only intel). if tlsg.Type == 0 { - tlsg.Type = STLSBSS - tlsg.Size = int64(SysArch.PtrSize) - } else if tlsg.Type != SDYNIMPORT { + tlsg.Type = sym.STLSBSS + tlsg.Size = int64(ctxt.Arch.PtrSize) + } else if tlsg.Type != sym.SDYNIMPORT { Errorf(nil, "runtime declared tlsg variable %v", tlsg.Type) } - tlsg.Attr |= AttrReachable + tlsg.Attr |= sym.AttrReachable ctxt.Tlsg = tlsg - var moduledata *Symbol - if Buildmode == BuildmodePlugin { + var moduledata *sym.Symbol + if ctxt.BuildMode == BuildModePlugin { moduledata = ctxt.Syms.Lookup("local.pluginmoduledata", 0) - moduledata.Attr |= AttrLocal + moduledata.Attr |= sym.AttrLocal } else { moduledata = ctxt.Syms.Lookup("runtime.firstmoduledata", 0) } - if moduledata.Type != 0 && moduledata.Type != SDYNIMPORT { + if moduledata.Type != 0 && moduledata.Type != sym.SDYNIMPORT { // If the module (toolchain-speak for "executable or shared // library") we are linking contains the runtime package, it // will define the runtime.firstmoduledata symbol and we @@ -523,36 +459,36 @@ func (ctxt *Link) loadlib() { // In addition, on ARM, the runtime depends on the linker // recording the value of GOARM. 
- if SysArch.Family == sys.ARM { + if ctxt.Arch.Family == sys.ARM { s := ctxt.Syms.Lookup("runtime.goarm", 0) - s.Type = SRODATA + s.Type = sym.SRODATA s.Size = 0 - Adduint8(ctxt, s, uint8(objabi.GOARM)) + s.AddUint8(uint8(objabi.GOARM)) } if objabi.Framepointer_enabled(objabi.GOOS, objabi.GOARCH) { s := ctxt.Syms.Lookup("runtime.framepointer_enabled", 0) - s.Type = SRODATA + s.Type = sym.SRODATA s.Size = 0 - Adduint8(ctxt, s, 1) + s.AddUint8(1) } } else { // If OTOH the module does not contain the runtime package, // create a local symbol for the moduledata. moduledata = ctxt.Syms.Lookup("local.moduledata", 0) - moduledata.Attr |= AttrLocal + moduledata.Attr |= sym.AttrLocal } // In all cases way we mark the moduledata as noptrdata to hide it from // the GC. - moduledata.Type = SNOPTRDATA - moduledata.Attr |= AttrReachable + moduledata.Type = sym.SNOPTRDATA + moduledata.Attr |= sym.AttrReachable ctxt.Moduledata = moduledata // Now that we know the link mode, trim the dynexp list. - x := AttrCgoExportDynamic + x := sym.AttrCgoExportDynamic - if Linkmode == LinkExternal { - x = AttrCgoExportStatic + if ctxt.LinkMode == LinkExternal { + x = sym.AttrCgoExportStatic } w := 0 for i := 0; i < len(dynexp); i++ { @@ -564,7 +500,7 @@ func (ctxt *Link) loadlib() { dynexp = dynexp[:w] // In internal link mode, read the host object files. 
- if Linkmode == LinkInternal { + if ctxt.LinkMode == LinkInternal { hostobjs(ctxt) // If we have any undefined symbols in external @@ -572,7 +508,7 @@ func (ctxt *Link) loadlib() { any := false for _, s := range ctxt.Syms.Allsym { for _, r := range s.R { - if r.Sym != nil && r.Sym.Type&SMASK == SXREF && r.Sym.Name != ".got" { + if r.Sym != nil && r.Sym.Type == sym.SXREF && r.Sym.Name != ".got" { any = true break } @@ -585,7 +521,7 @@ func (ctxt *Link) loadlib() { if *flagLibGCC != "none" { hostArchive(ctxt, *flagLibGCC) } - if Headtype == objabi.Hwindows { + if ctxt.HeadType == objabi.Hwindows { if p := ctxt.findLibPath("libmingwex.a"); p != "none" { hostArchive(ctxt, p) } @@ -603,7 +539,7 @@ func (ctxt *Link) loadlib() { } } } else { - hostlinksetup() + hostlinksetup(ctxt) } // We've loaded all the code now. @@ -619,27 +555,48 @@ func (ctxt *Link) loadlib() { // binaries, so leave it enabled on OS X (Mach-O) binaries. // Also leave it enabled on Solaris which doesn't support // statically linked binaries. - if Buildmode == BuildmodeExe { - if havedynamic == 0 && Headtype != objabi.Hdarwin && Headtype != objabi.Hsolaris { + if ctxt.BuildMode == BuildModeExe { + if havedynamic == 0 && ctxt.HeadType != objabi.Hdarwin && ctxt.HeadType != objabi.Hsolaris { *FlagD = true } } + // If type. symbols are visible in the symbol table, rename them + // using a SHA-1 prefix. This reduces binary size (the full + // string of a type symbol can be multiple kilobytes) and removes + // characters that upset external linkers. + // + // Keep the type.. prefix, which parts of the linker (like the + // DWARF generator) know means the symbol is not decodable. + // + // Leave type.runtime. symbols alone, because other parts of + // the linker manipulates them, and also symbols whose names + // would not be shortened by this process. 
+ if typeSymbolMangling(ctxt) { + *FlagW = true // disable DWARF generation + for _, s := range ctxt.Syms.Allsym { + newName := typeSymbolMangle(s.Name) + if newName != s.Name { + ctxt.Syms.Rename(s.Name, newName, int(s.Version)) + } + } + } + // If package versioning is required, generate a hash of the // the packages used in the link. - if Buildmode == BuildmodeShared || Buildmode == BuildmodePlugin || ctxt.Syms.ROLookup("plugin.Open", 0) != nil { - for i = 0; i < len(ctxt.Library); i++ { - if ctxt.Library[i].Shlib == "" { - genhash(ctxt, ctxt.Library[i]) + if ctxt.BuildMode == BuildModeShared || ctxt.BuildMode == BuildModePlugin || ctxt.CanUsePlugins() { + for _, lib := range ctxt.Library { + if lib.Shlib == "" { + genhash(ctxt, lib) } } } - if SysArch == sys.Arch386 { - if (Buildmode == BuildmodeCArchive && Iself) || Buildmode == BuildmodeCShared || Buildmode == BuildmodePIE || ctxt.DynlinkingGo() { + if ctxt.Arch == sys.Arch386 { + if (ctxt.BuildMode == BuildModeCArchive && ctxt.IsELF) || (ctxt.BuildMode == BuildModeCShared && ctxt.HeadType != objabi.Hwindows) || ctxt.BuildMode == BuildModePIE || ctxt.DynlinkingGo() { got := ctxt.Syms.Lookup("_GLOBAL_OFFSET_TABLE_", 0) - got.Type = SDYNIMPORT - got.Attr |= AttrReachable + got.Type = sym.SDYNIMPORT + got.Attr |= sym.AttrReachable } } @@ -654,11 +611,11 @@ func (ctxt *Link) loadlib() { if isRuntimeDepPkg(lib.Pkg) != doInternal { continue } - ctxt.Textp = append(ctxt.Textp, lib.textp...) - for _, s := range lib.dupTextSyms { + ctxt.Textp = append(ctxt.Textp, lib.Textp...) + for _, s := range lib.DupTextSyms { if !s.Attr.OnList() { ctxt.Textp = append(ctxt.Textp, s) - s.Attr |= AttrOnList + s.Attr |= sym.AttrOnList // dupok symbols may be defined in multiple packages. its // associated package is chosen sort of arbitrarily (the // first containing package that the linker loads). 
canonicalize @@ -674,9 +631,9 @@ func (ctxt *Link) loadlib() { // We might have overwritten some functions above (this tends to happen for the // autogenerated type equality/hashing functions) and we don't want to generated // pcln table entries for these any more so remove them from Textp. - textp := make([]*Symbol, 0, len(ctxt.Textp)) + textp := make([]*sym.Symbol, 0, len(ctxt.Textp)) for _, s := range ctxt.Textp { - if s.Type != SDYNIMPORT { + if s.Type != sym.SDYNIMPORT { textp = append(textp, s) } } @@ -684,6 +641,41 @@ func (ctxt *Link) loadlib() { } } +// typeSymbolMangling reports whether the linker should shorten the +// names of symbols that represent Go types. +// +// As the names of these symbols are derived from the string of +// the type, they can run to many kilobytes long. So we shorten +// them using a SHA-1 when the name appears in the final binary. +// +// These are the symbols that begin with the prefix 'type.' and +// contain run-time type information used by the runtime and reflect +// packages. All Go binaries contain these symbols, but only only +// those programs loaded dynamically in multiple parts need these +// symbols to have entries in the symbol table. +func typeSymbolMangling(ctxt *Link) bool { + return ctxt.BuildMode == BuildModeShared || ctxt.linkShared || ctxt.BuildMode == BuildModePlugin || ctxt.Syms.ROLookup("plugin.Open", 0) != nil +} + +// typeSymbolMangle mangles the given symbol name into something shorter. +func typeSymbolMangle(name string) string { + if !strings.HasPrefix(name, "type.") { + return name + } + if strings.HasPrefix(name, "type.runtime.") { + return name + } + if len(name) <= 14 && !strings.Contains(name, "@") { // Issue 19529 + return name + } + hash := sha1.Sum([]byte(name)) + prefix := "type." + if name[5] == '.' { + prefix = "type.." + } + return prefix + base64.StdEncoding.EncodeToString(hash[:6]) +} + /* * look for the next file in an archive. * adapted from libmach. 
@@ -716,7 +708,7 @@ func nextar(bp *bio.Reader, off int64, a *ArHdr) int64 { return arsize + SAR_HDR } -func genhash(ctxt *Link, lib *Library) { +func genhash(ctxt *Link, lib *sym.Library) { f, err := bio.Open(lib.File) if err != nil { Errorf(nil, "cannot open file %s for hash generation: %v", lib.File, err) @@ -724,8 +716,17 @@ func genhash(ctxt *Link, lib *Library) { } defer f.Close() + var magbuf [len(ARMAG)]byte + if _, err := io.ReadFull(f, magbuf[:]); err != nil { + Exitf("file %s too short", lib.File) + } + + if string(magbuf[:]) != ARMAG { + Exitf("%s is not an archive file", lib.File) + } + var arhdr ArHdr - l := nextar(f, int64(len(ARMAG)), &arhdr) + l := nextar(f, f.Offset(), &arhdr) if l <= 0 { Errorf(nil, "%s: short read on archive file symbol header", lib.File) return @@ -736,7 +737,7 @@ func genhash(ctxt *Link, lib *Library) { // To compute the hash of a package, we hash the first line of // __.PKGDEF (which contains the toolchain version and any // GOEXPERIMENT flags) and the export data (which is between - // the first two occurences of "\n$$"). + // the first two occurrences of "\n$$"). 
pkgDefBytes := make([]byte, atolwhex(arhdr.size)) _, err = io.ReadFull(f, pkgDefBytes) @@ -744,7 +745,7 @@ func genhash(ctxt *Link, lib *Library) { Errorf(nil, "%s: error reading package data: %v", lib.File, err) return } - firstEOL := bytes.Index(pkgDefBytes, []byte("\n")) + firstEOL := bytes.IndexByte(pkgDefBytes, '\n') if firstEOL < 0 { Errorf(nil, "cannot parse package data of %s for hash generation, no newline found", lib.File) return @@ -761,10 +762,10 @@ func genhash(ctxt *Link, lib *Library) { } h.Write(pkgDefBytes[0:firstEOL]) h.Write(pkgDefBytes[firstDoubleDollar : firstDoubleDollar+secondDoubleDollar]) - lib.hash = hex.EncodeToString(h.Sum(nil)) + lib.Hash = hex.EncodeToString(h.Sum(nil)) } -func objfile(ctxt *Link, lib *Library) { +func loadobjfile(ctxt *Link, lib *sym.Library) { pkg := objabi.PathToPrefix(lib.Pkg) if ctxt.Debugvlog > 1 { @@ -801,7 +802,7 @@ func objfile(ctxt *Link, lib *Library) { goto out } - if !strings.HasPrefix(arhdr.name, pkgname) { + if !strings.HasPrefix(arhdr.name, pkgdef) { Errorf(nil, "%s: cannot find package header", lib.File) goto out } @@ -865,7 +866,7 @@ var internalpkg = []string{ "runtime/msan", } -func ldhostobj(ld func(*Link, *bio.Reader, string, int64, string), f *bio.Reader, pkg string, length int64, pn string, file string) *Hostobj { +func ldhostobj(ld func(*Link, *bio.Reader, string, int64, string), headType objabi.HeadType, f *bio.Reader, pkg string, length int64, pn string, file string) *Hostobj { isinternal := false for i := 0; i < len(internalpkg); i++ { if pkg == internalpkg[i] { @@ -880,7 +881,7 @@ func ldhostobj(ld func(*Link, *bio.Reader, string, int64, string), f *bio.Reader // force external linking for any libraries that link in code that // uses errno. This can be removed if the Go linker ever supports // these relocation types. 
- if Headtype == objabi.Hdragonfly { + if headType == objabi.Hdragonfly { if pkg == "net" || pkg == "os/user" { isinternal = false } @@ -923,8 +924,8 @@ func rmtemp() { os.RemoveAll(*flagTmpdir) } -func hostlinksetup() { - if Linkmode != LinkExternal { +func hostlinksetup(ctxt *Link) { + if ctxt.LinkMode != LinkExternal { return } @@ -945,7 +946,7 @@ func hostlinksetup() { } // change our output to temporary object file - coutbuf.f.Close() + ctxt.Out.f.Close() mayberemoveoutfile() p := filepath.Join(*flagTmpdir, "go.o") @@ -955,8 +956,9 @@ func hostlinksetup() { Exitf("cannot create %s: %v", p, err) } - coutbuf.w = bufio.NewWriter(f) - coutbuf.f = f + ctxt.Out.w = bufio.NewWriter(f) + ctxt.Out.f = f + ctxt.Out.off = 0 } // hostobjCopy creates a copy of the object files in hostobj in a @@ -1025,7 +1027,7 @@ INSERT AFTER .debug_types; // archive builds a .a archive from the hostobj object files. func (ctxt *Link) archive() { - if Buildmode != BuildmodeCArchive { + if ctxt.BuildMode != BuildModeCArchive { return } @@ -1037,11 +1039,11 @@ func (ctxt *Link) archive() { // Force the buffer to flush here so that external // tools will see a complete file. - Cflush() - if err := coutbuf.f.Close(); err != nil { + ctxt.Out.Flush() + if err := ctxt.Out.f.Close(); err != nil { Exitf("close: %v", err) } - coutbuf.f = nil + ctxt.Out.f = nil argv := []string{*flagExtar, "-q", "-c", "-s", *flagOutfile} argv = append(argv, filepath.Join(*flagTmpdir, "go.o")) @@ -1056,11 +1058,11 @@ func (ctxt *Link) archive() { } } -func (l *Link) hostlink() { - if Linkmode != LinkExternal || nerrors > 0 { +func (ctxt *Link) hostlink() { + if ctxt.LinkMode != LinkExternal || nerrors > 0 { return } - if Buildmode == BuildmodeCArchive { + if ctxt.BuildMode == BuildModeCArchive { return } @@ -1070,24 +1072,25 @@ func (l *Link) hostlink() { var argv []string argv = append(argv, *flagExtld) - argv = append(argv, hostlinkArchArgs()...) + argv = append(argv, hostlinkArchArgs(ctxt.Arch)...) 
- if !*FlagS && !debug_s { - argv = append(argv, "-gdwarf-2") - } else if Headtype == objabi.Hdarwin { - // Recent versions of macOS print - // ld: warning: option -s is obsolete and being ignored - // so do not pass any arguments. - } else { - argv = append(argv, "-s") + if *FlagS || debug_s { + if ctxt.HeadType == objabi.Hdarwin { + // Recent versions of macOS print + // ld: warning: option -s is obsolete and being ignored + // so do not pass any arguments. + } else { + argv = append(argv, "-s") + } } - switch Headtype { + switch ctxt.HeadType { case objabi.Hdarwin: argv = append(argv, "-Wl,-headerpad,1144") - if l.DynlinkingGo() { + if ctxt.DynlinkingGo() { argv = append(argv, "-Wl,-flat_namespace") - } else if !SysArch.InFamily(sys.ARM64) { + } + if ctxt.BuildMode == BuildModeExe && !ctxt.Arch.InFamily(sys.ARM64) { argv = append(argv, "-Wl,-no_pie") } case objabi.Hopenbsd: @@ -1100,49 +1103,61 @@ func (l *Link) hostlink() { } } - switch Buildmode { - case BuildmodeExe: - if Headtype == objabi.Hdarwin { - argv = append(argv, "-Wl,-pagezero_size,4000000") + switch ctxt.BuildMode { + case BuildModeExe: + if ctxt.HeadType == objabi.Hdarwin { + if ctxt.Arch.Family == sys.ARM64 { + // __PAGEZERO segment size determined empirically. + // XCode 9.0.1 successfully uploads an iOS app with this value. + argv = append(argv, "-Wl,-pagezero_size,100000000") + } else { + argv = append(argv, "-Wl,-pagezero_size,4000000") + } } - case BuildmodePIE: - if UseRelro() { - argv = append(argv, "-Wl,-z,relro") + case BuildModePIE: + // ELF. 
+ if ctxt.HeadType != objabi.Hdarwin { + if ctxt.UseRelro() { + argv = append(argv, "-Wl,-z,relro") + } + argv = append(argv, "-pie") } - argv = append(argv, "-pie") - case BuildmodeCShared: - if Headtype == objabi.Hdarwin { + case BuildModeCShared: + if ctxt.HeadType == objabi.Hdarwin { argv = append(argv, "-dynamiclib") - if SysArch.Family != sys.AMD64 { + if ctxt.Arch.Family != sys.AMD64 { argv = append(argv, "-Wl,-read_only_relocs,suppress") } } else { // ELF. argv = append(argv, "-Wl,-Bsymbolic") - if UseRelro() { + if ctxt.UseRelro() { argv = append(argv, "-Wl,-z,relro") } - // Pass -z nodelete to mark the shared library as - // non-closeable: a dlclose will do nothing. - argv = append(argv, "-shared", "-Wl,-z,nodelete") + argv = append(argv, "-shared") + if ctxt.HeadType != objabi.Hwindows { + // Pass -z nodelete to mark the shared library as + // non-closeable: a dlclose will do nothing. + argv = append(argv, "-Wl,-z,nodelete") + } } - case BuildmodeShared: - if UseRelro() { + case BuildModeShared: + if ctxt.UseRelro() { argv = append(argv, "-Wl,-z,relro") } argv = append(argv, "-shared") - case BuildmodePlugin: - if Headtype == objabi.Hdarwin { + case BuildModePlugin: + if ctxt.HeadType == objabi.Hdarwin { argv = append(argv, "-dynamiclib") } else { - if UseRelro() { + if ctxt.UseRelro() { argv = append(argv, "-Wl,-z,relro") } argv = append(argv, "-shared") } } - if Iself && l.DynlinkingGo() { + if ctxt.IsELF && ctxt.DynlinkingGo() { // We force all symbol resolution to be done at program startup // because lazy PLT resolution can use large amounts of stack at // times we cannot allow it to do so. @@ -1150,10 +1165,10 @@ func (l *Link) hostlink() { // Do not let the host linker generate COPY relocations. These // can move symbols out of sections that rely on stable offsets - // from the beginning of the section (like STYPE). + // from the beginning of the section (like sym.STYPE). 
argv = append(argv, "-Wl,-znocopyreloc") - if SysArch.InFamily(sys.ARM, sys.ARM64) { + if ctxt.Arch.InFamily(sys.ARM, sys.ARM64) { // On ARM, the GNU linker will generate COPY relocations // even with -znocopyreloc set. // https://sourceware.org/bugzilla/show_bug.cgi?id=19962 @@ -1176,7 +1191,7 @@ func (l *Link) hostlink() { } } - if Iself && len(buildinfo) > 0 { + if ctxt.IsELF && len(buildinfo) > 0 { argv = append(argv, fmt.Sprintf("-Wl,--build-id=0x%x", buildinfo)) } @@ -1198,7 +1213,7 @@ func (l *Link) hostlink() { } // Force global symbols to be exported for dlopen, etc. - if Iself { + if ctxt.IsELF { argv = append(argv, "-rdynamic") } @@ -1209,7 +1224,7 @@ func (l *Link) hostlink() { argv = append(argv, filepath.Join(*flagTmpdir, "go.o")) argv = append(argv, hostobjCopy()...) - if *FlagLinkshared { + if ctxt.linkShared { seenDirs := make(map[string]bool) seenLibs := make(map[string]bool) addshlib := func(path string) { @@ -1228,13 +1243,13 @@ func (l *Link) hostlink() { seenLibs[base] = true } } - for _, shlib := range l.Shlibs { + for _, shlib := range ctxt.Shlibs { addshlib(shlib.Path) for _, dep := range shlib.Deps { if dep == "" { continue } - libpath := findshlib(l, dep) + libpath := findshlib(ctxt, dep) if libpath != "" { addshlib(libpath) } @@ -1251,7 +1266,7 @@ func (l *Link) hostlink() { // does not work, the resulting programs will not run. See // issue #17847. To avoid this problem pass -no-pie to the // toolchain if it is supported. - if Buildmode == BuildmodeExe { + if ctxt.BuildMode == BuildModeExe { src := filepath.Join(*flagTmpdir, "trivial.c") if err := ioutil.WriteFile(src, []byte("int main() { return 0; }"), 0666); err != nil { Errorf(nil, "WriteFile trivial.c failed: %v", err) @@ -1282,7 +1297,7 @@ func (l *Link) hostlink() { // we added it. We do it in this order, rather than // only adding -rdynamic later, so that -*extldflags // can override -rdynamic without using -static. 
- if Iself && p == "-static" { + if ctxt.IsELF && p == "-static" { for i := range argv { if argv[i] == "-rdynamic" { argv[i] = "-static" @@ -1290,7 +1305,7 @@ func (l *Link) hostlink() { } } } - if Headtype == objabi.Hwindows { + if ctxt.HeadType == objabi.Hwindows { // use gcc linker script to work around gcc bug // (see https://golang.org/issue/20183 for details). p := writeGDBLinkerScript() @@ -1301,12 +1316,12 @@ func (l *Link) hostlink() { argv = append(argv, peimporteddlls()...) } - if l.Debugvlog != 0 { - l.Logf("%5.2f host link:", Cputime()) + if ctxt.Debugvlog != 0 { + ctxt.Logf("%5.2f host link:", Cputime()) for _, v := range argv { - l.Logf(" %q", v) + ctxt.Logf(" %q", v) } - l.Logf("\n") + ctxt.Logf("\n") } if out, err := exec.Command(argv[0], argv[1:]...).CombinedOutput(); err != nil { @@ -1314,12 +1329,12 @@ func (l *Link) hostlink() { } else if len(out) > 0 { // always print external output even if the command is successful, so that we don't // swallow linker warnings (see https://golang.org/issue/17935). - l.Logf("%s", out) + ctxt.Logf("%s", out) } - if !*FlagS && !*FlagW && !debug_s && Headtype == objabi.Hdarwin { + if !*FlagS && !*FlagW && !debug_s && ctxt.HeadType == objabi.Hdarwin { // Skip combining dwarf on arm. - if !SysArch.InFamily(sys.ARM, sys.ARM64) { + if !ctxt.Arch.InFamily(sys.ARM, sys.ARM64) { dsym := filepath.Join(*flagTmpdir, "go.dwarf") if out, err := exec.Command("dsymutil", "-f", *flagOutfile, "-o", dsym).CombinedOutput(); err != nil { Exitf("%s: running dsymutil failed: %v\n%s", os.Args[0], err, out) @@ -1330,7 +1345,7 @@ func (l *Link) hostlink() { } // For os.Rename to work reliably, must be in same directory as outfile. 
combinedOutput := *flagOutfile + "~" - if err := machoCombineDwarf(*flagOutfile, dsym, combinedOutput); err != nil { + if err := machoCombineDwarf(*flagOutfile, dsym, combinedOutput, ctxt.BuildMode); err != nil { Exitf("%s: combining dwarf failed: %v", os.Args[0], err) } os.Remove(*flagOutfile) @@ -1343,8 +1358,8 @@ func (l *Link) hostlink() { // hostlinkArchArgs returns arguments to pass to the external linker // based on the architecture. -func hostlinkArchArgs() []string { - switch SysArch.Family { +func hostlinkArchArgs(arch *sys.Arch) []string { + switch arch.Family { case sys.I386: return []string{"-m32"} case sys.AMD64, sys.PPC64, sys.S390X: @@ -1364,7 +1379,7 @@ func hostlinkArchArgs() []string { // ldobj loads an input object. If it is a host object (an object // compiled by a non-Go compiler) it returns the Hostobj pointer. If // it is a Go object, it returns nil. -func ldobj(ctxt *Link, f *bio.Reader, lib *Library, length int64, pn string, file string, whence int) *Hostobj { +func ldobj(ctxt *Link, f *bio.Reader, lib *sym.Library, length int64, pn string, file string, whence int) *Hostobj { pkg := objabi.PathToPrefix(lib.Pkg) eof := f.Offset() + length @@ -1377,15 +1392,43 @@ func ldobj(ctxt *Link, f *bio.Reader, lib *Library, length int64, pn string, fil magic := uint32(c1)<<24 | uint32(c2)<<16 | uint32(c3)<<8 | uint32(c4) if magic == 0x7f454c46 { // \x7F E L F - return ldhostobj(ldelf, f, pkg, length, pn, file) + ldelf := func(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { + textp, flags, err := loadelf.Load(ctxt.Arch, ctxt.Syms, f, pkg, length, pn, ehdr.flags) + if err != nil { + Errorf(nil, "%v", err) + return + } + ehdr.flags = flags + ctxt.Textp = append(ctxt.Textp, textp...) 
+ } + return ldhostobj(ldelf, ctxt.HeadType, f, pkg, length, pn, file) } if magic&^1 == 0xfeedface || magic&^0x01000000 == 0xcefaedfe { - return ldhostobj(ldmacho, f, pkg, length, pn, file) + ldmacho := func(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { + textp, err := loadmacho.Load(ctxt.Arch, ctxt.Syms, f, pkg, length, pn) + if err != nil { + Errorf(nil, "%v", err) + return + } + ctxt.Textp = append(ctxt.Textp, textp...) + } + return ldhostobj(ldmacho, ctxt.HeadType, f, pkg, length, pn, file) } if c1 == 0x4c && c2 == 0x01 || c1 == 0x64 && c2 == 0x86 { - return ldhostobj(ldpe, f, pkg, length, pn, file) + ldpe := func(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { + textp, rsrc, err := loadpe.Load(ctxt.Arch, ctxt.Syms, f, pkg, length, pn) + if err != nil { + Errorf(nil, "%v", err) + return + } + if rsrc != nil { + setpersrc(ctxt, rsrc) + } + ctxt.Textp = append(ctxt.Textp, textp...) + } + return ldhostobj(ldpe, ctxt.HeadType, f, pkg, length, pn, file) } /* check the header */ @@ -1401,7 +1444,7 @@ func ldobj(ctxt *Link, f *bio.Reader, lib *Library, length int64, pn string, fil return nil } - if line == SysArch.Name { + if line == ctxt.Arch.Name { // old header format: just $GOOS Errorf(nil, "%s: stale object file", pn) return nil @@ -1432,13 +1475,29 @@ func ldobj(ctxt *Link, f *bio.Reader, lib *Library, length int64, pn string, fil } } - /* skip over exports and other info -- ends with \n!\n */ + // Skip over exports and other info -- ends with \n!\n. + // + // Note: It's possible for "\n!\n" to appear within the binary + // package export data format. To avoid truncating the package + // definition prematurely (issue 21703), we keep keep track of + // how many "$$" delimiters we've seen. + import0 := f.Offset() c1 = '\n' // the last line ended in \n c2 = bgetc(f) c3 = bgetc(f) - for c1 != '\n' || c2 != '!' || c3 != '\n' { + markers := 0 + for { + if c1 == '\n' { + if markers%2 == 0 && c2 == '!' 
&& c3 == '\n' { + break + } + if c2 == '$' && c3 == '$' { + markers++ + } + } + c1 = c2 c2 = c3 c3 = bgetc(f) @@ -1454,7 +1513,8 @@ func ldobj(ctxt *Link, f *bio.Reader, lib *Library, length int64, pn string, fil ldpkg(ctxt, f, pkg, import1-import0-2, pn, whence) // -2 for !\n f.Seek(import1, 0) - LoadObjFile(ctxt, f, lib, eof-f.Offset(), pn) + objfile.Load(ctxt.Arch, ctxt.Syms, f, lib, eof-f.Offset(), pn) + addImports(ctxt, lib, pn) return nil } @@ -1521,6 +1581,9 @@ func readnote(f *elf.File, name []byte, typ int32) ([]byte, error) { } func findshlib(ctxt *Link, shlib string) string { + if filepath.IsAbs(shlib) { + return shlib + } for _, libdir := range ctxt.Libdir { libpath := filepath.Join(libdir, shlib) if _, err := os.Stat(libpath); err == nil { @@ -1532,9 +1595,15 @@ func findshlib(ctxt *Link, shlib string) string { } func ldshlibsyms(ctxt *Link, shlib string) { - libpath := findshlib(ctxt, shlib) - if libpath == "" { - return + var libpath string + if filepath.IsAbs(shlib) { + libpath = shlib + shlib = filepath.Base(shlib) + } else { + libpath = findshlib(ctxt, shlib) + if libpath == "" { + return + } } for _, processedlib := range ctxt.Shlibs { if processedlib.Path == libpath { @@ -1550,6 +1619,7 @@ func ldshlibsyms(ctxt *Link, shlib string) { Errorf(nil, "cannot open shared library: %s", libpath) return } + defer f.Close() hash, err := readnote(f, ELF_NOTE_GO_NAME, ELF_NOTE_GOABIHASH_TAG) if err != nil { @@ -1562,14 +1632,29 @@ func ldshlibsyms(ctxt *Link, shlib string) { Errorf(nil, "cannot read dep list from shared library %s: %v", libpath, err) return } - deps := strings.Split(string(depsbytes), "\n") + var deps []string + for _, dep := range strings.Split(string(depsbytes), "\n") { + if dep == "" { + continue + } + if !filepath.IsAbs(dep) { + // If the dep can be interpreted as a path relative to the shlib + // in which it was found, do that. Otherwise, we will leave it + // to be resolved by libdir lookup. 
+ abs := filepath.Join(filepath.Dir(libpath), dep) + if _, err := os.Stat(abs); err == nil { + dep = abs + } + } + deps = append(deps, dep) + } syms, err := f.DynamicSymbols() if err != nil { Errorf(nil, "cannot read symbols from shared library: %s", libpath) return } - gcdataLocations := make(map[uint64]*Symbol) + gcdataLocations := make(map[uint64]*sym.Symbol) for _, elfsym := range syms { if elf.ST_TYPE(elfsym.Info) == elf.STT_NOTYPE || elf.ST_TYPE(elfsym.Info) == elf.STT_SECTION { continue @@ -1579,10 +1664,10 @@ func ldshlibsyms(ctxt *Link, shlib string) { // libraries, any non-dynimport symbols we find that duplicate symbols // already loaded should be ignored (the symbols from the .a files // "win"). - if lsym.Type != 0 && lsym.Type != SDYNIMPORT { + if lsym.Type != 0 && lsym.Type != sym.SDYNIMPORT { continue } - lsym.Type = SDYNIMPORT + lsym.Type = sym.SDYNIMPORT lsym.ElfType = elf.ST_TYPE(elfsym.Info) lsym.Size = int64(elfsym.Size) if elfsym.Section != elf.SHN_UNDEF { @@ -1592,12 +1677,12 @@ func ldshlibsyms(ctxt *Link, shlib string) { // the type data. 
if strings.HasPrefix(lsym.Name, "type.") && !strings.HasPrefix(lsym.Name, "type..") { lsym.P = readelfsymboldata(ctxt, f, &elfsym) - gcdataLocations[elfsym.Value+2*uint64(SysArch.PtrSize)+8+1*uint64(SysArch.PtrSize)] = lsym + gcdataLocations[elfsym.Value+2*uint64(ctxt.Arch.PtrSize)+8+1*uint64(ctxt.Arch.PtrSize)] = lsym } } } - gcdataAddresses := make(map[*Symbol]uint64) - if SysArch.Family == sys.ARM64 { + gcdataAddresses := make(map[*sym.Symbol]uint64) + if ctxt.Arch.Family == sys.ARM64 { for _, sect := range f.Sections { if sect.Type == elf.SHT_RELA { var rela elf.Rela64 @@ -1625,12 +1710,12 @@ func ldshlibsyms(ctxt *Link, shlib string) { ctxt.Shlibs = append(ctxt.Shlibs, Shlib{Path: libpath, Hash: hash, Deps: deps, File: f, gcdataAddresses: gcdataAddresses}) } -func addsection(seg *Segment, name string, rwx int) *Section { - sect := new(Section) +func addsection(arch *sys.Arch, seg *sym.Segment, name string, rwx int) *sym.Section { + sect := new(sym.Section) sect.Rwx = uint8(rwx) sect.Name = name sect.Seg = seg - sect.Align = int32(SysArch.PtrSize) // everything is at least pointer-aligned + sect.Align = int32(arch.PtrSize) // everything is at least pointer-aligned seg.Sections = append(seg.Sections, sect) return sect } @@ -1656,12 +1741,12 @@ func Be32(b []byte) uint32 { } type chain struct { - sym *Symbol + sym *sym.Symbol up *chain limit int // limit on entry to sym } -var morestack *Symbol +var morestack *sym.Symbol // TODO: Record enough information in new object files to // allow stack checks here. 
@@ -1674,7 +1759,7 @@ func callsize(ctxt *Link) int { if haslinkregister(ctxt) { return 0 } - return SysArch.RegSize + return ctxt.Arch.RegSize } func (ctxt *Link) dostkcheck() { @@ -1728,7 +1813,7 @@ func stkcheck(ctxt *Link, up *chain, depth int) int { if s.Attr.StackCheck() { return 0 } - s.Attr |= AttrStackCheck + s.Attr |= sym.AttrStackCheck } if depth > 100 { @@ -1742,8 +1827,8 @@ func stkcheck(ctxt *Link, up *chain, depth int) int { // should never be called directly. // onlyctxt.Diagnose the direct caller. // TODO(mwhudson): actually think about this. - if depth == 1 && s.Type != SXREF && !ctxt.DynlinkingGo() && - Buildmode != BuildmodeCArchive && Buildmode != BuildmodePIE && Buildmode != BuildmodeCShared && Buildmode != BuildmodePlugin { + if depth == 1 && s.Type != sym.SXREF && !ctxt.DynlinkingGo() && + ctxt.BuildMode != BuildModeCArchive && ctxt.BuildMode != BuildModePIE && ctxt.BuildMode != BuildModeCShared && ctxt.BuildMode != BuildModePlugin { Errorf(s, "call to external function") } @@ -1788,7 +1873,7 @@ func stkcheck(ctxt *Link, up *chain, depth int) int { endr := len(s.R) var ch1 chain var pcsp Pciter - var r *Reloc + var r *sym.Reloc for pciterinit(ctxt, &pcsp, &s.FuncInfo.Pcsp); pcsp.done == 0; pciternext(&pcsp) { // pcsp.value is in effect for [pcsp.pc, pcsp.nextpc). 
@@ -1867,36 +1952,6 @@ func stkprint(ctxt *Link, ch *chain, limit int) { } } -func Cflush() { - if err := coutbuf.w.Flush(); err != nil { - Exitf("flushing %s: %v", coutbuf.f.Name(), err) - } -} - -func Cseek(p int64) { - if p == coutbuf.off { - return - } - Cflush() - if _, err := coutbuf.f.Seek(p, 0); err != nil { - Exitf("seeking in output [0, 1]: %v", err) - } - coutbuf.off = p -} - -func Cwritestring(s string) { - coutbuf.WriteString(s) -} - -func Cwrite(p []byte) { - coutbuf.Write(p) -} - -func Cput(c uint8) { - coutbuf.w.WriteByte(c) - coutbuf.off++ -} - func usage() { fmt.Fprintf(os.Stderr, "usage: link [options] main.o\n") objabi.Flagprint(2) @@ -1910,23 +1965,31 @@ func doversion() { type SymbolType int8 const ( + // see also http://9p.io/magic/man2html/1/nm TextSym SymbolType = 'T' DataSym = 'D' BSSSym = 'B' UndefinedSym = 'U' TLSSym = 't' - FileSym = 'f' FrameSym = 'm' ParamSym = 'p' AutoSym = 'a' + + // Deleted auto (not a real sym, just placeholder for type) + DeletedAutoSym = 'x' ) -func genasmsym(ctxt *Link, put func(*Link, *Symbol, string, SymbolType, int64, *Symbol)) { +func genasmsym(ctxt *Link, put func(*Link, *sym.Symbol, string, SymbolType, int64, *sym.Symbol)) { // These symbols won't show up in the first loop below because we - // skip STEXT symbols. Normal STEXT symbols are emitted by walking textp. + // skip sym.STEXT symbols. Normal sym.STEXT symbols are emitted by walking textp. s := ctxt.Syms.Lookup("runtime.text", 0) - if s.Type == STEXT { - put(ctxt, s, s.Name, TextSym, s.Value, nil) + if s.Type == sym.STEXT { + // We've already included this symbol in ctxt.Textp + // if ctxt.DynlinkingGo() && ctxt.HeadType == objabi.Hdarwin. 
+ // See data.go:/textaddress + if !(ctxt.DynlinkingGo() && ctxt.HeadType == objabi.Hdarwin) { + put(ctxt, s, s.Name, TextSym, s.Value, nil) + } } n := 0 @@ -1944,15 +2007,20 @@ func genasmsym(ctxt *Link, put func(*Link, *Symbol, string, SymbolType, int64, * if s == nil { break } - if s.Type == STEXT { + if s.Type == sym.STEXT { put(ctxt, s, s.Name, TextSym, s.Value, nil) } n++ } s = ctxt.Syms.Lookup("runtime.etext", 0) - if s.Type == STEXT { - put(ctxt, s, s.Name, TextSym, s.Value, nil) + if s.Type == sym.STEXT { + // We've already included this symbol in ctxt.Textp + // if ctxt.DynlinkingGo() && ctxt.HeadType == objabi.Hdarwin. + // See data.go:/textaddress + if !(ctxt.DynlinkingGo() && ctxt.HeadType == objabi.Hdarwin) { + put(ctxt, s, s.Name, TextSym, s.Value, nil) + } } for _, s := range ctxt.Syms.Allsym { @@ -1962,36 +2030,36 @@ func genasmsym(ctxt *Link, put func(*Link, *Symbol, string, SymbolType, int64, * if (s.Name == "" || s.Name[0] == '.') && s.Version == 0 && s.Name != ".rathole" && s.Name != ".TOC." 
{ continue } - switch s.Type & SMASK { - case SCONST, - SRODATA, - SSYMTAB, - SPCLNTAB, - SINITARR, - SDATA, - SNOPTRDATA, - SELFROSECT, - SMACHOGOT, - STYPE, - SSTRING, - SGOSTRING, - SGOFUNC, - SGCBITS, - STYPERELRO, - SSTRINGRELRO, - SGOSTRINGRELRO, - SGOFUNCRELRO, - SGCBITSRELRO, - SRODATARELRO, - STYPELINK, - SITABLINK, - SWINDOWS: + switch s.Type { + case sym.SCONST, + sym.SRODATA, + sym.SSYMTAB, + sym.SPCLNTAB, + sym.SINITARR, + sym.SDATA, + sym.SNOPTRDATA, + sym.SELFROSECT, + sym.SMACHOGOT, + sym.STYPE, + sym.SSTRING, + sym.SGOSTRING, + sym.SGOFUNC, + sym.SGCBITS, + sym.STYPERELRO, + sym.SSTRINGRELRO, + sym.SGOSTRINGRELRO, + sym.SGOFUNCRELRO, + sym.SGCBITSRELRO, + sym.SRODATARELRO, + sym.STYPELINK, + sym.SITABLINK, + sym.SWINDOWS: if !s.Attr.Reachable() { continue } put(ctxt, s, s.Name, DataSym, Symaddr(s), s.Gotype) - case SBSS, SNOPTRBSS: + case sym.SBSS, sym.SNOPTRBSS: if !s.Attr.Reachable() { continue } @@ -2000,22 +2068,19 @@ func genasmsym(ctxt *Link, put func(*Link, *Symbol, string, SymbolType, int64, * } put(ctxt, s, s.Name, BSSSym, Symaddr(s), s.Gotype) - case SFILE: - put(ctxt, nil, s.Name, FileSym, s.Value, nil) - - case SHOSTOBJ: - if Headtype == objabi.Hwindows || Iself { + case sym.SHOSTOBJ: + if ctxt.HeadType == objabi.Hwindows || ctxt.IsELF { put(ctxt, s, s.Name, UndefinedSym, s.Value, nil) } - case SDYNIMPORT: + case sym.SDYNIMPORT: if !s.Attr.Reachable() { continue } put(ctxt, s, s.Extname, UndefinedSym, 0, nil) - case STLSBSS: - if Linkmode == LinkExternal { + case sym.STLSBSS: + if ctxt.LinkMode == LinkExternal { put(ctxt, s, s.Name, TLSSym, Symaddr(s), s.Gotype) } } @@ -2030,12 +2095,17 @@ func genasmsym(ctxt *Link, put func(*Link, *Symbol, string, SymbolType, int64, * locals = s.FuncInfo.Locals } // NOTE(ality): acid can't produce a stack trace without .frame symbols - put(ctxt, nil, ".frame", FrameSym, int64(locals)+int64(SysArch.PtrSize), nil) + put(ctxt, nil, ".frame", FrameSym, int64(locals)+int64(ctxt.Arch.PtrSize), nil) if 
s.FuncInfo == nil { continue } for _, a := range s.FuncInfo.Autom { + if a.Name == objabi.A_DELETED_AUTO { + put(ctxt, nil, "", DeletedAutoSym, 0, a.Gotype) + continue + } + // Emit a or p according to actual offset, even if label is wrong. // This avoids negative offsets, which cannot be encoded. if a.Name != objabi.A_AUTO && a.Name != objabi.A_PARAM { @@ -2046,7 +2116,7 @@ func genasmsym(ctxt *Link, put func(*Link, *Symbol, string, SymbolType, int64, * if a.Name == objabi.A_PARAM { off = a.Aoffset } else { - off = a.Aoffset - int32(SysArch.PtrSize) + off = a.Aoffset - int32(ctxt.Arch.PtrSize) } // FP @@ -2056,8 +2126,8 @@ func genasmsym(ctxt *Link, put func(*Link, *Symbol, string, SymbolType, int64, * } // SP - if off <= int32(-SysArch.PtrSize) { - put(ctxt, nil, a.Asym.Name, AutoSym, -(int64(off) + int64(SysArch.PtrSize)), a.Gotype) + if off <= int32(-ctxt.Arch.PtrSize) { + put(ctxt, nil, a.Asym.Name, AutoSym, -(int64(off) + int64(ctxt.Arch.PtrSize)), a.Gotype) continue } // Otherwise, off is addressing the saved program counter. 
@@ -2070,23 +2140,23 @@ func genasmsym(ctxt *Link, put func(*Link, *Symbol, string, SymbolType, int64, * } } -func Symaddr(s *Symbol) int64 { +func Symaddr(s *sym.Symbol) int64 { if !s.Attr.Reachable() { Errorf(s, "unreachable symbol in symaddr") } return s.Value } -func (ctxt *Link) xdefine(p string, t SymKind, v int64) { +func (ctxt *Link) xdefine(p string, t sym.SymKind, v int64) { s := ctxt.Syms.Lookup(p, 0) s.Type = t s.Value = v - s.Attr |= AttrReachable - s.Attr |= AttrSpecial - s.Attr |= AttrLocal + s.Attr |= sym.AttrReachable + s.Attr |= sym.AttrSpecial + s.Attr |= sym.AttrLocal } -func datoff(s *Symbol, addr int64) int64 { +func datoff(s *sym.Symbol, addr int64) int64 { if uint64(addr) >= Segdata.Vaddr { return int64(uint64(addr) - Segdata.Vaddr + Segdata.Fileoff) } @@ -2106,21 +2176,23 @@ func Entryvalue(ctxt *Link) int64 { if s.Type == 0 { return *FlagTextAddr } - if s.Type != STEXT { + if s.Type != sym.STEXT { Errorf(s, "entry not text") } return s.Value } -func undefsym(ctxt *Link, s *Symbol) { - var r *Reloc +func undefsym(ctxt *Link, s *sym.Symbol) { + var r *sym.Reloc for i := 0; i < len(s.R); i++ { r = &s.R[i] if r.Sym == nil { // happens for some external ARM relocs continue } - if r.Sym.Type == Sxxx || r.Sym.Type == SXREF { + // TODO(mwhudson): the test of VisibilityHidden here probably doesn't make + // sense and should be removed when someone has thought about it properly. 
+ if (r.Sym.Type == sym.Sxxx || r.Sym.Type == sym.SXREF) && !r.Sym.Attr.VisibilityHidden() { Errorf(s, "undefined: %q", r.Sym.Name) } if !r.Sym.Attr.Reachable() && r.Type != objabi.R_WEAKADDROFF { @@ -2147,14 +2219,14 @@ func (ctxt *Link) callgraph() { } var i int - var r *Reloc + var r *sym.Reloc for _, s := range ctxt.Textp { for i = 0; i < len(s.R); i++ { r = &s.R[i] if r.Sym == nil { continue } - if (r.Type == objabi.R_CALL || r.Type == objabi.R_CALLARM || r.Type == objabi.R_CALLPOWER || r.Type == objabi.R_CALLMIPS) && r.Sym.Type == STEXT { + if (r.Type == objabi.R_CALL || r.Type == objabi.R_CALLARM || r.Type == objabi.R_CALLPOWER || r.Type == objabi.R_CALLMIPS) && r.Sym.Type == sym.STEXT { ctxt.Logf("%s calls %s\n", s.Name, r.Sym.Name) } } @@ -2192,16 +2264,16 @@ const ( visited ) -func postorder(libs []*Library) []*Library { - order := make([]*Library, 0, len(libs)) // hold the result - mark := make(map[*Library]markKind, len(libs)) +func postorder(libs []*sym.Library) []*sym.Library { + order := make([]*sym.Library, 0, len(libs)) // hold the result + mark := make(map[*sym.Library]markKind, len(libs)) for _, lib := range libs { dfs(lib, mark, &order) } return order } -func dfs(lib *Library, mark map[*Library]markKind, order *[]*Library) { +func dfs(lib *sym.Library, mark map[*sym.Library]markKind, order *[]*sym.Library) { if mark[lib] == visited { return } @@ -2209,7 +2281,7 @@ func dfs(lib *Library, mark map[*Library]markKind, order *[]*Library) { panic("found import cycle while visiting " + lib.Pkg) } mark[lib] = visiting - for _, i := range lib.imports { + for _, i := range lib.Imports { dfs(i, mark, order) } mark[lib] = visited diff --git a/src/cmd/link/internal/ld/link.go b/src/cmd/link/internal/ld/link.go index 302364c2993..a413353b9fb 100644 --- a/src/cmd/link/internal/ld/link.go +++ b/src/cmd/link/internal/ld/link.go @@ -34,185 +34,25 @@ import ( "bufio" "cmd/internal/objabi" "cmd/internal/sys" + "cmd/link/internal/sym" "debug/elf" "fmt" ) -// Symbol 
is an entry in the symbol table. -type Symbol struct { - Name string - Extname string - Type SymKind - Version int16 - Attr Attribute - Localentry uint8 - Dynid int32 - Plt int32 - Got int32 - Align int32 - Elfsym int32 - LocalElfsym int32 - Value int64 - Size int64 - // ElfType is set for symbols read from shared libraries by ldshlibsyms. It - // is not set for symbols defined by the packages being linked or by symbols - // read by ldelf (and so is left as elf.STT_NOTYPE). - ElfType elf.SymType - Sub *Symbol - Outer *Symbol - Gotype *Symbol - Reachparent *Symbol - File string - Dynimplib string - Dynimpvers string - Sect *Section - FuncInfo *FuncInfo - // P contains the raw symbol data. - P []byte - R []Reloc -} - -func (s *Symbol) String() string { - if s.Version == 0 { - return s.Name - } - return fmt.Sprintf("%s<%d>", s.Name, s.Version) -} - -func (s *Symbol) ElfsymForReloc() int32 { - // If putelfsym created a local version of this symbol, use that in all - // relocations. - if s.LocalElfsym != 0 { - return s.LocalElfsym - } else { - return s.Elfsym - } -} - -func (s *Symbol) Len() int64 { - return s.Size -} - -// Attribute is a set of common symbol attributes. -type Attribute int16 - -const ( - // AttrDuplicateOK marks a symbol that can be present in multiple object - // files. - AttrDuplicateOK Attribute = 1 << iota - // AttrExternal marks function symbols loaded from host object files. - AttrExternal - // AttrNoSplit marks functions that cannot split the stack; the linker - // cares because it checks that there are no call chains of nosplit - // functions that require more than StackLimit bytes (see - // lib.go:dostkcheck) - AttrNoSplit - // AttrReachable marks symbols that are transitively referenced from the - // entry points. Unreachable symbols are not written to the output. - AttrReachable - // AttrCgoExportDynamic and AttrCgoExportStatic mark symbols referenced - // by directives written by cgo (in response to //export directives in - // the source). 
- AttrCgoExportDynamic - AttrCgoExportStatic - // AttrSpecial marks symbols that do not have their address (i.e. Value) - // computed by the usual mechanism of data.go:dodata() & - // data.go:address(). - AttrSpecial - // AttrStackCheck is used by dostkcheck to only check each NoSplit - // function's stack usage once. - AttrStackCheck - // AttrNotInSymbolTable marks symbols that are not written to the symbol table. - AttrNotInSymbolTable - // AttrOnList marks symbols that are on some list (such as the list of - // all text symbols, or one of the lists of data symbols) and is - // consulted to avoid bugs where a symbol is put on a list twice. - AttrOnList - // AttrLocal marks symbols that are only visible within the module - // (exectuable or shared library) being linked. Only relevant when - // dynamically linking Go code. - AttrLocal - // AttrReflectMethod marks certain methods from the reflect package that - // can be used to call arbitrary methods. If no symbol with this bit set - // is marked as reachable, more dead code elimination can be done. - AttrReflectMethod - // AttrMakeTypelink Amarks types that should be added to the typelink - // table. See typelinks.go:typelinks(). - AttrMakeTypelink - // AttrShared marks symbols compiled with the -shared option. - AttrShared - // 14 attributes defined so far. 
-) - -func (a Attribute) DuplicateOK() bool { return a&AttrDuplicateOK != 0 } -func (a Attribute) External() bool { return a&AttrExternal != 0 } -func (a Attribute) NoSplit() bool { return a&AttrNoSplit != 0 } -func (a Attribute) Reachable() bool { return a&AttrReachable != 0 } -func (a Attribute) CgoExportDynamic() bool { return a&AttrCgoExportDynamic != 0 } -func (a Attribute) CgoExportStatic() bool { return a&AttrCgoExportStatic != 0 } -func (a Attribute) Special() bool { return a&AttrSpecial != 0 } -func (a Attribute) StackCheck() bool { return a&AttrStackCheck != 0 } -func (a Attribute) NotInSymbolTable() bool { return a&AttrNotInSymbolTable != 0 } -func (a Attribute) OnList() bool { return a&AttrOnList != 0 } -func (a Attribute) Local() bool { return a&AttrLocal != 0 } -func (a Attribute) ReflectMethod() bool { return a&AttrReflectMethod != 0 } -func (a Attribute) MakeTypelink() bool { return a&AttrMakeTypelink != 0 } -func (a Attribute) Shared() bool { return a&AttrShared != 0 } - -func (a Attribute) CgoExport() bool { - return a.CgoExportDynamic() || a.CgoExportStatic() -} - -func (a *Attribute) Set(flag Attribute, value bool) { - if value { - *a |= flag - } else { - *a &^= flag - } -} - -// Reloc is a relocation. -// -// The typical Reloc rewrites part of a symbol at offset Off to address Sym. -// A Reloc is stored in a slice on the Symbol it rewrites. -// -// Relocations are generated by the compiler as the type -// cmd/internal/obj.Reloc, which is encoded into the object file wire -// format and decoded by the linker into this type. A separate type is -// used to hold linker-specific state about the relocation. -// -// Some relocations are created by cmd/link. 
-type Reloc struct { - Off int32 // offset to rewrite - Siz uint8 // number of bytes to rewrite, 1, 2, or 4 - Done uint8 // set to 1 when relocation is complete - Variant RelocVariant // variation on Type - Type objabi.RelocType // the relocation type - Add int64 // addend - Xadd int64 // addend passed to external linker - Sym *Symbol // symbol the relocation addresses - Xsym *Symbol // symbol passed to external linker -} - -type Auto struct { - Asym *Symbol - Gotype *Symbol - Aoffset int32 - Name int16 -} - type Shlib struct { Path string Hash []byte Deps []string File *elf.File - gcdataAddresses map[*Symbol]uint64 + gcdataAddresses map[*sym.Symbol]uint64 } // Link holds the context for writing object code from a compiler // or for reading that input into the linker. type Link struct { - Syms *Symbols + Out *OutBuf + + Syms *sym.Symbols Arch *sys.Arch Debugvlog int @@ -220,20 +60,27 @@ type Link struct { Loaded bool // set after all inputs have been loaded as symbols - Tlsg *Symbol + IsELF bool + HeadType objabi.HeadType + + linkShared bool // link against installed Go shared libraries + LinkMode LinkMode + BuildMode BuildMode + + Tlsg *sym.Symbol Libdir []string - Library []*Library - LibraryByPkg map[string]*Library + Library []*sym.Library + LibraryByPkg map[string]*sym.Library Shlibs []Shlib Tlsoffset int - Textp []*Symbol - Filesyms []*Symbol - Moduledata *Symbol + Textp []*sym.Symbol + Filesyms []*sym.Symbol + Moduledata *sym.Symbol PackageFile map[string]string PackageShlib map[string]string - tramps []*Symbol // trampolines + tramps []*sym.Symbol // trampolines } // The smallest possible offset from the hardware stack pointer to a local @@ -253,56 +100,24 @@ func (ctxt *Link) FixedFrameSize() int64 { } } -func (l *Link) Logf(format string, args ...interface{}) { - fmt.Fprintf(l.Bso, format, args...) - l.Bso.Flush() +func (ctxt *Link) Logf(format string, args ...interface{}) { + fmt.Fprintf(ctxt.Bso, format, args...) 
+ ctxt.Bso.Flush() } -type Library struct { - Objref string - Srcref string - File string - Pkg string - Shlib string - hash string - imports []*Library - textp []*Symbol // text symbols defined in this library - dupTextSyms []*Symbol // dupok text symbols defined in this library -} - -func (l Library) String() string { - return l.Pkg -} - -type FuncInfo struct { - Args int32 - Locals int32 - Autom []Auto - Pcsp Pcdata - Pcfile Pcdata - Pcline Pcdata - Pcinline Pcdata - Pcdata []Pcdata - Funcdata []*Symbol - Funcdataoff []int64 - File []*Symbol - InlTree []InlinedCall -} - -// InlinedCall is a node in a local inlining tree (FuncInfo.InlTree). -type InlinedCall struct { - Parent int32 // index of parent in InlTree - File *Symbol // file of the inlined call - Line int32 // line number of the inlined call - Func *Symbol // function that was inlined -} - -type Pcdata struct { - P []byte +func addImports(ctxt *Link, l *sym.Library, pn string) { + pkg := objabi.PathToPrefix(l.Pkg) + for _, importStr := range l.ImportStrings { + lib := addlib(ctxt, pkg, pn, importStr) + if lib != nil { + l.Imports = append(l.Imports, lib) + } + } + l.ImportStrings = nil } type Pciter struct { - d Pcdata + d sym.Pcdata p []byte pc uint32 nextpc uint32 @@ -311,22 +126,3 @@ type Pciter struct { start int done int } - -// RelocVariant is a linker-internal variation on a relocation. -type RelocVariant uint8 - -const ( - RV_NONE RelocVariant = iota - RV_POWER_LO - RV_POWER_HI - RV_POWER_HA - RV_POWER_DS - - // RV_390_DBL is a s390x-specific relocation variant that indicates that - // the value to be placed into the relocatable field should first be - // divided by 2. 
- RV_390_DBL - - RV_CHECK_OVERFLOW RelocVariant = 1 << 7 - RV_TYPE_MASK RelocVariant = RV_CHECK_OVERFLOW - 1 -) diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go index 59c81a60d95..2b38ec00008 100644 --- a/src/cmd/link/internal/ld/macho.go +++ b/src/cmd/link/internal/ld/macho.go @@ -5,7 +5,9 @@ package ld import ( + "cmd/internal/objabi" "cmd/internal/sys" + "cmd/link/internal/sym" "sort" "strings" ) @@ -90,6 +92,56 @@ const ( MACHO_FAKE_GOTPCREL = 100 ) +const ( + MH_MAGIC = 0xfeedface + MH_MAGIC_64 = 0xfeedfacf + + MH_OBJECT = 0x1 + MH_EXECUTE = 0x2 + + MH_NOUNDEFS = 0x1 +) + +const ( + LC_SEGMENT = 0x1 + LC_SYMTAB = 0x2 + LC_UNIXTHREAD = 0x5 + LC_DYSYMTAB = 0xb + LC_LOAD_DYLIB = 0xc + LC_ID_DYLIB = 0xd + LC_LOAD_DYLINKER = 0xe + LC_PREBOUND_DYLIB = 0x10 + LC_LOAD_WEAK_DYLIB = 0x18 + LC_SEGMENT_64 = 0x19 + LC_UUID = 0x1b + LC_RPATH = 0x8000001c + LC_CODE_SIGNATURE = 0x1d + LC_SEGMENT_SPLIT_INFO = 0x1e + LC_REEXPORT_DYLIB = 0x8000001f + LC_ENCRYPTION_INFO = 0x21 + LC_DYLD_INFO = 0x22 + LC_DYLD_INFO_ONLY = 0x80000022 + LC_VERSION_MIN_MACOSX = 0x24 + LC_VERSION_MIN_IPHONEOS = 0x25 + LC_FUNCTION_STARTS = 0x26 + LC_MAIN = 0x80000028 + LC_DATA_IN_CODE = 0x29 + LC_SOURCE_VERSION = 0x2A + LC_DYLIB_CODE_SIGN_DRS = 0x2B + LC_ENCRYPTION_INFO_64 = 0x2C +) + +const ( + S_REGULAR = 0x0 + S_ZEROFILL = 0x1 + S_NON_LAZY_SYMBOL_POINTERS = 0x6 + S_SYMBOL_STUBS = 0x8 + S_MOD_INIT_FUNC_POINTERS = 0x9 + S_ATTR_PURE_INSTRUCTIONS = 0x80000000 + S_ATTR_DEBUG = 0x02000000 + S_ATTR_SOME_INSTRUCTIONS = 0x00000400 +) + // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -97,8 +149,6 @@ const ( // Mach-O file writing // http://developer.apple.com/mac/library/DOCUMENTATION/DeveloperTools/Conceptual/MachORuntime/Reference/reference.html -var macho64 bool - var machohdr MachoHdr var load []MachoLoad @@ -120,7 +170,7 @@ const ( var nkind [NumSymKind]int -var sortsym []*Symbol +var sortsym []*sym.Symbol var nsortsym int @@ -130,18 +180,14 @@ var nsortsym int // "big enough" header size. The initial header is // one page, the non-dynamic library stuff takes // up about 1300 bytes; we overestimate that as 2k. -var loadBudget int = INITIAL_MACHO_HEADR - 2*1024 - -func Machoinit() { - macho64 = SysArch.RegSize == 8 -} +var loadBudget = INITIAL_MACHO_HEADR - 2*1024 func getMachoHdr() *MachoHdr { return &machohdr } -func newMachoLoad(type_ uint32, ndata uint32) *MachoLoad { - if macho64 && (ndata&1 != 0) { +func newMachoLoad(arch *sys.Arch, type_ uint32, ndata uint32) *MachoLoad { + if arch.PtrSize == 8 && (ndata&1 != 0) { ndata++ } @@ -184,14 +230,14 @@ var dylib []string var linkoff int64 -func machowrite() int { - o1 := coutbuf.Offset() +func machowrite(arch *sys.Arch, out *OutBuf, linkmode LinkMode) int { + o1 := out.Offset() loadsize := 4 * 4 * ndebug for i := 0; i < len(load); i++ { loadsize += 4 * (len(load[i].data) + 2) } - if macho64 { + if arch.PtrSize == 8 { loadsize += 18 * 4 * nseg loadsize += 20 * 4 * nsect } else { @@ -199,98 +245,98 @@ func machowrite() int { loadsize += 17 * 4 * nsect } - if macho64 { - Thearch.Lput(0xfeedfacf) + if arch.PtrSize == 8 { + out.Write32(MH_MAGIC_64) } else { - Thearch.Lput(0xfeedface) + out.Write32(MH_MAGIC) } - Thearch.Lput(machohdr.cpu) - Thearch.Lput(machohdr.subcpu) - if Linkmode == LinkExternal { - Thearch.Lput(1) /* file type - mach object */ + out.Write32(machohdr.cpu) + out.Write32(machohdr.subcpu) + if linkmode == LinkExternal { + out.Write32(MH_OBJECT) /* file type - mach object */ } else { - Thearch.Lput(2) /* file type - mach executable */ + out.Write32(MH_EXECUTE) /* file type - 
mach executable */ } - Thearch.Lput(uint32(len(load)) + uint32(nseg) + uint32(ndebug)) - Thearch.Lput(uint32(loadsize)) - Thearch.Lput(1) /* flags - no undefines */ - if macho64 { - Thearch.Lput(0) /* reserved */ + out.Write32(uint32(len(load)) + uint32(nseg) + uint32(ndebug)) + out.Write32(uint32(loadsize)) + if nkind[SymKindUndef] == 0 { + out.Write32(MH_NOUNDEFS) /* flags - no undefines */ + } else { + out.Write32(0) /* flags */ + } + if arch.PtrSize == 8 { + out.Write32(0) /* reserved */ } - var j int - var s *MachoSeg - var t *MachoSect for i := 0; i < nseg; i++ { - s = &seg[i] - if macho64 { - Thearch.Lput(25) /* segment 64 */ - Thearch.Lput(72 + 80*s.nsect) - strnput(s.name, 16) - Thearch.Vput(s.vaddr) - Thearch.Vput(s.vsize) - Thearch.Vput(s.fileoffset) - Thearch.Vput(s.filesize) - Thearch.Lput(s.prot1) - Thearch.Lput(s.prot2) - Thearch.Lput(s.nsect) - Thearch.Lput(s.flag) + s := &seg[i] + if arch.PtrSize == 8 { + out.Write32(LC_SEGMENT_64) + out.Write32(72 + 80*s.nsect) + out.WriteStringN(s.name, 16) + out.Write64(s.vaddr) + out.Write64(s.vsize) + out.Write64(s.fileoffset) + out.Write64(s.filesize) + out.Write32(s.prot1) + out.Write32(s.prot2) + out.Write32(s.nsect) + out.Write32(s.flag) } else { - Thearch.Lput(1) /* segment 32 */ - Thearch.Lput(56 + 68*s.nsect) - strnput(s.name, 16) - Thearch.Lput(uint32(s.vaddr)) - Thearch.Lput(uint32(s.vsize)) - Thearch.Lput(uint32(s.fileoffset)) - Thearch.Lput(uint32(s.filesize)) - Thearch.Lput(s.prot1) - Thearch.Lput(s.prot2) - Thearch.Lput(s.nsect) - Thearch.Lput(s.flag) + out.Write32(LC_SEGMENT) + out.Write32(56 + 68*s.nsect) + out.WriteStringN(s.name, 16) + out.Write32(uint32(s.vaddr)) + out.Write32(uint32(s.vsize)) + out.Write32(uint32(s.fileoffset)) + out.Write32(uint32(s.filesize)) + out.Write32(s.prot1) + out.Write32(s.prot2) + out.Write32(s.nsect) + out.Write32(s.flag) } - for j = 0; uint32(j) < s.nsect; j++ { - t = &s.sect[j] - if macho64 { - strnput(t.name, 16) - strnput(t.segname, 16) - Thearch.Vput(t.addr) 
- Thearch.Vput(t.size) - Thearch.Lput(t.off) - Thearch.Lput(t.align) - Thearch.Lput(t.reloc) - Thearch.Lput(t.nreloc) - Thearch.Lput(t.flag) - Thearch.Lput(t.res1) /* reserved */ - Thearch.Lput(t.res2) /* reserved */ - Thearch.Lput(0) /* reserved */ + for j := uint32(0); j < s.nsect; j++ { + t := &s.sect[j] + if arch.PtrSize == 8 { + out.WriteStringN(t.name, 16) + out.WriteStringN(t.segname, 16) + out.Write64(t.addr) + out.Write64(t.size) + out.Write32(t.off) + out.Write32(t.align) + out.Write32(t.reloc) + out.Write32(t.nreloc) + out.Write32(t.flag) + out.Write32(t.res1) /* reserved */ + out.Write32(t.res2) /* reserved */ + out.Write32(0) /* reserved */ } else { - strnput(t.name, 16) - strnput(t.segname, 16) - Thearch.Lput(uint32(t.addr)) - Thearch.Lput(uint32(t.size)) - Thearch.Lput(t.off) - Thearch.Lput(t.align) - Thearch.Lput(t.reloc) - Thearch.Lput(t.nreloc) - Thearch.Lput(t.flag) - Thearch.Lput(t.res1) /* reserved */ - Thearch.Lput(t.res2) /* reserved */ + out.WriteStringN(t.name, 16) + out.WriteStringN(t.segname, 16) + out.Write32(uint32(t.addr)) + out.Write32(uint32(t.size)) + out.Write32(t.off) + out.Write32(t.align) + out.Write32(t.reloc) + out.Write32(t.nreloc) + out.Write32(t.flag) + out.Write32(t.res1) /* reserved */ + out.Write32(t.res2) /* reserved */ } } } - var l *MachoLoad for i := 0; i < len(load); i++ { - l = &load[i] - Thearch.Lput(l.type_) - Thearch.Lput(4 * (uint32(len(l.data)) + 2)) - for j = 0; j < len(l.data); j++ { - Thearch.Lput(l.data[j]) + l := &load[i] + out.Write32(l.type_) + out.Write32(4 * (uint32(len(l.data)) + 2)) + for j := 0; j < len(l.data); j++ { + out.Write32(l.data[j]) } } - return int(coutbuf.Offset() - o1) + return int(out.Offset() - o1) } func (ctxt *Link) domacho() { @@ -301,36 +347,41 @@ func (ctxt *Link) domacho() { // empirically, string table must begin with " \x00". 
s := ctxt.Syms.Lookup(".machosymstr", 0) - s.Type = SMACHOSYMSTR - s.Attr |= AttrReachable - Adduint8(ctxt, s, ' ') - Adduint8(ctxt, s, '\x00') + s.Type = sym.SMACHOSYMSTR + s.Attr |= sym.AttrReachable + s.AddUint8(' ') + s.AddUint8('\x00') s = ctxt.Syms.Lookup(".machosymtab", 0) - s.Type = SMACHOSYMTAB - s.Attr |= AttrReachable + s.Type = sym.SMACHOSYMTAB + s.Attr |= sym.AttrReachable - if Linkmode != LinkExternal { + if ctxt.LinkMode != LinkExternal { s := ctxt.Syms.Lookup(".plt", 0) // will be __symbol_stub - s.Type = SMACHOPLT - s.Attr |= AttrReachable + s.Type = sym.SMACHOPLT + s.Attr |= sym.AttrReachable s = ctxt.Syms.Lookup(".got", 0) // will be __nl_symbol_ptr - s.Type = SMACHOGOT - s.Attr |= AttrReachable + s.Type = sym.SMACHOGOT + s.Attr |= sym.AttrReachable s.Align = 4 s = ctxt.Syms.Lookup(".linkedit.plt", 0) // indirect table for .plt - s.Type = SMACHOINDIRECTPLT - s.Attr |= AttrReachable + s.Type = sym.SMACHOINDIRECTPLT + s.Attr |= sym.AttrReachable s = ctxt.Syms.Lookup(".linkedit.got", 0) // indirect table for .got - s.Type = SMACHOINDIRECTGOT - s.Attr |= AttrReachable + s.Type = sym.SMACHOINDIRECTGOT + s.Attr |= sym.AttrReachable } } -func Machoadddynlib(lib string) { +func machoadddynlib(lib string, linkmode LinkMode) { + if seenlib[lib] || linkmode == LinkExternal { + return + } + seenlib[lib] = true + // Will need to store the library name rounded up // and 24 bytes of header metadata. 
If not enough // space, grab another page of initial space at the @@ -346,13 +397,13 @@ func Machoadddynlib(lib string) { dylib = append(dylib, lib) } -func machoshbits(ctxt *Link, mseg *MachoSeg, sect *Section, segname string) { +func machoshbits(ctxt *Link, mseg *MachoSeg, sect *sym.Section, segname string) { buf := "__" + strings.Replace(sect.Name[1:], ".", "_", -1) var msect *MachoSect - if sect.Rwx&1 == 0 && segname != "__DWARF" && (SysArch.Family == sys.ARM64 || - (SysArch.Family == sys.AMD64 && (Buildmode == BuildmodeCShared || Buildmode == BuildmodeCArchive || Buildmode == BuildmodePlugin)) || - (SysArch.Family == sys.ARM && (Buildmode == BuildmodeCShared || Buildmode == BuildmodeCArchive || Buildmode == BuildmodePlugin))) { + if sect.Rwx&1 == 0 && segname != "__DWARF" && (ctxt.Arch.Family == sys.ARM64 || + (ctxt.Arch.Family == sys.AMD64 && ctxt.BuildMode != BuildModeExe) || + (ctxt.Arch.Family == sys.ARM && ctxt.BuildMode != BuildModeExe)) { // Darwin external linker on arm64 and on amd64 and arm in c-shared/c-archive buildmode // complains about absolute relocs in __TEXT, so if the section is not // executable, put it in __DATA segment. 
@@ -379,36 +430,34 @@ func machoshbits(ctxt *Link, mseg *MachoSeg, sect *Section, segname string) { } msect.off = uint32(sect.Seg.Fileoff + sect.Vaddr - sect.Seg.Vaddr) } else { - // zero fill msect.off = 0 - - msect.flag |= 1 + msect.flag |= S_ZEROFILL } if sect.Rwx&1 != 0 { - msect.flag |= 0x400 /* has instructions */ + msect.flag |= S_ATTR_SOME_INSTRUCTIONS } if sect.Name == ".plt" { msect.name = "__symbol_stub1" - msect.flag = 0x80000408 /* only instructions, code, symbol stubs */ - msect.res1 = 0 //nkind[SymKindLocal]; + msect.flag = S_ATTR_PURE_INSTRUCTIONS | S_ATTR_SOME_INSTRUCTIONS | S_SYMBOL_STUBS + msect.res1 = 0 //nkind[SymKindLocal]; msect.res2 = 6 } if sect.Name == ".got" { msect.name = "__nl_symbol_ptr" - msect.flag = 6 /* section with nonlazy symbol pointers */ + msect.flag = S_NON_LAZY_SYMBOL_POINTERS msect.res1 = uint32(ctxt.Syms.Lookup(".linkedit.plt", 0).Size / 4) /* offset into indirect symbol table */ } if sect.Name == ".init_array" { msect.name = "__mod_init_func" - msect.flag = 9 // S_MOD_INIT_FUNC_POINTERS + msect.flag = S_MOD_INIT_FUNC_POINTERS } if segname == "__DWARF" { - msect.flag |= 0x02000000 + msect.flag |= S_ATTR_DEBUG } } @@ -417,9 +466,9 @@ func Asmbmacho(ctxt *Link) { va := *FlagTextAddr - int64(HEADR) mh := getMachoHdr() - switch SysArch.Family { + switch ctxt.Arch.Family { default: - Exitf("unknown macho architecture: %v", SysArch.Family) + Exitf("unknown macho architecture: %v", ctxt.Arch.Family) case sys.ARM: mh.cpu = MACHO_CPU_ARM @@ -439,12 +488,12 @@ func Asmbmacho(ctxt *Link) { } var ms *MachoSeg - if Linkmode == LinkExternal { + if ctxt.LinkMode == LinkExternal { /* segment for entire file */ ms = newMachoSeg("", 40) ms.fileoffset = Segtext.Fileoff - if SysArch.Family == sys.ARM || Buildmode == BuildmodeCArchive { + if ctxt.Arch.Family == sys.ARM || ctxt.BuildMode == BuildModeCArchive { ms.filesize = Segdata.Fileoff + Segdata.Filelen - Segtext.Fileoff } else { ms.filesize = Segdwarf.Fileoff + Segdwarf.Filelen - 
Segtext.Fileoff @@ -453,7 +502,7 @@ func Asmbmacho(ctxt *Link) { } /* segment for zero page */ - if Linkmode != LinkExternal { + if ctxt.LinkMode != LinkExternal { ms = newMachoSeg("__PAGEZERO", 0) ms.vsize = uint64(va) } @@ -461,7 +510,7 @@ func Asmbmacho(ctxt *Link) { /* text */ v := Rnd(int64(uint64(HEADR)+Segtext.Length), int64(*FlagRound)) - if Linkmode != LinkExternal { + if ctxt.LinkMode != LinkExternal { ms = newMachoSeg("__TEXT", 20) ms.vaddr = uint64(va) ms.vsize = uint64(v) @@ -476,7 +525,7 @@ func Asmbmacho(ctxt *Link) { } /* data */ - if Linkmode != LinkExternal { + if ctxt.LinkMode != LinkExternal { w := int64(Segdata.Length) ms = newMachoSeg("__DATA", 20) ms.vaddr = uint64(va) + uint64(v) @@ -493,7 +542,7 @@ func Asmbmacho(ctxt *Link) { /* dwarf */ if !*FlagW { - if Linkmode != LinkExternal { + if ctxt.LinkMode != LinkExternal { ms = newMachoSeg("__DWARF", 20) ms.vaddr = Segdwarf.Vaddr ms.vsize = 0 @@ -505,33 +554,33 @@ func Asmbmacho(ctxt *Link) { } } - if Linkmode != LinkExternal { - switch SysArch.Family { + if ctxt.LinkMode != LinkExternal { + switch ctxt.Arch.Family { default: - Exitf("unknown macho architecture: %v", SysArch.Family) + Exitf("unknown macho architecture: %v", ctxt.Arch.Family) case sys.ARM: - ml := newMachoLoad(5, 17+2) /* unix thread */ + ml := newMachoLoad(ctxt.Arch, LC_UNIXTHREAD, 17+2) ml.data[0] = 1 /* thread type */ ml.data[1] = 17 /* word count */ ml.data[2+15] = uint32(Entryvalue(ctxt)) /* start pc */ case sys.AMD64: - ml := newMachoLoad(5, 42+2) /* unix thread */ + ml := newMachoLoad(ctxt.Arch, LC_UNIXTHREAD, 42+2) ml.data[0] = 4 /* thread type */ ml.data[1] = 42 /* word count */ ml.data[2+32] = uint32(Entryvalue(ctxt)) /* start pc */ ml.data[2+32+1] = uint32(Entryvalue(ctxt) >> 32) case sys.ARM64: - ml := newMachoLoad(5, 68+2) /* unix thread */ + ml := newMachoLoad(ctxt.Arch, LC_UNIXTHREAD, 68+2) ml.data[0] = 6 /* thread type */ ml.data[1] = 68 /* word count */ ml.data[2+64] = uint32(Entryvalue(ctxt)) /* start pc */ 
ml.data[2+64+1] = uint32(Entryvalue(ctxt) >> 32) case sys.I386: - ml := newMachoLoad(5, 16+2) /* unix thread */ + ml := newMachoLoad(ctxt.Arch, LC_UNIXTHREAD, 16+2) ml.data[0] = 1 /* thread type */ ml.data[1] = 16 /* word count */ ml.data[2+10] = uint32(Entryvalue(ctxt)) /* start pc */ @@ -545,7 +594,7 @@ func Asmbmacho(ctxt *Link) { s3 := ctxt.Syms.Lookup(".linkedit.got", 0) s4 := ctxt.Syms.Lookup(".machosymstr", 0) - if Linkmode != LinkExternal { + if ctxt.LinkMode != LinkExternal { ms := newMachoSeg("__LINKEDIT", 0) ms.vaddr = uint64(va) + uint64(v) + uint64(Rnd(int64(Segdata.Length), int64(*FlagRound))) ms.vsize = uint64(s1.Size) + uint64(s2.Size) + uint64(s3.Size) + uint64(s4.Size) @@ -555,7 +604,7 @@ func Asmbmacho(ctxt *Link) { ms.prot2 = 3 } - ml := newMachoLoad(2, 4) /* LC_SYMTAB */ + ml := newMachoLoad(ctxt.Arch, LC_SYMTAB, 4) ml.data[0] = uint32(linkoff) /* symoff */ ml.data[1] = uint32(nsortsym) /* nsyms */ ml.data[2] = uint32(linkoff + s1.Size + s2.Size + s3.Size) /* stroff */ @@ -563,23 +612,23 @@ func Asmbmacho(ctxt *Link) { machodysymtab(ctxt) - if Linkmode != LinkExternal { - ml := newMachoLoad(14, 6) /* LC_LOAD_DYLINKER */ - ml.data[0] = 12 /* offset to string */ + if ctxt.LinkMode != LinkExternal { + ml := newMachoLoad(ctxt.Arch, LC_LOAD_DYLINKER, 6) + ml.data[0] = 12 /* offset to string */ stringtouint32(ml.data[1:], "/usr/lib/dyld") for i := 0; i < len(dylib); i++ { - ml = newMachoLoad(12, 4+(uint32(len(dylib[i]))+1+7)/8*2) /* LC_LOAD_DYLIB */ - ml.data[0] = 24 /* offset of string from beginning of load */ - ml.data[1] = 0 /* time stamp */ - ml.data[2] = 0 /* version */ - ml.data[3] = 0 /* compatibility version */ + ml = newMachoLoad(ctxt.Arch, LC_LOAD_DYLIB, 4+(uint32(len(dylib[i]))+1+7)/8*2) + ml.data[0] = 24 /* offset of string from beginning of load */ + ml.data[1] = 0 /* time stamp */ + ml.data[2] = 0 /* version */ + ml.data[3] = 0 /* compatibility version */ stringtouint32(ml.data[4:], dylib[i]) } } } - if Linkmode == LinkInternal { + if 
ctxt.LinkMode == LinkInternal { // For lldb, must say LC_VERSION_MIN_MACOSX or else // it won't know that this Mach-O binary is from OS X // (could be iOS or WatchOS instead). @@ -588,21 +637,19 @@ func Asmbmacho(ctxt *Link) { // and we can assume OS X. // // See golang.org/issues/12941. - const LC_VERSION_MIN_MACOSX = 0x24 - - ml := newMachoLoad(LC_VERSION_MIN_MACOSX, 2) + ml := newMachoLoad(ctxt.Arch, LC_VERSION_MIN_MACOSX, 2) ml.data[0] = 10<<16 | 7<<8 | 0<<0 // OS X version 10.7.0 ml.data[1] = 10<<16 | 7<<8 | 0<<0 // SDK 10.7.0 } - a := machowrite() + a := machowrite(ctxt.Arch, ctxt.Out, ctxt.LinkMode) if int32(a) > HEADR { Exitf("HEADR too small: %d > %d", a, HEADR) } } -func symkind(s *Symbol) int { - if s.Type == SDYNIMPORT { +func symkind(s *sym.Symbol) int { + if s.Type == sym.SDYNIMPORT { return SymKindUndef } if s.Attr.CgoExport() { @@ -611,7 +658,7 @@ func symkind(s *Symbol) int { return SymKindLocal } -func addsym(ctxt *Link, s *Symbol, name string, type_ SymbolType, addr int64, gotype *Symbol) { +func addsym(ctxt *Link, s *sym.Symbol, name string, type_ SymbolType, addr int64, gotype *sym.Symbol) { if s == nil { return } @@ -632,7 +679,7 @@ func addsym(ctxt *Link, s *Symbol, name string, type_ SymbolType, addr int64, go nsortsym++ } -type machoscmp []*Symbol +type machoscmp []*sym.Symbol func (x machoscmp) Len() int { return len(x) @@ -658,7 +705,7 @@ func (x machoscmp) Less(i, j int) bool { func machogenasmsym(ctxt *Link) { genasmsym(ctxt, addsym) for _, s := range ctxt.Syms.Allsym { - if s.Type == SDYNIMPORT || s.Type == SHOSTOBJ { + if s.Type == sym.SDYNIMPORT || s.Type == sym.SHOSTOBJ { if s.Attr.Reachable() { addsym(ctxt, s, "", DataSym, 0, nil) } @@ -671,10 +718,10 @@ func machosymorder(ctxt *Link) { // So we sort them here and pre-allocate dynid for them // See https://golang.org/issue/4029 for i := 0; i < len(dynexp); i++ { - dynexp[i].Attr |= AttrReachable + dynexp[i].Attr |= sym.AttrReachable } machogenasmsym(ctxt) - sortsym = 
make([]*Symbol, nsortsym) + sortsym = make([]*sym.Symbol, nsortsym) nsortsym = 0 machogenasmsym(ctxt) sort.Sort(machoscmp(sortsym[:nsortsym])) @@ -687,11 +734,14 @@ func machosymorder(ctxt *Link) { // // When dynamically linking, all non-local variables and plugin-exported // symbols need to be exported. -func machoShouldExport(ctxt *Link, s *Symbol) bool { +func machoShouldExport(ctxt *Link, s *sym.Symbol) bool { if !ctxt.DynlinkingGo() || s.Attr.Local() { return false } - if Buildmode == BuildmodePlugin && strings.HasPrefix(s.Extname, *flagPluginPath) { + if ctxt.BuildMode == BuildModePlugin && strings.HasPrefix(s.Extname, objabi.PathToPrefix(*flagPluginPath)) { + return true + } + if strings.HasPrefix(s.Name, "go.itab.") { return true } if strings.HasPrefix(s.Name, "type.") && !strings.HasPrefix(s.Name, "type..") { @@ -703,7 +753,7 @@ func machoShouldExport(ctxt *Link, s *Symbol) bool { if strings.HasPrefix(s.Name, "go.link.pkghash") { return true } - return s.Type >= SELFSECT // only writable sections + return s.Type >= sym.SELFSECT // only writable sections } func machosymtab(ctxt *Link) { @@ -712,7 +762,7 @@ func machosymtab(ctxt *Link) { for i := 0; i < nsortsym; i++ { s := sortsym[i] - Adduint32(ctxt, symtab, uint32(symstr.Size)) + symtab.AddUint32(ctxt.Arch, uint32(symstr.Size)) export := machoShouldExport(ctxt, s) @@ -723,24 +773,24 @@ func machosymtab(ctxt *Link) { // symbols like crosscall2 are in pclntab and end up // pointing at the host binary, breaking unwinding. // See Issue #18190. - cexport := !strings.Contains(s.Extname, ".") && (Buildmode != BuildmodePlugin || onlycsymbol(s)) + cexport := !strings.Contains(s.Extname, ".") && (ctxt.BuildMode != BuildModePlugin || onlycsymbol(s)) if cexport || export { - Adduint8(ctxt, symstr, '_') + symstr.AddUint8('_') } // replace "·" as ".", because DTrace cannot handle it. 
Addstring(symstr, strings.Replace(s.Extname, "·", ".", -1)) - if s.Type == SDYNIMPORT || s.Type == SHOSTOBJ { - Adduint8(ctxt, symtab, 0x01) // type N_EXT, external symbol - Adduint8(ctxt, symtab, 0) // no section - Adduint16(ctxt, symtab, 0) // desc - adduintxx(ctxt, symtab, 0, SysArch.PtrSize) // no value + if s.Type == sym.SDYNIMPORT || s.Type == sym.SHOSTOBJ { + symtab.AddUint8(0x01) // type N_EXT, external symbol + symtab.AddUint8(0) // no section + symtab.AddUint16(ctxt.Arch, 0) // desc + symtab.AddUintXX(ctxt.Arch, 0, ctxt.Arch.PtrSize) // no value } else { if s.Attr.CgoExport() || export { - Adduint8(ctxt, symtab, 0x0f) + symtab.AddUint8(0x0f) } else { - Adduint8(ctxt, symtab, 0x0e) + symtab.AddUint8(0x0e) } o := s for o.Outer != nil { @@ -748,18 +798,18 @@ func machosymtab(ctxt *Link) { } if o.Sect == nil { Errorf(s, "missing section for symbol") - Adduint8(ctxt, symtab, 0) + symtab.AddUint8(0) } else { - Adduint8(ctxt, symtab, uint8(o.Sect.Extnum)) + symtab.AddUint8(uint8(o.Sect.Extnum)) } - Adduint16(ctxt, symtab, 0) // desc - adduintxx(ctxt, symtab, uint64(Symaddr(s)), SysArch.PtrSize) + symtab.AddUint16(ctxt.Arch, 0) // desc + symtab.AddUintXX(ctxt.Arch, uint64(Symaddr(s)), ctxt.Arch.PtrSize) } } } func machodysymtab(ctxt *Link) { - ml := newMachoLoad(11, 18) /* LC_DYSYMTAB */ + ml := newMachoLoad(ctxt.Arch, LC_DYSYMTAB, 18) n := 0 ml.data[0] = uint32(n) /* ilocalsym */ @@ -822,31 +872,31 @@ func Domacholink(ctxt *Link) int64 { // any alignment padding itself, working around the // issue. 
for s4.Size%16 != 0 { - Adduint8(ctxt, s4, 0) + s4.AddUint8(0) } size := int(s1.Size + s2.Size + s3.Size + s4.Size) if size > 0 { linkoff = Rnd(int64(uint64(HEADR)+Segtext.Length), int64(*FlagRound)) + Rnd(int64(Segdata.Filelen), int64(*FlagRound)) + Rnd(int64(Segdwarf.Filelen), int64(*FlagRound)) - Cseek(linkoff) + ctxt.Out.SeekSet(linkoff) - Cwrite(s1.P[:s1.Size]) - Cwrite(s2.P[:s2.Size]) - Cwrite(s3.P[:s3.Size]) - Cwrite(s4.P[:s4.Size]) + ctxt.Out.Write(s1.P[:s1.Size]) + ctxt.Out.Write(s2.P[:s2.Size]) + ctxt.Out.Write(s3.P[:s3.Size]) + ctxt.Out.Write(s4.P[:s4.Size]) } return Rnd(int64(size), int64(*FlagRound)) } -func machorelocsect(ctxt *Link, sect *Section, syms []*Symbol) { +func machorelocsect(ctxt *Link, sect *sym.Section, syms []*sym.Symbol) { // If main section has no bits, nothing to relocate. if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen { return } - sect.Reloff = uint64(coutbuf.Offset()) + sect.Reloff = uint64(ctxt.Out.Offset()) for i, s := range syms { if !s.Attr.Reachable() { continue @@ -858,37 +908,37 @@ func machorelocsect(ctxt *Link, sect *Section, syms []*Symbol) { } eaddr := int32(sect.Vaddr + sect.Length) - for _, sym := range syms { - if !sym.Attr.Reachable() { + for _, s := range syms { + if !s.Attr.Reachable() { continue } - if sym.Value >= int64(eaddr) { + if s.Value >= int64(eaddr) { break } - for ri := 0; ri < len(sym.R); ri++ { - r := &sym.R[ri] - if r.Done != 0 { + for ri := 0; ri < len(s.R); ri++ { + r := &s.R[ri] + if r.Done { continue } if r.Xsym == nil { - Errorf(sym, "missing xsym in relocation") + Errorf(s, "missing xsym in relocation") continue } if !r.Xsym.Attr.Reachable() { - Errorf(sym, "unreachable reloc %v target %v", r.Type, r.Xsym.Name) + Errorf(s, "unreachable reloc %d (%s) target %v", r.Type, sym.RelocName(ctxt.Arch, r.Type), r.Xsym.Name) } - if Thearch.Machoreloc1(sym, r, int64(uint64(sym.Value+int64(r.Off))-sect.Vaddr)) < 0 { - Errorf(sym, "unsupported obj reloc %v/%d to %s", r.Type, r.Siz, r.Sym.Name) + if 
!Thearch.Machoreloc1(ctxt.Arch, ctxt.Out, s, r, int64(uint64(s.Value+int64(r.Off))-sect.Vaddr)) { + Errorf(s, "unsupported obj reloc %d (%s)/%d to %s", r.Type, sym.RelocName(ctxt.Arch, r.Type), r.Siz, r.Sym.Name) } } } - sect.Rellen = uint64(coutbuf.Offset()) - sect.Reloff + sect.Rellen = uint64(ctxt.Out.Offset()) - sect.Reloff } func Machoemitreloc(ctxt *Link) { - for coutbuf.Offset()&7 != 0 { - Cput(0) + for ctxt.Out.Offset()&7 != 0 { + ctxt.Out.Write8(0) } machorelocsect(ctxt, Segtext.Sections[0], ctxt.Textp) diff --git a/src/cmd/link/internal/ld/macho_combine_dwarf.go b/src/cmd/link/internal/ld/macho_combine_dwarf.go index 8c6c4a86acf..17a484ce8f1 100644 --- a/src/cmd/link/internal/ld/macho_combine_dwarf.go +++ b/src/cmd/link/internal/ld/macho_combine_dwarf.go @@ -17,31 +17,10 @@ import ( var realdwarf, linkseg *macho.Segment var dwarfstart, linkstart int64 -var dwarfaddr, linkaddr int64 +var dwarfaddr int64 var linkoffset uint32 const ( - LC_ID_DYLIB = 0xd - LC_LOAD_DYLINKER = 0xe - LC_PREBOUND_DYLIB = 0x10 - LC_LOAD_WEAK_DYLIB = 0x18 - LC_UUID = 0x1b - LC_RPATH = 0x8000001c - LC_CODE_SIGNATURE = 0x1d - LC_SEGMENT_SPLIT_INFO = 0x1e - LC_REEXPORT_DYLIB = 0x8000001f - LC_ENCRYPTION_INFO = 0x21 - LC_DYLD_INFO = 0x22 - LC_DYLD_INFO_ONLY = 0x80000022 - LC_VERSION_MIN_MACOSX = 0x24 - LC_VERSION_MIN_IPHONEOS = 0x25 - LC_FUNCTION_STARTS = 0x26 - LC_MAIN = 0x80000028 - LC_DATA_IN_CODE = 0x29 - LC_SOURCE_VERSION = 0x2A - LC_DYLIB_CODE_SIGN_DRS = 0x2B - LC_ENCRYPTION_INFO_64 = 0x2C - pageAlign = 12 // 4096 = 1 << 12 ) @@ -112,7 +91,7 @@ func (r loadCmdReader) WriteAt(offset int64, data interface{}) error { // header to add the DWARF sections. (Use ld's -headerpad option) // dsym is the path to the macho file containing DWARF from dsymutil. // outexe is the path where the combined executable should be saved. 
-func machoCombineDwarf(inexe, dsym, outexe string) error { +func machoCombineDwarf(inexe, dsym, outexe string, buildmode BuildMode) error { exef, err := os.Open(inexe) if err != nil { return err @@ -251,7 +230,7 @@ func machoCombineDwarf(inexe, dsym, outexe string) error { return err } } - return machoUpdateDwarfHeader(&reader) + return machoUpdateDwarfHeader(&reader, buildmode) } // machoUpdateSegment updates the load command for a moved segment. @@ -312,7 +291,7 @@ func machoUpdateSections(r loadCmdReader, seg, sect reflect.Value, deltaOffset, } // machoUpdateDwarfHeader updates the DWARF segment load command. -func machoUpdateDwarfHeader(r *loadCmdReader) error { +func machoUpdateDwarfHeader(r *loadCmdReader, buildmode BuildMode) error { var seg, sect interface{} cmd, err := r.Next() if err != nil { @@ -342,7 +321,7 @@ func machoUpdateDwarfHeader(r *loadCmdReader) error { // We don't need the DWARF information actually available in memory. // But if we do this for buildmode=c-shared then the user-space // dynamic loader complains about memsz < filesz. Sigh. 
- if Buildmode != BuildmodeCShared { + if buildmode != BuildModeCShared { segv.FieldByName("Addr").SetUint(0) segv.FieldByName("Memsz").SetUint(0) deltaAddr = 0 diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go index f03460d2b45..42e1ef7f472 100644 --- a/src/cmd/link/internal/ld/main.go +++ b/src/cmd/link/internal/ld/main.go @@ -48,8 +48,6 @@ var ( ) func init() { - flag.Var(&Linkmode, "linkmode", "set link `mode`") - flag.Var(&Buildmode, "buildmode", "set build `mode`") flag.Var(&rpath, "r", "set the ELF dynamic linker search `path` to dir1:dir2:...") } @@ -59,7 +57,6 @@ var ( flagOutfile = flag.String("o", "", "write output to `file`") flagPluginPath = flag.String("pluginpath", "", "full path name for plugin") - FlagLinkshared = flag.Bool("linkshared", false, "link against installed Go shared libraries") flagInstallSuffix = flag.String("installsuffix", "", "set package directory `suffix`") flagDumpDep = flag.Bool("dumpdep", false, "dump symbol dependency graph") @@ -88,7 +85,6 @@ var ( flagInterpreter = flag.String("I", "", "use `linker` as ELF dynamic linker") FlagDebugTramp = flag.Int("debugtramp", 0, "debug trampolines") - flagHeadtype = flag.String("H", "", "set header `type`") FlagRound = flag.Int("R", -1, "set address rounding `quantum`") FlagTextAddr = flag.Int64("T", -1, "set text segment `address`") FlagDataAddr = flag.Int64("D", -1, "set data segment `address`") @@ -100,8 +96,9 @@ var ( ) // Main is the main entry point for the linker code. -func Main() { - ctxt := linknew(SysArch) +func Main(arch *sys.Arch, theArch Arch) { + Thearch = theArch + ctxt := linknew(arch) ctxt.Bso = bufio.NewWriter(os.Stdout) // For testing behavior of go command when tools crash silently. 
@@ -114,42 +111,46 @@ func Main() { } // TODO(matloob): define these above and then check flag values here - if SysArch.Family == sys.AMD64 && objabi.GOOS == "plan9" { + if ctxt.Arch.Family == sys.AMD64 && objabi.GOOS == "plan9" { flag.BoolVar(&Flag8, "8", false, "use 64-bit addresses in symbol table") } + flagHeadType := flag.String("H", "", "set header `type`") + flag.BoolVar(&ctxt.linkShared, "linkshared", false, "link against installed Go shared libraries") + flag.Var(&ctxt.LinkMode, "linkmode", "set link `mode`") + flag.Var(&ctxt.BuildMode, "buildmode", "set build `mode`") objabi.Flagfn1("B", "add an ELF NT_GNU_BUILD_ID `note` when using ELF", addbuildinfo) objabi.Flagfn1("L", "add specified `directory` to library path", func(a string) { Lflag(ctxt, a) }) - objabi.Flagfn0("V", "print version and exit", doversion) + objabi.AddVersionFlag() // -V objabi.Flagfn1("X", "add string value `definition` of the form importpath.name=value", func(s string) { addstrdata1(ctxt, s) }) objabi.Flagcount("v", "print link trace", &ctxt.Debugvlog) objabi.Flagfn1("importcfg", "read import configuration from `file`", ctxt.readImportCfg) objabi.Flagparse(usage) - switch *flagHeadtype { + switch *flagHeadType { case "": case "windowsgui": - Headtype = objabi.Hwindows + ctxt.HeadType = objabi.Hwindows windowsgui = true default: - if err := Headtype.Set(*flagHeadtype); err != nil { + if err := ctxt.HeadType.Set(*flagHeadType); err != nil { Errorf(nil, "%v", err) usage() } } startProfile() - if Buildmode == BuildmodeUnset { - Buildmode = BuildmodeExe + if ctxt.BuildMode == BuildModeUnset { + ctxt.BuildMode = BuildModeExe } - if Buildmode != BuildmodeShared && flag.NArg() != 1 { + if ctxt.BuildMode != BuildModeShared && flag.NArg() != 1 { usage() } if *flagOutfile == "" { *flagOutfile = "a.out" - if Headtype == objabi.Hwindows { + if ctxt.HeadType == objabi.Hwindows { *flagOutfile += ".exe" } } @@ -158,23 +159,23 @@ func Main() { libinit(ctxt) // creates outfile - if Headtype == 
objabi.Hunknown { - Headtype.Set(objabi.GOOS) + if ctxt.HeadType == objabi.Hunknown { + ctxt.HeadType.Set(objabi.GOOS) } ctxt.computeTLSOffset() Thearch.Archinit(ctxt) - if *FlagLinkshared && !Iself { + if ctxt.linkShared && !ctxt.IsELF { Exitf("-linkshared can only be used on elf systems") } if ctxt.Debugvlog != 0 { - ctxt.Logf("HEADER = -H%d -T0x%x -D0x%x -R0x%x\n", Headtype, uint64(*FlagTextAddr), uint64(*FlagDataAddr), uint32(*FlagRound)) + ctxt.Logf("HEADER = -H%d -T0x%x -D0x%x -R0x%x\n", ctxt.HeadType, uint64(*FlagTextAddr), uint64(*FlagDataAddr), uint32(*FlagRound)) } - switch Buildmode { - case BuildmodeShared: + switch ctxt.BuildMode { + case BuildModeShared: for i := 0; i < flag.NArg(); i++ { arg := flag.Arg(i) parts := strings.SplitN(arg, "=", 2) @@ -188,7 +189,7 @@ func Main() { pkglistfornote = append(pkglistfornote, '\n') addlibpath(ctxt, "command line", "command line", file, pkgpath, "") } - case BuildmodePlugin: + case BuildModePlugin: addlibpath(ctxt, "command line", "command line", flag.Arg(0), *flagPluginPath, "") default: addlibpath(ctxt, "command line", "command line", flag.Arg(0), "main", "") @@ -201,11 +202,11 @@ func Main() { ctxt.callgraph() ctxt.doelf() - if Headtype == objabi.Hdarwin { + if ctxt.HeadType == objabi.Hdarwin { ctxt.domacho() } ctxt.dostkcheck() - if Headtype == objabi.Hwindows { + if ctxt.HeadType == objabi.Hwindows { ctxt.dope() } ctxt.addexport() diff --git a/src/cmd/link/internal/ld/nooptcgolink_test.go b/src/cmd/link/internal/ld/nooptcgolink_test.go index 1df29652b20..e019a39bf73 100644 --- a/src/cmd/link/internal/ld/nooptcgolink_test.go +++ b/src/cmd/link/internal/ld/nooptcgolink_test.go @@ -22,7 +22,7 @@ func TestNooptCgoBuild(t *testing.T) { t.Fatal(err) } defer os.RemoveAll(dir) - cmd := exec.Command("go", "build", "-gcflags=-N -l", "-o", filepath.Join(dir, "a.out")) + cmd := exec.Command(testenv.GoToolPath(t), "build", "-gcflags=-N -l", "-o", filepath.Join(dir, "a.out")) cmd.Dir = filepath.Join(runtime.GOROOT(), 
"src", "runtime", "testdata", "testprogcgo") out, err := cmd.CombinedOutput() if err != nil { diff --git a/src/cmd/link/internal/ld/outbuf.go b/src/cmd/link/internal/ld/outbuf.go new file mode 100644 index 00000000000..580435ad04f --- /dev/null +++ b/src/cmd/link/internal/ld/outbuf.go @@ -0,0 +1,120 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ld + +import ( + "bufio" + "cmd/internal/sys" + "encoding/binary" + "os" +) + +// OutBuf is a buffered file writer. +// +// It is simlar to the Writer in cmd/internal/bio with a few small differences. +// +// First, it tracks the output architecture and uses it to provide +// endian helpers. +// +// Second, it provides a very cheap offset counter that doesn't require +// any system calls to read the value. +type OutBuf struct { + arch *sys.Arch + off int64 + w *bufio.Writer + f *os.File + encbuf [8]byte // temp buffer used by WriteN methods +} + +func (out *OutBuf) SeekSet(p int64) { + if p == out.off { + return + } + out.Flush() + if _, err := out.f.Seek(p, 0); err != nil { + Exitf("seeking to %d in %s: %v", p, out.f.Name(), err) + } + out.off = p +} + +func (out *OutBuf) Offset() int64 { + return out.off +} + +// Write writes the contents of v to the buffer. +// +// As Write is backed by a bufio.Writer, callers do not have +// to explicitly handle the returned error as long as Flush is +// eventually called. 
+func (out *OutBuf) Write(v []byte) (int, error) { + n, err := out.w.Write(v) + out.off += int64(n) + return n, err +} + +func (out *OutBuf) Write8(v uint8) { + if err := out.w.WriteByte(v); err == nil { + out.off++ + } +} + +func (out *OutBuf) Write16(v uint16) { + out.arch.ByteOrder.PutUint16(out.encbuf[:], v) + out.Write(out.encbuf[:2]) +} + +func (out *OutBuf) Write32(v uint32) { + out.arch.ByteOrder.PutUint32(out.encbuf[:], v) + out.Write(out.encbuf[:4]) +} + +func (out *OutBuf) Write32b(v uint32) { + binary.BigEndian.PutUint32(out.encbuf[:], v) + out.Write(out.encbuf[:4]) +} + +func (out *OutBuf) Write64(v uint64) { + out.arch.ByteOrder.PutUint64(out.encbuf[:], v) + out.Write(out.encbuf[:8]) +} + +func (out *OutBuf) Write64b(v uint64) { + binary.BigEndian.PutUint64(out.encbuf[:], v) + out.Write(out.encbuf[:8]) +} + +func (out *OutBuf) WriteString(s string) { + n, _ := out.w.WriteString(s) + out.off += int64(n) +} + +// WriteStringN writes the first n bytes of s. +// If n is larger than len(s) then it is padded with zero bytes. +func (out *OutBuf) WriteStringN(s string, n int) { + out.WriteStringPad(s, n, zeros[:]) +} + +// WriteStringPad writes the first n bytes of s. +// If n is larger than len(s) then it is padded with the bytes in pad (repeated as needed). 
+func (out *OutBuf) WriteStringPad(s string, n int, pad []byte) { + if len(s) >= n { + out.WriteString(s[:n]) + } else { + out.WriteString(s) + n -= len(s) + for n > len(pad) { + out.Write(pad) + n -= len(pad) + + } + out.Write(pad[:n]) + } +} + +func (out *OutBuf) Flush() { + if err := out.w.Flush(); err != nil { + Exitf("flushing %s: %v", out.f.Name(), err) + } +} diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go index 1f6aed3f712..b954c05c814 100644 --- a/src/cmd/link/internal/ld/pcln.go +++ b/src/cmd/link/internal/ld/pcln.go @@ -7,6 +7,7 @@ package ld import ( "cmd/internal/objabi" "cmd/internal/src" + "cmd/link/internal/sym" "log" "os" "path/filepath" @@ -58,7 +59,7 @@ func pciternext(it *Pciter) { it.nextpc = it.pc + v*it.pcscale } -func pciterinit(ctxt *Link, it *Pciter, d *Pcdata) { +func pciterinit(ctxt *Link, it *Pciter, d *sym.Pcdata) { it.d = *d it.p = it.d.P it.pc = 0 @@ -70,7 +71,7 @@ func pciterinit(ctxt *Link, it *Pciter, d *Pcdata) { pciternext(it) } -func addvarint(d *Pcdata, val uint32) { +func addvarint(d *sym.Pcdata, val uint32) { n := int32(0) for v := val; v >= 0x80; v >>= 7 { n++ @@ -92,36 +93,36 @@ func addvarint(d *Pcdata, val uint32) { p[0] = byte(v) } -func addpctab(ctxt *Link, ftab *Symbol, off int32, d *Pcdata) int32 { +func addpctab(ctxt *Link, ftab *sym.Symbol, off int32, d *sym.Pcdata) int32 { var start int32 if len(d.P) > 0 { start = int32(len(ftab.P)) - Addbytes(ftab, d.P) + ftab.AddBytes(d.P) } - return int32(setuint32(ctxt, ftab, int64(off), uint32(start))) + return int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(start))) } -func ftabaddstring(ctxt *Link, ftab *Symbol, s string) int32 { +func ftabaddstring(ctxt *Link, ftab *sym.Symbol, s string) int32 { n := int32(len(s)) + 1 start := int32(len(ftab.P)) - Symgrow(ftab, int64(start)+int64(n)+1) + ftab.Grow(int64(start) + int64(n) + 1) copy(ftab.P[start:], s) return start } // numberfile assigns a file number to the file if it hasn't been assigned 
already. -func numberfile(ctxt *Link, file *Symbol) { - if file.Type != SFILEPATH { +func numberfile(ctxt *Link, file *sym.Symbol) { + if file.Type != sym.SFILEPATH { ctxt.Filesyms = append(ctxt.Filesyms, file) file.Value = int64(len(ctxt.Filesyms)) - file.Type = SFILEPATH + file.Type = sym.SFILEPATH path := file.Name[len(src.FileSymPrefix):] file.Name = expandGoroot(path) } } -func renumberfiles(ctxt *Link, files []*Symbol, d *Pcdata) { - var f *Symbol +func renumberfiles(ctxt *Link, files []*sym.Symbol, d *sym.Pcdata) { + var f *sym.Symbol // Give files numbers. for i := 0; i < len(files); i++ { @@ -130,7 +131,7 @@ func renumberfiles(ctxt *Link, files []*Symbol, d *Pcdata) { } newval := int32(-1) - var out Pcdata + var out sym.Pcdata var it Pciter for pciterinit(ctxt, &it, d); it.done == 0; pciternext(&it) { // value delta @@ -163,7 +164,7 @@ func renumberfiles(ctxt *Link, files []*Symbol, d *Pcdata) { // onlycsymbol reports whether this is a cgo symbol provided by the // runtime and only used from C code. -func onlycsymbol(s *Symbol) bool { +func onlycsymbol(s *sym.Symbol) bool { switch s.Name { case "_cgo_topofstack", "_cgo_panic", "crosscall2": return true @@ -171,38 +172,38 @@ func onlycsymbol(s *Symbol) bool { return false } -func container(s *Symbol) int { +func emitPcln(ctxt *Link, s *sym.Symbol) bool { if s == nil { - return 0 + return true } - if Buildmode == BuildmodePlugin && Headtype == objabi.Hdarwin && onlycsymbol(s) { - return 1 + if ctxt.BuildMode == BuildModePlugin && ctxt.HeadType == objabi.Hdarwin && onlycsymbol(s) { + return false } // We want to generate func table entries only for the "lowest level" symbols, // not containers of subsymbols. - if s.Type&SCONTAINER != 0 { - return 1 + if s.Attr.Container() { + return true } - return 0 + return true } // pclntab initializes the pclntab symbol with // runtime function and file name information. 
-var pclntabZpcln FuncInfo +var pclntabZpcln sym.FuncInfo // These variables are used to initialize runtime.firstmoduledata, see symtab.go:symtab. var pclntabNfunc int32 var pclntabFiletabOffset int32 var pclntabPclntabOffset int32 -var pclntabFirstFunc *Symbol -var pclntabLastFunc *Symbol +var pclntabFirstFunc *sym.Symbol +var pclntabLastFunc *sym.Symbol func (ctxt *Link) pclntab() { funcdataBytes := int64(0) ftab := ctxt.Syms.Lookup("runtime.pclntab", 0) - ftab.Type = SPCLNTAB - ftab.Attr |= AttrReachable + ftab.Type = sym.SPCLNTAB + ftab.Attr |= sym.AttrReachable // See golang.org/s/go12symtab for the format. Briefly: // 8-byte header @@ -212,26 +213,26 @@ func (ctxt *Link) pclntab() { // offset to file table [4 bytes] nfunc := int32(0) - // Find container symbols, mark them with SCONTAINER + // Find container symbols and mark them as such. for _, s := range ctxt.Textp { if s.Outer != nil { - s.Outer.Type |= SCONTAINER + s.Outer.Attr |= sym.AttrContainer } } for _, s := range ctxt.Textp { - if container(s) == 0 { + if emitPcln(ctxt, s) { nfunc++ } } pclntabNfunc = nfunc - Symgrow(ftab, 8+int64(SysArch.PtrSize)+int64(nfunc)*2*int64(SysArch.PtrSize)+int64(SysArch.PtrSize)+4) - setuint32(ctxt, ftab, 0, 0xfffffffb) - setuint8(ctxt, ftab, 6, uint8(SysArch.MinLC)) - setuint8(ctxt, ftab, 7, uint8(SysArch.PtrSize)) - setuint(ctxt, ftab, 8, uint64(nfunc)) - pclntabPclntabOffset = int32(8 + SysArch.PtrSize) + ftab.Grow(8 + int64(ctxt.Arch.PtrSize) + int64(nfunc)*2*int64(ctxt.Arch.PtrSize) + int64(ctxt.Arch.PtrSize) + 4) + ftab.SetUint32(ctxt.Arch, 0, 0xfffffffb) + ftab.SetUint8(ctxt.Arch, 6, uint8(ctxt.Arch.MinLC)) + ftab.SetUint8(ctxt.Arch, 7, uint8(ctxt.Arch.PtrSize)) + ftab.SetUint(ctxt.Arch, 8, uint64(nfunc)) + pclntabPclntabOffset = int32(8 + ctxt.Arch.PtrSize) funcnameoff := make(map[string]int32) nameToOffset := func(name string) int32 { @@ -244,10 +245,10 @@ func (ctxt *Link) pclntab() { } nfunc = 0 - var last *Symbol + var last *sym.Symbol for _, s := range 
ctxt.Textp { last = s - if container(s) != 0 { + if !emitPcln(ctxt, s) { continue } pcln := s.FuncInfo @@ -262,14 +263,14 @@ func (ctxt *Link) pclntab() { if len(pcln.InlTree) > 0 { if len(pcln.Pcdata) <= objabi.PCDATA_InlTreeIndex { // Create inlining pcdata table. - pcdata := make([]Pcdata, objabi.PCDATA_InlTreeIndex+1) + pcdata := make([]sym.Pcdata, objabi.PCDATA_InlTreeIndex+1) copy(pcdata, pcln.Pcdata) pcln.Pcdata = pcdata } if len(pcln.Funcdataoff) <= objabi.FUNCDATA_InlTree { // Create inline tree funcdata. - funcdata := make([]*Symbol, objabi.FUNCDATA_InlTree+1) + funcdata := make([]*sym.Symbol, objabi.FUNCDATA_InlTree+1) funcdataoff := make([]int64, objabi.FUNCDATA_InlTree+1) copy(funcdata, pcln.Funcdata) copy(funcdataoff, pcln.Funcdataoff) @@ -279,10 +280,10 @@ func (ctxt *Link) pclntab() { } funcstart := int32(len(ftab.P)) - funcstart += int32(-len(ftab.P)) & (int32(SysArch.PtrSize) - 1) + funcstart += int32(-len(ftab.P)) & (int32(ctxt.Arch.PtrSize) - 1) - setaddr(ctxt, ftab, 8+int64(SysArch.PtrSize)+int64(nfunc)*2*int64(SysArch.PtrSize), s) - setuint(ctxt, ftab, 8+int64(SysArch.PtrSize)+int64(nfunc)*2*int64(SysArch.PtrSize)+int64(SysArch.PtrSize), uint64(funcstart)) + ftab.SetAddr(ctxt.Arch, 8+int64(ctxt.Arch.PtrSize)+int64(nfunc)*2*int64(ctxt.Arch.PtrSize), s) + ftab.SetUint(ctxt.Arch, 8+int64(ctxt.Arch.PtrSize)+int64(nfunc)*2*int64(ctxt.Arch.PtrSize)+int64(ctxt.Arch.PtrSize), uint64(funcstart)) // Write runtime._func. Keep in sync with ../../../../runtime/runtime2.go:/_func // and package debug/gosym. 
@@ -290,18 +291,18 @@ func (ctxt *Link) pclntab() { // fixed size of struct, checked below off := funcstart - end := funcstart + int32(SysArch.PtrSize) + 3*4 + 5*4 + int32(len(pcln.Pcdata))*4 + int32(len(pcln.Funcdata))*int32(SysArch.PtrSize) - if len(pcln.Funcdata) > 0 && (end&int32(SysArch.PtrSize-1) != 0) { + end := funcstart + int32(ctxt.Arch.PtrSize) + 3*4 + 5*4 + int32(len(pcln.Pcdata))*4 + int32(len(pcln.Funcdata))*int32(ctxt.Arch.PtrSize) + if len(pcln.Funcdata) > 0 && (end&int32(ctxt.Arch.PtrSize-1) != 0) { end += 4 } - Symgrow(ftab, int64(end)) + ftab.Grow(int64(end)) // entry uintptr - off = int32(setaddr(ctxt, ftab, int64(off), s)) + off = int32(ftab.SetAddr(ctxt.Arch, int64(off), s)) // name int32 nameoff := nameToOffset(s.Name) - off = int32(setuint32(ctxt, ftab, int64(off), uint32(nameoff))) + off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(nameoff))) // args int32 // TODO: Move into funcinfo. @@ -309,14 +310,14 @@ func (ctxt *Link) pclntab() { if s.FuncInfo != nil { args = uint32(s.FuncInfo.Args) } - off = int32(setuint32(ctxt, ftab, int64(off), args)) + off = int32(ftab.SetUint32(ctxt.Arch, int64(off), args)) // frame int32 // This has been removed (it was never set quite correctly anyway). // Nothing should use it. // Leave an obviously incorrect value. // TODO: Remove entirely. 
- off = int32(setuint32(ctxt, ftab, int64(off), 0x1234567)) + off = int32(ftab.SetUint32(ctxt.Arch, int64(off), 0x1234567)) if pcln != &pclntabZpcln { renumberfiles(ctxt, pcln.File, &pcln.Pcfile) @@ -334,8 +335,8 @@ func (ctxt *Link) pclntab() { if len(pcln.InlTree) > 0 { inlTreeSym := ctxt.Syms.Lookup("inltree."+s.Name, 0) - inlTreeSym.Type = SRODATA - inlTreeSym.Attr |= AttrReachable | AttrDuplicateOK + inlTreeSym.Type = sym.SRODATA + inlTreeSym.Attr |= sym.AttrReachable | sym.AttrDuplicateOK for i, call := range pcln.InlTree { // Usually, call.File is already numbered since the file @@ -346,10 +347,10 @@ func (ctxt *Link) pclntab() { numberfile(ctxt, call.File) nameoff := nameToOffset(call.Func.Name) - setuint32(ctxt, inlTreeSym, int64(i*16+0), uint32(call.Parent)) - setuint32(ctxt, inlTreeSym, int64(i*16+4), uint32(call.File.Value)) - setuint32(ctxt, inlTreeSym, int64(i*16+8), uint32(call.Line)) - setuint32(ctxt, inlTreeSym, int64(i*16+12), uint32(nameoff)) + inlTreeSym.SetUint32(ctxt.Arch, int64(i*16+0), uint32(call.Parent)) + inlTreeSym.SetUint32(ctxt.Arch, int64(i*16+4), uint32(call.File.Value)) + inlTreeSym.SetUint32(ctxt.Arch, int64(i*16+8), uint32(call.Line)) + inlTreeSym.SetUint32(ctxt.Arch, int64(i*16+12), uint32(nameoff)) } pcln.Funcdata[objabi.FUNCDATA_InlTree] = inlTreeSym @@ -361,8 +362,8 @@ func (ctxt *Link) pclntab() { off = addpctab(ctxt, ftab, off, &pcln.Pcfile) off = addpctab(ctxt, ftab, off, &pcln.Pcline) - off = int32(setuint32(ctxt, ftab, int64(off), uint32(len(pcln.Pcdata)))) - off = int32(setuint32(ctxt, ftab, int64(off), uint32(len(pcln.Funcdata)))) + off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(len(pcln.Pcdata)))) + off = int32(ftab.SetUint32(ctxt.Arch, int64(off), uint32(len(pcln.Funcdata)))) for i := 0; i < len(pcln.Pcdata); i++ { off = addpctab(ctxt, ftab, off, &pcln.Pcdata[i]) } @@ -370,25 +371,25 @@ func (ctxt *Link) pclntab() { // funcdata, must be pointer-aligned and we're only int32-aligned. 
// Missing funcdata will be 0 (nil pointer). if len(pcln.Funcdata) > 0 { - if off&int32(SysArch.PtrSize-1) != 0 { + if off&int32(ctxt.Arch.PtrSize-1) != 0 { off += 4 } for i := 0; i < len(pcln.Funcdata); i++ { if pcln.Funcdata[i] == nil { - setuint(ctxt, ftab, int64(off)+int64(SysArch.PtrSize)*int64(i), uint64(pcln.Funcdataoff[i])) + ftab.SetUint(ctxt.Arch, int64(off)+int64(ctxt.Arch.PtrSize)*int64(i), uint64(pcln.Funcdataoff[i])) } else { // TODO: Dedup. funcdataBytes += pcln.Funcdata[i].Size - setaddrplus(ctxt, ftab, int64(off)+int64(SysArch.PtrSize)*int64(i), pcln.Funcdata[i], pcln.Funcdataoff[i]) + ftab.SetAddrPlus(ctxt.Arch, int64(off)+int64(ctxt.Arch.PtrSize)*int64(i), pcln.Funcdata[i], pcln.Funcdataoff[i]) } } - off += int32(len(pcln.Funcdata)) * int32(SysArch.PtrSize) + off += int32(len(pcln.Funcdata)) * int32(ctxt.Arch.PtrSize) } if off != end { - Errorf(s, "bad math in functab: funcstart=%d off=%d but end=%d (npcdata=%d nfuncdata=%d ptrsize=%d)", funcstart, off, end, len(pcln.Pcdata), len(pcln.Funcdata), SysArch.PtrSize) + Errorf(s, "bad math in functab: funcstart=%d off=%d but end=%d (npcdata=%d nfuncdata=%d ptrsize=%d)", funcstart, off, end, len(pcln.Pcdata), len(pcln.Funcdata), ctxt.Arch.PtrSize) errorexit() } @@ -397,20 +398,20 @@ func (ctxt *Link) pclntab() { pclntabLastFunc = last // Final entry of table is just end pc. - setaddrplus(ctxt, ftab, 8+int64(SysArch.PtrSize)+int64(nfunc)*2*int64(SysArch.PtrSize), last, last.Size) + ftab.SetAddrPlus(ctxt.Arch, 8+int64(ctxt.Arch.PtrSize)+int64(nfunc)*2*int64(ctxt.Arch.PtrSize), last, last.Size) // Start file table. 
start := int32(len(ftab.P)) - start += int32(-len(ftab.P)) & (int32(SysArch.PtrSize) - 1) + start += int32(-len(ftab.P)) & (int32(ctxt.Arch.PtrSize) - 1) pclntabFiletabOffset = start - setuint32(ctxt, ftab, 8+int64(SysArch.PtrSize)+int64(nfunc)*2*int64(SysArch.PtrSize)+int64(SysArch.PtrSize), uint32(start)) + ftab.SetUint32(ctxt.Arch, 8+int64(ctxt.Arch.PtrSize)+int64(nfunc)*2*int64(ctxt.Arch.PtrSize)+int64(ctxt.Arch.PtrSize), uint32(start)) - Symgrow(ftab, int64(start)+(int64(len(ctxt.Filesyms))+1)*4) - setuint32(ctxt, ftab, int64(start), uint32(len(ctxt.Filesyms)+1)) + ftab.Grow(int64(start) + (int64(len(ctxt.Filesyms))+1)*4) + ftab.SetUint32(ctxt.Arch, int64(start), uint32(len(ctxt.Filesyms)+1)) for i := len(ctxt.Filesyms) - 1; i >= 0; i-- { s := ctxt.Filesyms[i] - setuint32(ctxt, ftab, int64(start)+s.Value*4, uint32(ftabaddstring(ctxt, ftab, s.Name))) + ftab.SetUint32(ctxt.Arch, int64(start)+s.Value*4, uint32(ftabaddstring(ctxt, ftab, s.Name))) } ftab.Size = int64(len(ftab.P)) @@ -443,9 +444,9 @@ const ( // function for a pc. See src/runtime/symtab.go:findfunc for details. 
func (ctxt *Link) findfunctab() { t := ctxt.Syms.Lookup("runtime.findfunctab", 0) - t.Type = SRODATA - t.Attr |= AttrReachable - t.Attr |= AttrLocal + t.Type = sym.SRODATA + t.Attr |= sym.AttrReachable + t.Attr |= sym.AttrLocal // find min and max address min := ctxt.Textp[0].Value @@ -464,16 +465,16 @@ func (ctxt *Link) findfunctab() { } idx := int32(0) for i, s := range ctxt.Textp { - if container(s) != 0 { + if !emitPcln(ctxt, s) { continue } p := s.Value - var e *Symbol + var e *sym.Symbol i++ if i < len(ctxt.Textp) { e = ctxt.Textp[i] } - for container(e) != 0 && i < len(ctxt.Textp) { + for !emitPcln(ctxt, e) && i < len(ctxt.Textp) { e = ctxt.Textp[i] i++ } @@ -500,7 +501,7 @@ func (ctxt *Link) findfunctab() { // allocate table nbuckets := int32((max - min + BUCKETSIZE - 1) / BUCKETSIZE) - Symgrow(t, 4*int64(nbuckets)+int64(n)) + t.Grow(4*int64(nbuckets) + int64(n)) // fill in table for i := int32(0); i < nbuckets; i++ { @@ -508,7 +509,7 @@ func (ctxt *Link) findfunctab() { if base == NOIDX { Errorf(nil, "hole in findfunctab") } - setuint32(ctxt, t, int64(i)*(4+SUBBUCKETS), uint32(base)) + t.SetUint32(ctxt.Arch, int64(i)*(4+SUBBUCKETS), uint32(base)) for j := int32(0); j < SUBBUCKETS && i*SUBBUCKETS+j < n; j++ { idx = indexes[i*SUBBUCKETS+j] if idx == NOIDX { @@ -518,7 +519,7 @@ func (ctxt *Link) findfunctab() { Errorf(nil, "too many functions in a findfunc bucket! 
%d/%d %d %d", i, nbuckets, j, idx-base) } - setuint8(ctxt, t, int64(i)*(4+SUBBUCKETS)+4+int64(j), uint8(idx-base)) + t.SetUint8(ctxt.Arch, int64(i)*(4+SUBBUCKETS)+4+int64(j), uint8(idx-base)) } } } diff --git a/src/cmd/link/internal/ld/pe.go b/src/cmd/link/internal/ld/pe.go index f26c83ee30e..f48b35c6f99 100644 --- a/src/cmd/link/internal/ld/pe.go +++ b/src/cmd/link/internal/ld/pe.go @@ -7,76 +7,15 @@ package ld import ( "cmd/internal/objabi" "cmd/internal/sys" + "cmd/link/internal/sym" + "debug/pe" "encoding/binary" "fmt" - "os" "sort" "strconv" "strings" ) -type IMAGE_FILE_HEADER struct { - Machine uint16 - NumberOfSections uint16 - TimeDateStamp uint32 - PointerToSymbolTable uint32 - NumberOfSymbols uint32 - SizeOfOptionalHeader uint16 - Characteristics uint16 -} - -type IMAGE_DATA_DIRECTORY struct { - VirtualAddress uint32 - Size uint32 -} - -type IMAGE_OPTIONAL_HEADER struct { - Magic uint16 - MajorLinkerVersion uint8 - MinorLinkerVersion uint8 - SizeOfCode uint32 - SizeOfInitializedData uint32 - SizeOfUninitializedData uint32 - AddressOfEntryPoint uint32 - BaseOfCode uint32 - BaseOfData uint32 - ImageBase uint32 - SectionAlignment uint32 - FileAlignment uint32 - MajorOperatingSystemVersion uint16 - MinorOperatingSystemVersion uint16 - MajorImageVersion uint16 - MinorImageVersion uint16 - MajorSubsystemVersion uint16 - MinorSubsystemVersion uint16 - Win32VersionValue uint32 - SizeOfImage uint32 - SizeOfHeaders uint32 - CheckSum uint32 - Subsystem uint16 - DllCharacteristics uint16 - SizeOfStackReserve uint32 - SizeOfStackCommit uint32 - SizeOfHeapReserve uint32 - SizeOfHeapCommit uint32 - LoaderFlags uint32 - NumberOfRvaAndSizes uint32 - DataDirectory [16]IMAGE_DATA_DIRECTORY -} - -type IMAGE_SECTION_HEADER struct { - Name [8]uint8 - VirtualSize uint32 - VirtualAddress uint32 - SizeOfRawData uint32 - PointerToRawData uint32 - PointerToRelocations uint32 - PointerToLineNumbers uint32 - NumberOfRelocations uint16 - NumberOfLineNumbers uint16 - Characteristics 
uint32 -} - type IMAGE_IMPORT_DESCRIPTOR struct { OriginalFirstThunk uint32 TimeDateStamp uint32 @@ -152,39 +91,25 @@ const ( IMAGE_SUBSYSTEM_WINDOWS_CUI = 3 ) -// X64 -type PE64_IMAGE_OPTIONAL_HEADER struct { - Magic uint16 - MajorLinkerVersion uint8 - MinorLinkerVersion uint8 - SizeOfCode uint32 - SizeOfInitializedData uint32 - SizeOfUninitializedData uint32 - AddressOfEntryPoint uint32 - BaseOfCode uint32 - ImageBase uint64 - SectionAlignment uint32 - FileAlignment uint32 - MajorOperatingSystemVersion uint16 - MinorOperatingSystemVersion uint16 - MajorImageVersion uint16 - MinorImageVersion uint16 - MajorSubsystemVersion uint16 - MinorSubsystemVersion uint16 - Win32VersionValue uint32 - SizeOfImage uint32 - SizeOfHeaders uint32 - CheckSum uint32 - Subsystem uint16 - DllCharacteristics uint16 - SizeOfStackReserve uint64 - SizeOfStackCommit uint64 - SizeOfHeapReserve uint64 - SizeOfHeapCommit uint64 - LoaderFlags uint32 - NumberOfRvaAndSizes uint32 - DataDirectory [16]IMAGE_DATA_DIRECTORY -} +// TODO(crawshaw): add these constants to debug/pe. +const ( + // TODO: the Microsoft doco says IMAGE_SYM_DTYPE_ARRAY is 3 and IMAGE_SYM_DTYPE_FUNCTION is 2 + IMAGE_SYM_TYPE_NULL = 0 + IMAGE_SYM_TYPE_STRUCT = 8 + IMAGE_SYM_DTYPE_FUNCTION = 0x20 + IMAGE_SYM_DTYPE_ARRAY = 0x30 + IMAGE_SYM_CLASS_EXTERNAL = 2 + IMAGE_SYM_CLASS_STATIC = 3 + + IMAGE_REL_I386_DIR32 = 0x0006 + IMAGE_REL_I386_SECREL = 0x000B + IMAGE_REL_I386_REL32 = 0x0014 + + IMAGE_REL_AMD64_ADDR64 = 0x0001 + IMAGE_REL_AMD64_ADDR32 = 0x0002 + IMAGE_REL_AMD64_REL32 = 0x0004 + IMAGE_REL_AMD64_SECREL = 0x000B +) // Copyright 2009 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style @@ -326,43 +251,8 @@ var dosstub = []uint8{ 0x00, } -var rsrcsym *Symbol - -var strtbl []byte - -var PESECTHEADR int32 - -var PEFILEHEADR int32 - -var pe64 int - -var pensect int - -var nextsectoff int - -var nextfileoff int - -var textsect int - -var datasect int - -var bsssect int - -var fh IMAGE_FILE_HEADER - -var oh IMAGE_OPTIONAL_HEADER - -var oh64 PE64_IMAGE_OPTIONAL_HEADER - -var sh [16]IMAGE_SECTION_HEADER - -// shNames stores full names of PE sections stored in sh. -var shNames []string - -var dd []IMAGE_DATA_DIRECTORY - type Imp struct { - s *Symbol + s *sym.Symbol off uint64 next *Imp argsize int @@ -376,92 +266,667 @@ type Dll struct { next *Dll } -var dr *Dll +var ( + rsrcsym *sym.Symbol + PESECTHEADR int32 + PEFILEHEADR int32 + pe64 int + dr *Dll + dexport [1024]*sym.Symbol + nexport int +) -var dexport [1024]*Symbol +// peStringTable is a COFF string table. +type peStringTable struct { + strings []string + stringsLen int +} -var nexport int +// size returns size of string table t. +func (t *peStringTable) size() int { + // string table starts with 4-byte length at the beginning + return t.stringsLen + 4 +} -func addpesectionWithLongName(ctxt *Link, shortname, longname string, sectsize int, filesize int) *IMAGE_SECTION_HEADER { - if pensect == 16 { - Errorf(nil, "too many sections") +// add adds string str to string table t. +func (t *peStringTable) add(str string) int { + off := t.size() + t.strings = append(t.strings, str) + t.stringsLen += len(str) + 1 // each string will have 0 appended to it + return off +} + +// write writes string table t into the output file. +func (t *peStringTable) write(out *OutBuf) { + out.Write32(uint32(t.size())) + for _, s := range t.strings { + out.WriteString(s) + out.Write8(0) + } +} + +// peSection represents section from COFF section table. 
+type peSection struct { + name string + shortName string + index int // one-based index into the Section Table + virtualSize uint32 + virtualAddress uint32 + sizeOfRawData uint32 + pointerToRawData uint32 + pointerToRelocations uint32 + numberOfRelocations uint16 + characteristics uint32 +} + +// checkOffset verifies COFF section sect offset in the file. +func (sect *peSection) checkOffset(off int64) { + if off != int64(sect.pointerToRawData) { + Errorf(nil, "%s.PointerToRawData = %#x, want %#x", sect.name, uint64(int64(sect.pointerToRawData)), uint64(off)) errorexit() } +} - h := &sh[pensect] - pensect++ - copy(h.Name[:], shortname) - shNames = append(shNames, longname) - h.VirtualSize = uint32(sectsize) - h.VirtualAddress = uint32(nextsectoff) - nextsectoff = int(Rnd(int64(nextsectoff)+int64(sectsize), PESECTALIGN)) - h.PointerToRawData = uint32(nextfileoff) - if filesize > 0 { - h.SizeOfRawData = uint32(Rnd(int64(filesize), PEFILEALIGN)) - nextfileoff += int(h.SizeOfRawData) +// checkSegment verifies COFF section sect matches address +// and file offset provided in segment seg. +func (sect *peSection) checkSegment(seg *sym.Segment) { + if seg.Vaddr-PEBASE != uint64(sect.virtualAddress) { + Errorf(nil, "%s.VirtualAddress = %#x, want %#x", sect.name, uint64(int64(sect.virtualAddress)), uint64(int64(seg.Vaddr-PEBASE))) + errorexit() } + if seg.Fileoff != uint64(sect.pointerToRawData) { + Errorf(nil, "%s.PointerToRawData = %#x, want %#x", sect.name, uint64(int64(sect.pointerToRawData)), uint64(int64(seg.Fileoff))) + errorexit() + } +} +// pad adds zeros to the section sect. It writes as many bytes +// as necessary to make section sect.SizeOfRawData bytes long. +// It assumes that n bytes are already written to the file. +func (sect *peSection) pad(out *OutBuf, n uint32) { + out.WriteStringN("", int(sect.sizeOfRawData-n)) +} + +// write writes COFF section sect into the output file. 
+func (sect *peSection) write(out *OutBuf, linkmode LinkMode) error { + h := pe.SectionHeader32{ + VirtualSize: sect.virtualSize, + SizeOfRawData: sect.sizeOfRawData, + PointerToRawData: sect.pointerToRawData, + PointerToRelocations: sect.pointerToRelocations, + NumberOfRelocations: sect.numberOfRelocations, + Characteristics: sect.characteristics, + } + if linkmode != LinkExternal { + h.VirtualAddress = sect.virtualAddress + } + copy(h.Name[:], sect.shortName) + return binary.Write(out, binary.LittleEndian, h) +} + +// emitRelocations emits the relocation entries for the sect. +// The actual relocations are emitted by relocfn. +// This updates the corresponding PE section table entry +// with the relocation offset and count. +func (sect *peSection) emitRelocations(out *OutBuf, relocfn func() int) { + sect.pointerToRelocations = uint32(out.Offset()) + // first entry: extended relocs + out.Write32(0) // placeholder for number of relocation + 1 + out.Write32(0) + out.Write16(0) + + n := relocfn() + 1 + + cpos := out.Offset() + out.SeekSet(int64(sect.pointerToRelocations)) + out.Write32(uint32(n)) + out.SeekSet(cpos) + if n > 0x10000 { + n = 0x10000 + sect.characteristics |= IMAGE_SCN_LNK_NRELOC_OVFL + } else { + sect.pointerToRelocations += 10 // skip the extend reloc entry + } + sect.numberOfRelocations = uint16(n - 1) +} + +// peFile is used to build COFF file. +type peFile struct { + sections []*peSection + stringTable peStringTable + textSect *peSection + dataSect *peSection + bssSect *peSection + ctorsSect *peSection + nextSectOffset uint32 + nextFileOffset uint32 + symtabOffset int64 // offset to the start of symbol table + symbolCount int // number of symbol table records written + dataDirectory [16]pe.DataDirectory +} + +// addSection adds section to the COFF file f. 
+func (f *peFile) addSection(name string, sectsize int, filesize int) *peSection { + sect := &peSection{ + name: name, + shortName: name, + index: len(f.sections) + 1, + virtualSize: uint32(sectsize), + virtualAddress: f.nextSectOffset, + pointerToRawData: f.nextFileOffset, + } + f.nextSectOffset = uint32(Rnd(int64(f.nextSectOffset)+int64(sectsize), PESECTALIGN)) + if filesize > 0 { + sect.sizeOfRawData = uint32(Rnd(int64(filesize), PEFILEALIGN)) + f.nextFileOffset += sect.sizeOfRawData + } + f.sections = append(f.sections, sect) + return sect +} + +// addDWARFSection adds DWARF section to the COFF file f. +// This function is similar to addSection, but DWARF section names are +// longer than 8 characters, so they need to be stored in the string table. +func (f *peFile) addDWARFSection(name string, size int) *peSection { + if size == 0 { + Exitf("DWARF section %q is empty", name) + } + // DWARF section names are longer than 8 characters. + // PE format requires such names to be stored in string table, + // and section names replaced with slash (/) followed by + // correspondent string table index. + // see http://www.microsoft.com/whdc/system/platform/firmware/PECOFFdwn.mspx + // for details + off := f.stringTable.add(name) + h := f.addSection(name, size, size) + h.shortName = fmt.Sprintf("/%d", off) + h.characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE return h } -func addpesection(ctxt *Link, name string, sectsize int, filesize int) *IMAGE_SECTION_HEADER { - return addpesectionWithLongName(ctxt, name, name, sectsize, filesize) -} -func chksectoff(ctxt *Link, h *IMAGE_SECTION_HEADER, off int64) { - if off != int64(h.PointerToRawData) { - Errorf(nil, "%s.PointerToRawData = %#x, want %#x", cstring(h.Name[:]), uint64(int64(h.PointerToRawData)), uint64(off)) - errorexit() +// addDWARF adds DWARF information to the COFF file f. 
+func (f *peFile) addDWARF() { + if *FlagS { // disable symbol table + return + } + if *FlagW { // disable dwarf + return + } + for _, sect := range Segdwarf.Sections { + h := f.addDWARFSection(sect.Name, int(sect.Length)) + fileoff := sect.Vaddr - Segdwarf.Vaddr + Segdwarf.Fileoff + if uint64(h.pointerToRawData) != fileoff { + Exitf("%s.PointerToRawData = %#x, want %#x", sect.Name, h.pointerToRawData, fileoff) + } } } -func chksectseg(ctxt *Link, h *IMAGE_SECTION_HEADER, s *Segment) { - if s.Vaddr-PEBASE != uint64(h.VirtualAddress) { - Errorf(nil, "%s.VirtualAddress = %#x, want %#x", cstring(h.Name[:]), uint64(int64(h.VirtualAddress)), uint64(int64(s.Vaddr-PEBASE))) - errorexit() +// addInitArray adds .ctors COFF section to the file f. +func (f *peFile) addInitArray(ctxt *Link) *peSection { + // The size below was determined by the specification for array relocations, + // and by observing what GCC writes here. If the initarray section grows to + // contain more than one constructor entry, the size will need to be 8 * constructor_count. + // However, the entire Go runtime is initialized from just one function, so it is unlikely + // that this will need to grow in the future. + var size int + switch objabi.GOARCH { + default: + Exitf("peFile.addInitArray: unsupported GOARCH=%q\n", objabi.GOARCH) + case "386": + size = 4 + case "amd64": + size = 8 + } + sect := f.addSection(".ctors", size, size) + sect.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ + sect.sizeOfRawData = uint32(size) + ctxt.Out.SeekSet(int64(sect.pointerToRawData)) + sect.checkOffset(ctxt.Out.Offset()) + + init_entry := ctxt.Syms.Lookup(*flagEntrySymbol, 0) + addr := uint64(init_entry.Value) - init_entry.Sect.Vaddr + switch objabi.GOARCH { + case "386": + ctxt.Out.Write32(uint32(addr)) + case "amd64": + ctxt.Out.Write64(addr) + } + return sect +} + +// emitRelocations emits relocation entries for go.o in external linking. 
+func (f *peFile) emitRelocations(ctxt *Link) { + for ctxt.Out.Offset()&7 != 0 { + ctxt.Out.Write8(0) } - if s.Fileoff != uint64(h.PointerToRawData) { - Errorf(nil, "%s.PointerToRawData = %#x, want %#x", cstring(h.Name[:]), uint64(int64(h.PointerToRawData)), uint64(int64(s.Fileoff))) - errorexit() + // relocsect relocates symbols from first in section sect, and returns + // the total number of relocations emitted. + relocsect := func(sect *sym.Section, syms []*sym.Symbol, base uint64) int { + // If main section has no bits, nothing to relocate. + if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen { + return 0 + } + relocs := 0 + sect.Reloff = uint64(ctxt.Out.Offset()) + for i, s := range syms { + if !s.Attr.Reachable() { + continue + } + if uint64(s.Value) >= sect.Vaddr { + syms = syms[i:] + break + } + } + eaddr := int32(sect.Vaddr + sect.Length) + for _, sym := range syms { + if !sym.Attr.Reachable() { + continue + } + if sym.Value >= int64(eaddr) { + break + } + for ri := 0; ri < len(sym.R); ri++ { + r := &sym.R[ri] + if r.Done { + continue + } + if r.Xsym == nil { + Errorf(sym, "missing xsym in relocation") + continue + } + if r.Xsym.Dynid < 0 { + Errorf(sym, "reloc %d to non-coff symbol %s (outer=%s) %d", r.Type, r.Sym.Name, r.Xsym.Name, r.Sym.Type) + } + if !Thearch.PEreloc1(ctxt.Arch, ctxt.Out, sym, r, int64(uint64(sym.Value+int64(r.Off))-base)) { + Errorf(sym, "unsupported obj reloc %d/%d to %s", r.Type, r.Siz, r.Sym.Name) + } + relocs++ + } + } + sect.Rellen = uint64(ctxt.Out.Offset()) - sect.Reloff + return relocs + } + + f.textSect.emitRelocations(ctxt.Out, func() int { + n := relocsect(Segtext.Sections[0], ctxt.Textp, Segtext.Vaddr) + for _, sect := range Segtext.Sections[1:] { + n += relocsect(sect, datap, Segtext.Vaddr) + } + return n + }) + + f.dataSect.emitRelocations(ctxt.Out, func() int { + var n int + for _, sect := range Segdata.Sections { + n += relocsect(sect, datap, Segdata.Vaddr) + } + return n + }) + +dwarfLoop: + for _, sect := range 
Segdwarf.Sections { + for _, pesect := range f.sections { + if sect.Name == pesect.name { + pesect.emitRelocations(ctxt.Out, func() int { + return relocsect(sect, dwarfp, sect.Vaddr) + }) + continue dwarfLoop + } + } + Errorf(nil, "emitRelocations: could not find %q section", sect.Name) + } + + f.ctorsSect.emitRelocations(ctxt.Out, func() int { + dottext := ctxt.Syms.Lookup(".text", 0) + ctxt.Out.Write32(0) + ctxt.Out.Write32(uint32(dottext.Dynid)) + switch objabi.GOARCH { + default: + Errorf(dottext, "unknown architecture for PE: %q\n", objabi.GOARCH) + case "386": + ctxt.Out.Write16(IMAGE_REL_I386_DIR32) + case "amd64": + ctxt.Out.Write16(IMAGE_REL_AMD64_ADDR64) + } + return 1 + }) +} + +// writeSymbol appends symbol s to file f symbol table. +// It also sets s.Dynid to written symbol number. +func (f *peFile) writeSymbol(out *OutBuf, s *sym.Symbol, value int64, sectidx int, typ uint16, class uint8) { + if len(s.Name) > 8 { + out.Write32(0) + out.Write32(uint32(f.stringTable.add(s.Name))) + } else { + out.WriteStringN(s.Name, 8) + } + out.Write32(uint32(value)) + out.Write16(uint16(sectidx)) + out.Write16(typ) + out.Write8(class) + out.Write8(0) // no aux entries + + s.Dynid = int32(f.symbolCount) + + f.symbolCount++ +} + +// mapToPESection searches peFile f for s symbol's location. +// It returns PE section index, and offset within that section. 
+func (f *peFile) mapToPESection(s *sym.Symbol, linkmode LinkMode) (pesectidx int, offset int64, err error) { + if s.Sect == nil { + return 0, 0, fmt.Errorf("could not map %s symbol with no section", s.Name) + } + if s.Sect.Seg == &Segtext { + return f.textSect.index, int64(uint64(s.Value) - Segtext.Vaddr), nil + } + if s.Sect.Seg != &Segdata { + return 0, 0, fmt.Errorf("could not map %s symbol with non .text or .data section", s.Name) + } + v := uint64(s.Value) - Segdata.Vaddr + if linkmode != LinkExternal { + return f.dataSect.index, int64(v), nil + } + if s.Type == sym.SDATA { + return f.dataSect.index, int64(v), nil + } + // Note: although address of runtime.edata (type sym.SDATA) is at the start of .bss section + // it still belongs to the .data section, not the .bss section. + if v < Segdata.Filelen { + return f.dataSect.index, int64(v), nil + } + return f.bssSect.index, int64(v - Segdata.Filelen), nil +} + +// writeSymbols writes all COFF symbol table records. +func (f *peFile) writeSymbols(ctxt *Link) { + + put := func(ctxt *Link, s *sym.Symbol, name string, type_ SymbolType, addr int64, gotype *sym.Symbol) { + if s == nil { + return + } + if s.Sect == nil && type_ != UndefinedSym { + return + } + switch type_ { + default: + return + case DataSym, BSSSym, TextSym, UndefinedSym: + } + + // Only windows/386 requires underscore prefix on external symbols. 
+ if ctxt.Arch.Family == sys.I386 && + ctxt.LinkMode == LinkExternal && + (s.Type == sym.SHOSTOBJ || s.Attr.CgoExport()) { + s.Name = "_" + s.Name + } + + var typ uint16 + if ctxt.LinkMode == LinkExternal { + typ = IMAGE_SYM_TYPE_NULL + } else { + // TODO: fix IMAGE_SYM_DTYPE_ARRAY value and use following expression, instead of 0x0308 + typ = IMAGE_SYM_DTYPE_ARRAY<<8 + IMAGE_SYM_TYPE_STRUCT + typ = 0x0308 // "array of structs" + } + sect, value, err := f.mapToPESection(s, ctxt.LinkMode) + if err != nil { + if type_ == UndefinedSym { + typ = IMAGE_SYM_DTYPE_FUNCTION + } else { + Errorf(s, "addpesym: %v", err) + } + } + class := IMAGE_SYM_CLASS_EXTERNAL + if s.Version != 0 || s.Attr.VisibilityHidden() || s.Attr.Local() { + class = IMAGE_SYM_CLASS_STATIC + } + f.writeSymbol(ctxt.Out, s, value, sect, typ, uint8(class)) + } + + if ctxt.LinkMode == LinkExternal { + // Include section symbols as external, because + // .ctors and .debug_* section relocations refer to it. + for _, pesect := range f.sections { + sym := ctxt.Syms.Lookup(pesect.name, 0) + f.writeSymbol(ctxt.Out, sym, 0, pesect.index, IMAGE_SYM_TYPE_NULL, IMAGE_SYM_CLASS_STATIC) + } + } + + genasmsym(ctxt, put) +} + +// writeSymbolTableAndStringTable writes out symbol and string tables for peFile f. +func (f *peFile) writeSymbolTableAndStringTable(ctxt *Link) { + f.symtabOffset = ctxt.Out.Offset() + + // write COFF symbol table + if !*FlagS || ctxt.LinkMode == LinkExternal { + f.writeSymbols(ctxt) + } + + // update COFF file header and section table + size := f.stringTable.size() + 18*f.symbolCount + var h *peSection + if ctxt.LinkMode != LinkExternal { + // We do not really need .symtab for go.o, and if we have one, ld + // will also include it in the exe, and that will confuse windows. 
+ h = f.addSection(".symtab", size, size) + h.characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE + h.checkOffset(f.symtabOffset) + } + + // write COFF string table + f.stringTable.write(ctxt.Out) + if ctxt.LinkMode != LinkExternal { + h.pad(ctxt.Out, uint32(size)) } } +// writeFileHeader writes COFF file header for peFile f. +func (f *peFile) writeFileHeader(arch *sys.Arch, out *OutBuf, linkmode LinkMode) { + var fh pe.FileHeader + + switch arch.Family { + default: + Exitf("unknown PE architecture: %v", arch.Family) + case sys.AMD64: + fh.Machine = IMAGE_FILE_MACHINE_AMD64 + case sys.I386: + fh.Machine = IMAGE_FILE_MACHINE_I386 + } + + fh.NumberOfSections = uint16(len(f.sections)) + + // Being able to produce identical output for identical input is + // much more beneficial than having build timestamp in the header. + fh.TimeDateStamp = 0 + + if linkmode == LinkExternal { + fh.Characteristics = IMAGE_FILE_LINE_NUMS_STRIPPED + } else { + fh.Characteristics = IMAGE_FILE_RELOCS_STRIPPED | IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_DEBUG_STRIPPED + } + if pe64 != 0 { + var oh64 pe.OptionalHeader64 + fh.SizeOfOptionalHeader = uint16(binary.Size(&oh64)) + fh.Characteristics |= IMAGE_FILE_LARGE_ADDRESS_AWARE + } else { + var oh pe.OptionalHeader32 + fh.SizeOfOptionalHeader = uint16(binary.Size(&oh)) + fh.Characteristics |= IMAGE_FILE_32BIT_MACHINE + } + + fh.PointerToSymbolTable = uint32(f.symtabOffset) + fh.NumberOfSymbols = uint32(f.symbolCount) + + binary.Write(out, binary.LittleEndian, &fh) +} + +// writeOptionalHeader writes COFF optional header for peFile f. +func (f *peFile) writeOptionalHeader(ctxt *Link) { + var oh pe.OptionalHeader32 + var oh64 pe.OptionalHeader64 + + if pe64 != 0 { + oh64.Magic = 0x20b // PE32+ + } else { + oh.Magic = 0x10b // PE32 + oh.BaseOfData = f.dataSect.virtualAddress + } + + // Fill out both oh64 and oh. We only use one. Oh well. 
+ oh64.MajorLinkerVersion = 3 + oh.MajorLinkerVersion = 3 + oh64.MinorLinkerVersion = 0 + oh.MinorLinkerVersion = 0 + oh64.SizeOfCode = f.textSect.sizeOfRawData + oh.SizeOfCode = f.textSect.sizeOfRawData + oh64.SizeOfInitializedData = f.dataSect.sizeOfRawData + oh.SizeOfInitializedData = f.dataSect.sizeOfRawData + oh64.SizeOfUninitializedData = 0 + oh.SizeOfUninitializedData = 0 + if ctxt.LinkMode != LinkExternal { + oh64.AddressOfEntryPoint = uint32(Entryvalue(ctxt) - PEBASE) + oh.AddressOfEntryPoint = uint32(Entryvalue(ctxt) - PEBASE) + } + oh64.BaseOfCode = f.textSect.virtualAddress + oh.BaseOfCode = f.textSect.virtualAddress + oh64.ImageBase = PEBASE + oh.ImageBase = PEBASE + oh64.SectionAlignment = uint32(PESECTALIGN) + oh.SectionAlignment = uint32(PESECTALIGN) + oh64.FileAlignment = uint32(PEFILEALIGN) + oh.FileAlignment = uint32(PEFILEALIGN) + oh64.MajorOperatingSystemVersion = 4 + oh.MajorOperatingSystemVersion = 4 + oh64.MinorOperatingSystemVersion = 0 + oh.MinorOperatingSystemVersion = 0 + oh64.MajorImageVersion = 1 + oh.MajorImageVersion = 1 + oh64.MinorImageVersion = 0 + oh.MinorImageVersion = 0 + oh64.MajorSubsystemVersion = 4 + oh.MajorSubsystemVersion = 4 + oh64.MinorSubsystemVersion = 0 + oh.MinorSubsystemVersion = 0 + oh64.SizeOfImage = f.nextSectOffset + oh.SizeOfImage = f.nextSectOffset + oh64.SizeOfHeaders = uint32(PEFILEHEADR) + oh.SizeOfHeaders = uint32(PEFILEHEADR) + if windowsgui { + oh64.Subsystem = IMAGE_SUBSYSTEM_WINDOWS_GUI + oh.Subsystem = IMAGE_SUBSYSTEM_WINDOWS_GUI + } else { + oh64.Subsystem = IMAGE_SUBSYSTEM_WINDOWS_CUI + oh.Subsystem = IMAGE_SUBSYSTEM_WINDOWS_CUI + } + + // Disable stack growth as we don't want Windows to + // fiddle with the thread stack limits, which we set + // ourselves to circumvent the stack checks in the + // Windows exception dispatcher. + // Commit size must be strictly less than reserve + // size otherwise reserve will be rounded up to a + // larger size, as verified with VMMap. 
+ + // On 64-bit, we always reserve 2MB stacks. "Pure" Go code is + // okay with much smaller stacks, but the syscall package + // makes it easy to call into arbitrary C code without cgo, + // and system calls even in "pure" Go code are actually C + // calls that may need more stack than we think. + // + // The default stack reserve size affects only the main + // thread, ctrlhandler thread, and profileloop thread. For + // these, it must be greater than the stack size assumed by + // externalthreadhandler. + // + // For other threads we specify stack size in runtime explicitly. + // For these, the reserve must match STACKSIZE in + // runtime/cgo/gcc_windows_{386,amd64}.c and the correspondent + // CreateThread parameter in runtime.newosproc. + oh64.SizeOfStackReserve = 0x00200000 + if !iscgo { + oh64.SizeOfStackCommit = 0x00001000 + } else { + // TODO(brainman): Maybe remove optional header writing altogether for cgo. + // For cgo it is the external linker that is building final executable. + // And it probably does not use any information stored in optional header. + oh64.SizeOfStackCommit = 0x00200000 - 0x2000 // account for 2 guard pages + } + + // 32-bit is trickier since there much less address space to + // work with. Here we use large stacks only in cgo binaries as + // a compromise. 
+ if !iscgo { + oh.SizeOfStackReserve = 0x00020000 + oh.SizeOfStackCommit = 0x00001000 + } else { + oh.SizeOfStackReserve = 0x00100000 + oh.SizeOfStackCommit = 0x00100000 - 0x2000 // account for 2 guard pages + } + + oh64.SizeOfHeapReserve = 0x00100000 + oh.SizeOfHeapReserve = 0x00100000 + oh64.SizeOfHeapCommit = 0x00001000 + oh.SizeOfHeapCommit = 0x00001000 + oh64.NumberOfRvaAndSizes = 16 + oh.NumberOfRvaAndSizes = 16 + + if pe64 != 0 { + oh64.DataDirectory = f.dataDirectory + } else { + oh.DataDirectory = f.dataDirectory + } + + if pe64 != 0 { + binary.Write(ctxt.Out, binary.LittleEndian, &oh64) + } else { + binary.Write(ctxt.Out, binary.LittleEndian, &oh) + } +} + +var pefile peFile + func Peinit(ctxt *Link) { var l int - switch SysArch.Family { + switch ctxt.Arch.Family { // 64-bit architectures case sys.AMD64: pe64 = 1 - + var oh64 pe.OptionalHeader64 l = binary.Size(&oh64) - dd = oh64.DataDirectory[:] // 32-bit architectures default: + var oh pe.OptionalHeader32 l = binary.Size(&oh) - dd = oh.DataDirectory[:] } - if Linkmode == LinkExternal { + if ctxt.LinkMode == LinkExternal { PESECTALIGN = 0 PEFILEALIGN = 0 } + var sh [16]pe.SectionHeader32 + var fh pe.FileHeader PEFILEHEADR = int32(Rnd(int64(len(dosstub)+binary.Size(&fh)+l+binary.Size(&sh)), PEFILEALIGN)) - if Linkmode != LinkExternal { + if ctxt.LinkMode != LinkExternal { PESECTHEADR = int32(Rnd(int64(PEFILEHEADR), PESECTALIGN)) } else { PESECTHEADR = 0 } - nextsectoff = int(PESECTHEADR) - nextfileoff = int(PEFILEHEADR) + pefile.nextSectOffset = uint32(PESECTHEADR) + pefile.nextFileOffset = uint32(PEFILEHEADR) - if Linkmode == LinkInternal { + if ctxt.LinkMode == LinkInternal { // some mingw libs depend on this symbol, for example, FindPESectionByName - ctxt.xdefine("__image_base__", SDATA, PEBASE) - ctxt.xdefine("_image_base__", SDATA, PEBASE) + ctxt.xdefine("__image_base__", sym.SDATA, PEBASE) + ctxt.xdefine("_image_base__", sym.SDATA, PEBASE) } HEADR = PEFILEHEADR @@ -479,34 +944,28 @@ func 
Peinit(ctxt *Link) { } } -func pewrite() { - Cseek(0) - if Linkmode != LinkExternal { - Cwrite(dosstub) - strnput("PE", 4) +func pewrite(ctxt *Link) { + ctxt.Out.SeekSet(0) + if ctxt.LinkMode != LinkExternal { + ctxt.Out.Write(dosstub) + ctxt.Out.WriteStringN("PE", 4) } - binary.Write(&coutbuf, binary.LittleEndian, &fh) + pefile.writeFileHeader(ctxt.Arch, ctxt.Out, ctxt.LinkMode) - if pe64 != 0 { - binary.Write(&coutbuf, binary.LittleEndian, &oh64) - } else { - binary.Write(&coutbuf, binary.LittleEndian, &oh) + pefile.writeOptionalHeader(ctxt) + + for _, sect := range pefile.sections { + sect.write(ctxt.Out, ctxt.LinkMode) } - if Linkmode == LinkExternal { - for i := range sh[:pensect] { - sh[i].VirtualAddress = 0 - } - } - binary.Write(&coutbuf, binary.LittleEndian, sh[:pensect]) } -func strput(s string) { - coutbuf.WriteString(s) - Cput(0) +func strput(out *OutBuf, s string) { + out.WriteString(s) + out.Write8(0) // string must be padded to even size if (len(s)+1)%2 != 0 { - Cput(0) + out.Write8(0) } } @@ -516,7 +975,7 @@ func initdynimport(ctxt *Link) *Dll { dr = nil var m *Imp for _, s := range ctxt.Syms.Allsym { - if !s.Attr.Reachable() || s.Type != SDYNIMPORT { + if !s.Attr.Reachable() || s.Type != sym.SDYNIMPORT { continue } for d = dr; d != nil; d = d.next { @@ -545,7 +1004,7 @@ func initdynimport(ctxt *Link) *Dll { if err != nil { Errorf(s, "failed to parse stdcall decoration: %v", err) } - m.argsize *= SysArch.PtrSize + m.argsize *= ctxt.Arch.PtrSize s.Extname = s.Extname[:i] } @@ -554,41 +1013,42 @@ func initdynimport(ctxt *Link) *Dll { d.ms = m } - if Linkmode == LinkExternal { + if ctxt.LinkMode == LinkExternal { // Add real symbol name for d := dr; d != nil; d = d.next { for m = d.ms; m != nil; m = m.next { - m.s.Type = SDATA - Symgrow(m.s, int64(SysArch.PtrSize)) + m.s.Type = sym.SDATA + m.s.Grow(int64(ctxt.Arch.PtrSize)) dynName := m.s.Extname // only windows/386 requires stdcall decoration - if SysArch.Family == sys.I386 && m.argsize >= 0 { + if 
ctxt.Arch.Family == sys.I386 && m.argsize >= 0 { dynName += fmt.Sprintf("@%d", m.argsize) } dynSym := ctxt.Syms.Lookup(dynName, 0) - dynSym.Attr |= AttrReachable - dynSym.Type = SHOSTOBJ - r := Addrel(m.s) + dynSym.Attr |= sym.AttrReachable + dynSym.Type = sym.SHOSTOBJ + r := m.s.AddRel() r.Sym = dynSym r.Off = 0 - r.Siz = uint8(SysArch.PtrSize) + r.Siz = uint8(ctxt.Arch.PtrSize) r.Type = objabi.R_ADDR } } } else { dynamic := ctxt.Syms.Lookup(".windynamic", 0) - dynamic.Attr |= AttrReachable - dynamic.Type = SWINDOWS + dynamic.Attr |= sym.AttrReachable + dynamic.Type = sym.SWINDOWS for d := dr; d != nil; d = d.next { for m = d.ms; m != nil; m = m.next { - m.s.Type = SWINDOWS | SSUB + m.s.Type = sym.SWINDOWS + m.s.Attr |= sym.AttrSubSymbol m.s.Sub = dynamic.Sub dynamic.Sub = m.s m.s.Value = dynamic.Size - dynamic.Size += int64(SysArch.PtrSize) + dynamic.Size += int64(ctxt.Arch.PtrSize) } - dynamic.Size += int64(SysArch.PtrSize) + dynamic.Size += int64(ctxt.Arch.PtrSize) } } @@ -607,8 +1067,8 @@ func peimporteddlls() []string { return dlls } -func addimports(ctxt *Link, datsect *IMAGE_SECTION_HEADER) { - startoff := coutbuf.Offset() +func addimports(ctxt *Link, datsect *peSection) { + startoff := ctxt.Out.Offset() dynamic := ctxt.Syms.Lookup(".windynamic", 0) // skip import descriptor table (will write it later) @@ -617,101 +1077,102 @@ func addimports(ctxt *Link, datsect *IMAGE_SECTION_HEADER) { for d := dr; d != nil; d = d.next { n++ } - Cseek(startoff + int64(binary.Size(&IMAGE_IMPORT_DESCRIPTOR{}))*int64(n+1)) + ctxt.Out.SeekSet(startoff + int64(binary.Size(&IMAGE_IMPORT_DESCRIPTOR{}))*int64(n+1)) // write dll names for d := dr; d != nil; d = d.next { - d.nameoff = uint64(coutbuf.Offset()) - uint64(startoff) - strput(d.name) + d.nameoff = uint64(ctxt.Out.Offset()) - uint64(startoff) + strput(ctxt.Out, d.name) } // write function names var m *Imp for d := dr; d != nil; d = d.next { for m = d.ms; m != nil; m = m.next { - m.off = uint64(nextsectoff) + 
uint64(coutbuf.Offset()) - uint64(startoff) - Wputl(0) // hint - strput(m.s.Extname) + m.off = uint64(pefile.nextSectOffset) + uint64(ctxt.Out.Offset()) - uint64(startoff) + ctxt.Out.Write16(0) // hint + strput(ctxt.Out, m.s.Extname) } } // write OriginalFirstThunks - oftbase := uint64(coutbuf.Offset()) - uint64(startoff) + oftbase := uint64(ctxt.Out.Offset()) - uint64(startoff) - n = uint64(coutbuf.Offset()) + n = uint64(ctxt.Out.Offset()) for d := dr; d != nil; d = d.next { - d.thunkoff = uint64(coutbuf.Offset()) - n + d.thunkoff = uint64(ctxt.Out.Offset()) - n for m = d.ms; m != nil; m = m.next { if pe64 != 0 { - Vputl(m.off) + ctxt.Out.Write64(m.off) } else { - Lputl(uint32(m.off)) + ctxt.Out.Write32(uint32(m.off)) } } if pe64 != 0 { - Vputl(0) + ctxt.Out.Write64(0) } else { - Lputl(0) + ctxt.Out.Write32(0) } } // add pe section and pad it at the end - n = uint64(coutbuf.Offset()) - uint64(startoff) + n = uint64(ctxt.Out.Offset()) - uint64(startoff) - isect := addpesection(ctxt, ".idata", int(n), int(n)) - isect.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE - chksectoff(ctxt, isect, startoff) - strnput("", int(uint64(isect.SizeOfRawData)-n)) - endoff := coutbuf.Offset() + isect := pefile.addSection(".idata", int(n), int(n)) + isect.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE + isect.checkOffset(startoff) + isect.pad(ctxt.Out, uint32(n)) + endoff := ctxt.Out.Offset() // write FirstThunks (allocated in .data section) - ftbase := uint64(dynamic.Value) - uint64(datsect.VirtualAddress) - PEBASE + ftbase := uint64(dynamic.Value) - uint64(datsect.virtualAddress) - PEBASE - Cseek(int64(uint64(datsect.PointerToRawData) + ftbase)) + ctxt.Out.SeekSet(int64(uint64(datsect.pointerToRawData) + ftbase)) for d := dr; d != nil; d = d.next { for m = d.ms; m != nil; m = m.next { if pe64 != 0 { - Vputl(m.off) + ctxt.Out.Write64(m.off) } else { - Lputl(uint32(m.off)) + 
ctxt.Out.Write32(uint32(m.off)) } } if pe64 != 0 { - Vputl(0) + ctxt.Out.Write64(0) } else { - Lputl(0) + ctxt.Out.Write32(0) } } // finally write import descriptor table - Cseek(startoff) + out := ctxt.Out + out.SeekSet(startoff) for d := dr; d != nil; d = d.next { - Lputl(uint32(uint64(isect.VirtualAddress) + oftbase + d.thunkoff)) - Lputl(0) - Lputl(0) - Lputl(uint32(uint64(isect.VirtualAddress) + d.nameoff)) - Lputl(uint32(uint64(datsect.VirtualAddress) + ftbase + d.thunkoff)) + out.Write32(uint32(uint64(isect.virtualAddress) + oftbase + d.thunkoff)) + out.Write32(0) + out.Write32(0) + out.Write32(uint32(uint64(isect.virtualAddress) + d.nameoff)) + out.Write32(uint32(uint64(datsect.virtualAddress) + ftbase + d.thunkoff)) } - Lputl(0) //end - Lputl(0) - Lputl(0) - Lputl(0) - Lputl(0) + out.Write32(0) //end + out.Write32(0) + out.Write32(0) + out.Write32(0) + out.Write32(0) // update data directory - dd[IMAGE_DIRECTORY_ENTRY_IMPORT].VirtualAddress = isect.VirtualAddress - dd[IMAGE_DIRECTORY_ENTRY_IMPORT].Size = isect.VirtualSize - dd[IMAGE_DIRECTORY_ENTRY_IAT].VirtualAddress = uint32(dynamic.Value - PEBASE) - dd[IMAGE_DIRECTORY_ENTRY_IAT].Size = uint32(dynamic.Size) + pefile.dataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT].VirtualAddress = isect.virtualAddress + pefile.dataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT].Size = isect.virtualSize + pefile.dataDirectory[IMAGE_DIRECTORY_ENTRY_IAT].VirtualAddress = uint32(dynamic.Value - PEBASE) + pefile.dataDirectory[IMAGE_DIRECTORY_ENTRY_IAT].Size = uint32(dynamic.Size) - Cseek(endoff) + out.SeekSet(endoff) } -type byExtname []*Symbol +type byExtname []*sym.Symbol func (s byExtname) Len() int { return len(s) } func (s byExtname) Swap(i, j int) { s[i], s[j] = s[j], s[i] } @@ -747,12 +1208,12 @@ func addexports(ctxt *Link) { return } - sect := addpesection(ctxt, ".edata", size, size) - sect.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ - chksectoff(ctxt, sect, coutbuf.Offset()) - va := 
int(sect.VirtualAddress) - dd[IMAGE_DIRECTORY_ENTRY_EXPORT].VirtualAddress = uint32(va) - dd[IMAGE_DIRECTORY_ENTRY_EXPORT].Size = sect.VirtualSize + sect := pefile.addSection(".edata", size, size) + sect.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ + sect.checkOffset(ctxt.Out.Offset()) + va := int(sect.virtualAddress) + pefile.dataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT].VirtualAddress = uint32(va) + pefile.dataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT].Size = sect.virtualSize vaName := va + binary.Size(&e) + nexport*4 vaAddr := va + binary.Size(&e) @@ -769,331 +1230,50 @@ func addexports(ctxt *Link) { e.AddressOfNames = uint32(vaName) e.AddressOfNameOrdinals = uint32(vaNa) + out := ctxt.Out + // put IMAGE_EXPORT_DIRECTORY - binary.Write(&coutbuf, binary.LittleEndian, &e) + binary.Write(out, binary.LittleEndian, &e) // put EXPORT Address Table for i := 0; i < nexport; i++ { - Lputl(uint32(dexport[i].Value - PEBASE)) + out.Write32(uint32(dexport[i].Value - PEBASE)) } // put EXPORT Name Pointer Table v := int(e.Name + uint32(len(*flagOutfile)) + 1) for i := 0; i < nexport; i++ { - Lputl(uint32(v)) + out.Write32(uint32(v)) v += len(dexport[i].Extname) + 1 } // put EXPORT Ordinal Table for i := 0; i < nexport; i++ { - Wputl(uint16(i)) + out.Write16(uint16(i)) } // put Names - strnput(*flagOutfile, len(*flagOutfile)+1) + out.WriteStringN(*flagOutfile, len(*flagOutfile)+1) for i := 0; i < nexport; i++ { - strnput(dexport[i].Extname, len(dexport[i].Extname)+1) + out.WriteStringN(dexport[i].Extname, len(dexport[i].Extname)+1) } - strnput("", int(sect.SizeOfRawData-uint32(size))) -} - -// perelocsect relocates symbols from first in section sect, and returns -// the total number of relocations emitted. -func perelocsect(ctxt *Link, sect *Section, syms []*Symbol, base uint64) int { - // If main section has no bits, nothing to relocate. 
- if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen { - return 0 - } - - relocs := 0 - - sect.Reloff = uint64(coutbuf.Offset()) - for i, s := range syms { - if !s.Attr.Reachable() { - continue - } - if uint64(s.Value) >= sect.Vaddr { - syms = syms[i:] - break - } - } - - eaddr := int32(sect.Vaddr + sect.Length) - for _, sym := range syms { - if !sym.Attr.Reachable() { - continue - } - if sym.Value >= int64(eaddr) { - break - } - for ri := 0; ri < len(sym.R); ri++ { - r := &sym.R[ri] - if r.Done != 0 { - continue - } - if r.Xsym == nil { - Errorf(sym, "missing xsym in relocation") - continue - } - - if r.Xsym.Dynid < 0 { - Errorf(sym, "reloc %d to non-coff symbol %s (outer=%s) %d", r.Type, r.Sym.Name, r.Xsym.Name, r.Sym.Type) - } - if !Thearch.PEreloc1(sym, r, int64(uint64(sym.Value+int64(r.Off))-base)) { - Errorf(sym, "unsupported obj reloc %d/%d to %s", r.Type, r.Siz, r.Sym.Name) - } - - relocs++ - } - } - - sect.Rellen = uint64(coutbuf.Offset()) - sect.Reloff - - return relocs -} - -// peemitsectreloc emits the relocation entries for sect. -// The actual relocations are emitted by relocfn. -// This updates the corresponding PE section table entry -// with the relocation offset and count. -func peemitsectreloc(sect *IMAGE_SECTION_HEADER, relocfn func() int) { - sect.PointerToRelocations = uint32(coutbuf.Offset()) - // first entry: extended relocs - Lputl(0) // placeholder for number of relocation + 1 - Lputl(0) - Wputl(0) - - n := relocfn() + 1 - - cpos := coutbuf.Offset() - Cseek(int64(sect.PointerToRelocations)) - Lputl(uint32(n)) - Cseek(cpos) - if n > 0x10000 { - n = 0x10000 - sect.Characteristics |= IMAGE_SCN_LNK_NRELOC_OVFL - } else { - sect.PointerToRelocations += 10 // skip the extend reloc entry - } - sect.NumberOfRelocations = uint16(n - 1) -} - -// peemitreloc emits relocation entries for go.o in external linking. 
-func peemitreloc(ctxt *Link, text, data, ctors *IMAGE_SECTION_HEADER) { - for coutbuf.Offset()&7 != 0 { - Cput(0) - } - - peemitsectreloc(text, func() int { - n := perelocsect(ctxt, Segtext.Sections[0], ctxt.Textp, Segtext.Vaddr) - for _, sect := range Segtext.Sections[1:] { - n += perelocsect(ctxt, sect, datap, Segtext.Vaddr) - } - return n - }) - - peemitsectreloc(data, func() int { - var n int - for _, sect := range Segdata.Sections { - n += perelocsect(ctxt, sect, datap, Segdata.Vaddr) - } - return n - }) - -dwarfLoop: - for _, sect := range Segdwarf.Sections { - for i, name := range shNames { - if sect.Name == name { - peemitsectreloc(&sh[i], func() int { - return perelocsect(ctxt, sect, dwarfp, sect.Vaddr) - }) - continue dwarfLoop - } - } - Errorf(nil, "peemitsectreloc: could not find %q section", sect.Name) - } - - peemitsectreloc(ctors, func() int { - dottext := ctxt.Syms.Lookup(".text", 0) - Lputl(0) - Lputl(uint32(dottext.Dynid)) - switch objabi.GOARCH { - default: - Errorf(dottext, "unknown architecture for PE: %q\n", objabi.GOARCH) - case "386": - Wputl(IMAGE_REL_I386_DIR32) - case "amd64": - Wputl(IMAGE_REL_AMD64_ADDR64) - } - return 1 - }) + sect.pad(out, uint32(size)) } func (ctxt *Link) dope() { /* relocation table */ rel := ctxt.Syms.Lookup(".rel", 0) - rel.Attr |= AttrReachable - rel.Type = SELFROSECT + rel.Attr |= sym.AttrReachable + rel.Type = sym.SELFROSECT initdynimport(ctxt) initdynexport(ctxt) } -func strtbladd(name string) int { - off := len(strtbl) + 4 // offset includes 4-byte length at beginning of table - strtbl = append(strtbl, name...) - strtbl = append(strtbl, 0) - return off -} - -/* - * For more than 8 characters section names, name contains a slash (/) that is - * followed by an ASCII representation of a decimal number that is an offset into - * the string table. - * reference: pecoff_v8.docx Page 24. 
- * - */ -func newPEDWARFSection(ctxt *Link, name string, size int64) *IMAGE_SECTION_HEADER { - if size == 0 { - return nil - } - - off := strtbladd(name) - s := fmt.Sprintf("/%d", off) - h := addpesectionWithLongName(ctxt, s, name, int(size), int(size)) - h.Characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE - - return h -} - -// writePESymTableRecords writes all COFF symbol table records. -// It returns number of records written. -func writePESymTableRecords(ctxt *Link) int { - var symcnt int - - writeOneSymbol := func(s *Symbol, addr int64, sectidx int, typ uint16, class uint8) { - // write COFF symbol table record - if len(s.Name) > 8 { - Lputl(0) - Lputl(uint32(strtbladd(s.Name))) - } else { - strnput(s.Name, 8) - } - Lputl(uint32(addr)) - Wputl(uint16(sectidx)) - Wputl(typ) - Cput(class) - Cput(0) // no aux entries - - s.Dynid = int32(symcnt) - - symcnt++ - } - - put := func(ctxt *Link, s *Symbol, name string, type_ SymbolType, addr int64, gotype *Symbol) { - if s == nil { - return - } - if s.Sect == nil && type_ != UndefinedSym { - return - } - switch type_ { - default: - return - case DataSym, BSSSym, TextSym, UndefinedSym: - } - - // Only windows/386 requires underscore prefix on external symbols. - if SysArch.Family == sys.I386 && - Linkmode == LinkExternal && - (s.Type == SHOSTOBJ || s.Attr.CgoExport()) { - s.Name = "_" + s.Name - } - - typ := uint16(IMAGE_SYM_TYPE_NULL) - var sect int - var value int64 - if s.Sect != nil && s.Sect.Seg == &Segdata { - // Note: although address of runtime.edata (type SDATA) is at the start of .bss section - // it still belongs to the .data section, not the .bss section. 
- if uint64(s.Value) >= Segdata.Vaddr+Segdata.Filelen && s.Type != SDATA && Linkmode == LinkExternal { - value = int64(uint64(s.Value) - Segdata.Vaddr - Segdata.Filelen) - sect = bsssect - } else { - value = int64(uint64(s.Value) - Segdata.Vaddr) - sect = datasect - } - } else if s.Sect != nil && s.Sect.Seg == &Segtext { - value = int64(uint64(s.Value) - Segtext.Vaddr) - sect = textsect - } else if type_ == UndefinedSym { - typ = IMAGE_SYM_DTYPE_FUNCTION - } else { - Errorf(s, "addpesym %#x", addr) - } - if typ != IMAGE_SYM_TYPE_NULL { - } else if Linkmode != LinkExternal { - // TODO: fix IMAGE_SYM_DTYPE_ARRAY value and use following expression, instead of 0x0308 - typ = IMAGE_SYM_DTYPE_ARRAY<<8 + IMAGE_SYM_TYPE_STRUCT - typ = 0x0308 // "array of structs" - } - class := IMAGE_SYM_CLASS_EXTERNAL - if s.Version != 0 || (s.Type&SHIDDEN != 0) || s.Attr.Local() { - class = IMAGE_SYM_CLASS_STATIC - } - writeOneSymbol(s, value, sect, typ, uint8(class)) - } - - if Linkmode == LinkExternal { - // Include section symbols as external, because - // .ctors and .debug_* section relocations refer to it. - for idx, name := range shNames { - sym := ctxt.Syms.Lookup(name, 0) - writeOneSymbol(sym, 0, idx+1, IMAGE_SYM_TYPE_NULL, IMAGE_SYM_CLASS_STATIC) - } - } - - genasmsym(ctxt, put) - - return symcnt -} - -func addpesymtable(ctxt *Link) { - symtabStartPos := coutbuf.Offset() - - // write COFF symbol table - var symcnt int - if !*FlagS || Linkmode == LinkExternal { - symcnt = writePESymTableRecords(ctxt) - } - - // update COFF file header and section table - size := len(strtbl) + 4 + 18*symcnt - var h *IMAGE_SECTION_HEADER - if Linkmode != LinkExternal { - // We do not really need .symtab for go.o, and if we have one, ld - // will also include it in the exe, and that will confuse windows. 
- h = addpesection(ctxt, ".symtab", size, size) - h.Characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE - chksectoff(ctxt, h, symtabStartPos) - } - fh.PointerToSymbolTable = uint32(symtabStartPos) - fh.NumberOfSymbols = uint32(symcnt) - - // write COFF string table - Lputl(uint32(len(strtbl)) + 4) - for i := 0; i < len(strtbl); i++ { - Cput(strtbl[i]) - } - if Linkmode != LinkExternal { - strnput("", int(h.SizeOfRawData-uint32(size))) - } -} - -func setpersrc(ctxt *Link, sym *Symbol) { +func setpersrc(ctxt *Link, sym *sym.Symbol) { if rsrcsym != nil { Errorf(sym, "too many .rsrc sections") } @@ -1106,18 +1286,18 @@ func addpersrc(ctxt *Link) { return } - h := addpesection(ctxt, ".rsrc", int(rsrcsym.Size), int(rsrcsym.Size)) - h.Characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | IMAGE_SCN_CNT_INITIALIZED_DATA - chksectoff(ctxt, h, coutbuf.Offset()) + h := pefile.addSection(".rsrc", int(rsrcsym.Size), int(rsrcsym.Size)) + h.characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | IMAGE_SCN_CNT_INITIALIZED_DATA + h.checkOffset(ctxt.Out.Offset()) // relocation var p []byte - var r *Reloc + var r *sym.Reloc var val uint32 for ri := 0; ri < len(rsrcsym.R); ri++ { r = &rsrcsym.R[ri] p = rsrcsym.P[r.Off:] - val = uint32(int64(h.VirtualAddress) + r.Add) + val = uint32(int64(h.virtualAddress) + r.Add) // 32-bit little-endian p[0] = byte(val) @@ -1127,222 +1307,66 @@ func addpersrc(ctxt *Link) { p[3] = byte(val >> 24) } - Cwrite(rsrcsym.P) - strnput("", int(int64(h.SizeOfRawData)-rsrcsym.Size)) + ctxt.Out.Write(rsrcsym.P) + h.pad(ctxt.Out, uint32(rsrcsym.Size)) // update data directory - dd[IMAGE_DIRECTORY_ENTRY_RESOURCE].VirtualAddress = h.VirtualAddress + pefile.dataDirectory[IMAGE_DIRECTORY_ENTRY_RESOURCE].VirtualAddress = h.virtualAddress - dd[IMAGE_DIRECTORY_ENTRY_RESOURCE].Size = h.VirtualSize -} - -func addinitarray(ctxt *Link) (c *IMAGE_SECTION_HEADER) { - // The size below was determined by the specification for array relocations, - // and 
by observing what GCC writes here. If the initarray section grows to - // contain more than one constructor entry, the size will need to be 8 * constructor_count. - // However, the entire Go runtime is initialized from just one function, so it is unlikely - // that this will need to grow in the future. - var size int - switch objabi.GOARCH { - default: - fmt.Fprintf(os.Stderr, "link: unknown architecture for PE: %q\n", objabi.GOARCH) - os.Exit(2) - case "386": - size = 4 - case "amd64": - size = 8 - } - - c = addpesection(ctxt, ".ctors", size, size) - c.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ - c.SizeOfRawData = uint32(size) - - Cseek(int64(c.PointerToRawData)) - chksectoff(ctxt, c, coutbuf.Offset()) - init_entry := ctxt.Syms.Lookup(*flagEntrySymbol, 0) - addr := uint64(init_entry.Value) - init_entry.Sect.Vaddr - - switch objabi.GOARCH { - case "386": - Lputl(uint32(addr)) - case "amd64": - Vputl(addr) - } - - return c + pefile.dataDirectory[IMAGE_DIRECTORY_ENTRY_RESOURCE].Size = h.virtualSize } func Asmbpe(ctxt *Link) { - switch SysArch.Family { + switch ctxt.Arch.Family { default: - Exitf("unknown PE architecture: %v", SysArch.Family) - case sys.AMD64: - fh.Machine = IMAGE_FILE_MACHINE_AMD64 - case sys.I386: - fh.Machine = IMAGE_FILE_MACHINE_I386 + Exitf("unknown PE architecture: %v", ctxt.Arch.Family) + case sys.AMD64, sys.I386: } - t := addpesection(ctxt, ".text", int(Segtext.Length), int(Segtext.Length)) - t.Characteristics = IMAGE_SCN_CNT_CODE | IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_READ - if Linkmode == LinkExternal { + t := pefile.addSection(".text", int(Segtext.Length), int(Segtext.Length)) + t.characteristics = IMAGE_SCN_CNT_CODE | IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_READ + if ctxt.LinkMode == LinkExternal { // some data symbols (e.g. 
masks) end up in the .text section, and they normally // expect larger alignment requirement than the default text section alignment. - t.Characteristics |= IMAGE_SCN_ALIGN_32BYTES + t.characteristics |= IMAGE_SCN_ALIGN_32BYTES } - chksectseg(ctxt, t, &Segtext) - textsect = pensect + t.checkSegment(&Segtext) + pefile.textSect = t - var d *IMAGE_SECTION_HEADER - var c *IMAGE_SECTION_HEADER - if Linkmode != LinkExternal { - d = addpesection(ctxt, ".data", int(Segdata.Length), int(Segdata.Filelen)) - d.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE - chksectseg(ctxt, d, &Segdata) - datasect = pensect + var d *peSection + if ctxt.LinkMode != LinkExternal { + d = pefile.addSection(".data", int(Segdata.Length), int(Segdata.Filelen)) + d.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE + d.checkSegment(&Segdata) + pefile.dataSect = d } else { - d = addpesection(ctxt, ".data", int(Segdata.Filelen), int(Segdata.Filelen)) - d.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | IMAGE_SCN_ALIGN_32BYTES - chksectseg(ctxt, d, &Segdata) - datasect = pensect + d = pefile.addSection(".data", int(Segdata.Filelen), int(Segdata.Filelen)) + d.characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | IMAGE_SCN_ALIGN_32BYTES + d.checkSegment(&Segdata) + pefile.dataSect = d - b := addpesection(ctxt, ".bss", int(Segdata.Length-Segdata.Filelen), 0) - b.Characteristics = IMAGE_SCN_CNT_UNINITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | IMAGE_SCN_ALIGN_32BYTES - b.PointerToRawData = 0 - bsssect = pensect + b := pefile.addSection(".bss", int(Segdata.Length-Segdata.Filelen), 0) + b.characteristics = IMAGE_SCN_CNT_UNINITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | IMAGE_SCN_ALIGN_32BYTES + b.pointerToRawData = 0 + pefile.bssSect = b } - if !*FlagS { - dwarfaddpeheaders(ctxt) + pefile.addDWARF() + + if 
ctxt.LinkMode == LinkExternal { + pefile.ctorsSect = pefile.addInitArray(ctxt) } - if Linkmode == LinkExternal { - c = addinitarray(ctxt) - } - - Cseek(int64(nextfileoff)) - if Linkmode != LinkExternal { + ctxt.Out.SeekSet(int64(pefile.nextFileOffset)) + if ctxt.LinkMode != LinkExternal { addimports(ctxt, d) addexports(ctxt) } - addpesymtable(ctxt) + pefile.writeSymbolTableAndStringTable(ctxt) addpersrc(ctxt) - if Linkmode == LinkExternal { - peemitreloc(ctxt, t, d, c) + if ctxt.LinkMode == LinkExternal { + pefile.emitRelocations(ctxt) } - fh.NumberOfSections = uint16(pensect) - - // Being able to produce identical output for identical input is - // much more beneficial than having build timestamp in the header. - fh.TimeDateStamp = 0 - - if Linkmode == LinkExternal { - fh.Characteristics = IMAGE_FILE_LINE_NUMS_STRIPPED - } else { - fh.Characteristics = IMAGE_FILE_RELOCS_STRIPPED | IMAGE_FILE_EXECUTABLE_IMAGE | IMAGE_FILE_DEBUG_STRIPPED - } - if pe64 != 0 { - fh.SizeOfOptionalHeader = uint16(binary.Size(&oh64)) - fh.Characteristics |= IMAGE_FILE_LARGE_ADDRESS_AWARE - oh64.Magic = 0x20b // PE32+ - } else { - fh.SizeOfOptionalHeader = uint16(binary.Size(&oh)) - fh.Characteristics |= IMAGE_FILE_32BIT_MACHINE - oh.Magic = 0x10b // PE32 - oh.BaseOfData = d.VirtualAddress - } - - // Fill out both oh64 and oh. We only use one. Oh well. 
- oh64.MajorLinkerVersion = 3 - - oh.MajorLinkerVersion = 3 - oh64.MinorLinkerVersion = 0 - oh.MinorLinkerVersion = 0 - oh64.SizeOfCode = t.SizeOfRawData - oh.SizeOfCode = t.SizeOfRawData - oh64.SizeOfInitializedData = d.SizeOfRawData - oh.SizeOfInitializedData = d.SizeOfRawData - oh64.SizeOfUninitializedData = 0 - oh.SizeOfUninitializedData = 0 - if Linkmode != LinkExternal { - oh64.AddressOfEntryPoint = uint32(Entryvalue(ctxt) - PEBASE) - oh.AddressOfEntryPoint = uint32(Entryvalue(ctxt) - PEBASE) - } - oh64.BaseOfCode = t.VirtualAddress - oh.BaseOfCode = t.VirtualAddress - oh64.ImageBase = PEBASE - oh.ImageBase = PEBASE - oh64.SectionAlignment = uint32(PESECTALIGN) - oh.SectionAlignment = uint32(PESECTALIGN) - oh64.FileAlignment = uint32(PEFILEALIGN) - oh.FileAlignment = uint32(PEFILEALIGN) - oh64.MajorOperatingSystemVersion = 4 - oh.MajorOperatingSystemVersion = 4 - oh64.MinorOperatingSystemVersion = 0 - oh.MinorOperatingSystemVersion = 0 - oh64.MajorImageVersion = 1 - oh.MajorImageVersion = 1 - oh64.MinorImageVersion = 0 - oh.MinorImageVersion = 0 - oh64.MajorSubsystemVersion = 4 - oh.MajorSubsystemVersion = 4 - oh64.MinorSubsystemVersion = 0 - oh.MinorSubsystemVersion = 0 - oh64.SizeOfImage = uint32(nextsectoff) - oh.SizeOfImage = uint32(nextsectoff) - oh64.SizeOfHeaders = uint32(PEFILEHEADR) - oh.SizeOfHeaders = uint32(PEFILEHEADR) - if windowsgui { - oh64.Subsystem = IMAGE_SUBSYSTEM_WINDOWS_GUI - oh.Subsystem = IMAGE_SUBSYSTEM_WINDOWS_GUI - } else { - oh64.Subsystem = IMAGE_SUBSYSTEM_WINDOWS_CUI - oh.Subsystem = IMAGE_SUBSYSTEM_WINDOWS_CUI - } - - // Disable stack growth as we don't want Windows to - // fiddle with the thread stack limits, which we set - // ourselves to circumvent the stack checks in the - // Windows exception dispatcher. - // Commit size must be strictly less than reserve - // size otherwise reserve will be rounded up to a - // larger size, as verified with VMMap. - - // On 64-bit, we always reserve 2MB stacks. 
"Pure" Go code is - // okay with much smaller stacks, but the syscall package - // makes it easy to call into arbitrary C code without cgo, - // and system calls even in "pure" Go code are actually C - // calls that may need more stack than we think. - // - // The default stack reserve size affects only the main - // thread, ctrlhandler thread, and profileloop thread. For - // these, it must be greater than the stack size assumed by - // externalthreadhandler. - // - // For other threads we specify stack size in runtime explicitly. - // For these, the reserve must match STACKSIZE in - // runtime/cgo/gcc_windows_{386,amd64}.c and the correspondent - // CreateThread parameter in runtime.newosproc. - oh64.SizeOfStackReserve = 0x00200000 - oh64.SizeOfStackCommit = 0x00200000 - 0x2000 // account for 2 guard pages - - // 32-bit is trickier since there much less address space to - // work with. Here we use large stacks only in cgo binaries as - // a compromise. - if !iscgo { - oh.SizeOfStackReserve = 0x00020000 - oh.SizeOfStackCommit = 0x00001000 - } else { - oh.SizeOfStackReserve = 0x00100000 - oh.SizeOfStackCommit = 0x00100000 - 0x2000 - } - - oh64.SizeOfHeapReserve = 0x00100000 - oh.SizeOfHeapReserve = 0x00100000 - oh64.SizeOfHeapCommit = 0x00001000 - oh.SizeOfHeapCommit = 0x00001000 - oh64.NumberOfRvaAndSizes = 16 - oh.NumberOfRvaAndSizes = 16 - - pewrite() + pewrite(ctxt) } diff --git a/src/cmd/link/internal/ld/sym.go b/src/cmd/link/internal/ld/sym.go index 6e239d79a53..6f019de8cc0 100644 --- a/src/cmd/link/internal/ld/sym.go +++ b/src/cmd/link/internal/ld/sym.go @@ -34,35 +34,37 @@ package ld import ( "cmd/internal/objabi" "cmd/internal/sys" + "cmd/link/internal/sym" "log" ) func linknew(arch *sys.Arch) *Link { ctxt := &Link{ - Syms: &Symbols{ - hash: []map[string]*Symbol{ - // preallocate about 2mb for hash of - // non static symbols - make(map[string]*Symbol, 100000), - }, - Allsym: make([]*Symbol, 0, 100000), - }, + Syms: sym.NewSymbols(), + Out: &OutBuf{arch: 
arch}, Arch: arch, - LibraryByPkg: make(map[string]*Library), + LibraryByPkg: make(map[string]*sym.Library), } if objabi.GOARCH != arch.Name { log.Fatalf("invalid objabi.GOARCH %s (want %s)", objabi.GOARCH, arch.Name) } + AtExit(func() { + if nerrors > 0 && ctxt.Out.f != nil { + ctxt.Out.f.Close() + mayberemoveoutfile() + } + }) + return ctxt } // computeTLSOffset records the thread-local storage offset. func (ctxt *Link) computeTLSOffset() { - switch Headtype { + switch ctxt.HeadType { default: - log.Fatalf("unknown thread-local storage offset for %v", Headtype) + log.Fatalf("unknown thread-local storage offset for %v", ctxt.HeadType) case objabi.Hplan9, objabi.Hwindows: break diff --git a/src/cmd/link/internal/ld/symtab.go b/src/cmd/link/internal/ld/symtab.go index 78e9dc26bc0..bb8c1992ba8 100644 --- a/src/cmd/link/internal/ld/symtab.go +++ b/src/cmd/link/internal/ld/symtab.go @@ -33,6 +33,7 @@ package ld import ( "cmd/internal/objabi" "cmd/internal/sys" + "cmd/link/internal/sym" "fmt" "path/filepath" "strings" @@ -52,31 +53,31 @@ func putelfstr(s string) int { return off } -func putelfsyment(off int, addr int64, size int64, info int, shndx int, other int) { +func putelfsyment(out *OutBuf, off int, addr int64, size int64, info int, shndx int, other int) { if elf64 { - Thearch.Lput(uint32(off)) - Cput(uint8(info)) - Cput(uint8(other)) - Thearch.Wput(uint16(shndx)) - Thearch.Vput(uint64(addr)) - Thearch.Vput(uint64(size)) + out.Write32(uint32(off)) + out.Write8(uint8(info)) + out.Write8(uint8(other)) + out.Write16(uint16(shndx)) + out.Write64(uint64(addr)) + out.Write64(uint64(size)) Symsize += ELF64SYMSIZE } else { - Thearch.Lput(uint32(off)) - Thearch.Lput(uint32(addr)) - Thearch.Lput(uint32(size)) - Cput(uint8(info)) - Cput(uint8(other)) - Thearch.Wput(uint16(shndx)) + out.Write32(uint32(off)) + out.Write32(uint32(addr)) + out.Write32(uint32(size)) + out.Write8(uint8(info)) + out.Write8(uint8(other)) + out.Write16(uint16(shndx)) Symsize += ELF32SYMSIZE } } -var 
numelfsym int = 1 // 0 is reserved +var numelfsym = 1 // 0 is reserved var elfbind int -func putelfsym(ctxt *Link, x *Symbol, s string, t SymbolType, addr int64, go_ *Symbol) { +func putelfsym(ctxt *Link, x *sym.Symbol, s string, t SymbolType, addr int64, go_ *sym.Symbol) { var typ int switch t { @@ -109,7 +110,7 @@ func putelfsym(ctxt *Link, x *Symbol, s string, t SymbolType, addr int64, go_ *S } var elfshnum int - if xo.Type == SDYNIMPORT || xo.Type == SHOSTOBJ { + if xo.Type == sym.SDYNIMPORT || xo.Type == sym.SHOSTOBJ { elfshnum = SHN_UNDEF } else { if xo.Sect == nil { @@ -120,14 +121,14 @@ func putelfsym(ctxt *Link, x *Symbol, s string, t SymbolType, addr int64, go_ *S Errorf(x, "missing ELF section in putelfsym") return } - elfshnum = xo.Sect.Elfsect.shnum + elfshnum = xo.Sect.Elfsect.(*ElfShdr).shnum } // One pass for each binding: STB_LOCAL, STB_GLOBAL, // maybe one day STB_WEAK. bind := STB_GLOBAL - if x.Version != 0 || (x.Type&SHIDDEN != 0) || x.Attr.Local() { + if x.Version != 0 || x.Attr.VisibilityHidden() || x.Attr.Local() { bind = STB_LOCAL } @@ -136,18 +137,23 @@ func putelfsym(ctxt *Link, x *Symbol, s string, t SymbolType, addr int64, go_ *S // To avoid filling the dynamic table with lots of unnecessary symbols, // mark all Go symbols local (not global) in the final executable. // But when we're dynamically linking, we need all those global symbols. - if !ctxt.DynlinkingGo() && Linkmode == LinkExternal && !x.Attr.CgoExportStatic() && elfshnum != SHN_UNDEF { + if !ctxt.DynlinkingGo() && ctxt.LinkMode == LinkExternal && !x.Attr.CgoExportStatic() && elfshnum != SHN_UNDEF { bind = STB_LOCAL } - if Linkmode == LinkExternal && elfshnum != SHN_UNDEF { + if ctxt.LinkMode == LinkExternal && elfshnum != SHN_UNDEF { addr -= int64(xo.Sect.Vaddr) } other := STV_DEFAULT - if x.Type&SHIDDEN != 0 { + if x.Attr.VisibilityHidden() { + // TODO(mwhudson): We only set AttrVisibilityHidden in ldelf, i.e. when + // internally linking. 
But STV_HIDDEN visibility only matters in object + // files and shared libraries, and as we are a long way from implementing + // internal linking for shared libraries and only create object files when + // externally linking, I don't think this makes a lot of sense. other = STV_HIDDEN } - if SysArch.Family == sys.PPC64 && typ == STT_FUNC && x.Attr.Shared() && x.Name != "runtime.duffzero" && x.Name != "runtime.duffcopy" { + if ctxt.Arch.Family == sys.PPC64 && typ == STT_FUNC && x.Attr.Shared() && x.Name != "runtime.duffzero" && x.Name != "runtime.duffcopy" { // On ppc64 the top three bits of the st_other field indicate how // many instructions separate the global and local entry points. In // our case it is two instructions, indicated by the value 3. @@ -165,16 +171,16 @@ func putelfsym(ctxt *Link, x *Symbol, s string, t SymbolType, addr int64, go_ *S s = strings.Replace(s, "·", ".", -1) } - if ctxt.DynlinkingGo() && bind == STB_GLOBAL && elfbind == STB_LOCAL && x.Type == STEXT { + if ctxt.DynlinkingGo() && bind == STB_GLOBAL && elfbind == STB_LOCAL && x.Type == sym.STEXT { // When dynamically linking, we want references to functions defined // in this module to always be to the function object, not to the // PLT. We force this by writing an additional local symbol for every // global function symbol and making all relocations against the // global symbol refer to this local symbol instead (see - // (*Symbol).ElfsymForReloc). This is approximately equivalent to the + // (*sym.Symbol).ElfsymForReloc). This is approximately equivalent to the // ELF linker -Bsymbolic-functions option, but that is buggy on // several platforms. 
- putelfsyment(putelfstr("local."+s), addr, size, STB_LOCAL<<4|typ&0xf, elfshnum, other) + putelfsyment(ctxt.Out, putelfstr("local."+s), addr, size, STB_LOCAL<<4|typ&0xf, elfshnum, other) x.LocalElfsym = int32(numelfsym) numelfsym++ return @@ -182,20 +188,20 @@ func putelfsym(ctxt *Link, x *Symbol, s string, t SymbolType, addr int64, go_ *S return } - putelfsyment(putelfstr(s), addr, size, bind<<4|typ&0xf, elfshnum, other) + putelfsyment(ctxt.Out, putelfstr(s), addr, size, bind<<4|typ&0xf, elfshnum, other) x.Elfsym = int32(numelfsym) numelfsym++ } -func putelfsectionsym(s *Symbol, shndx int) { - putelfsyment(0, 0, 0, STB_LOCAL<<4|STT_SECTION, shndx, 0) +func putelfsectionsym(out *OutBuf, s *sym.Symbol, shndx int) { + putelfsyment(out, 0, 0, 0, STB_LOCAL<<4|STT_SECTION, shndx, 0) s.Elfsym = int32(numelfsym) numelfsym++ } func Asmelfsym(ctxt *Link) { // the first symbol entry is reserved - putelfsyment(0, 0, 0, STB_LOCAL<<4|STT_NOTYPE, 0, 0) + putelfsyment(ctxt.Out, 0, 0, 0, STB_LOCAL<<4|STT_NOTYPE, 0, 0) dwarfaddelfsectionsyms(ctxt) @@ -203,7 +209,7 @@ func Asmelfsym(ctxt *Link) { // Avoid having the working directory inserted into the symbol table. // It is added with a name to avoid problems with external linking // encountered on some versions of Solaris. See issue #14957. 
- putelfsyment(putelfstr("go.go"), 0, 0, STB_LOCAL<<4|STT_FILE, SHN_ABS, 0) + putelfsyment(ctxt.Out, putelfstr("go.go"), 0, 0, STB_LOCAL<<4|STT_FILE, SHN_ABS, 0) numelfsym++ elfbind = STB_LOCAL @@ -214,7 +220,7 @@ func Asmelfsym(ctxt *Link) { genasmsym(ctxt, putelfsym) } -func putplan9sym(ctxt *Link, x *Symbol, s string, typ SymbolType, addr int64, go_ *Symbol) { +func putplan9sym(ctxt *Link, x *sym.Symbol, s string, typ SymbolType, addr int64, go_ *sym.Symbol) { t := int(typ) switch typ { case TextSym, DataSym, BSSSym: @@ -223,28 +229,20 @@ func putplan9sym(ctxt *Link, x *Symbol, s string, typ SymbolType, addr int64, go } fallthrough - case AutoSym, ParamSym, FileSym, FrameSym: + case AutoSym, ParamSym, FrameSym: l := 4 - if Headtype == objabi.Hplan9 && SysArch.Family == sys.AMD64 && !Flag8 { - Lputb(uint32(addr >> 32)) + if ctxt.HeadType == objabi.Hplan9 && ctxt.Arch.Family == sys.AMD64 && !Flag8 { + ctxt.Out.Write32b(uint32(addr >> 32)) l = 8 } - Lputb(uint32(addr)) - Cput(uint8(t + 0x80)) /* 0x80 is variable length */ + ctxt.Out.Write32b(uint32(addr)) + ctxt.Out.Write8(uint8(t + 0x80)) /* 0x80 is variable length */ - var i int + ctxt.Out.WriteString(s) + ctxt.Out.Write8(0) - /* skip the '<' in filenames */ - if t == FileSym { - s = s[1:] - } - for i = 0; i < len(s); i++ { - Cput(s[i]) - } - Cput(0) - - Symsize += int32(l) + 1 + int32(i) + 1 + Symsize += int32(l) + 1 + int32(len(s)) + 1 default: return @@ -255,43 +253,9 @@ func Asmplan9sym(ctxt *Link) { genasmsym(ctxt, putplan9sym) } -var symt *Symbol +var symt *sym.Symbol -var encbuf [10]byte - -func Wputb(w uint16) { Cwrite(Append16b(encbuf[:0], w)) } -func Lputb(l uint32) { Cwrite(Append32b(encbuf[:0], l)) } -func Vputb(v uint64) { Cwrite(Append64b(encbuf[:0], v)) } - -func Wputl(w uint16) { Cwrite(Append16l(encbuf[:0], w)) } -func Lputl(l uint32) { Cwrite(Append32l(encbuf[:0], l)) } -func Vputl(v uint64) { Cwrite(Append64l(encbuf[:0], v)) } - -func Append16b(b []byte, v uint16) []byte { - return append(b, 
uint8(v>>8), uint8(v)) -} -func Append16l(b []byte, v uint16) []byte { - return append(b, uint8(v), uint8(v>>8)) -} - -func Append32b(b []byte, v uint32) []byte { - return append(b, uint8(v>>24), uint8(v>>16), uint8(v>>8), uint8(v)) -} -func Append32l(b []byte, v uint32) []byte { - return append(b, uint8(v), uint8(v>>8), uint8(v>>16), uint8(v>>24)) -} - -func Append64b(b []byte, v uint64) []byte { - return append(b, uint8(v>>56), uint8(v>>48), uint8(v>>40), uint8(v>>32), - uint8(v>>24), uint8(v>>16), uint8(v>>8), uint8(v)) -} - -func Append64l(b []byte, v uint64) []byte { - return append(b, uint8(v), uint8(v>>8), uint8(v>>16), uint8(v>>24), - uint8(v>>32), uint8(v>>40), uint8(v>>48), uint8(v>>56)) -} - -type byPkg []*Library +type byPkg []*sym.Library func (libs byPkg) Len() int { return len(libs) @@ -310,8 +274,8 @@ func (libs byPkg) Swap(a, b int) { func textsectionmap(ctxt *Link) uint32 { t := ctxt.Syms.Lookup("runtime.textsectionmap", 0) - t.Type = SRODATA - t.Attr |= AttrReachable + t.Type = sym.SRODATA + t.Attr |= sym.AttrReachable nsections := int64(0) for _, sect := range Segtext.Sections { @@ -321,7 +285,7 @@ func textsectionmap(ctxt *Link) uint32 { break } } - Symgrow(t, 3*nsections*int64(SysArch.PtrSize)) + t.Grow(3 * nsections * int64(ctxt.Arch.PtrSize)) off := int64(0) n := 0 @@ -340,21 +304,21 @@ func textsectionmap(ctxt *Link) uint32 { if sect.Name != ".text" { break } - off = setuint(ctxt, t, off, sect.Vaddr-textbase) - off = setuint(ctxt, t, off, sect.Length) + off = t.SetUint(ctxt.Arch, off, sect.Vaddr-textbase) + off = t.SetUint(ctxt.Arch, off, sect.Length) if n == 0 { s := ctxt.Syms.ROLookup("runtime.text", 0) if s == nil { Errorf(nil, "Unable to find symbol runtime.text\n") } - off = setaddr(ctxt, t, off, s) + off = t.SetAddr(ctxt.Arch, off, s) } else { s := ctxt.Syms.Lookup(fmt.Sprintf("runtime.text.%d", n), 0) if s == nil { Errorf(nil, "Unable to find symbol runtime.text.%d\n", n) } - off = setaddr(ctxt, t, off, s) + off = 
t.SetAddr(ctxt.Arch, off, s) } n++ } @@ -366,98 +330,98 @@ func (ctxt *Link) symtab() { // Define these so that they'll get put into the symbol table. // data.c:/^address will provide the actual values. - ctxt.xdefine("runtime.text", STEXT, 0) + ctxt.xdefine("runtime.text", sym.STEXT, 0) - ctxt.xdefine("runtime.etext", STEXT, 0) - ctxt.xdefine("runtime.itablink", SRODATA, 0) - ctxt.xdefine("runtime.eitablink", SRODATA, 0) - ctxt.xdefine("runtime.rodata", SRODATA, 0) - ctxt.xdefine("runtime.erodata", SRODATA, 0) - ctxt.xdefine("runtime.types", SRODATA, 0) - ctxt.xdefine("runtime.etypes", SRODATA, 0) - ctxt.xdefine("runtime.noptrdata", SNOPTRDATA, 0) - ctxt.xdefine("runtime.enoptrdata", SNOPTRDATA, 0) - ctxt.xdefine("runtime.data", SDATA, 0) - ctxt.xdefine("runtime.edata", SDATA, 0) - ctxt.xdefine("runtime.bss", SBSS, 0) - ctxt.xdefine("runtime.ebss", SBSS, 0) - ctxt.xdefine("runtime.noptrbss", SNOPTRBSS, 0) - ctxt.xdefine("runtime.enoptrbss", SNOPTRBSS, 0) - ctxt.xdefine("runtime.end", SBSS, 0) - ctxt.xdefine("runtime.epclntab", SRODATA, 0) - ctxt.xdefine("runtime.esymtab", SRODATA, 0) + ctxt.xdefine("runtime.etext", sym.STEXT, 0) + ctxt.xdefine("runtime.itablink", sym.SRODATA, 0) + ctxt.xdefine("runtime.eitablink", sym.SRODATA, 0) + ctxt.xdefine("runtime.rodata", sym.SRODATA, 0) + ctxt.xdefine("runtime.erodata", sym.SRODATA, 0) + ctxt.xdefine("runtime.types", sym.SRODATA, 0) + ctxt.xdefine("runtime.etypes", sym.SRODATA, 0) + ctxt.xdefine("runtime.noptrdata", sym.SNOPTRDATA, 0) + ctxt.xdefine("runtime.enoptrdata", sym.SNOPTRDATA, 0) + ctxt.xdefine("runtime.data", sym.SDATA, 0) + ctxt.xdefine("runtime.edata", sym.SDATA, 0) + ctxt.xdefine("runtime.bss", sym.SBSS, 0) + ctxt.xdefine("runtime.ebss", sym.SBSS, 0) + ctxt.xdefine("runtime.noptrbss", sym.SNOPTRBSS, 0) + ctxt.xdefine("runtime.enoptrbss", sym.SNOPTRBSS, 0) + ctxt.xdefine("runtime.end", sym.SBSS, 0) + ctxt.xdefine("runtime.epclntab", sym.SRODATA, 0) + ctxt.xdefine("runtime.esymtab", sym.SRODATA, 0) // garbage 
collection symbols s := ctxt.Syms.Lookup("runtime.gcdata", 0) - s.Type = SRODATA + s.Type = sym.SRODATA s.Size = 0 - s.Attr |= AttrReachable - ctxt.xdefine("runtime.egcdata", SRODATA, 0) + s.Attr |= sym.AttrReachable + ctxt.xdefine("runtime.egcdata", sym.SRODATA, 0) s = ctxt.Syms.Lookup("runtime.gcbss", 0) - s.Type = SRODATA + s.Type = sym.SRODATA s.Size = 0 - s.Attr |= AttrReachable - ctxt.xdefine("runtime.egcbss", SRODATA, 0) + s.Attr |= sym.AttrReachable + ctxt.xdefine("runtime.egcbss", sym.SRODATA, 0) // pseudo-symbols to mark locations of type, string, and go string data. - var symtype *Symbol - var symtyperel *Symbol - if UseRelro() && (Buildmode == BuildmodeCArchive || Buildmode == BuildmodeCShared || Buildmode == BuildmodePIE) { + var symtype *sym.Symbol + var symtyperel *sym.Symbol + if ctxt.UseRelro() && (ctxt.BuildMode == BuildModeCArchive || ctxt.BuildMode == BuildModeCShared || ctxt.BuildMode == BuildModePIE) { s = ctxt.Syms.Lookup("type.*", 0) - s.Type = STYPE + s.Type = sym.STYPE s.Size = 0 - s.Attr |= AttrReachable + s.Attr |= sym.AttrReachable symtype = s s = ctxt.Syms.Lookup("typerel.*", 0) - s.Type = STYPERELRO + s.Type = sym.STYPERELRO s.Size = 0 - s.Attr |= AttrReachable + s.Attr |= sym.AttrReachable symtyperel = s } else if !ctxt.DynlinkingGo() { s = ctxt.Syms.Lookup("type.*", 0) - s.Type = STYPE + s.Type = sym.STYPE s.Size = 0 - s.Attr |= AttrReachable + s.Attr |= sym.AttrReachable symtype = s symtyperel = s } - groupSym := func(name string, t SymKind) *Symbol { + groupSym := func(name string, t sym.SymKind) *sym.Symbol { s := ctxt.Syms.Lookup(name, 0) s.Type = t s.Size = 0 - s.Attr |= AttrLocal | AttrReachable + s.Attr |= sym.AttrLocal | sym.AttrReachable return s } var ( - symgostring = groupSym("go.string.*", SGOSTRING) - symgofunc = groupSym("go.func.*", SGOFUNC) - symgcbits = groupSym("runtime.gcbits.*", SGCBITS) + symgostring = groupSym("go.string.*", sym.SGOSTRING) + symgofunc = groupSym("go.func.*", sym.SGOFUNC) + symgcbits = 
groupSym("runtime.gcbits.*", sym.SGCBITS) ) - var symgofuncrel *Symbol + var symgofuncrel *sym.Symbol if !ctxt.DynlinkingGo() { - if UseRelro() { - symgofuncrel = groupSym("go.funcrel.*", SGOFUNCRELRO) + if ctxt.UseRelro() { + symgofuncrel = groupSym("go.funcrel.*", sym.SGOFUNCRELRO) } else { symgofuncrel = symgofunc } } symitablink := ctxt.Syms.Lookup("runtime.itablink", 0) - symitablink.Type = SITABLINK + symitablink.Type = sym.SITABLINK symt = ctxt.Syms.Lookup("runtime.symtab", 0) - symt.Attr |= AttrLocal - symt.Type = SSYMTAB + symt.Attr |= sym.AttrLocal + symt.Type = sym.SSYMTAB symt.Size = 0 - symt.Attr |= AttrReachable + symt.Attr |= sym.AttrReachable nitablinks := 0 @@ -466,53 +430,53 @@ func (ctxt *Link) symtab() { // just defined above will be first. // hide the specific symbols. for _, s := range ctxt.Syms.Allsym { - if !s.Attr.Reachable() || s.Attr.Special() || s.Type != SRODATA { + if !s.Attr.Reachable() || s.Attr.Special() || s.Type != sym.SRODATA { continue } switch { case strings.HasPrefix(s.Name, "type."): if !ctxt.DynlinkingGo() { - s.Attr |= AttrNotInSymbolTable + s.Attr |= sym.AttrNotInSymbolTable } - if UseRelro() { - s.Type = STYPERELRO + if ctxt.UseRelro() { + s.Type = sym.STYPERELRO s.Outer = symtyperel } else { - s.Type = STYPE + s.Type = sym.STYPE s.Outer = symtype } - case strings.HasPrefix(s.Name, "go.importpath.") && UseRelro(): + case strings.HasPrefix(s.Name, "go.importpath.") && ctxt.UseRelro(): // Keep go.importpath symbols in the same section as types and // names, as they can be referred to by a section offset. 
- s.Type = STYPERELRO + s.Type = sym.STYPERELRO case strings.HasPrefix(s.Name, "go.itablink."): nitablinks++ - s.Type = SITABLINK - s.Attr |= AttrNotInSymbolTable + s.Type = sym.SITABLINK + s.Attr |= sym.AttrNotInSymbolTable s.Outer = symitablink case strings.HasPrefix(s.Name, "go.string."): - s.Type = SGOSTRING - s.Attr |= AttrNotInSymbolTable + s.Type = sym.SGOSTRING + s.Attr |= sym.AttrNotInSymbolTable s.Outer = symgostring case strings.HasPrefix(s.Name, "runtime.gcbits."): - s.Type = SGCBITS - s.Attr |= AttrNotInSymbolTable + s.Type = sym.SGCBITS + s.Attr |= sym.AttrNotInSymbolTable s.Outer = symgcbits case strings.HasSuffix(s.Name, "·f"): if !ctxt.DynlinkingGo() { - s.Attr |= AttrNotInSymbolTable + s.Attr |= sym.AttrNotInSymbolTable } - if UseRelro() { - s.Type = SGOFUNCRELRO + if ctxt.UseRelro() { + s.Type = sym.SGOFUNCRELRO s.Outer = symgofuncrel } else { - s.Type = SGOFUNC + s.Type = sym.SGOFUNC s.Outer = symgofunc } @@ -520,34 +484,34 @@ func (ctxt *Link) symtab() { strings.HasPrefix(s.Name, "gclocals."), strings.HasPrefix(s.Name, "gclocals·"), strings.HasPrefix(s.Name, "inltree."): - s.Type = SGOFUNC - s.Attr |= AttrNotInSymbolTable + s.Type = sym.SGOFUNC + s.Attr |= sym.AttrNotInSymbolTable s.Outer = symgofunc s.Align = 4 liveness += (s.Size + int64(s.Align) - 1) &^ (int64(s.Align) - 1) } } - if Buildmode == BuildmodeShared { + if ctxt.BuildMode == BuildModeShared { abihashgostr := ctxt.Syms.Lookup("go.link.abihash."+filepath.Base(*flagOutfile), 0) - abihashgostr.Attr |= AttrReachable - abihashgostr.Type = SRODATA + abihashgostr.Attr |= sym.AttrReachable + abihashgostr.Type = sym.SRODATA hashsym := ctxt.Syms.Lookup("go.link.abihashbytes", 0) - Addaddr(ctxt, abihashgostr, hashsym) - adduint(ctxt, abihashgostr, uint64(hashsym.Size)) + abihashgostr.AddAddr(ctxt.Arch, hashsym) + abihashgostr.AddUint(ctxt.Arch, uint64(hashsym.Size)) } - if Buildmode == BuildmodePlugin || ctxt.Syms.ROLookup("plugin.Open", 0) != nil { + if ctxt.BuildMode == BuildModePlugin || 
ctxt.Syms.ROLookup("plugin.Open", 0) != nil { for _, l := range ctxt.Library { s := ctxt.Syms.Lookup("go.link.pkghashbytes."+l.Pkg, 0) - s.Attr |= AttrReachable - s.Type = SRODATA - s.Size = int64(len(l.hash)) - s.P = []byte(l.hash) + s.Attr |= sym.AttrReachable + s.Type = sym.SRODATA + s.Size = int64(len(l.Hash)) + s.P = []byte(l.Hash) str := ctxt.Syms.Lookup("go.link.pkghash."+l.Pkg, 0) - str.Attr |= AttrReachable - str.Type = SRODATA - Addaddr(ctxt, str, s) - adduint(ctxt, str, uint64(len(l.hash))) + str.Attr |= sym.AttrReachable + str.Type = sym.SRODATA + str.AddAddr(ctxt.Arch, s) + str.AddUint(ctxt.Arch, uint64(len(l.Hash))) } } @@ -559,99 +523,99 @@ func (ctxt *Link) symtab() { // This code uses several global variables that are set by pcln.go:pclntab. moduledata := ctxt.Moduledata // The pclntab slice - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.pclntab", 0)) - adduint(ctxt, moduledata, uint64(ctxt.Syms.Lookup("runtime.pclntab", 0).Size)) - adduint(ctxt, moduledata, uint64(ctxt.Syms.Lookup("runtime.pclntab", 0).Size)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.pclntab", 0)) + moduledata.AddUint(ctxt.Arch, uint64(ctxt.Syms.Lookup("runtime.pclntab", 0).Size)) + moduledata.AddUint(ctxt.Arch, uint64(ctxt.Syms.Lookup("runtime.pclntab", 0).Size)) // The ftab slice - Addaddrplus(ctxt, moduledata, ctxt.Syms.Lookup("runtime.pclntab", 0), int64(pclntabPclntabOffset)) - adduint(ctxt, moduledata, uint64(pclntabNfunc+1)) - adduint(ctxt, moduledata, uint64(pclntabNfunc+1)) + moduledata.AddAddrPlus(ctxt.Arch, ctxt.Syms.Lookup("runtime.pclntab", 0), int64(pclntabPclntabOffset)) + moduledata.AddUint(ctxt.Arch, uint64(pclntabNfunc+1)) + moduledata.AddUint(ctxt.Arch, uint64(pclntabNfunc+1)) // The filetab slice - Addaddrplus(ctxt, moduledata, ctxt.Syms.Lookup("runtime.pclntab", 0), int64(pclntabFiletabOffset)) - adduint(ctxt, moduledata, uint64(len(ctxt.Filesyms))+1) - adduint(ctxt, moduledata, uint64(len(ctxt.Filesyms))+1) + 
moduledata.AddAddrPlus(ctxt.Arch, ctxt.Syms.Lookup("runtime.pclntab", 0), int64(pclntabFiletabOffset)) + moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Filesyms))+1) + moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Filesyms))+1) // findfunctab - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.findfunctab", 0)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.findfunctab", 0)) // minpc, maxpc - Addaddr(ctxt, moduledata, pclntabFirstFunc) - Addaddrplus(ctxt, moduledata, pclntabLastFunc, pclntabLastFunc.Size) + moduledata.AddAddr(ctxt.Arch, pclntabFirstFunc) + moduledata.AddAddrPlus(ctxt.Arch, pclntabLastFunc, pclntabLastFunc.Size) // pointers to specific parts of the module - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.text", 0)) - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.etext", 0)) - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.noptrdata", 0)) - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.enoptrdata", 0)) - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.data", 0)) - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.edata", 0)) - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.bss", 0)) - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.ebss", 0)) - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.noptrbss", 0)) - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.enoptrbss", 0)) - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.end", 0)) - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.gcdata", 0)) - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.gcbss", 0)) - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.types", 0)) - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.etypes", 0)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.text", 0)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.etext", 0)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.noptrdata", 0)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.enoptrdata", 0)) + 
moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.data", 0)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.edata", 0)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.bss", 0)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.ebss", 0)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.noptrbss", 0)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.enoptrbss", 0)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.end", 0)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.gcdata", 0)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.gcbss", 0)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.types", 0)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.etypes", 0)) // text section information - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.textsectionmap", 0)) - adduint(ctxt, moduledata, uint64(nsections)) - adduint(ctxt, moduledata, uint64(nsections)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.textsectionmap", 0)) + moduledata.AddUint(ctxt.Arch, uint64(nsections)) + moduledata.AddUint(ctxt.Arch, uint64(nsections)) // The typelinks slice typelinkSym := ctxt.Syms.Lookup("runtime.typelink", 0) ntypelinks := uint64(typelinkSym.Size) / 4 - Addaddr(ctxt, moduledata, typelinkSym) - adduint(ctxt, moduledata, ntypelinks) - adduint(ctxt, moduledata, ntypelinks) + moduledata.AddAddr(ctxt.Arch, typelinkSym) + moduledata.AddUint(ctxt.Arch, ntypelinks) + moduledata.AddUint(ctxt.Arch, ntypelinks) // The itablinks slice - Addaddr(ctxt, moduledata, ctxt.Syms.Lookup("runtime.itablink", 0)) - adduint(ctxt, moduledata, uint64(nitablinks)) - adduint(ctxt, moduledata, uint64(nitablinks)) + moduledata.AddAddr(ctxt.Arch, ctxt.Syms.Lookup("runtime.itablink", 0)) + moduledata.AddUint(ctxt.Arch, uint64(nitablinks)) + moduledata.AddUint(ctxt.Arch, uint64(nitablinks)) // The ptab slice if ptab := ctxt.Syms.ROLookup("go.plugin.tabs", 0); ptab != nil && ptab.Attr.Reachable() { - 
ptab.Attr |= AttrLocal - ptab.Type = SRODATA + ptab.Attr |= sym.AttrLocal + ptab.Type = sym.SRODATA nentries := uint64(len(ptab.P) / 8) // sizeof(nameOff) + sizeof(typeOff) - Addaddr(ctxt, moduledata, ptab) - adduint(ctxt, moduledata, nentries) - adduint(ctxt, moduledata, nentries) + moduledata.AddAddr(ctxt.Arch, ptab) + moduledata.AddUint(ctxt.Arch, nentries) + moduledata.AddUint(ctxt.Arch, nentries) } else { - adduint(ctxt, moduledata, 0) - adduint(ctxt, moduledata, 0) - adduint(ctxt, moduledata, 0) + moduledata.AddUint(ctxt.Arch, 0) + moduledata.AddUint(ctxt.Arch, 0) + moduledata.AddUint(ctxt.Arch, 0) } - if Buildmode == BuildmodePlugin { - addgostring(ctxt, moduledata, "go.link.thispluginpath", *flagPluginPath) + if ctxt.BuildMode == BuildModePlugin { + addgostring(ctxt, moduledata, "go.link.thispluginpath", objabi.PathToPrefix(*flagPluginPath)) pkghashes := ctxt.Syms.Lookup("go.link.pkghashes", 0) - pkghashes.Attr |= AttrReachable - pkghashes.Attr |= AttrLocal - pkghashes.Type = SRODATA + pkghashes.Attr |= sym.AttrReachable + pkghashes.Attr |= sym.AttrLocal + pkghashes.Type = sym.SRODATA for i, l := range ctxt.Library { // pkghashes[i].name addgostring(ctxt, pkghashes, fmt.Sprintf("go.link.pkgname.%d", i), l.Pkg) // pkghashes[i].linktimehash - addgostring(ctxt, pkghashes, fmt.Sprintf("go.link.pkglinkhash.%d", i), string(l.hash)) + addgostring(ctxt, pkghashes, fmt.Sprintf("go.link.pkglinkhash.%d", i), l.Hash) // pkghashes[i].runtimehash hash := ctxt.Syms.ROLookup("go.link.pkghash."+l.Pkg, 0) - Addaddr(ctxt, pkghashes, hash) + pkghashes.AddAddr(ctxt.Arch, hash) } - Addaddr(ctxt, moduledata, pkghashes) - adduint(ctxt, moduledata, uint64(len(ctxt.Library))) - adduint(ctxt, moduledata, uint64(len(ctxt.Library))) + moduledata.AddAddr(ctxt.Arch, pkghashes) + moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Library))) + moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Library))) } else { - adduint(ctxt, moduledata, 0) // pluginpath - adduint(ctxt, moduledata, 0) - 
adduint(ctxt, moduledata, 0) // pkghashes slice - adduint(ctxt, moduledata, 0) - adduint(ctxt, moduledata, 0) + moduledata.AddUint(ctxt.Arch, 0) // pluginpath + moduledata.AddUint(ctxt.Arch, 0) + moduledata.AddUint(ctxt.Arch, 0) // pkghashes slice + moduledata.AddUint(ctxt.Arch, 0) + moduledata.AddUint(ctxt.Arch, 0) } if len(ctxt.Shlibs) > 0 { thismodulename := filepath.Base(*flagOutfile) - switch Buildmode { - case BuildmodeExe, BuildmodePIE: + switch ctxt.BuildMode { + case BuildModeExe, BuildModePIE: // When linking an executable, outfile is just "a.out". Make // it something slightly more comprehensible. thismodulename = "the executable" @@ -659,9 +623,9 @@ func (ctxt *Link) symtab() { addgostring(ctxt, moduledata, "go.link.thismodulename", thismodulename) modulehashes := ctxt.Syms.Lookup("go.link.abihashes", 0) - modulehashes.Attr |= AttrReachable - modulehashes.Attr |= AttrLocal - modulehashes.Type = SRODATA + modulehashes.Attr |= sym.AttrReachable + modulehashes.Attr |= sym.AttrLocal + modulehashes.Type = sym.SRODATA for i, shlib := range ctxt.Shlibs { // modulehashes[i].modulename @@ -673,13 +637,26 @@ func (ctxt *Link) symtab() { // modulehashes[i].runtimehash abihash := ctxt.Syms.Lookup("go.link.abihash."+modulename, 0) - abihash.Attr |= AttrReachable - Addaddr(ctxt, modulehashes, abihash) + abihash.Attr |= sym.AttrReachable + modulehashes.AddAddr(ctxt.Arch, abihash) } - Addaddr(ctxt, moduledata, modulehashes) - adduint(ctxt, moduledata, uint64(len(ctxt.Shlibs))) - adduint(ctxt, moduledata, uint64(len(ctxt.Shlibs))) + moduledata.AddAddr(ctxt.Arch, modulehashes) + moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Shlibs))) + moduledata.AddUint(ctxt.Arch, uint64(len(ctxt.Shlibs))) + } else { + moduledata.AddUint(ctxt.Arch, 0) // modulename + moduledata.AddUint(ctxt.Arch, 0) + moduledata.AddUint(ctxt.Arch, 0) // moduleshashes slice + moduledata.AddUint(ctxt.Arch, 0) + moduledata.AddUint(ctxt.Arch, 0) + } + + hasmain := ctxt.BuildMode == BuildModeExe || 
ctxt.BuildMode == BuildModePIE + if hasmain { + moduledata.AddUint8(1) + } else { + moduledata.AddUint8(0) } // The rest of moduledata is zero initialized. @@ -688,12 +665,12 @@ func (ctxt *Link) symtab() { // compiler-provided size, so read it from the type data. moduledatatype := ctxt.Syms.ROLookup("type.runtime.moduledata", 0) moduledata.Size = decodetypeSize(ctxt.Arch, moduledatatype) - Symgrow(moduledata, moduledata.Size) + moduledata.Grow(moduledata.Size) lastmoduledatap := ctxt.Syms.Lookup("runtime.lastmoduledatap", 0) - if lastmoduledatap.Type != SDYNIMPORT { - lastmoduledatap.Type = SNOPTRDATA + if lastmoduledatap.Type != sym.SDYNIMPORT { + lastmoduledatap.Type = sym.SNOPTRDATA lastmoduledatap.Size = 0 // overwrite existing value - Addaddr(ctxt, lastmoduledatap, moduledata) + lastmoduledatap.AddAddr(ctxt.Arch, moduledata) } } diff --git a/src/cmd/link/internal/ld/typelink.go b/src/cmd/link/internal/ld/typelink.go index a3badb3b4fc..6b5ab080f3e 100644 --- a/src/cmd/link/internal/ld/typelink.go +++ b/src/cmd/link/internal/ld/typelink.go @@ -6,6 +6,7 @@ package ld import ( "cmd/internal/objabi" + "cmd/link/internal/sym" "sort" ) @@ -13,7 +14,7 @@ type byTypeStr []typelinkSortKey type typelinkSortKey struct { TypeStr string - Type *Symbol + Type *sym.Symbol } func (s byTypeStr) Less(i, j int) bool { return s[i].TypeStr < s[j].TypeStr } @@ -27,17 +28,17 @@ func (ctxt *Link) typelink() { typelinks := byTypeStr{} for _, s := range ctxt.Syms.Allsym { if s.Attr.Reachable() && s.Attr.MakeTypelink() { - typelinks = append(typelinks, typelinkSortKey{decodetypeStr(s), s}) + typelinks = append(typelinks, typelinkSortKey{decodetypeStr(ctxt.Arch, s), s}) } } sort.Sort(typelinks) tl := ctxt.Syms.Lookup("runtime.typelink", 0) - tl.Type = STYPELINK - tl.Attr |= AttrReachable | AttrLocal + tl.Type = sym.STYPELINK + tl.Attr |= sym.AttrReachable | sym.AttrLocal tl.Size = int64(4 * len(typelinks)) tl.P = make([]byte, tl.Size) - tl.R = make([]Reloc, len(typelinks)) + tl.R = 
make([]sym.Reloc, len(typelinks)) for i, s := range typelinks { r := &tl.R[i] r.Sym = s.Type diff --git a/src/cmd/link/internal/ld/util.go b/src/cmd/link/internal/ld/util.go index 4b726367e8e..9b75dfa1f63 100644 --- a/src/cmd/link/internal/ld/util.go +++ b/src/cmd/link/internal/ld/util.go @@ -5,7 +5,7 @@ package ld import ( - "bytes" + "cmd/link/internal/sym" "encoding/binary" "fmt" "os" @@ -23,14 +23,6 @@ func Cputime() float64 { return time.Since(startTime).Seconds() } -func cstring(x []byte) string { - i := bytes.IndexByte(x, '\x00') - if i >= 0 { - x = x[:i] - } - return string(x) -} - func tokenize(s string) []string { var f []string for { @@ -92,10 +84,7 @@ func Exit(code int) { // Exitf logs an error message then calls Exit(2). func Exitf(format string, a ...interface{}) { fmt.Fprintf(os.Stderr, os.Args[0]+": "+format+"\n", a...) - if coutbuf.f != nil { - coutbuf.f.Close() - mayberemoveoutfile() - } + nerrors++ Exit(2) } @@ -105,7 +94,7 @@ func Exitf(format string, a ...interface{}) { // // Logging an error means that on exit cmd/link will delete any // output file and return a non-zero error code. -func Errorf(s *Symbol, format string, args ...interface{}) { +func Errorf(s *sym.Symbol, format string, args ...interface{}) { if s != nil { format = s.Name + ": " + format } diff --git a/src/cmd/link/internal/ld/ldelf.go b/src/cmd/link/internal/loadelf/ldelf.go similarity index 63% rename from src/cmd/link/internal/ld/ldelf.go rename to src/cmd/link/internal/loadelf/ldelf.go index 340d9b22533..793fd961d12 100644 --- a/src/cmd/link/internal/ld/ldelf.go +++ b/src/cmd/link/internal/loadelf/ldelf.go @@ -1,10 +1,17 @@ -package ld +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package loadelf implements an ELF file reader. 
+package loadelf import ( "bytes" "cmd/internal/bio" "cmd/internal/objabi" "cmd/internal/sys" + "cmd/link/internal/sym" + "debug/elf" "encoding/binary" "fmt" "io" @@ -169,6 +176,14 @@ const ( ElfNotePrXfpreg = 0x46e62b7f ) +// TODO(crawshaw): de-duplicate with cmd/link/internal/ld/elf.go. +const ( + ELF64SYMSIZE = 24 + ELF32SYMSIZE = 16 + + SHT_ARM_ATTRIBUTES = 0x70000003 +) + type ElfHdrBytes struct { Ident [16]uint8 Type [2]uint8 @@ -266,7 +281,7 @@ type ElfSect struct { align uint64 entsize uint64 base []byte - sym *Symbol + sym *sym.Symbol } type ElfObj struct { @@ -278,7 +293,6 @@ type ElfObj struct { e binary.ByteOrder sect []ElfSect nsect uint - shstrtab string nsymtab int symtab *ElfSect symstr *ElfSect @@ -305,7 +319,7 @@ type ElfSym struct { type_ uint8 other uint8 shndx uint16 - sym *Symbol + sym *sym.Symbol } var ElfMagic = [4]uint8{0x7F, 'E', 'L', 'F'} @@ -391,15 +405,13 @@ func (a *elfAttributeList) done() bool { // find the one we are looking for. This format is slightly documented in "ELF // for the ARM Architecture" but mostly this is derived from reading the source // to gold and readelf. -func parseArmAttributes(ctxt *Link, e binary.ByteOrder, data []byte) { +func parseArmAttributes(e binary.ByteOrder, initEhdrFlags uint32, data []byte) (ehdrFlags uint32, err error) { // We assume the soft-float ABI unless we see a tag indicating otherwise. - if ehdr.flags == 0x5000002 { - ehdr.flags = 0x5000202 + if initEhdrFlags == 0x5000002 { + ehdrFlags = 0x5000202 } if data[0] != 'A' { - // TODO(dfc) should this be ctxt.Diag ? - ctxt.Logf(".ARM.attributes has unexpected format %c\n", data[0]) - return + return 0, fmt.Errorf(".ARM.attributes has unexpected format %c\n", data[0]) } data = data[1:] for len(data) != 0 { @@ -409,9 +421,7 @@ func parseArmAttributes(ctxt *Link, e binary.ByteOrder, data []byte) { nulIndex := bytes.IndexByte(sectiondata, 0) if nulIndex < 0 { - // TODO(dfc) should this be ctxt.Diag ? 
- ctxt.Logf("corrupt .ARM.attributes (section name not NUL-terminated)\n") - return + return 0, fmt.Errorf("corrupt .ARM.attributes (section name not NUL-terminated)\n") } name := string(sectiondata[:nulIndex]) sectiondata = sectiondata[nulIndex+1:] @@ -425,61 +435,50 @@ func parseArmAttributes(ctxt *Link, e binary.ByteOrder, data []byte) { subsectiondata := sectiondata[sz+4 : subsectionsize] sectiondata = sectiondata[subsectionsize:] - if subsectiontag == TagFile { - attrList := elfAttributeList{data: subsectiondata} - for !attrList.done() { - attr := attrList.armAttr() - if attr.tag == TagABIVFPArgs && attr.ival == 1 { - ehdr.flags = 0x5000402 // has entry point, Version5 EABI, hard-float ABI - } - } - if attrList.err != nil { - // TODO(dfc) should this be ctxt.Diag ? - ctxt.Logf("could not parse .ARM.attributes\n") + if subsectiontag != TagFile { + continue + } + attrList := elfAttributeList{data: subsectiondata} + for !attrList.done() { + attr := attrList.armAttr() + if attr.tag == TagABIVFPArgs && attr.ival == 1 { + ehdrFlags = 0x5000402 // has entry point, Version5 EABI, hard-float ABI } } + if attrList.err != nil { + return 0, fmt.Errorf("could not parse .ARM.attributes\n") + } } } + return ehdrFlags, nil } -func ldelf(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { - if ctxt.Debugvlog != 0 { - ctxt.Logf("%5.2f ldelf %s\n", Cputime(), pn) +// Load loads the ELF file pn from f. +// Symbols are written into syms, and a slice of the text symbols is returned. +// +// On ARM systems, Load will attempt to determine what ELF header flags to +// emit by scanning the attributes in the ELF file being loaded. The +// parameter initEhdrFlags contains the current header flags for the output +// object, and the returnd ehdrFlags contains what this Load function computes. +// TODO: find a better place for this logic. 
+func Load(arch *sys.Arch, syms *sym.Symbols, f *bio.Reader, pkg string, length int64, pn string, initEhdrFlags uint32) (textp []*sym.Symbol, ehdrFlags uint32, err error) { + errorf := func(str string, args ...interface{}) ([]*sym.Symbol, uint32, error) { + return nil, 0, fmt.Errorf("loadelf: %s: %v", pn, fmt.Sprintf(str, args...)) } - localSymVersion := ctxt.Syms.IncVersion() + localSymVersion := syms.IncVersion() base := f.Offset() - var add uint64 - var e binary.ByteOrder - var elfobj *ElfObj - var flag int - var hdr *ElfHdrBytes var hdrbuf [64]uint8 - var info uint64 - var is64 int - var j int - var n int - var name string - var p []byte - var r []Reloc - var rela int - var rp *Reloc - var rsect *ElfSect - var s *Symbol - var sect *ElfSect - var sym ElfSym - var symbols []*Symbol if _, err := io.ReadFull(f, hdrbuf[:]); err != nil { - Errorf(nil, "%s: malformed elf file: %v", pn, err) - return + return errorf("malformed elf file: %v", err) } - hdr = new(ElfHdrBytes) + hdr := new(ElfHdrBytes) binary.Read(bytes.NewReader(hdrbuf[:]), binary.BigEndian, hdr) // only byte arrays; byte order doesn't matter if string(hdr.Ident[:4]) != "\x7FELF" { - Errorf(nil, "%s: malformed elf file", pn) - return + return errorf("malformed elf file, bad header") } + var e binary.ByteOrder switch hdr.Ident[5] { case ElfDataLsb: e = binary.LittleEndian @@ -488,12 +487,11 @@ func ldelf(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { e = binary.BigEndian default: - Errorf(nil, "%s: malformed elf file", pn) - return + return errorf("malformed elf file, unknown header") } // read header - elfobj = new(ElfObj) + elfobj := new(ElfObj) elfobj.e = e elfobj.f = f @@ -501,7 +499,7 @@ func ldelf(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { elfobj.length = length elfobj.name = pn - is64 = 0 + is64 := 0 if hdr.Ident[4] == ElfClass64 { is64 = 1 hdr := new(ElfHdrBytes64) @@ -537,66 +535,55 @@ func ldelf(ctxt *Link, f *bio.Reader, pkg string, length int64, pn 
string) { elfobj.is64 = is64 if v := uint32(hdr.Ident[6]); v != elfobj.version { - Errorf(nil, "%s: malformed elf version: got %d, want %d", pn, v, elfobj.version) - return + return errorf("malformed elf version: got %d, want %d", v, elfobj.version) } if e.Uint16(hdr.Type[:]) != ElfTypeRelocatable { - Errorf(nil, "%s: elf but not elf relocatable object", pn) - return + return errorf("elf but not elf relocatable object") } - switch SysArch.Family { + switch arch.Family { default: - Errorf(nil, "%s: elf %s unimplemented", pn, SysArch.Name) - return + return errorf("elf %s unimplemented", arch.Name) case sys.MIPS: if elfobj.machine != ElfMachMips || hdr.Ident[4] != ElfClass32 { - Errorf(nil, "%s: elf object but not mips", pn) - return + return errorf("elf object but not mips") } case sys.MIPS64: if elfobj.machine != ElfMachMips || hdr.Ident[4] != ElfClass64 { - Errorf(nil, "%s: elf object but not mips64", pn) - return + return errorf("elf object but not mips64") } case sys.ARM: if e != binary.LittleEndian || elfobj.machine != ElfMachArm || hdr.Ident[4] != ElfClass32 { - Errorf(nil, "%s: elf object but not arm", pn) - return + return errorf("elf object but not arm") } case sys.AMD64: if e != binary.LittleEndian || elfobj.machine != ElfMachAmd64 || hdr.Ident[4] != ElfClass64 { - Errorf(nil, "%s: elf object but not amd64", pn) - return + return errorf("elf object but not amd64") } case sys.ARM64: if e != binary.LittleEndian || elfobj.machine != ElfMachArm64 || hdr.Ident[4] != ElfClass64 { - Errorf(nil, "%s: elf object but not arm64", pn) - return + return errorf("elf object but not arm64") } case sys.I386: if e != binary.LittleEndian || elfobj.machine != ElfMach386 || hdr.Ident[4] != ElfClass32 { - Errorf(nil, "%s: elf object but not 386", pn) - return + return errorf("elf object but not 386") } case sys.PPC64: if elfobj.machine != ElfMachPower64 || hdr.Ident[4] != ElfClass64 { - Errorf(nil, "%s: elf object but not ppc64", pn) - return + return errorf("elf object but not 
ppc64") } case sys.S390X: if elfobj.machine != ElfMachS390 || hdr.Ident[4] != ElfClass64 { - Errorf(nil, "%s: elf object but not s390x", pn) - return + return errorf("elf object but not s390x") } } @@ -606,16 +593,14 @@ func ldelf(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { elfobj.nsect = uint(elfobj.shnum) for i := 0; uint(i) < elfobj.nsect; i++ { if f.Seek(int64(uint64(base)+elfobj.shoff+uint64(int64(i)*int64(elfobj.shentsize))), 0) < 0 { - Errorf(nil, "%s: malformed elf file", pn) - return + return errorf("malformed elf file: negative seek") } - sect = &elfobj.sect[i] + sect := &elfobj.sect[i] if is64 != 0 { var b ElfSectBytes64 if err := binary.Read(f, e, &b); err != nil { - Errorf(nil, "%s: malformed elf file: %v", pn, err) - return + return errorf("malformed elf file: %v", err) } sect.nameoff = e.Uint32(b.Name[:]) @@ -632,8 +617,7 @@ func ldelf(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { var b ElfSectBytes if err := binary.Read(f, e, &b); err != nil { - Errorf(nil, "%s: malformed elf file: %v", pn, err) - return + return errorf("malformed elf file: %v", err) } sect.nameoff = e.Uint32(b.Name[:]) @@ -651,14 +635,12 @@ func ldelf(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { // read section string table and translate names if elfobj.shstrndx >= uint32(elfobj.nsect) { - Errorf(nil, "%s: malformed elf file: shstrndx out of range %d >= %d", pn, elfobj.shstrndx, elfobj.nsect) - return + return errorf("malformed elf file: shstrndx out of range %d >= %d", elfobj.shstrndx, elfobj.nsect) } - sect = &elfobj.sect[elfobj.shstrndx] + sect := &elfobj.sect[elfobj.shstrndx] if err := elfmap(elfobj, sect); err != nil { - Errorf(nil, "%s: malformed elf file: %v", pn, err) - return + return errorf("malformed elf file: %v", err) } for i := 0; uint(i) < elfobj.nsect; i++ { if elfobj.sect[i].nameoff != 0 { @@ -675,8 +657,7 @@ func ldelf(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { } if 
elfobj.symtab.link <= 0 || elfobj.symtab.link >= uint32(elfobj.nsect) { - Errorf(nil, "%s: elf object has symbol table with invalid string table link", pn) - return + return errorf("elf object has symbol table with invalid string table link") } elfobj.symstr = &elfobj.sect[elfobj.symtab.link] @@ -687,12 +668,10 @@ func ldelf(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { } if err := elfmap(elfobj, elfobj.symtab); err != nil { - Errorf(nil, "%s: malformed elf file: %v", pn, err) - return + return errorf("malformed elf file: %v", err) } if err := elfmap(elfobj, elfobj.symstr); err != nil { - Errorf(nil, "%s: malformed elf file: %v", pn, err) - return + return errorf("malformed elf file: %v", err) } // load text and data segments into memory. @@ -705,45 +684,46 @@ func ldelf(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { sect = &elfobj.sect[i] if sect.type_ == SHT_ARM_ATTRIBUTES && sect.name == ".ARM.attributes" { if err := elfmap(elfobj, sect); err != nil { - Errorf(nil, "%s: malformed elf file: %v", pn, err) - return + return errorf("%s: malformed elf file: %v", pn, err) + } + ehdrFlags, err = parseArmAttributes(e, initEhdrFlags, sect.base[:sect.size]) + if err != nil { + // TODO(dfc) should this return an error? 
+ log.Printf("%s: %v", pn, err) } - parseArmAttributes(ctxt, e, sect.base[:sect.size]) } if (sect.type_ != ElfSectProgbits && sect.type_ != ElfSectNobits) || sect.flags&ElfSectFlagAlloc == 0 { continue } if sect.type_ != ElfSectNobits { if err := elfmap(elfobj, sect); err != nil { - Errorf(nil, "%s: malformed elf file: %v", pn, err) - return + return errorf("%s: malformed elf file: %v", pn, err) } } - name = fmt.Sprintf("%s(%s)", pkg, sect.name) - s = ctxt.Syms.Lookup(name, localSymVersion) + name := fmt.Sprintf("%s(%s)", pkg, sect.name) + s := syms.Lookup(name, localSymVersion) switch int(sect.flags) & (ElfSectFlagAlloc | ElfSectFlagWrite | ElfSectFlagExec) { default: - Errorf(nil, "%s: unexpected flags for ELF section %s", pn, sect.name) - return + return errorf("%s: unexpected flags for ELF section %s", pn, sect.name) case ElfSectFlagAlloc: - s.Type = SRODATA + s.Type = sym.SRODATA case ElfSectFlagAlloc + ElfSectFlagWrite: if sect.type_ == ElfSectNobits { - s.Type = SNOPTRBSS + s.Type = sym.SNOPTRBSS } else { - s.Type = SNOPTRDATA + s.Type = sym.SNOPTRDATA } case ElfSectFlagAlloc + ElfSectFlagExec: - s.Type = STEXT + s.Type = sym.STEXT } if sect.name == ".got" || sect.name == ".toc" { - s.Type = SELFGOT + s.Type = sym.SELFGOT } if sect.type_ == ElfSectProgbits { s.P = sect.base @@ -757,118 +737,118 @@ func ldelf(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { // enter sub-symbols into symbol table. // symbol 0 is the null symbol. 
- symbols = make([]*Symbol, elfobj.nsymtab) + symbols := make([]*sym.Symbol, elfobj.nsymtab) for i := 1; i < elfobj.nsymtab; i++ { - if err := readelfsym(ctxt, elfobj, i, &sym, 1, localSymVersion); err != nil { - Errorf(nil, "%s: malformed elf file: %v", pn, err) - return + var elfsym ElfSym + if err := readelfsym(arch, syms, elfobj, i, &elfsym, 1, localSymVersion); err != nil { + return errorf("%s: malformed elf file: %v", pn, err) } - symbols[i] = sym.sym - if sym.type_ != ElfSymTypeFunc && sym.type_ != ElfSymTypeObject && sym.type_ != ElfSymTypeNone && sym.type_ != ElfSymTypeCommon { + symbols[i] = elfsym.sym + if elfsym.type_ != ElfSymTypeFunc && elfsym.type_ != ElfSymTypeObject && elfsym.type_ != ElfSymTypeNone && elfsym.type_ != ElfSymTypeCommon { continue } - if sym.shndx == ElfSymShnCommon || sym.type_ == ElfSymTypeCommon { - s = sym.sym - if uint64(s.Size) < sym.size { - s.Size = int64(sym.size) + if elfsym.shndx == ElfSymShnCommon || elfsym.type_ == ElfSymTypeCommon { + s := elfsym.sym + if uint64(s.Size) < elfsym.size { + s.Size = int64(elfsym.size) } - if s.Type == 0 || s.Type == SXREF { - s.Type = SNOPTRBSS + if s.Type == 0 || s.Type == sym.SXREF { + s.Type = sym.SNOPTRBSS } continue } - if uint(sym.shndx) >= elfobj.nsect || sym.shndx == 0 { + if uint(elfsym.shndx) >= elfobj.nsect || elfsym.shndx == 0 { continue } // even when we pass needSym == 1 to readelfsym, it might still return nil to skip some unwanted symbols - if sym.sym == nil { + if elfsym.sym == nil { continue } - sect = &elfobj.sect[sym.shndx] + sect = &elfobj.sect[elfsym.shndx] if sect.sym == nil { - if strings.HasPrefix(sym.name, ".Linfo_string") { // clang does this + if strings.HasPrefix(elfsym.name, ".Linfo_string") { // clang does this continue } - if sym.name == "" && sym.type_ == 0 && sect.name == ".debug_str" { + if elfsym.name == "" && elfsym.type_ == 0 && sect.name == ".debug_str" { // This reportedly happens with clang 3.7 on ARM. // See issue 13139. 
continue } - if strings.HasPrefix(sym.name, ".LASF") { // gcc on s390x does this + if strings.HasPrefix(elfsym.name, ".LASF") { // gcc on s390x does this continue } - Errorf(sym.sym, "%s: sym#%d: ignoring symbol in section %d (type %d)", pn, i, sym.shndx, sym.type_) - continue + return errorf("%v: sym#%d: ignoring symbol in section %d (type %d)", elfsym.sym, i, elfsym.shndx, elfsym.type_) } - s = sym.sym + s := elfsym.sym if s.Outer != nil { if s.Attr.DuplicateOK() { continue } - Exitf("%s: duplicate symbol reference: %s in both %s and %s", pn, s.Name, s.Outer.Name, sect.sym.Name) + return errorf("duplicate symbol reference: %s in both %s and %s", s.Name, s.Outer.Name, sect.sym.Name) } s.Sub = sect.sym.Sub sect.sym.Sub = s - s.Type = sect.sym.Type | s.Type&^SMASK | SSUB + s.Type = sect.sym.Type + s.Attr |= sym.AttrSubSymbol if !s.Attr.CgoExportDynamic() { s.Dynimplib = "" // satisfy dynimport } - s.Value = int64(sym.value) - s.Size = int64(sym.size) + s.Value = int64(elfsym.value) + s.Size = int64(elfsym.size) s.Outer = sect.sym - if sect.sym.Type == STEXT { + if sect.sym.Type == sym.STEXT { if s.Attr.External() && !s.Attr.DuplicateOK() { - Errorf(s, "%s: duplicate symbol definition", pn) + return errorf("%v: duplicate symbol definition", s) } - s.Attr |= AttrExternal + s.Attr |= sym.AttrExternal } if elfobj.machine == ElfMachPower64 { - flag = int(sym.other) >> 5 + flag := int(elfsym.other) >> 5 if 2 <= flag && flag <= 6 { s.Localentry = 1 << uint(flag-2) } else if flag == 7 { - Errorf(s, "%s: invalid sym.other 0x%x", pn, sym.other) + return errorf("%v: invalid sym.other 0x%x", s, elfsym.other) } } } // Sort outer lists by address, adding to textp. // This keeps textp in increasing address order. 
- for i := 0; uint(i) < elfobj.nsect; i++ { - s = elfobj.sect[i].sym + for i := uint(0); i < elfobj.nsect; i++ { + s := elfobj.sect[i].sym if s == nil { continue } if s.Sub != nil { - s.Sub = listsort(s.Sub) + s.Sub = sym.SortSub(s.Sub) } - if s.Type == STEXT { + if s.Type == sym.STEXT { if s.Attr.OnList() { - log.Fatalf("symbol %s listed multiple times", s.Name) + return errorf("symbol %s listed multiple times", s.Name) } - s.Attr |= AttrOnList - ctxt.Textp = append(ctxt.Textp, s) + s.Attr |= sym.AttrOnList + textp = append(textp, s) for s = s.Sub; s != nil; s = s.Sub { if s.Attr.OnList() { - log.Fatalf("symbol %s listed multiple times", s.Name) + return errorf("symbol %s listed multiple times", s.Name) } - s.Attr |= AttrOnList - ctxt.Textp = append(ctxt.Textp, s) + s.Attr |= sym.AttrOnList + textp = append(textp, s) } } } // load relocations - for i := 0; uint(i) < elfobj.nsect; i++ { - rsect = &elfobj.sect[i] + for i := uint(0); i < elfobj.nsect; i++ { + rsect := &elfobj.sect[i] if rsect.type_ != ElfSectRela && rsect.type_ != ElfSectRel { continue } @@ -877,19 +857,19 @@ func ldelf(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { } sect = &elfobj.sect[rsect.info] if err := elfmap(elfobj, rsect); err != nil { - Errorf(nil, "%s: malformed elf file: %v", pn, err) - return + return errorf("malformed elf file: %v", err) } - rela = 0 + rela := 0 if rsect.type_ == ElfSectRela { rela = 1 } - n = int(rsect.size / uint64(4+4*is64) / uint64(2+rela)) - r = make([]Reloc, n) - p = rsect.base - for j = 0; j < n; j++ { - add = 0 - rp = &r[j] + n := int(rsect.size / uint64(4+4*is64) / uint64(2+rela)) + r := make([]sym.Reloc, n) + p := rsect.base + for j := 0; j < n; j++ { + var add uint64 + rp := &r[j] + var info uint64 if is64 != 0 { // 64-bit rel/rela rp.Off = int32(e.Uint64(p)) @@ -924,21 +904,23 @@ func ldelf(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { if info>>32 == 0 { // absolute relocation, don't bother reading the null symbol 
rp.Sym = nil } else { - if err := readelfsym(ctxt, elfobj, int(info>>32), &sym, 0, 0); err != nil { - Errorf(nil, "%s: malformed elf file: %v", pn, err) - return + var elfsym ElfSym + if err := readelfsym(arch, syms, elfobj, int(info>>32), &elfsym, 0, 0); err != nil { + return errorf("malformed elf file: %v", err) } - sym.sym = symbols[info>>32] - if sym.sym == nil { - Errorf(nil, "%s: malformed elf file: %s#%d: reloc of invalid sym #%d %s shndx=%d type=%d", pn, sect.sym.Name, j, int(info>>32), sym.name, sym.shndx, sym.type_) - return + elfsym.sym = symbols[info>>32] + if elfsym.sym == nil { + return errorf("malformed elf file: %s#%d: reloc of invalid sym #%d %s shndx=%d type=%d", sect.sym.Name, j, int(info>>32), elfsym.name, elfsym.shndx, elfsym.type_) } - rp.Sym = sym.sym + rp.Sym = elfsym.sym } rp.Type = 256 + objabi.RelocType(info) - rp.Siz = relSize(ctxt, pn, uint32(info)) + rp.Siz, err = relSize(arch, pn, uint32(info)) + if err != nil { + return nil, 0, err + } if rela != 0 { rp.Add = int64(add) } else { @@ -948,7 +930,7 @@ func ldelf(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { } else if rp.Siz == 8 { rp.Add = int64(e.Uint64(sect.base[rp.Off:])) } else { - Errorf(nil, "invalid rela size %d", rp.Siz) + return errorf("invalid rela size %d", rp.Siz) } } @@ -961,13 +943,15 @@ func ldelf(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { } //print("rel %s %d %d %s %#llx\n", sect->sym->name, rp->type, rp->siz, rp->sym->name, rp->add); - sort.Sort(rbyoff(r[:n])) + sort.Sort(sym.RelocByOff(r[:n])) // just in case - s = sect.sym + s := sect.sym s.R = r s.R = s.R[:n] } + + return textp, ehdrFlags, nil } func section(elfobj *ElfObj, name string) *ElfSect { @@ -1000,57 +984,57 @@ func elfmap(elfobj *ElfObj, sect *ElfSect) (err error) { return nil } -func readelfsym(ctxt *Link, elfobj *ElfObj, i int, sym *ElfSym, needSym int, localSymVersion int) (err error) { +func readelfsym(arch *sys.Arch, syms *sym.Symbols, elfobj *ElfObj, i int, 
elfsym *ElfSym, needSym int, localSymVersion int) (err error) { if i >= elfobj.nsymtab || i < 0 { err = fmt.Errorf("invalid elf symbol index") return err } if i == 0 { - Errorf(nil, "readym: read null symbol!") + return fmt.Errorf("readym: read null symbol!") } if elfobj.is64 != 0 { b := new(ElfSymBytes64) binary.Read(bytes.NewReader(elfobj.symtab.base[i*ELF64SYMSIZE:(i+1)*ELF64SYMSIZE]), elfobj.e, b) - sym.name = cstring(elfobj.symstr.base[elfobj.e.Uint32(b.Name[:]):]) - sym.value = elfobj.e.Uint64(b.Value[:]) - sym.size = elfobj.e.Uint64(b.Size[:]) - sym.shndx = elfobj.e.Uint16(b.Shndx[:]) - sym.bind = b.Info >> 4 - sym.type_ = b.Info & 0xf - sym.other = b.Other + elfsym.name = cstring(elfobj.symstr.base[elfobj.e.Uint32(b.Name[:]):]) + elfsym.value = elfobj.e.Uint64(b.Value[:]) + elfsym.size = elfobj.e.Uint64(b.Size[:]) + elfsym.shndx = elfobj.e.Uint16(b.Shndx[:]) + elfsym.bind = b.Info >> 4 + elfsym.type_ = b.Info & 0xf + elfsym.other = b.Other } else { b := new(ElfSymBytes) binary.Read(bytes.NewReader(elfobj.symtab.base[i*ELF32SYMSIZE:(i+1)*ELF32SYMSIZE]), elfobj.e, b) - sym.name = cstring(elfobj.symstr.base[elfobj.e.Uint32(b.Name[:]):]) - sym.value = uint64(elfobj.e.Uint32(b.Value[:])) - sym.size = uint64(elfobj.e.Uint32(b.Size[:])) - sym.shndx = elfobj.e.Uint16(b.Shndx[:]) - sym.bind = b.Info >> 4 - sym.type_ = b.Info & 0xf - sym.other = b.Other + elfsym.name = cstring(elfobj.symstr.base[elfobj.e.Uint32(b.Name[:]):]) + elfsym.value = uint64(elfobj.e.Uint32(b.Value[:])) + elfsym.size = uint64(elfobj.e.Uint32(b.Size[:])) + elfsym.shndx = elfobj.e.Uint16(b.Shndx[:]) + elfsym.bind = b.Info >> 4 + elfsym.type_ = b.Info & 0xf + elfsym.other = b.Other } - var s *Symbol - if sym.name == "_GLOBAL_OFFSET_TABLE_" { - sym.name = ".got" + var s *sym.Symbol + if elfsym.name == "_GLOBAL_OFFSET_TABLE_" { + elfsym.name = ".got" } - if sym.name == ".TOC." { + if elfsym.name == ".TOC." { // Magic symbol on ppc64. Will be set to this object // file's .got+0x8000. 
- sym.bind = ElfSymBindLocal + elfsym.bind = ElfSymBindLocal } - switch sym.type_ { + switch elfsym.type_ { case ElfSymTypeSection: - s = elfobj.sect[sym.shndx].sym + s = elfobj.sect[elfsym.shndx].sym case ElfSymTypeObject, ElfSymTypeFunc, ElfSymTypeNone, ElfSymTypeCommon: - switch sym.bind { + switch elfsym.bind { case ElfSymBindGlobal: if needSym != 0 { - s = ctxt.Syms.Lookup(sym.name, 0) + s = syms.Lookup(elfsym.name, 0) // for global scoped hidden symbols we should insert it into // symbol hash table, but mark them as hidden. @@ -1059,25 +1043,24 @@ func readelfsym(ctxt *Link, elfobj *ElfObj, i int, sym *ElfSym, needSym int, loc // TODO(minux): correctly handle __i686.get_pc_thunk.bx without // set dupok generally. See http://codereview.appspot.com/5823055/ // comment #5 for details. - if s != nil && sym.other == 2 { - s.Type |= SHIDDEN - s.Attr |= AttrDuplicateOK + if s != nil && elfsym.other == 2 { + s.Attr |= sym.AttrDuplicateOK | sym.AttrVisibilityHidden } } case ElfSymBindLocal: - if SysArch.Family == sys.ARM && (strings.HasPrefix(sym.name, "$a") || strings.HasPrefix(sym.name, "$d")) { + if arch.Family == sys.ARM && (strings.HasPrefix(elfsym.name, "$a") || strings.HasPrefix(elfsym.name, "$d")) { // binutils for arm generate these mapping // symbols, ignore these break } - if sym.name == ".TOC." { + if elfsym.name == ".TOC." { // We need to be able to look this up, // so put it in the hash table. 
if needSym != 0 { - s = ctxt.Syms.Lookup(sym.name, localSymVersion) - s.Type |= SHIDDEN + s = syms.Lookup(elfsym.name, localSymVersion) + s.Attr |= sym.AttrVisibilityHidden } break @@ -1087,56 +1070,36 @@ func readelfsym(ctxt *Link, elfobj *ElfObj, i int, sym *ElfSym, needSym int, loc // local names and hidden global names are unique // and should only be referenced by their index, not name, so we // don't bother to add them into the hash table - s = ctxt.Syms.newsym(sym.name, localSymVersion) + s = syms.Newsym(elfsym.name, localSymVersion) - s.Type |= SHIDDEN + s.Attr |= sym.AttrVisibilityHidden } case ElfSymBindWeak: if needSym != 0 { - s = ctxt.Syms.Lookup(sym.name, 0) - if sym.other == 2 { - s.Type |= SHIDDEN + s = syms.Lookup(elfsym.name, 0) + if elfsym.other == 2 { + s.Attr |= sym.AttrVisibilityHidden } } default: - err = fmt.Errorf("%s: invalid symbol binding %d", sym.name, sym.bind) + err = fmt.Errorf("%s: invalid symbol binding %d", elfsym.name, elfsym.bind) return err } } - if s != nil && s.Type == 0 && sym.type_ != ElfSymTypeSection { - s.Type = SXREF + // TODO(mwhudson): the test of VisibilityHidden here probably doesn't make + // sense and should be removed when someone has thought about it properly. + if s != nil && s.Type == 0 && !s.Attr.VisibilityHidden() && elfsym.type_ != ElfSymTypeSection { + s.Type = sym.SXREF } - sym.sym = s + elfsym.sym = s return nil } -type rbyoff []Reloc - -func (x rbyoff) Len() int { - return len(x) -} - -func (x rbyoff) Swap(i, j int) { - x[i], x[j] = x[j], x[i] -} - -func (x rbyoff) Less(i, j int) bool { - a := &x[i] - b := &x[j] - if a.Off < b.Off { - return true - } - if a.Off > b.Off { - return false - } - return false -} - -func relSize(ctxt *Link, pn string, elftype uint32) uint8 { +func relSize(arch *sys.Arch, pn string, elftype uint32) (uint8, error) { // TODO(mdempsky): Replace this with a struct-valued switch statement // once golang.org/issue/15164 is fixed or found to not impair cmd/link // performance. 
@@ -1149,77 +1112,84 @@ func relSize(ctxt *Link, pn string, elftype uint32) uint8 { S390X = uint32(sys.S390X) ) - switch uint32(SysArch.Family) | elftype<<24 { + switch uint32(arch.Family) | elftype<<24 { default: - Errorf(nil, "%s: unknown relocation type %d; compiled without -fpic?", pn, elftype) - fallthrough + return 0, fmt.Errorf("%s: unknown relocation type %d; compiled without -fpic?", pn, elftype) - case S390X | R_390_8<<24: - return 1 + case S390X | uint32(elf.R_390_8)<<24: + return 1, nil - case PPC64 | R_PPC64_TOC16<<24, - PPC64 | R_PPC64_TOC16_LO<<24, - PPC64 | R_PPC64_TOC16_HI<<24, - PPC64 | R_PPC64_TOC16_HA<<24, - PPC64 | R_PPC64_TOC16_DS<<24, - PPC64 | R_PPC64_TOC16_LO_DS<<24, - PPC64 | R_PPC64_REL16_LO<<24, - PPC64 | R_PPC64_REL16_HI<<24, - PPC64 | R_PPC64_REL16_HA<<24, - S390X | R_390_16<<24, - S390X | R_390_GOT16<<24, - S390X | R_390_PC16<<24, - S390X | R_390_PC16DBL<<24, - S390X | R_390_PLT16DBL<<24: - return 2 + case PPC64 | uint32(elf.R_PPC64_TOC16)<<24, + PPC64 | uint32(elf.R_PPC64_TOC16_LO)<<24, + PPC64 | uint32(elf.R_PPC64_TOC16_HI)<<24, + PPC64 | uint32(elf.R_PPC64_TOC16_HA)<<24, + PPC64 | uint32(elf.R_PPC64_TOC16_DS)<<24, + PPC64 | uint32(elf.R_PPC64_TOC16_LO_DS)<<24, + PPC64 | uint32(elf.R_PPC64_REL16_LO)<<24, + PPC64 | uint32(elf.R_PPC64_REL16_HI)<<24, + PPC64 | uint32(elf.R_PPC64_REL16_HA)<<24, + S390X | uint32(elf.R_390_16)<<24, + S390X | uint32(elf.R_390_GOT16)<<24, + S390X | uint32(elf.R_390_PC16)<<24, + S390X | uint32(elf.R_390_PC16DBL)<<24, + S390X | uint32(elf.R_390_PLT16DBL)<<24: + return 2, nil - case ARM | R_ARM_ABS32<<24, - ARM | R_ARM_GOT32<<24, - ARM | R_ARM_PLT32<<24, - ARM | R_ARM_GOTOFF<<24, - ARM | R_ARM_GOTPC<<24, - ARM | R_ARM_THM_PC22<<24, - ARM | R_ARM_REL32<<24, - ARM | R_ARM_CALL<<24, - ARM | R_ARM_V4BX<<24, - ARM | R_ARM_GOT_PREL<<24, - ARM | R_ARM_PC24<<24, - ARM | R_ARM_JUMP24<<24, - AMD64 | R_X86_64_PC32<<24, - AMD64 | R_X86_64_PLT32<<24, - AMD64 | R_X86_64_GOTPCREL<<24, - AMD64 | R_X86_64_GOTPCRELX<<24, - 
AMD64 | R_X86_64_REX_GOTPCRELX<<24, - I386 | R_386_32<<24, - I386 | R_386_PC32<<24, - I386 | R_386_GOT32<<24, - I386 | R_386_PLT32<<24, - I386 | R_386_GOTOFF<<24, - I386 | R_386_GOTPC<<24, - I386 | R_386_GOT32X<<24, - PPC64 | R_PPC64_REL24<<24, - PPC64 | R_PPC_REL32<<24, - S390X | R_390_32<<24, - S390X | R_390_PC32<<24, - S390X | R_390_GOT32<<24, - S390X | R_390_PLT32<<24, - S390X | R_390_PC32DBL<<24, - S390X | R_390_PLT32DBL<<24, - S390X | R_390_GOTPCDBL<<24, - S390X | R_390_GOTENT<<24: - return 4 + case ARM | uint32(elf.R_ARM_ABS32)<<24, + ARM | uint32(elf.R_ARM_GOT32)<<24, + ARM | uint32(elf.R_ARM_PLT32)<<24, + ARM | uint32(elf.R_ARM_GOTOFF)<<24, + ARM | uint32(elf.R_ARM_GOTPC)<<24, + ARM | uint32(elf.R_ARM_THM_PC22)<<24, + ARM | uint32(elf.R_ARM_REL32)<<24, + ARM | uint32(elf.R_ARM_CALL)<<24, + ARM | uint32(elf.R_ARM_V4BX)<<24, + ARM | uint32(elf.R_ARM_GOT_PREL)<<24, + ARM | uint32(elf.R_ARM_PC24)<<24, + ARM | uint32(elf.R_ARM_JUMP24)<<24, + AMD64 | uint32(elf.R_X86_64_PC32)<<24, + AMD64 | uint32(elf.R_X86_64_PLT32)<<24, + AMD64 | uint32(elf.R_X86_64_GOTPCREL)<<24, + AMD64 | uint32(elf.R_X86_64_GOTPCRELX)<<24, + AMD64 | uint32(elf.R_X86_64_REX_GOTPCRELX)<<24, + I386 | uint32(elf.R_386_32)<<24, + I386 | uint32(elf.R_386_PC32)<<24, + I386 | uint32(elf.R_386_GOT32)<<24, + I386 | uint32(elf.R_386_PLT32)<<24, + I386 | uint32(elf.R_386_GOTOFF)<<24, + I386 | uint32(elf.R_386_GOTPC)<<24, + I386 | uint32(elf.R_386_GOT32X)<<24, + PPC64 | uint32(elf.R_PPC64_REL24)<<24, + PPC64 | uint32(elf.R_PPC_REL32)<<24, + S390X | uint32(elf.R_390_32)<<24, + S390X | uint32(elf.R_390_PC32)<<24, + S390X | uint32(elf.R_390_GOT32)<<24, + S390X | uint32(elf.R_390_PLT32)<<24, + S390X | uint32(elf.R_390_PC32DBL)<<24, + S390X | uint32(elf.R_390_PLT32DBL)<<24, + S390X | uint32(elf.R_390_GOTPCDBL)<<24, + S390X | uint32(elf.R_390_GOTENT)<<24: + return 4, nil - case AMD64 | R_X86_64_64<<24, - AMD64 | R_X86_64_PC64<<24, - PPC64 | R_PPC64_ADDR64<<24, - S390X | R_390_GLOB_DAT<<24, - S390X | 
R_390_RELATIVE<<24, - S390X | R_390_GOTOFF<<24, - S390X | R_390_GOTPC<<24, - S390X | R_390_64<<24, - S390X | R_390_PC64<<24, - S390X | R_390_GOT64<<24, - S390X | R_390_PLT64<<24: - return 8 + case AMD64 | uint32(elf.R_X86_64_64)<<24, + AMD64 | uint32(elf.R_X86_64_PC64)<<24, + PPC64 | uint32(elf.R_PPC64_ADDR64)<<24, + S390X | uint32(elf.R_390_GLOB_DAT)<<24, + S390X | uint32(elf.R_390_RELATIVE)<<24, + S390X | uint32(elf.R_390_GOTOFF)<<24, + S390X | uint32(elf.R_390_GOTPC)<<24, + S390X | uint32(elf.R_390_64)<<24, + S390X | uint32(elf.R_390_PC64)<<24, + S390X | uint32(elf.R_390_GOT64)<<24, + S390X | uint32(elf.R_390_PLT64)<<24: + return 8, nil } } + +func cstring(x []byte) string { + i := bytes.IndexByte(x, '\x00') + if i >= 0 { + x = x[:i] + } + return string(x) +} diff --git a/src/cmd/link/internal/ld/ldmacho.go b/src/cmd/link/internal/loadmacho/ldmacho.go similarity index 73% rename from src/cmd/link/internal/ld/ldmacho.go rename to src/cmd/link/internal/loadmacho/ldmacho.go index 7bfa67d3ccd..e6b0f70e382 100644 --- a/src/cmd/link/internal/ld/ldmacho.go +++ b/src/cmd/link/internal/loadmacho/ldmacho.go @@ -1,13 +1,19 @@ -package ld +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package loadmacho implements a Mach-O file reader. 
+package loadmacho import ( + "bytes" "cmd/internal/bio" "cmd/internal/objabi" "cmd/internal/sys" + "cmd/link/internal/sym" "encoding/binary" "fmt" "io" - "log" "sort" ) @@ -43,6 +49,13 @@ const ( N_STAB = 0xe0 ) +// TODO(crawshaw): de-duplicate these symbols with cmd/internal/ld +const ( + MACHO_X86_64_RELOC_UNSIGNED = 0 + MACHO_X86_64_RELOC_SIGNED = 1 + MACHO_FAKE_GOTPCREL = 100 +) + type ldMachoObj struct { f *bio.Reader base int64 // off in f where Mach-O begins @@ -92,7 +105,7 @@ type ldMachoSect struct { flags uint32 res1 uint32 res2 uint32 - sym *Symbol + sym *sym.Symbol rel []ldMachoRel } @@ -123,7 +136,7 @@ type ldMachoSym struct { desc uint16 kind int8 value uint64 - sym *Symbol + sym *sym.Symbol } type ldMachoDysymtab struct { @@ -306,12 +319,9 @@ func macholoadrel(m *ldMachoObj, sect *ldMachoSect) int { if _, err := io.ReadFull(m.f, buf); err != nil { return -1 } - var p []byte - var r *ldMachoRel - var v uint32 - for i := 0; uint32(i) < sect.nreloc; i++ { - r = &rel[i] - p = buf[i*8:] + for i := uint32(0); i < sect.nreloc; i++ { + r := &rel[i] + p := buf[i*8:] r.addr = m.e.Uint32(p) // TODO(rsc): Wrong interpretation for big-endian bitfields? 
@@ -319,7 +329,7 @@ func macholoadrel(m *ldMachoObj, sect *ldMachoSect) int { // scatterbrained relocation r.scattered = 1 - v = r.addr >> 24 + v := r.addr >> 24 r.addr &= 0xFFFFFF r.type_ = uint8(v & 0xF) v >>= 4 @@ -328,7 +338,7 @@ func macholoadrel(m *ldMachoObj, sect *ldMachoSect) int { r.pcrel = uint8(v & 1) r.value = m.e.Uint32(p[4:]) } else { - v = m.e.Uint32(p[4:]) + v := m.e.Uint32(p[4:]) r.symnum = v & 0xFFFFFF v >>= 24 r.pcrel = uint8(v & 1) @@ -390,11 +400,9 @@ func macholoadsym(m *ldMachoObj, symtab *ldMachoSymtab) int { } sym := make([]ldMachoSym, symtab.nsym) p := symbuf - var s *ldMachoSym - var v uint32 - for i := 0; uint32(i) < symtab.nsym; i++ { - s = &sym[i] - v = m.e.Uint32(p) + for i := uint32(0); i < symtab.nsym; i++ { + s := &sym[i] + v := m.e.Uint32(p) if v >= symtab.strsize { return -1 } @@ -415,119 +423,93 @@ func macholoadsym(m *ldMachoObj, symtab *ldMachoSymtab) int { return 0 } -func ldmacho(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { - var err error - var j int - var is64 bool - var secaddr uint64 - var hdr [7 * 4]uint8 - var cmdp []byte - var dat []byte - var ncmd uint32 - var cmdsz uint32 - var ty uint32 - var sz uint32 - var off uint32 - var m *ldMachoObj - var e binary.ByteOrder - var sect *ldMachoSect - var rel *ldMachoRel - var rpi int - var s *Symbol - var s1 *Symbol - var outer *Symbol - var c *ldMachoCmd - var symtab *ldMachoSymtab - var dsymtab *ldMachoDysymtab - var sym *ldMachoSym - var r []Reloc - var rp *Reloc - var name string - - localSymVersion := ctxt.Syms.IncVersion() - base := f.Offset() - if _, err := io.ReadFull(f, hdr[:]); err != nil { - goto bad +// Load loads the Mach-O file pn from f. +// Symbols are written into syms, and a slice of the text symbols is returned. 
+func Load(arch *sys.Arch, syms *sym.Symbols, f *bio.Reader, pkg string, length int64, pn string) (textp []*sym.Symbol, err error) { + errorf := func(str string, args ...interface{}) ([]*sym.Symbol, error) { + return nil, fmt.Errorf("loadmacho: %v: %v", pn, fmt.Sprintf(str, args...)) } + localSymVersion := syms.IncVersion() + base := f.Offset() + + var hdr [7 * 4]uint8 + if _, err := io.ReadFull(f, hdr[:]); err != nil { + return errorf("reading hdr: %v", err) + } + + var e binary.ByteOrder if binary.BigEndian.Uint32(hdr[:])&^1 == 0xFEEDFACE { e = binary.BigEndian } else if binary.LittleEndian.Uint32(hdr[:])&^1 == 0xFEEDFACE { e = binary.LittleEndian } else { - err = fmt.Errorf("bad magic - not mach-o file") - goto bad + return errorf("bad magic - not mach-o file") } - is64 = e.Uint32(hdr[:]) == 0xFEEDFACF - ncmd = e.Uint32(hdr[4*4:]) - cmdsz = e.Uint32(hdr[5*4:]) + is64 := e.Uint32(hdr[:]) == 0xFEEDFACF + ncmd := e.Uint32(hdr[4*4:]) + cmdsz := e.Uint32(hdr[5*4:]) if ncmd > 0x10000 || cmdsz >= 0x01000000 { - err = fmt.Errorf("implausible mach-o header ncmd=%d cmdsz=%d", ncmd, cmdsz) - goto bad + return errorf("implausible mach-o header ncmd=%d cmdsz=%d", ncmd, cmdsz) } if is64 { f.Seek(4, 1) // skip reserved word in header } - m = new(ldMachoObj) + m := &ldMachoObj{ + f: f, + e: e, + cputype: uint(e.Uint32(hdr[1*4:])), + subcputype: uint(e.Uint32(hdr[2*4:])), + filetype: e.Uint32(hdr[3*4:]), + ncmd: uint(ncmd), + flags: e.Uint32(hdr[6*4:]), + is64: is64, + base: base, + length: length, + name: pn, + } - m.f = f - m.e = e - m.cputype = uint(e.Uint32(hdr[1*4:])) - m.subcputype = uint(e.Uint32(hdr[2*4:])) - m.filetype = e.Uint32(hdr[3*4:]) - m.ncmd = uint(ncmd) - m.flags = e.Uint32(hdr[6*4:]) - m.is64 = is64 - m.base = base - m.length = length - m.name = pn - - switch SysArch.Family { + switch arch.Family { default: - Errorf(nil, "%s: mach-o %s unimplemented", pn, SysArch.Name) - return + return errorf("mach-o %s unimplemented", arch.Name) case sys.AMD64: if e != 
binary.LittleEndian || m.cputype != LdMachoCpuAmd64 { - Errorf(nil, "%s: mach-o object but not amd64", pn) - return + return errorf("mach-o object but not amd64") } case sys.I386: if e != binary.LittleEndian || m.cputype != LdMachoCpu386 { - Errorf(nil, "%s: mach-o object but not 386", pn) - return + return errorf("mach-o object but not 386") } } m.cmd = make([]ldMachoCmd, ncmd) - off = uint32(len(hdr)) - cmdp = make([]byte, cmdsz) - if _, err2 := io.ReadFull(f, cmdp); err2 != nil { - err = fmt.Errorf("reading cmds: %v", err) - goto bad + cmdp := make([]byte, cmdsz) + if _, err := io.ReadFull(f, cmdp); err != nil { + return errorf("reading cmds: %v", err) } // read and parse load commands - c = nil + var c *ldMachoCmd - symtab = nil - dsymtab = nil + var symtab *ldMachoSymtab + var dsymtab *ldMachoDysymtab - for i := 0; uint32(i) < ncmd; i++ { - ty = e.Uint32(cmdp) - sz = e.Uint32(cmdp[4:]) + off := uint32(len(hdr)) + for i := uint32(0); i < ncmd; i++ { + ty := e.Uint32(cmdp) + sz := e.Uint32(cmdp[4:]) m.cmd[i].off = off unpackcmd(cmdp, m, &m.cmd[i], uint(ty), uint(sz)) cmdp = cmdp[sz:] off += sz if ty == LdMachoCmdSymtab { if symtab != nil { - err = fmt.Errorf("multiple symbol tables") - goto bad + return errorf("multiple symbol tables") } symtab = &m.cmd[i].sym @@ -541,8 +523,7 @@ func ldmacho(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { if (is64 && ty == LdMachoCmdSegment64) || (!is64 && ty == LdMachoCmdSegment) { if c != nil { - err = fmt.Errorf("multiple load commands") - goto bad + return errorf("multiple load commands") } c = &m.cmd[i] @@ -554,8 +535,7 @@ func ldmacho(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { // the memory anyway for the symbol images, so we might // as well use one large chunk. 
if c == nil { - err = fmt.Errorf("no load command") - goto bad + return errorf("no load command") } if symtab == nil { @@ -564,33 +544,29 @@ func ldmacho(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { } if int64(c.seg.fileoff+c.seg.filesz) >= length { - err = fmt.Errorf("load segment out of range") - goto bad + return errorf("load segment out of range") } - dat = make([]byte, c.seg.filesz) if f.Seek(m.base+int64(c.seg.fileoff), 0) < 0 { - err = fmt.Errorf("cannot load object data: %v", err) - goto bad + return errorf("cannot load object data: seek failed") } - if _, err2 := io.ReadFull(f, dat); err2 != nil { - err = fmt.Errorf("cannot load object data: %v", err) - goto bad + dat := make([]byte, c.seg.filesz) + if _, err := io.ReadFull(f, dat); err != nil { + return errorf("cannot load object data: %v", err) } - for i := 0; uint32(i) < c.seg.nsect; i++ { - sect = &c.seg.sect[i] + for i := uint32(0); i < c.seg.nsect; i++ { + sect := &c.seg.sect[i] if sect.segname != "__TEXT" && sect.segname != "__DATA" { continue } if sect.name == "__eh_frame" { continue } - name = fmt.Sprintf("%s(%s/%s)", pkg, sect.segname, sect.name) - s = ctxt.Syms.Lookup(name, localSymVersion) + name := fmt.Sprintf("%s(%s/%s)", pkg, sect.segname, sect.name) + s := syms.Lookup(name, localSymVersion) if s.Type != 0 { - err = fmt.Errorf("duplicate %s/%s", sect.segname, sect.name) - goto bad + return errorf("duplicate %s/%s", sect.segname, sect.name) } if sect.flags&0xff == 1 { // S_ZEROFILL @@ -602,16 +578,16 @@ func ldmacho(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { if sect.segname == "__TEXT" { if sect.name == "__text" { - s.Type = STEXT + s.Type = sym.STEXT } else { - s.Type = SRODATA + s.Type = sym.SRODATA } } else { if sect.name == "__bss" { - s.Type = SNOPTRBSS + s.Type = sym.SNOPTRBSS s.P = s.P[:0] } else { - s.Type = SNOPTRDATA + s.Type = sym.SNOPTRDATA } } @@ -620,80 +596,79 @@ func ldmacho(ctxt *Link, f *bio.Reader, pkg string, length int64, pn 
string) { // enter sub-symbols into symbol table. // have to guess sizes from next symbol. - for i := 0; uint32(i) < symtab.nsym; i++ { - sym = &symtab.sym[i] - if sym.type_&N_STAB != 0 { + for i := uint32(0); i < symtab.nsym; i++ { + machsym := &symtab.sym[i] + if machsym.type_&N_STAB != 0 { continue } // TODO: check sym->type against outer->type. - name = sym.name + name := machsym.name if name[0] == '_' && name[1] != '\x00' { name = name[1:] } v := 0 - if sym.type_&N_EXT == 0 { + if machsym.type_&N_EXT == 0 { v = localSymVersion } - s = ctxt.Syms.Lookup(name, v) - if sym.type_&N_EXT == 0 { - s.Attr |= AttrDuplicateOK + s := syms.Lookup(name, v) + if machsym.type_&N_EXT == 0 { + s.Attr |= sym.AttrDuplicateOK } - sym.sym = s - if sym.sectnum == 0 { // undefined + machsym.sym = s + if machsym.sectnum == 0 { // undefined continue } - if uint32(sym.sectnum) > c.seg.nsect { - err = fmt.Errorf("reference to invalid section %d", sym.sectnum) - goto bad + if uint32(machsym.sectnum) > c.seg.nsect { + return errorf("reference to invalid section %d", machsym.sectnum) } - sect = &c.seg.sect[sym.sectnum-1] - outer = sect.sym + sect := &c.seg.sect[machsym.sectnum-1] + outer := sect.sym if outer == nil { - err = fmt.Errorf("reference to invalid section %s/%s", sect.segname, sect.name) - continue + continue // ignore reference to invalid section } if s.Outer != nil { if s.Attr.DuplicateOK() { continue } - Exitf("%s: duplicate symbol reference: %s in both %s and %s", pn, s.Name, s.Outer.Name, sect.sym.Name) + return errorf("duplicate symbol reference: %s in both %s and %s", s.Name, s.Outer.Name, sect.sym.Name) } - s.Type = outer.Type | SSUB + s.Type = outer.Type + s.Attr |= sym.AttrSubSymbol s.Sub = outer.Sub outer.Sub = s s.Outer = outer - s.Value = int64(sym.value - sect.addr) + s.Value = int64(machsym.value - sect.addr) if !s.Attr.CgoExportDynamic() { s.Dynimplib = "" // satisfy dynimport } - if outer.Type == STEXT { + if outer.Type == sym.STEXT { if s.Attr.External() && 
!s.Attr.DuplicateOK() { - Errorf(s, "%s: duplicate symbol definition", pn) + return errorf("%v: duplicate symbol definition", s) } - s.Attr |= AttrExternal + s.Attr |= sym.AttrExternal } - sym.sym = s + machsym.sym = s } // Sort outer lists by address, adding to textp. // This keeps textp in increasing address order. for i := 0; uint32(i) < c.seg.nsect; i++ { - sect = &c.seg.sect[i] - s = sect.sym + sect := &c.seg.sect[i] + s := sect.sym if s == nil { continue } if s.Sub != nil { - s.Sub = listsort(s.Sub) + s.Sub = sym.SortSub(s.Sub) // assign sizes, now that we know symbols in sorted order. - for s1 = s.Sub; s1 != nil; s1 = s1.Sub { + for s1 := s.Sub; s1 != nil; s1 = s1.Sub { if s1.Sub != nil { s1.Size = s1.Sub.Value - s1.Value } else { @@ -702,26 +677,26 @@ func ldmacho(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { } } - if s.Type == STEXT { + if s.Type == sym.STEXT { if s.Attr.OnList() { - log.Fatalf("symbol %s listed multiple times", s.Name) + return errorf("symbol %s listed multiple times", s.Name) } - s.Attr |= AttrOnList - ctxt.Textp = append(ctxt.Textp, s) - for s1 = s.Sub; s1 != nil; s1 = s1.Sub { + s.Attr |= sym.AttrOnList + textp = append(textp, s) + for s1 := s.Sub; s1 != nil; s1 = s1.Sub { if s1.Attr.OnList() { - log.Fatalf("symbol %s listed multiple times", s1.Name) + return errorf("symbol %s listed multiple times", s1.Name) } - s1.Attr |= AttrOnList - ctxt.Textp = append(ctxt.Textp, s1) + s1.Attr |= sym.AttrOnList + textp = append(textp, s1) } } } // load relocations for i := 0; uint32(i) < c.seg.nsect; i++ { - sect = &c.seg.sect[i] - s = sect.sym + sect := &c.seg.sect[i] + s := sect.sym if s == nil { continue } @@ -729,17 +704,16 @@ func ldmacho(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { if sect.rel == nil { continue } - r = make([]Reloc, sect.nreloc) - rpi = 0 + r := make([]sym.Reloc, sect.nreloc) + rpi := 0 Reloc: - for j = 0; uint32(j) < sect.nreloc; j++ { - rp = &r[rpi] - rel = §.rel[j] + for j := 
uint32(0); j < sect.nreloc; j++ { + rp := &r[rpi] + rel := §.rel[j] if rel.scattered != 0 { - if SysArch.Family != sys.I386 { + if arch.Family != sys.I386 { // mach-o only uses scattered relocation on 32-bit platforms - Errorf(s, "unexpected scattered relocation") - continue + return errorf("%v: unexpected scattered relocation", s) } // on 386, rewrite scattered 4/1 relocation and some @@ -747,14 +721,12 @@ func ldmacho(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { // reference that it is. // assume that the second in the pair is in this section // and use that as the pc-relative base. - if uint32(j+1) >= sect.nreloc { - err = fmt.Errorf("unsupported scattered relocation %d", int(rel.type_)) - goto bad + if j+1 >= sect.nreloc { + return errorf("unsupported scattered relocation %d", int(rel.type_)) } if sect.rel[j+1].scattered == 0 || sect.rel[j+1].type_ != 1 || (rel.type_ != 4 && rel.type_ != 2) || uint64(sect.rel[j+1].value) < sect.addr || uint64(sect.rel[j+1].value) >= sect.addr+sect.size { - err = fmt.Errorf("unsupported scattered relocation %d/%d", int(rel.type_), int(sect.rel[j+1].type_)) - goto bad + return errorf("unsupported scattered relocation %d/%d", int(rel.type_), int(sect.rel[j+1].type_)) } rp.Siz = rel.length @@ -778,9 +750,8 @@ func ldmacho(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { // now consider the desired symbol. // find the section where it lives. 
- var ks *ldMachoSect for k := 0; uint32(k) < c.seg.nsect; k++ { - ks = &c.seg.sect[k] + ks := &c.seg.sect[k] if ks.addr <= uint64(rel.value) && uint64(rel.value) < ks.addr+ks.size { if ks.sym != nil { rp.Sym = ks.sym @@ -797,20 +768,17 @@ func ldmacho(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { // load indirect table for __pointers // fetch symbol number if dsymtab == nil || k < 0 || uint32(k) >= dsymtab.nindirectsyms || dsymtab.indir == nil { - err = fmt.Errorf("invalid scattered relocation: indirect symbol reference out of range") - goto bad + return errorf("invalid scattered relocation: indirect symbol reference out of range") } k = int(dsymtab.indir[k]) if k < 0 || uint32(k) >= symtab.nsym { - err = fmt.Errorf("invalid scattered relocation: symbol reference out of range") - goto bad + return errorf("invalid scattered relocation: symbol reference out of range") } rp.Sym = symtab.sym[k].sym } else { - err = fmt.Errorf("unsupported scattered relocation: reference to %s/%s", ks.segname, ks.name) - goto bad + return errorf("unsupported scattered relocation: reference to %s/%s", ks.segname, ks.name) } rpi++ @@ -822,9 +790,7 @@ func ldmacho(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { } } - err = fmt.Errorf("unsupported scattered relocation: invalid address %#x", rel.addr) - goto bad - + return errorf("unsupported scattered relocation: invalid address %#x", rel.addr) } rp.Siz = rel.length @@ -832,7 +798,7 @@ func ldmacho(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { rp.Off = int32(rel.addr) // Handle X86_64_RELOC_SIGNED referencing a section (rel->extrn == 0). - if SysArch.Family == sys.AMD64 && rel.extrn == 0 && rel.type_ == 1 { + if arch.Family == sys.AMD64 && rel.extrn == 0 && rel.type_ == MACHO_X86_64_RELOC_SIGNED { // Calculate the addend as the offset into the section. 
// // The rip-relative offset stored in the object file is encoded @@ -848,42 +814,46 @@ func ldmacho(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { // section found in the original object file. // // [For future reference, see Darwin's /usr/include/mach-o/x86_64/reloc.h] - secaddr = c.seg.sect[rel.symnum-1].addr + secaddr := c.seg.sect[rel.symnum-1].addr rp.Add = int64(uint64(int64(int32(e.Uint32(s.P[rp.Off:])))+int64(rp.Off)+4) - secaddr) } else { rp.Add = int64(int32(e.Uint32(s.P[rp.Off:]))) } + // An unsigned internal relocation has a value offset + // by the section address. + if arch.Family == sys.AMD64 && rel.extrn == 0 && rel.type_ == MACHO_X86_64_RELOC_UNSIGNED { + secaddr := c.seg.sect[rel.symnum-1].addr + rp.Add -= int64(secaddr) + } + // For i386 Mach-O PC-relative, the addend is written such that // it *is* the PC being subtracted. Use that to make // it match our version of PC-relative. - if rel.pcrel != 0 && SysArch.Family == sys.I386 { + if rel.pcrel != 0 && arch.Family == sys.I386 { rp.Add += int64(rp.Off) + int64(rp.Siz) } if rel.extrn == 0 { if rel.symnum < 1 || rel.symnum > c.seg.nsect { - err = fmt.Errorf("invalid relocation: section reference out of range %d vs %d", rel.symnum, c.seg.nsect) - goto bad + return errorf("invalid relocation: section reference out of range %d vs %d", rel.symnum, c.seg.nsect) } rp.Sym = c.seg.sect[rel.symnum-1].sym if rp.Sym == nil { - err = fmt.Errorf("invalid relocation: %s", c.seg.sect[rel.symnum-1].name) - goto bad + return errorf("invalid relocation: %s", c.seg.sect[rel.symnum-1].name) } // References to symbols in other sections // include that information in the addend. // We only care about the delta from the // section base. 
- if SysArch.Family == sys.I386 { + if arch.Family == sys.I386 { rp.Add -= int64(c.seg.sect[rel.symnum-1].addr) } } else { if rel.symnum >= symtab.nsym { - err = fmt.Errorf("invalid relocation: symbol reference out of range") - goto bad + return errorf("invalid relocation: symbol reference out of range") } rp.Sym = symtab.sym[rel.symnum].sym @@ -892,13 +862,18 @@ func ldmacho(ctxt *Link, f *bio.Reader, pkg string, length int64, pn string) { rpi++ } - sort.Sort(rbyoff(r[:rpi])) + sort.Sort(sym.RelocByOff(r[:rpi])) s.R = r s.R = s.R[:rpi] } - return - -bad: - Errorf(nil, "%s: malformed mach-o file: %v", pn, err) + return textp, nil +} + +func cstring(x []byte) string { + i := bytes.IndexByte(x, '\x00') + if i >= 0 { + x = x[:i] + } + return string(x) } diff --git a/src/cmd/link/internal/ld/ldpe.go b/src/cmd/link/internal/loadpe/ldpe.go similarity index 71% rename from src/cmd/link/internal/ld/ldpe.go rename to src/cmd/link/internal/loadpe/ldpe.go index 14f56d4b3c7..09c9e31c2e3 100644 --- a/src/cmd/link/internal/ld/ldpe.go +++ b/src/cmd/link/internal/loadpe/ldpe.go @@ -2,17 +2,19 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ld +// Package loadpe implements a PE/COFF file reader. +package loadpe import ( "cmd/internal/bio" "cmd/internal/objabi" "cmd/internal/sys" + "cmd/link/internal/sym" "debug/pe" + "encoding/binary" "errors" "fmt" "io" - "log" "sort" "strings" ) @@ -101,6 +103,17 @@ const ( IMAGE_REL_AMD64_SSPAN32 = 0x0010 ) +// TODO(crawshaw): de-duplicate these symbols with cmd/internal/ld, ideally in debug/pe. 
+const ( + IMAGE_SCN_CNT_CODE = 0x00000020 + IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040 + IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080 + IMAGE_SCN_MEM_DISCARDABLE = 0x02000000 + IMAGE_SCN_MEM_EXECUTE = 0x20000000 + IMAGE_SCN_MEM_READ = 0x40000000 + IMAGE_SCN_MEM_WRITE = 0x80000000 +) + // TODO(brainman): maybe just add ReadAt method to bio.Reader instead of creating peBiobuf // peBiobuf makes bio.Reader look like io.ReaderAt. @@ -118,21 +131,13 @@ func (f *peBiobuf) ReadAt(p []byte, off int64) (int, error) { return n, nil } -func ldpe(ctxt *Link, input *bio.Reader, pkg string, length int64, pn string) { - err := ldpeError(ctxt, input, pkg, length, pn) - if err != nil { - Errorf(nil, "%s: malformed pe file: %v", pn, err) - } -} +// Load loads the PE file pn from input. +// Symbols are written into syms, and a slice of the text symbols is returned. +// If an .rsrc section is found, its symbol is returned as rsrc. +func Load(arch *sys.Arch, syms *sym.Symbols, input *bio.Reader, pkg string, length int64, pn string) (textp []*sym.Symbol, rsrc *sym.Symbol, err error) { + localSymVersion := syms.IncVersion() -func ldpeError(ctxt *Link, input *bio.Reader, pkg string, length int64, pn string) error { - if ctxt.Debugvlog != 0 { - ctxt.Logf("%5.2f ldpe %s\n", Cputime(), pn) - } - - localSymVersion := ctxt.Syms.IncVersion() - - sectsyms := make(map[*pe.Section]*Symbol) + sectsyms := make(map[*pe.Section]*sym.Symbol) sectdata := make(map[*pe.Section][]byte) // Some input files are archives containing multiple of @@ -144,7 +149,7 @@ func ldpeError(ctxt *Link, input *bio.Reader, pkg string, length int64, pn strin // TODO: replace pe.NewFile with pe.Load (grep for "add Load function" in debug/pe for details) f, err := pe.NewFile(sr) if err != nil { - return err + return nil, nil, err } defer f.Close() @@ -163,29 +168,29 @@ func ldpeError(ctxt *Link, input *bio.Reader, pkg string, length int64, pn strin } name := fmt.Sprintf("%s(%s)", pkg, sect.Name) - s := ctxt.Syms.Lookup(name, 
localSymVersion) + s := syms.Lookup(name, localSymVersion) switch sect.Characteristics & (IMAGE_SCN_CNT_UNINITIALIZED_DATA | IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | IMAGE_SCN_CNT_CODE | IMAGE_SCN_MEM_EXECUTE) { case IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ: //.rdata - s.Type = SRODATA + s.Type = sym.SRODATA case IMAGE_SCN_CNT_UNINITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE: //.bss - s.Type = SNOPTRBSS + s.Type = sym.SNOPTRBSS case IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE: //.data - s.Type = SNOPTRDATA + s.Type = sym.SNOPTRDATA case IMAGE_SCN_CNT_CODE | IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_READ: //.text - s.Type = STEXT + s.Type = sym.STEXT default: - return fmt.Errorf("unexpected flags %#06x for PE section %s", sect.Characteristics, sect.Name) + return nil, nil, fmt.Errorf("unexpected flags %#06x for PE section %s", sect.Characteristics, sect.Name) } - if s.Type != SNOPTRBSS { + if s.Type != sym.SNOPTRBSS { data, err := sect.Data() if err != nil { - return err + return nil, nil, err } sectdata[sect] = data s.P = data @@ -193,7 +198,7 @@ func ldpeError(ctxt *Link, input *bio.Reader, pkg string, length int64, pn strin s.Size = int64(sect.Size) sectsyms[sect] = s if sect.Name == ".rsrc" { - setpersrc(ctxt, s) + rsrc = s } } @@ -214,23 +219,23 @@ func ldpeError(ctxt *Link, input *bio.Reader, pkg string, length int64, pn strin continue } - rs := make([]Reloc, rsect.NumberOfRelocations) + rs := make([]sym.Reloc, rsect.NumberOfRelocations) for j, r := range rsect.Relocs { rp := &rs[j] if int(r.SymbolTableIndex) >= len(f.COFFSymbols) { - return fmt.Errorf("relocation number %d symbol index idx=%d cannot be large then number of symbols %d", j, r.SymbolTableIndex, len(f.COFFSymbols)) + return nil, nil, fmt.Errorf("relocation number %d symbol index idx=%d cannot be large then number of symbols %d", j, r.SymbolTableIndex, len(f.COFFSymbols)) } pesym := 
&f.COFFSymbols[r.SymbolTableIndex] - gosym, err := readpesym(ctxt, f, pesym, sectsyms, localSymVersion) + gosym, err := readpesym(arch, syms, f, pesym, sectsyms, localSymVersion) if err != nil { - return err + return nil, nil, err } if gosym == nil { name, err := pesym.FullName(f.StringTable) if err != nil { name = string(pesym.Name[:]) } - return fmt.Errorf("reloc of invalid sym %s idx=%d type=%d", name, r.SymbolTableIndex, pesym.Type) + return nil, nil, fmt.Errorf("reloc of invalid sym %s idx=%d type=%d", name, r.SymbolTableIndex, pesym.Type) } rp.Sym = gosym @@ -238,21 +243,20 @@ func ldpeError(ctxt *Link, input *bio.Reader, pkg string, length int64, pn strin rp.Off = int32(r.VirtualAddress) switch r.Type { default: - Errorf(sectsyms[rsect], "%s: unknown relocation type %d;", pn, r.Type) - fallthrough + return nil, nil, fmt.Errorf("%s: %v: unknown relocation type %v", pn, sectsyms[rsect], r.Type) case IMAGE_REL_I386_REL32, IMAGE_REL_AMD64_REL32, IMAGE_REL_AMD64_ADDR32, // R_X86_64_PC32 IMAGE_REL_AMD64_ADDR32NB: rp.Type = objabi.R_PCREL - rp.Add = int64(int32(Le32(sectdata[rsect][rp.Off:]))) + rp.Add = int64(int32(binary.LittleEndian.Uint32(sectdata[rsect][rp.Off:]))) case IMAGE_REL_I386_DIR32NB, IMAGE_REL_I386_DIR32: rp.Type = objabi.R_ADDR // load addend from image - rp.Add = int64(int32(Le32(sectdata[rsect][rp.Off:]))) + rp.Add = int64(int32(binary.LittleEndian.Uint32(sectdata[rsect][rp.Off:]))) case IMAGE_REL_AMD64_ADDR64: // R_X86_64_64 rp.Siz = 8 @@ -260,7 +264,7 @@ func ldpeError(ctxt *Link, input *bio.Reader, pkg string, length int64, pn strin rp.Type = objabi.R_ADDR // load addend from image - rp.Add = int64(Le64(sectdata[rsect][rp.Off:])) + rp.Add = int64(binary.LittleEndian.Uint64(sectdata[rsect][rp.Off:])) } // ld -r could generate multiple section symbols for the @@ -271,7 +275,7 @@ func ldpeError(ctxt *Link, input *bio.Reader, pkg string, length int64, pn strin } } - sort.Sort(rbyoff(rs[:rsect.NumberOfRelocations])) + 
sort.Sort(sym.RelocByOff(rs[:rsect.NumberOfRelocations])) s := sectsyms[rsect] s.R = rs @@ -286,7 +290,7 @@ func ldpeError(ctxt *Link, input *bio.Reader, pkg string, length int64, pn strin name, err := pesym.FullName(f.StringTable) if err != nil { - return err + return nil, nil, err } if name == "" { continue @@ -308,17 +312,17 @@ func ldpeError(ctxt *Link, input *bio.Reader, pkg string, length int64, pn strin } } - s, err := readpesym(ctxt, f, pesym, sectsyms, localSymVersion) + s, err := readpesym(arch, syms, f, pesym, sectsyms, localSymVersion) if err != nil { - return err + return nil, nil, err } if pesym.SectionNumber == 0 { // extern - if s.Type == SDYNIMPORT { + if s.Type == sym.SDYNIMPORT { s.Plt = -2 // flag for dynimport in PE object files. } - if s.Type == SXREF && pesym.Value > 0 { // global data - s.Type = SNOPTRDATA + if s.Type == sym.SXREF && pesym.Value > 0 { // global data + s.Type = sym.SNOPTRDATA s.Size = int64(pesym.Value) } @@ -326,35 +330,36 @@ func ldpeError(ctxt *Link, input *bio.Reader, pkg string, length int64, pn strin } else if pesym.SectionNumber > 0 && int(pesym.SectionNumber) <= len(f.Sections) { sect = f.Sections[pesym.SectionNumber-1] if _, found := sectsyms[sect]; !found { - Errorf(s, "%s: missing sect.sym", pn) + return nil, nil, fmt.Errorf("%s: %v: missing sect.sym", pn, s) } } else { - Errorf(s, "%s: sectnum < 0!", pn) + return nil, nil, fmt.Errorf("%s: %v: sectnum < 0!", pn, s) } if sect == nil { - return nil + return nil, rsrc, nil } if s.Outer != nil { if s.Attr.DuplicateOK() { continue } - Exitf("%s: duplicate symbol reference: %s in both %s and %s", pn, s.Name, s.Outer.Name, sectsyms[sect].Name) + return nil, nil, fmt.Errorf("%s: duplicate symbol reference: %s in both %s and %s", pn, s.Name, s.Outer.Name, sectsyms[sect].Name) } sectsym := sectsyms[sect] s.Sub = sectsym.Sub sectsym.Sub = s - s.Type = sectsym.Type | SSUB + s.Type = sectsym.Type + s.Attr |= sym.AttrSubSymbol s.Value = int64(pesym.Value) s.Size = 4 s.Outer = 
sectsym - if sectsym.Type == STEXT { + if sectsym.Type == sym.STEXT { if s.Attr.External() && !s.Attr.DuplicateOK() { - Errorf(s, "%s: duplicate symbol definition", pn) + return nil, nil, fmt.Errorf("%s: duplicate symbol definition", s.Name) } - s.Attr |= AttrExternal + s.Attr |= sym.AttrExternal } } @@ -366,45 +371,45 @@ func ldpeError(ctxt *Link, input *bio.Reader, pkg string, length int64, pn strin continue } if s.Sub != nil { - s.Sub = listsort(s.Sub) + s.Sub = sym.SortSub(s.Sub) } - if s.Type == STEXT { + if s.Type == sym.STEXT { if s.Attr.OnList() { - log.Fatalf("symbol %s listed multiple times", s.Name) + return nil, nil, fmt.Errorf("symbol %s listed multiple times", s.Name) } - s.Attr |= AttrOnList - ctxt.Textp = append(ctxt.Textp, s) + s.Attr |= sym.AttrOnList + textp = append(textp, s) for s = s.Sub; s != nil; s = s.Sub { if s.Attr.OnList() { - log.Fatalf("symbol %s listed multiple times", s.Name) + return nil, nil, fmt.Errorf("symbol %s listed multiple times", s.Name) } - s.Attr |= AttrOnList - ctxt.Textp = append(ctxt.Textp, s) + s.Attr |= sym.AttrOnList + textp = append(textp, s) } } } - return nil + return textp, rsrc, nil } func issect(s *pe.COFFSymbol) bool { return s.StorageClass == IMAGE_SYM_CLASS_STATIC && s.Type == 0 && s.Name[0] == '.' 
} -func readpesym(ctxt *Link, f *pe.File, sym *pe.COFFSymbol, sectsyms map[*pe.Section]*Symbol, localSymVersion int) (*Symbol, error) { - symname, err := sym.FullName(f.StringTable) +func readpesym(arch *sys.Arch, syms *sym.Symbols, f *pe.File, pesym *pe.COFFSymbol, sectsyms map[*pe.Section]*sym.Symbol, localSymVersion int) (*sym.Symbol, error) { + symname, err := pesym.FullName(f.StringTable) if err != nil { return nil, err } var name string - if issect(sym) { - name = sectsyms[f.Sections[sym.SectionNumber-1]].Name + if issect(pesym) { + name = sectsyms[f.Sections[pesym.SectionNumber-1]].Name } else { name = symname if strings.HasPrefix(name, "__imp_") { name = name[6:] // __imp_Name => Name } - if SysArch.Family == sys.I386 && name[0] == '_' { + if arch.Family == sys.I386 && name[0] == '_' { name = name[1:] // _Name => Name } } @@ -414,27 +419,27 @@ func readpesym(ctxt *Link, f *pe.File, sym *pe.COFFSymbol, sectsyms map[*pe.Sect name = name[:i] } - var s *Symbol - switch sym.Type { + var s *sym.Symbol + switch pesym.Type { default: - return nil, fmt.Errorf("%s: invalid symbol type %d", symname, sym.Type) + return nil, fmt.Errorf("%s: invalid symbol type %d", symname, pesym.Type) case IMAGE_SYM_DTYPE_FUNCTION, IMAGE_SYM_DTYPE_NULL: - switch sym.StorageClass { + switch pesym.StorageClass { case IMAGE_SYM_CLASS_EXTERNAL: //global - s = ctxt.Syms.Lookup(name, 0) + s = syms.Lookup(name, 0) case IMAGE_SYM_CLASS_NULL, IMAGE_SYM_CLASS_STATIC, IMAGE_SYM_CLASS_LABEL: - s = ctxt.Syms.Lookup(name, localSymVersion) - s.Attr |= AttrDuplicateOK + s = syms.Lookup(name, localSymVersion) + s.Attr |= sym.AttrDuplicateOK default: - return nil, fmt.Errorf("%s: invalid symbol binding %d", symname, sym.StorageClass) + return nil, fmt.Errorf("%s: invalid symbol binding %d", symname, pesym.StorageClass) } } - if s != nil && s.Type == 0 && (sym.StorageClass != IMAGE_SYM_CLASS_STATIC || sym.Value != 0) { - s.Type = SXREF + if s != nil && s.Type == 0 && (pesym.StorageClass != 
IMAGE_SYM_CLASS_STATIC || pesym.Value != 0) { + s.Type = sym.SXREF } if strings.HasPrefix(symname, "__imp_") { s.Got = -2 // flag for __imp_ diff --git a/src/cmd/link/internal/mips/asm.go b/src/cmd/link/internal/mips/asm.go index 353f2c70fa0..306d53f5715 100644 --- a/src/cmd/link/internal/mips/asm.go +++ b/src/cmd/link/internal/mips/asm.go @@ -32,7 +32,10 @@ package mips import ( "cmd/internal/objabi" + "cmd/internal/sys" "cmd/link/internal/ld" + "cmd/link/internal/sym" + "debug/elf" "fmt" "log" ) @@ -41,51 +44,46 @@ func gentext(ctxt *ld.Link) { return } -func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { +func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { log.Fatalf("adddynrel not implemented") return false } -func elfreloc1(ctxt *ld.Link, r *ld.Reloc, sectoff int64) int { - ld.Thearch.Lput(uint32(sectoff)) +func elfreloc1(ctxt *ld.Link, r *sym.Reloc, sectoff int64) bool { + ctxt.Out.Write32(uint32(sectoff)) elfsym := r.Xsym.ElfsymForReloc() switch r.Type { default: - return -1 - + return false case objabi.R_ADDR: if r.Siz != 4 { - return -1 + return false } - ld.Thearch.Lput(ld.R_MIPS_32 | uint32(elfsym)<<8) - + ctxt.Out.Write32(uint32(elf.R_MIPS_32) | uint32(elfsym)<<8) case objabi.R_ADDRMIPS: - ld.Thearch.Lput(ld.R_MIPS_LO16 | uint32(elfsym)<<8) - + ctxt.Out.Write32(uint32(elf.R_MIPS_LO16) | uint32(elfsym)<<8) case objabi.R_ADDRMIPSU: - ld.Thearch.Lput(ld.R_MIPS_HI16 | uint32(elfsym)<<8) - + ctxt.Out.Write32(uint32(elf.R_MIPS_HI16) | uint32(elfsym)<<8) case objabi.R_ADDRMIPSTLS: - ld.Thearch.Lput(ld.R_MIPS_TLS_TPREL_LO16 | uint32(elfsym)<<8) - + ctxt.Out.Write32(uint32(elf.R_MIPS_TLS_TPREL_LO16) | uint32(elfsym)<<8) case objabi.R_CALLMIPS, objabi.R_JMPMIPS: - ld.Thearch.Lput(ld.R_MIPS_26 | uint32(elfsym)<<8) + ctxt.Out.Write32(uint32(elf.R_MIPS_26) | uint32(elfsym)<<8) } - return 0 + return true } func elfsetupplt(ctxt *ld.Link) { return } -func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { - return -1 +func 
machoreloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, sectoff int64) bool { + return false } -func applyrel(r *ld.Reloc, s *ld.Symbol, val *int64, t int64) { - o := ld.SysArch.ByteOrder.Uint32(s.P[r.Off:]) +func applyrel(arch *sys.Arch, r *sym.Reloc, s *sym.Symbol, val *int64, t int64) { + o := arch.ByteOrder.Uint32(s.P[r.Off:]) switch r.Type { case objabi.R_ADDRMIPS, objabi.R_ADDRMIPSTLS: *val = int64(o&0xffff0000 | uint32(t)&0xffff) @@ -96,15 +94,13 @@ func applyrel(r *ld.Reloc, s *ld.Symbol, val *int64, t int64) { } } -func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { - if ld.Linkmode == ld.LinkExternal { +func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { + if ctxt.LinkMode == ld.LinkExternal { switch r.Type { default: - return -1 - + return false case objabi.R_ADDRMIPS, objabi.R_ADDRMIPSU: - - r.Done = 0 + r.Done = false // set up addend for eventual relocation via outer symbol. rs := r.Sym @@ -114,36 +110,32 @@ func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { rs = rs.Outer } - if rs.Type != ld.SHOSTOBJ && rs.Type != ld.SDYNIMPORT && rs.Sect == nil { + if rs.Type != sym.SHOSTOBJ && rs.Type != sym.SDYNIMPORT && rs.Sect == nil { ld.Errorf(s, "missing section for %s", rs.Name) } r.Xsym = rs - applyrel(r, s, val, r.Xadd) - return 0 - + applyrel(ctxt.Arch, r, s, val, r.Xadd) + return true case objabi.R_ADDRMIPSTLS, objabi.R_CALLMIPS, objabi.R_JMPMIPS: - r.Done = 0 + r.Done = false r.Xsym = r.Sym r.Xadd = r.Add - applyrel(r, s, val, r.Add) - return 0 + applyrel(ctxt.Arch, r, s, val, r.Add) + return true } } switch r.Type { case objabi.R_CONST: *val = r.Add - return 0 - + return true case objabi.R_GOTOFF: *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)) - return 0 - + return true case objabi.R_ADDRMIPS, objabi.R_ADDRMIPSU: t := ld.Symaddr(r.Sym) + r.Add - applyrel(r, s, val, t) - return 0 - + applyrel(ctxt.Arch, r, s, val, t) + return true case 
objabi.R_CALLMIPS, objabi.R_JMPMIPS: t := ld.Symaddr(r.Sym) + r.Add @@ -156,23 +148,22 @@ func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { ld.Errorf(s, "direct call too far: %s %x", r.Sym.Name, t) } - applyrel(r, s, val, t) - return 0 - + applyrel(ctxt.Arch, r, s, val, t) + return true case objabi.R_ADDRMIPSTLS: // thread pointer is at 0x7000 offset from the start of TLS data area t := ld.Symaddr(r.Sym) + r.Add - 0x7000 if t < -32768 || t >= 32678 { ld.Errorf(s, "TLS offset out of range %d", t) } - applyrel(r, s, val, t) - return 0 + applyrel(ctxt.Arch, r, s, val, t) + return true } - return -1 + return false } -func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 { +func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 { return -1 } @@ -181,15 +172,15 @@ func asmb(ctxt *ld.Link) { ctxt.Logf("%5.2f asmb\n", ld.Cputime()) } - if ld.Iself { + if ctxt.IsELF { ld.Asmbelfsetup() } sect := ld.Segtext.Sections[0] - ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) + ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) ld.Codeblk(ctxt, int64(sect.Vaddr), int64(sect.Length)) for _, sect = range ld.Segtext.Sections[1:] { - ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) + ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length)) } @@ -198,7 +189,7 @@ func asmb(ctxt *ld.Link) { ctxt.Logf("%5.2f rodatblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segrodata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segrodata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen)) } @@ -206,10 +197,10 @@ func asmb(ctxt *ld.Link) { ctxt.Logf("%5.2f datblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segdata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segdata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen)) - ld.Cseek(int64(ld.Segdwarf.Fileoff)) + 
ctxt.Out.SeekSet(int64(ld.Segdwarf.Fileoff)) ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen)) /* output symbol table */ @@ -218,7 +209,7 @@ func asmb(ctxt *ld.Link) { ld.Lcsize = 0 symo := uint32(0) if !*ld.FlagS { - if !ld.Iself { + if !ctxt.IsELF { ld.Errorf(nil, "unsupported executable format") } if ctxt.Debugvlog != 0 { @@ -227,19 +218,19 @@ func asmb(ctxt *ld.Link) { symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen) symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound))) - ld.Cseek(int64(symo)) + ctxt.Out.SeekSet(int64(symo)) if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f elfsym\n", ld.Cputime()) } ld.Asmelfsym(ctxt) - ld.Cflush() - ld.Cwrite(ld.Elfstrdat) + ctxt.Out.Flush() + ctxt.Out.Write(ld.Elfstrdat) if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f dwarf\n", ld.Cputime()) } - if ld.Linkmode == ld.LinkExternal { + if ctxt.LinkMode == ld.LinkExternal { ld.Elfemitreloc(ctxt) } } @@ -248,15 +239,15 @@ func asmb(ctxt *ld.Link) { ctxt.Logf("%5.2f header\n", ld.Cputime()) } - ld.Cseek(0) - switch ld.Headtype { + ctxt.Out.SeekSet(0) + switch ctxt.HeadType { default: ld.Errorf(nil, "unsupported operating system") case objabi.Hlinux: ld.Asmbelf(ctxt, int64(symo)) } - ld.Cflush() + ctxt.Out.Flush() if *ld.FlagC { fmt.Printf("textsize=%d\n", ld.Segtext.Filelen) fmt.Printf("datsize=%d\n", ld.Segdata.Filelen) diff --git a/src/cmd/link/internal/mips/obj.go b/src/cmd/link/internal/mips/obj.go index 3ba02b794a5..c5d3451c39b 100644 --- a/src/cmd/link/internal/mips/obj.go +++ b/src/cmd/link/internal/mips/obj.go @@ -37,59 +37,45 @@ import ( "fmt" ) -// Reading object files. 
- -func Init() { +func Init() (*sys.Arch, ld.Arch) { + arch := sys.ArchMIPS if objabi.GOARCH == "mipsle" { - ld.SysArch = sys.ArchMIPSLE - } else { - ld.SysArch = sys.ArchMIPS + arch = sys.ArchMIPSLE } - ld.Thearch.Funcalign = FuncAlign - ld.Thearch.Maxalign = MaxAlign - ld.Thearch.Minalign = MinAlign - ld.Thearch.Dwarfregsp = DWARFREGSP - ld.Thearch.Dwarfreglr = DWARFREGLR + theArch := ld.Arch{ + Funcalign: FuncAlign, + Maxalign: MaxAlign, + Minalign: MinAlign, + Dwarfregsp: DWARFREGSP, + Dwarfreglr: DWARFREGLR, - ld.Thearch.Adddynrel = adddynrel - ld.Thearch.Archinit = archinit - ld.Thearch.Archreloc = archreloc - ld.Thearch.Archrelocvariant = archrelocvariant - ld.Thearch.Asmb = asmb - ld.Thearch.Elfreloc1 = elfreloc1 - ld.Thearch.Elfsetupplt = elfsetupplt - ld.Thearch.Gentext = gentext - ld.Thearch.Machoreloc1 = machoreloc1 - if ld.SysArch == sys.ArchMIPSLE { - ld.Thearch.Lput = ld.Lputl - ld.Thearch.Wput = ld.Wputl - ld.Thearch.Vput = ld.Vputl - ld.Thearch.Append16 = ld.Append16l - ld.Thearch.Append32 = ld.Append32l - ld.Thearch.Append64 = ld.Append64l - } else { - ld.Thearch.Lput = ld.Lputb - ld.Thearch.Wput = ld.Wputb - ld.Thearch.Vput = ld.Vputb - ld.Thearch.Append16 = ld.Append16b - ld.Thearch.Append32 = ld.Append32b - ld.Thearch.Append64 = ld.Append64b + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Asmb: asmb, + Elfreloc1: elfreloc1, + Elfsetupplt: elfsetupplt, + Gentext: gentext, + Machoreloc1: machoreloc1, + + Linuxdynld: "/lib/ld.so.1", + + Freebsddynld: "XXX", + Openbsddynld: "XXX", + Netbsddynld: "XXX", + Dragonflydynld: "XXX", + Solarisdynld: "XXX", } - ld.Thearch.Linuxdynld = "/lib/ld.so.1" - - ld.Thearch.Freebsddynld = "XXX" - ld.Thearch.Openbsddynld = "XXX" - ld.Thearch.Netbsddynld = "XXX" - ld.Thearch.Dragonflydynld = "XXX" - ld.Thearch.Solarisdynld = "XXX" + return arch, theArch } func archinit(ctxt *ld.Link) { - switch ld.Headtype { + switch ctxt.HeadType { default: - 
ld.Exitf("unknown -H option: %v", ld.Headtype) + ld.Exitf("unknown -H option: %v", ctxt.HeadType) case objabi.Hlinux: /* mips elf */ ld.Elfinit(ctxt) ld.HEADR = ld.ELFRESERVE diff --git a/src/cmd/link/internal/mips64/asm.go b/src/cmd/link/internal/mips64/asm.go index 3425681ac20..295a0aafaed 100644 --- a/src/cmd/link/internal/mips64/asm.go +++ b/src/cmd/link/internal/mips64/asm.go @@ -34,18 +34,20 @@ import ( "cmd/internal/objabi" "cmd/internal/sys" "cmd/link/internal/ld" + "cmd/link/internal/sym" + "debug/elf" "fmt" "log" ) func gentext(ctxt *ld.Link) {} -func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { +func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { log.Fatalf("adddynrel not implemented") return false } -func elfreloc1(ctxt *ld.Link, r *ld.Reloc, sectoff int64) int { +func elfreloc1(ctxt *ld.Link, r *sym.Reloc, sectoff int64) bool { // mips64 ELF relocation (endian neutral) // offset uint64 // sym uint32 @@ -55,62 +57,56 @@ func elfreloc1(ctxt *ld.Link, r *ld.Reloc, sectoff int64) int { // type uint8 // addend int64 - ld.Thearch.Vput(uint64(sectoff)) + ctxt.Out.Write64(uint64(sectoff)) elfsym := r.Xsym.ElfsymForReloc() - ld.Thearch.Lput(uint32(elfsym)) - ld.Cput(0) - ld.Cput(0) - ld.Cput(0) + ctxt.Out.Write32(uint32(elfsym)) + ctxt.Out.Write8(0) + ctxt.Out.Write8(0) + ctxt.Out.Write8(0) switch r.Type { default: - return -1 - + return false case objabi.R_ADDR: switch r.Siz { case 4: - ld.Cput(ld.R_MIPS_32) + ctxt.Out.Write8(uint8(elf.R_MIPS_32)) case 8: - ld.Cput(ld.R_MIPS_64) + ctxt.Out.Write8(uint8(elf.R_MIPS_64)) default: - return -1 + return false } - case objabi.R_ADDRMIPS: - ld.Cput(ld.R_MIPS_LO16) - + ctxt.Out.Write8(uint8(elf.R_MIPS_LO16)) case objabi.R_ADDRMIPSU: - ld.Cput(ld.R_MIPS_HI16) - + ctxt.Out.Write8(uint8(elf.R_MIPS_HI16)) case objabi.R_ADDRMIPSTLS: - ld.Cput(ld.R_MIPS_TLS_TPREL_LO16) - + ctxt.Out.Write8(uint8(elf.R_MIPS_TLS_TPREL_LO16)) case objabi.R_CALLMIPS, objabi.R_JMPMIPS: - ld.Cput(ld.R_MIPS_26) + 
ctxt.Out.Write8(uint8(elf.R_MIPS_26)) } - ld.Thearch.Vput(uint64(r.Xadd)) + ctxt.Out.Write64(uint64(r.Xadd)) - return 0 + return true } func elfsetupplt(ctxt *ld.Link) { return } -func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { - return -1 +func machoreloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, sectoff int64) bool { + return false } -func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { - if ld.Linkmode == ld.LinkExternal { +func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { + if ctxt.LinkMode == ld.LinkExternal { switch r.Type { default: - return -1 - + return false case objabi.R_ADDRMIPS, objabi.R_ADDRMIPSU: - r.Done = 0 + r.Done = false // set up addend for eventual relocation via outer symbol. rs := r.Sym @@ -120,66 +116,61 @@ func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { rs = rs.Outer } - if rs.Type != ld.SHOSTOBJ && rs.Type != ld.SDYNIMPORT && rs.Sect == nil { + if rs.Type != sym.SHOSTOBJ && rs.Type != sym.SDYNIMPORT && rs.Sect == nil { ld.Errorf(s, "missing section for %s", rs.Name) } r.Xsym = rs - return 0 - + return true case objabi.R_ADDRMIPSTLS, objabi.R_CALLMIPS, objabi.R_JMPMIPS: - r.Done = 0 + r.Done = false r.Xsym = r.Sym r.Xadd = r.Add - return 0 + return true } } switch r.Type { case objabi.R_CONST: *val = r.Add - return 0 - + return true case objabi.R_GOTOFF: *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)) - return 0 - + return true case objabi.R_ADDRMIPS, objabi.R_ADDRMIPSU: t := ld.Symaddr(r.Sym) + r.Add - o1 := ld.SysArch.ByteOrder.Uint32(s.P[r.Off:]) + o1 := ctxt.Arch.ByteOrder.Uint32(s.P[r.Off:]) if r.Type == objabi.R_ADDRMIPS { *val = int64(o1&0xffff0000 | uint32(t)&0xffff) } else { *val = int64(o1&0xffff0000 | uint32((t+1<<15)>>16)&0xffff) } - return 0 - + return true case objabi.R_ADDRMIPSTLS: // thread pointer is at 0x7000 offset from the start of TLS data area t := ld.Symaddr(r.Sym) + r.Add - 
0x7000 if t < -32768 || t >= 32678 { ld.Errorf(s, "TLS offset out of range %d", t) } - o1 := ld.SysArch.ByteOrder.Uint32(s.P[r.Off:]) + o1 := ctxt.Arch.ByteOrder.Uint32(s.P[r.Off:]) *val = int64(o1&0xffff0000 | uint32(t)&0xffff) - return 0 - + return true case objabi.R_CALLMIPS, objabi.R_JMPMIPS: // Low 26 bits = (S + A) >> 2 t := ld.Symaddr(r.Sym) + r.Add - o1 := ld.SysArch.ByteOrder.Uint32(s.P[r.Off:]) + o1 := ctxt.Arch.ByteOrder.Uint32(s.P[r.Off:]) *val = int64(o1&0xfc000000 | uint32(t>>2)&^0xfc000000) - return 0 + return true } - return -1 + return false } -func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 { +func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 { return -1 } @@ -188,15 +179,15 @@ func asmb(ctxt *ld.Link) { ctxt.Logf("%5.2f asmb\n", ld.Cputime()) } - if ld.Iself { + if ctxt.IsELF { ld.Asmbelfsetup() } sect := ld.Segtext.Sections[0] - ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) + ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) ld.Codeblk(ctxt, int64(sect.Vaddr), int64(sect.Length)) for _, sect = range ld.Segtext.Sections[1:] { - ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) + ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length)) } @@ -204,14 +195,14 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f rodatblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segrodata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segrodata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen)) } if ld.Segrelrodata.Filelen > 0 { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f rodatblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segrelrodata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segrelrodata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segrelrodata.Vaddr), int64(ld.Segrelrodata.Filelen)) } @@ -219,10 +210,10 @@ func asmb(ctxt *ld.Link) { 
ctxt.Logf("%5.2f datblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segdata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segdata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen)) - ld.Cseek(int64(ld.Segdwarf.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segdwarf.Fileoff)) ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen)) /* output symbol table */ @@ -235,9 +226,9 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f sym\n", ld.Cputime()) } - switch ld.Headtype { + switch ctxt.HeadType { default: - if ld.Iself { + if ctxt.IsELF { symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen) symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound))) } @@ -246,34 +237,31 @@ func asmb(ctxt *ld.Link) { symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen) } - ld.Cseek(int64(symo)) - switch ld.Headtype { + ctxt.Out.SeekSet(int64(symo)) + switch ctxt.HeadType { default: - if ld.Iself { + if ctxt.IsELF { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f elfsym\n", ld.Cputime()) } ld.Asmelfsym(ctxt) - ld.Cflush() - ld.Cwrite(ld.Elfstrdat) + ctxt.Out.Flush() + ctxt.Out.Write(ld.Elfstrdat) - if ld.Linkmode == ld.LinkExternal { + if ctxt.LinkMode == ld.LinkExternal { ld.Elfemitreloc(ctxt) } } case objabi.Hplan9: ld.Asmplan9sym(ctxt) - ld.Cflush() + ctxt.Out.Flush() sym := ctxt.Syms.Lookup("pclntab", 0) if sym != nil { ld.Lcsize = int32(len(sym.P)) - for i := 0; int32(i) < ld.Lcsize; i++ { - ld.Cput(sym.P[i]) - } - - ld.Cflush() + ctxt.Out.Write(sym.P) + ctxt.Out.Flush() } } } @@ -281,22 +269,22 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f header\n", ld.Cputime()) } - ld.Cseek(0) - switch ld.Headtype { + ctxt.Out.SeekSet(0) + switch ctxt.HeadType { default: case objabi.Hplan9: /* plan 9 */ magic := uint32(4*18*18 + 7) - if ld.SysArch == sys.ArchMIPS64LE { + if ctxt.Arch == sys.ArchMIPS64LE { magic = uint32(4*26*26 + 7) } - ld.Thearch.Lput(magic) /* magic */ - ld.Thearch.Lput(uint32(ld.Segtext.Filelen)) /* sizes */ - 
ld.Thearch.Lput(uint32(ld.Segdata.Filelen)) - ld.Thearch.Lput(uint32(ld.Segdata.Length - ld.Segdata.Filelen)) - ld.Thearch.Lput(uint32(ld.Symsize)) /* nsyms */ - ld.Thearch.Lput(uint32(ld.Entryvalue(ctxt))) /* va of entry */ - ld.Thearch.Lput(0) - ld.Thearch.Lput(uint32(ld.Lcsize)) + ctxt.Out.Write32(magic) /* magic */ + ctxt.Out.Write32(uint32(ld.Segtext.Filelen)) /* sizes */ + ctxt.Out.Write32(uint32(ld.Segdata.Filelen)) + ctxt.Out.Write32(uint32(ld.Segdata.Length - ld.Segdata.Filelen)) + ctxt.Out.Write32(uint32(ld.Symsize)) /* nsyms */ + ctxt.Out.Write32(uint32(ld.Entryvalue(ctxt))) /* va of entry */ + ctxt.Out.Write32(0) + ctxt.Out.Write32(uint32(ld.Lcsize)) case objabi.Hlinux, objabi.Hfreebsd, @@ -306,7 +294,7 @@ func asmb(ctxt *ld.Link) { ld.Asmbelf(ctxt, int64(symo)) } - ld.Cflush() + ctxt.Out.Flush() if *ld.FlagC { fmt.Printf("textsize=%d\n", ld.Segtext.Filelen) fmt.Printf("datsize=%d\n", ld.Segdata.Filelen) diff --git a/src/cmd/link/internal/mips64/obj.go b/src/cmd/link/internal/mips64/obj.go index 1a24a7eedec..83974e5b565 100644 --- a/src/cmd/link/internal/mips64/obj.go +++ b/src/cmd/link/internal/mips64/obj.go @@ -37,57 +37,43 @@ import ( "fmt" ) -func Init() { +func Init() (*sys.Arch, ld.Arch) { + arch := sys.ArchMIPS64 if objabi.GOARCH == "mips64le" { - ld.SysArch = sys.ArchMIPS64LE - } else { - ld.SysArch = sys.ArchMIPS64 + arch = sys.ArchMIPS64LE } - ld.Thearch.Funcalign = funcAlign - ld.Thearch.Maxalign = maxAlign - ld.Thearch.Minalign = minAlign - ld.Thearch.Dwarfregsp = dwarfRegSP - ld.Thearch.Dwarfreglr = dwarfRegLR + theArch := ld.Arch{ + Funcalign: funcAlign, + Maxalign: maxAlign, + Minalign: minAlign, + Dwarfregsp: dwarfRegSP, + Dwarfreglr: dwarfRegLR, + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Asmb: asmb, + Elfreloc1: elfreloc1, + Elfsetupplt: elfsetupplt, + Gentext: gentext, + Machoreloc1: machoreloc1, - ld.Thearch.Adddynrel = adddynrel - ld.Thearch.Archinit = archinit - 
ld.Thearch.Archreloc = archreloc - ld.Thearch.Archrelocvariant = archrelocvariant - ld.Thearch.Asmb = asmb - ld.Thearch.Elfreloc1 = elfreloc1 - ld.Thearch.Elfsetupplt = elfsetupplt - ld.Thearch.Gentext = gentext - ld.Thearch.Machoreloc1 = machoreloc1 - if ld.SysArch == sys.ArchMIPS64LE { - ld.Thearch.Lput = ld.Lputl - ld.Thearch.Wput = ld.Wputl - ld.Thearch.Vput = ld.Vputl - ld.Thearch.Append16 = ld.Append16l - ld.Thearch.Append32 = ld.Append32l - ld.Thearch.Append64 = ld.Append64l - } else { - ld.Thearch.Lput = ld.Lputb - ld.Thearch.Wput = ld.Wputb - ld.Thearch.Vput = ld.Vputb - ld.Thearch.Append16 = ld.Append16b - ld.Thearch.Append32 = ld.Append32b - ld.Thearch.Append64 = ld.Append64b + Linuxdynld: "/lib64/ld64.so.1", + Freebsddynld: "XXX", + Openbsddynld: "XXX", + Netbsddynld: "XXX", + Dragonflydynld: "XXX", + Solarisdynld: "XXX", } - ld.Thearch.Linuxdynld = "/lib64/ld64.so.1" - - ld.Thearch.Freebsddynld = "XXX" - ld.Thearch.Openbsddynld = "XXX" - ld.Thearch.Netbsddynld = "XXX" - ld.Thearch.Dragonflydynld = "XXX" - ld.Thearch.Solarisdynld = "XXX" + return arch, theArch } func archinit(ctxt *ld.Link) { - switch ld.Headtype { + switch ctxt.HeadType { default: - ld.Exitf("unknown -H option: %v", ld.Headtype) + ld.Exitf("unknown -H option: %v", ctxt.HeadType) case objabi.Hplan9: /* plan 9 */ ld.HEADR = 32 diff --git a/src/cmd/link/internal/ld/objfile.go b/src/cmd/link/internal/objfile/objfile.go similarity index 76% rename from src/cmd/link/internal/ld/objfile.go rename to src/cmd/link/internal/objfile/objfile.go index 13dde21809c..67868be2a19 100644 --- a/src/cmd/link/internal/ld/objfile.go +++ b/src/cmd/link/internal/objfile/objfile.go @@ -2,9 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ld - -// Reading of Go object files. +// Package objfile reads Go object files for the Go linker, cmd/link. +// +// This package is similar to cmd/internal/objfile which also reads +// Go object files. 
+package objfile import ( "bufio" @@ -12,8 +14,8 @@ import ( "cmd/internal/bio" "cmd/internal/dwarf" "cmd/internal/objabi" - "crypto/sha1" - "encoding/base64" + "cmd/internal/sys" + "cmd/link/internal/sym" "io" "log" "strconv" @@ -30,36 +32,39 @@ var emptyPkg = []byte(`"".`) // objReader reads Go object files. type objReader struct { rd *bufio.Reader - ctxt *Link - lib *Library + arch *sys.Arch + syms *sym.Symbols + lib *sym.Library pn string - dupSym *Symbol + dupSym *sym.Symbol localSymVersion int // rdBuf is used by readString and readSymName as scratch for reading strings. rdBuf []byte // List of symbol references for the file being read. - refs []*Symbol + refs []*sym.Symbol data []byte - reloc []Reloc - pcdata []Pcdata - autom []Auto - funcdata []*Symbol + reloc []sym.Reloc + pcdata []sym.Pcdata + autom []sym.Auto + funcdata []*sym.Symbol funcdataoff []int64 - file []*Symbol + file []*sym.Symbol } -func LoadObjFile(ctxt *Link, f *bio.Reader, lib *Library, length int64, pn string) { - +// Load loads an object file f into library lib. +// The symbols loaded are added to syms. 
+func Load(arch *sys.Arch, syms *sym.Symbols, f *bio.Reader, lib *sym.Library, length int64, pn string) { start := f.Offset() r := &objReader{ rd: f.Reader, lib: lib, - ctxt: ctxt, + arch: arch, + syms: syms, pn: pn, - dupSym: &Symbol{Name: ".dup"}, - localSymVersion: ctxt.Syms.IncVersion(), + dupSym: &sym.Symbol{Name: ".dup"}, + localSymVersion: syms.IncVersion(), } r.loadObjFile() if f.Offset() != start+length { @@ -68,8 +73,6 @@ func LoadObjFile(ctxt *Link, f *bio.Reader, lib *Library, length int64, pn strin } func (r *objReader) loadObjFile() { - pkg := objabi.PathToPrefix(r.lib.Pkg) - // Magic header var buf [8]uint8 r.readFull(buf[:]) @@ -89,14 +92,11 @@ func (r *objReader) loadObjFile() { if lib == "" { break } - l := addlib(r.ctxt, pkg, r.pn, lib) - if l != nil { - r.lib.imports = append(r.lib.imports, l) - } + r.lib.ImportStrings = append(r.lib.ImportStrings, lib) } // Symbol references - r.refs = []*Symbol{nil} // zeroth ref is nil + r.refs = []*sym.Symbol{nil} // zeroth ref is nil for { c, err := r.rd.Peek(1) if err != nil { @@ -139,16 +139,16 @@ func (r *objReader) readSlices() { n := r.readInt() r.data = make([]byte, n) n = r.readInt() - r.reloc = make([]Reloc, n) + r.reloc = make([]sym.Reloc, n) n = r.readInt() - r.pcdata = make([]Pcdata, n) + r.pcdata = make([]sym.Pcdata, n) n = r.readInt() - r.autom = make([]Auto, n) + r.autom = make([]sym.Auto, n) n = r.readInt() - r.funcdata = make([]*Symbol, n) + r.funcdata = make([]*sym.Symbol, n) r.funcdataoff = make([]int64, n) n = r.readInt() - r.file = make([]*Symbol, n) + r.file = make([]*sym.Symbol, n) } // Symbols are prefixed so their content doesn't get confused with the magic footer. 
@@ -163,7 +163,7 @@ func (r *objReader) readSym() { if c, err = r.rd.ReadByte(); err != nil { log.Fatalln("error reading input: ", err) } - t := abiSymKindToSymKind[c] + t := sym.AbiSymKindToSymKind[c] s := r.readSymIndex() flags := r.readInt() dupok := flags&1 != 0 @@ -176,9 +176,9 @@ func (r *objReader) readSym() { pkg := objabi.PathToPrefix(r.lib.Pkg) isdup := false - var dup *Symbol - if s.Type != 0 && s.Type != SXREF { - if (t == SDATA || t == SBSS || t == SNOPTRBSS) && len(data) == 0 && nreloc == 0 { + var dup *sym.Symbol + if s.Type != 0 && s.Type != sym.SXREF { + if (t == sym.SDATA || t == sym.SBSS || t == sym.SNOPTRBSS) && len(data) == 0 && nreloc == 0 { if s.Size < int64(size) { s.Size = int64(size) } @@ -188,10 +188,10 @@ func (r *objReader) readSym() { return } - if (s.Type == SDATA || s.Type == SBSS || s.Type == SNOPTRBSS) && len(s.P) == 0 && len(s.R) == 0 { + if (s.Type == sym.SDATA || s.Type == sym.SBSS || s.Type == sym.SNOPTRBSS) && len(s.P) == 0 && len(s.R) == 0 { goto overwrite } - if s.Type != SBSS && s.Type != SNOPTRBSS && !dupok && !s.Attr.DuplicateOK() { + if s.Type != sym.SBSS && s.Type != sym.SNOPTRBSS && !dupok && !s.Attr.DuplicateOK() { log.Fatalf("duplicate symbol %s (types %d and %d) in %s and %s", s.Name, s.Type, t, s.File, r.pn) } if len(s.P) > 0 { @@ -204,23 +204,23 @@ func (r *objReader) readSym() { overwrite: s.File = pkg if dupok { - s.Attr |= AttrDuplicateOK + s.Attr |= sym.AttrDuplicateOK } - if t == SXREF { + if t == sym.SXREF { log.Fatalf("bad sxref") } if t == 0 { log.Fatalf("missing type for %s in %s", s.Name, r.pn) } - if t == SBSS && (s.Type == SRODATA || s.Type == SNOPTRBSS) { + if t == sym.SBSS && (s.Type == sym.SRODATA || s.Type == sym.SNOPTRBSS) { t = s.Type } s.Type = t if s.Size < int64(size) { s.Size = int64(size) } - s.Attr.Set(AttrLocal, local) - s.Attr.Set(AttrMakeTypelink, makeTypelink) + s.Attr.Set(sym.AttrLocal, local) + s.Attr.Set(sym.AttrMakeTypelink, makeTypelink) if typ != nil { s.Gotype = typ } @@ -235,7 
+235,7 @@ overwrite: } for i := 0; i < nreloc; i++ { - s.R[i] = Reloc{ + s.R[i] = sym.Reloc{ Off: r.readInt32(), Siz: r.readUint8(), Type: objabi.RelocType(r.readInt32()), @@ -245,21 +245,21 @@ overwrite: } } - if s.Type == STEXT { - s.FuncInfo = new(FuncInfo) + if s.Type == sym.STEXT { + s.FuncInfo = new(sym.FuncInfo) pc := s.FuncInfo pc.Args = r.readInt32() pc.Locals = r.readInt32() if r.readUint8() != 0 { - s.Attr |= AttrNoSplit + s.Attr |= sym.AttrNoSplit } flags := r.readInt() if flags&(1<<2) != 0 { - s.Attr |= AttrReflectMethod + s.Attr |= sym.AttrReflectMethod } if flags&(1<<3) != 0 { - s.Attr |= AttrShared + s.Attr |= sym.AttrShared } n := r.readInt() pc.Autom = r.autom[:n:n] @@ -268,7 +268,7 @@ overwrite: } for i := 0; i < n; i++ { - pc.Autom[i] = Auto{ + pc.Autom[i] = sym.Auto{ Asym: r.readSymIndex(), Aoffset: r.readInt32(), Name: r.readInt16(), @@ -310,7 +310,7 @@ overwrite: pc.File[i] = r.readSymIndex() } n = r.readInt() - pc.InlTree = make([]InlinedCall, n) + pc.InlTree = make([]sym.InlinedCall, n) for i := 0; i < n; i++ { pc.InlTree[i].Parent = r.readInt32() pc.InlTree[i].File = r.readSymIndex() @@ -318,28 +318,29 @@ overwrite: pc.InlTree[i].Func = r.readSymIndex() } + s.Lib = r.lib if !dupok { if s.Attr.OnList() { log.Fatalf("symbol %s listed multiple times", s.Name) } - s.Attr |= AttrOnList - r.lib.textp = append(r.lib.textp, s) + s.Attr |= sym.AttrOnList + r.lib.Textp = append(r.lib.Textp, s) } else { // there may ba a dup in another package // put into a temp list and add to text later if !isdup { - r.lib.dupTextSyms = append(r.lib.dupTextSyms, s) + r.lib.DupTextSyms = append(r.lib.DupTextSyms, s) } else { - r.lib.dupTextSyms = append(r.lib.dupTextSyms, dup) + r.lib.DupTextSyms = append(r.lib.DupTextSyms, dup) } } } - if s.Type == SDWARFINFO { + if s.Type == sym.SDWARFINFO { r.patchDWARFName(s) } } -func (r *objReader) patchDWARFName(s *Symbol) { +func (r *objReader) patchDWARFName(s *sym.Symbol) { // This is kind of ugly. 
Really the package name should not // even be included here. if s.Size < 1 || s.P[0] != dwarf.DW_ABRV_FUNCTION { @@ -386,7 +387,7 @@ func (r *objReader) readRef() { if v == 1 { v = r.localSymVersion } - s := r.ctxt.Syms.Lookup(name, v) + s := r.syms.Lookup(name, v) r.refs = append(r.refs, s) if s == nil || v != 0 { @@ -397,23 +398,23 @@ func (r *objReader) readRef() { if err != nil { log.Panicf("failed to parse $-symbol %s: %v", s.Name, err) } - s.Type = SRODATA - s.Attr |= AttrLocal + s.Type = sym.SRODATA + s.Attr |= sym.AttrLocal switch s.Name[:5] { case "$f32.": if uint64(uint32(x)) != x { log.Panicf("$-symbol %s too large: %d", s.Name, x) } - Adduint32(r.ctxt, s, uint32(x)) + s.AddUint32(r.arch, uint32(x)) case "$f64.", "$i64.": - Adduint64(r.ctxt, s, x) + s.AddUint64(r.arch, x) default: log.Panicf("unrecognized $-symbol: %s", s.Name) } - s.Attr.Set(AttrReachable, false) + s.Attr.Set(sym.AttrReachable, false) } if strings.HasPrefix(s.Name, "runtime.gcbits.") { - s.Attr |= AttrLocal + s.Attr |= sym.AttrLocal } } @@ -516,30 +517,6 @@ func (r *objReader) readSymName() string { r.readFull(r.rdBuf[:n]) } r.rdBuf = adjName[:0] // in case 2*n wasn't enough - - if Buildmode == BuildmodeShared || *FlagLinkshared { - // These types are included in the symbol - // table when dynamically linking. To keep - // binary size down, we replace the names - // with SHA-1 prefixes. - // - // Keep the type.. prefix, which parts of the - // linker (like the DWARF generator) know means - // the symbol is not decodable. - // - // Leave type.runtime. symbols alone, because - // other parts of the linker manipulates them, - // and also symbols whose names would not be - // shortened by this process. - if len(s) > 14 && strings.HasPrefix(s, "type.") && !strings.HasPrefix(s, "type.runtime.") { - hash := sha1.Sum([]byte(s)) - prefix := "type." - if s[5] == '.' { - prefix = "type.." 
- } - s = prefix + base64.StdEncoding.EncodeToString(hash[:6]) - } - } return s } adjName = append(adjName, origName[:i]...) @@ -550,7 +527,7 @@ func (r *objReader) readSymName() string { } // Reads the index of a symbol reference and resolves it to a symbol -func (r *objReader) readSymIndex() *Symbol { +func (r *objReader) readSymIndex() *sym.Symbol { i := r.readInt() return r.refs[i] } diff --git a/src/cmd/link/internal/ppc64/asm.go b/src/cmd/link/internal/ppc64/asm.go index ee4e218125a..4e277f9aa98 100644 --- a/src/cmd/link/internal/ppc64/asm.go +++ b/src/cmd/link/internal/ppc64/asm.go @@ -32,7 +32,10 @@ package ppc64 import ( "cmd/internal/objabi" + "cmd/internal/sys" "cmd/link/internal/ld" + "cmd/link/internal/sym" + "debug/elf" "encoding/binary" "fmt" "log" @@ -87,11 +90,11 @@ func genplt(ctxt *ld.Link) { // // This assumes "case 1" from the ABI, where the caller needs // us to save and restore the TOC pointer. - var stubs []*ld.Symbol + var stubs []*sym.Symbol for _, s := range ctxt.Textp { for i := range s.R { r := &s.R[i] - if r.Type != 256+ld.R_PPC64_REL24 || r.Sym.Type != ld.SDYNIMPORT { + if r.Type != 256+objabi.RelocType(elf.R_PPC64_REL24) || r.Sym.Type != sym.SDYNIMPORT { continue } @@ -104,7 +107,7 @@ func genplt(ctxt *ld.Link) { stub := ctxt.Syms.Lookup(n, 0) if s.Attr.Reachable() { - stub.Attr |= ld.AttrReachable + stub.Attr |= sym.AttrReachable } if stub.Size == 0 { // Need outer to resolve .TOC. 
@@ -131,22 +134,23 @@ func genplt(ctxt *ld.Link) { func genaddmoduledata(ctxt *ld.Link) { addmoduledata := ctxt.Syms.ROLookup("runtime.addmoduledata", 0) - if addmoduledata.Type == ld.STEXT { + if addmoduledata.Type == sym.STEXT && ctxt.BuildMode != ld.BuildModePlugin { return } - addmoduledata.Attr |= ld.AttrReachable + addmoduledata.Attr |= sym.AttrReachable initfunc := ctxt.Syms.Lookup("go.link.addmoduledata", 0) - initfunc.Type = ld.STEXT - initfunc.Attr |= ld.AttrLocal - initfunc.Attr |= ld.AttrReachable + initfunc.Type = sym.STEXT + initfunc.Attr |= sym.AttrLocal + initfunc.Attr |= sym.AttrReachable o := func(op uint32) { - ld.Adduint32(ctxt, initfunc, op) + initfunc.AddUint32(ctxt.Arch, op) } // addis r2, r12, .TOC.-func@ha - rel := ld.Addrel(initfunc) + rel := initfunc.AddRel() rel.Off = int32(initfunc.Size) rel.Siz = 8 rel.Sym = ctxt.Syms.Lookup(".TOC.", 0) + rel.Sym.Attr |= sym.AttrReachable rel.Type = objabi.R_ADDRPOWER_PCREL o(0x3c4c0000) // addi r2, r2, .TOC.-func@l @@ -156,16 +160,24 @@ func genaddmoduledata(ctxt *ld.Link) { // stdu r31, -32(r1) o(0xf801ffe1) // addis r3, r2, local.moduledata@got@ha - rel = ld.Addrel(initfunc) + rel = initfunc.AddRel() rel.Off = int32(initfunc.Size) rel.Siz = 8 - rel.Sym = ctxt.Syms.Lookup("local.moduledata", 0) + if s := ctxt.Syms.ROLookup("local.moduledata", 0); s != nil { + rel.Sym = s + } else if s := ctxt.Syms.ROLookup("local.pluginmoduledata", 0); s != nil { + rel.Sym = s + } else { + rel.Sym = ctxt.Syms.Lookup("runtime.firstmoduledata", 0) + } + rel.Sym.Attr |= sym.AttrReachable + rel.Sym.Attr |= sym.AttrLocal rel.Type = objabi.R_ADDRPOWER_GOT o(0x3c620000) // ld r3, local.moduledata@got@l(r3) o(0xe8630000) // bl runtime.addmoduledata - rel = ld.Addrel(initfunc) + rel = initfunc.AddRel() rel.Off = int32(initfunc.Size) rel.Siz = 4 rel.Sym = addmoduledata @@ -182,12 +194,15 @@ func genaddmoduledata(ctxt *ld.Link) { // blr o(0x4e800020) + if ctxt.BuildMode == ld.BuildModePlugin { + ctxt.Textp = append(ctxt.Textp, 
addmoduledata) + } initarray_entry := ctxt.Syms.Lookup("go.link.addmoduledatainit", 0) ctxt.Textp = append(ctxt.Textp, initfunc) - initarray_entry.Attr |= ld.AttrReachable - initarray_entry.Attr |= ld.AttrLocal - initarray_entry.Type = ld.SINITARR - ld.Addaddr(ctxt, initarray_entry, initfunc) + initarray_entry.Attr |= sym.AttrReachable + initarray_entry.Attr |= sym.AttrLocal + initarray_entry.Type = sym.SINITARR + initarray_entry.AddAddr(ctxt.Arch, initfunc) } func gentext(ctxt *ld.Link) { @@ -195,14 +210,14 @@ func gentext(ctxt *ld.Link) { genaddmoduledata(ctxt) } - if ld.Linkmode == ld.LinkInternal { + if ctxt.LinkMode == ld.LinkInternal { genplt(ctxt) } } // Construct a call stub in stub that calls symbol targ via its PLT // entry. -func gencallstub(ctxt *ld.Link, abicase int, stub *ld.Symbol, targ *ld.Symbol) { +func gencallstub(ctxt *ld.Link, abicase int, stub *sym.Symbol, targ *sym.Symbol) { if abicase != 1 { // If we see R_PPC64_TOCSAVE or R_PPC64_REL24_NOTOC // relocations, we'll need to implement cases 2 and 3. @@ -211,13 +226,13 @@ func gencallstub(ctxt *ld.Link, abicase int, stub *ld.Symbol, targ *ld.Symbol) { plt := ctxt.Syms.Lookup(".plt", 0) - stub.Type = ld.STEXT + stub.Type = sym.STEXT // Save TOC pointer in TOC save slot - ld.Adduint32(ctxt, stub, 0xf8410018) // std r2,24(r1) + stub.AddUint32(ctxt.Arch, 0xf8410018) // std r2,24(r1) // Load the function pointer from the PLT. 
- r := ld.Addrel(stub) + r := stub.AddRel() r.Off = int32(stub.Size) r.Sym = plt @@ -227,9 +242,9 @@ func gencallstub(ctxt *ld.Link, abicase int, stub *ld.Symbol, targ *ld.Symbol) { r.Off += int32(r.Siz) } r.Type = objabi.R_POWER_TOC - r.Variant = ld.RV_POWER_HA - ld.Adduint32(ctxt, stub, 0x3d820000) // addis r12,r2,targ@plt@toc@ha - r = ld.Addrel(stub) + r.Variant = sym.RV_POWER_HA + stub.AddUint32(ctxt.Arch, 0x3d820000) // addis r12,r2,targ@plt@toc@ha + r = stub.AddRel() r.Off = int32(stub.Size) r.Sym = plt r.Add = int64(targ.Plt) @@ -238,26 +253,26 @@ func gencallstub(ctxt *ld.Link, abicase int, stub *ld.Symbol, targ *ld.Symbol) { r.Off += int32(r.Siz) } r.Type = objabi.R_POWER_TOC - r.Variant = ld.RV_POWER_LO - ld.Adduint32(ctxt, stub, 0xe98c0000) // ld r12,targ@plt@toc@l(r12) + r.Variant = sym.RV_POWER_LO + stub.AddUint32(ctxt.Arch, 0xe98c0000) // ld r12,targ@plt@toc@l(r12) // Jump to the loaded pointer - ld.Adduint32(ctxt, stub, 0x7d8903a6) // mtctr r12 - ld.Adduint32(ctxt, stub, 0x4e800420) // bctr + stub.AddUint32(ctxt.Arch, 0x7d8903a6) // mtctr r12 + stub.AddUint32(ctxt.Arch, 0x4e800420) // bctr } -func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { +func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { targ := r.Sym switch r.Type { default: if r.Type >= 256 { - ld.Errorf(s, "unexpected relocation type %d", r.Type) + ld.Errorf(s, "unexpected relocation type %d (%s)", r.Type, sym.RelocName(ctxt.Arch, r.Type)) return false } // Handle relocations found in ELF object files. - case 256 + ld.R_PPC64_REL24: + case 256 + objabi.RelocType(elf.R_PPC64_REL24): r.Type = objabi.R_CALLPOWER // This is a local call, so the caller isn't setting @@ -267,89 +282,89 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { // to use r12 to compute r2.) 
r.Add += int64(r.Sym.Localentry) * 4 - if targ.Type == ld.SDYNIMPORT { + if targ.Type == sym.SDYNIMPORT { // Should have been handled in elfsetupplt ld.Errorf(s, "unexpected R_PPC64_REL24 for dyn import") } return true - case 256 + ld.R_PPC_REL32: + case 256 + objabi.RelocType(elf.R_PPC_REL32): r.Type = objabi.R_PCREL r.Add += 4 - if targ.Type == ld.SDYNIMPORT { + if targ.Type == sym.SDYNIMPORT { ld.Errorf(s, "unexpected R_PPC_REL32 for dyn import") } return true - case 256 + ld.R_PPC64_ADDR64: + case 256 + objabi.RelocType(elf.R_PPC64_ADDR64): r.Type = objabi.R_ADDR - if targ.Type == ld.SDYNIMPORT { + if targ.Type == sym.SDYNIMPORT { // These happen in .toc sections ld.Adddynsym(ctxt, targ) rela := ctxt.Syms.Lookup(".rela", 0) - ld.Addaddrplus(ctxt, rela, s, int64(r.Off)) - ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_PPC64_ADDR64)) - ld.Adduint64(ctxt, rela, uint64(r.Add)) + rela.AddAddrPlus(ctxt.Arch, s, int64(r.Off)) + rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(targ.Dynid), uint32(elf.R_PPC64_ADDR64))) + rela.AddUint64(ctxt.Arch, uint64(r.Add)) r.Type = 256 // ignore during relocsym } return true - case 256 + ld.R_PPC64_TOC16: + case 256 + objabi.RelocType(elf.R_PPC64_TOC16): r.Type = objabi.R_POWER_TOC - r.Variant = ld.RV_POWER_LO | ld.RV_CHECK_OVERFLOW + r.Variant = sym.RV_POWER_LO | sym.RV_CHECK_OVERFLOW return true - case 256 + ld.R_PPC64_TOC16_LO: + case 256 + objabi.RelocType(elf.R_PPC64_TOC16_LO): r.Type = objabi.R_POWER_TOC - r.Variant = ld.RV_POWER_LO + r.Variant = sym.RV_POWER_LO return true - case 256 + ld.R_PPC64_TOC16_HA: + case 256 + objabi.RelocType(elf.R_PPC64_TOC16_HA): r.Type = objabi.R_POWER_TOC - r.Variant = ld.RV_POWER_HA | ld.RV_CHECK_OVERFLOW + r.Variant = sym.RV_POWER_HA | sym.RV_CHECK_OVERFLOW return true - case 256 + ld.R_PPC64_TOC16_HI: + case 256 + objabi.RelocType(elf.R_PPC64_TOC16_HI): r.Type = objabi.R_POWER_TOC - r.Variant = ld.RV_POWER_HI | ld.RV_CHECK_OVERFLOW + r.Variant = sym.RV_POWER_HI | 
sym.RV_CHECK_OVERFLOW return true - case 256 + ld.R_PPC64_TOC16_DS: + case 256 + objabi.RelocType(elf.R_PPC64_TOC16_DS): r.Type = objabi.R_POWER_TOC - r.Variant = ld.RV_POWER_DS | ld.RV_CHECK_OVERFLOW + r.Variant = sym.RV_POWER_DS | sym.RV_CHECK_OVERFLOW return true - case 256 + ld.R_PPC64_TOC16_LO_DS: + case 256 + objabi.RelocType(elf.R_PPC64_TOC16_LO_DS): r.Type = objabi.R_POWER_TOC - r.Variant = ld.RV_POWER_DS + r.Variant = sym.RV_POWER_DS return true - case 256 + ld.R_PPC64_REL16_LO: + case 256 + objabi.RelocType(elf.R_PPC64_REL16_LO): r.Type = objabi.R_PCREL - r.Variant = ld.RV_POWER_LO + r.Variant = sym.RV_POWER_LO r.Add += 2 // Compensate for relocation size of 2 return true - case 256 + ld.R_PPC64_REL16_HI: + case 256 + objabi.RelocType(elf.R_PPC64_REL16_HI): r.Type = objabi.R_PCREL - r.Variant = ld.RV_POWER_HI | ld.RV_CHECK_OVERFLOW + r.Variant = sym.RV_POWER_HI | sym.RV_CHECK_OVERFLOW r.Add += 2 return true - case 256 + ld.R_PPC64_REL16_HA: + case 256 + objabi.RelocType(elf.R_PPC64_REL16_HA): r.Type = objabi.R_PCREL - r.Variant = ld.RV_POWER_HA | ld.RV_CHECK_OVERFLOW + r.Variant = sym.RV_POWER_HA | sym.RV_CHECK_OVERFLOW r.Add += 2 return true } // Handle references to ELF symbols from our own object files. 
- if targ.Type != ld.SDYNIMPORT { + if targ.Type != sym.SDYNIMPORT { return true } @@ -358,83 +373,72 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { return false } -func elfreloc1(ctxt *ld.Link, r *ld.Reloc, sectoff int64) int { - ld.Thearch.Vput(uint64(sectoff)) +func elfreloc1(ctxt *ld.Link, r *sym.Reloc, sectoff int64) bool { + ctxt.Out.Write64(uint64(sectoff)) elfsym := r.Xsym.ElfsymForReloc() switch r.Type { default: - return -1 - + return false case objabi.R_ADDR: switch r.Siz { case 4: - ld.Thearch.Vput(ld.R_PPC64_ADDR32 | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_PPC64_ADDR32) | uint64(elfsym)<<32) case 8: - ld.Thearch.Vput(ld.R_PPC64_ADDR64 | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_PPC64_ADDR64) | uint64(elfsym)<<32) default: - return -1 + return false } - case objabi.R_POWER_TLS: - ld.Thearch.Vput(ld.R_PPC64_TLS | uint64(elfsym)<<32) - + ctxt.Out.Write64(uint64(elf.R_PPC64_TLS) | uint64(elfsym)<<32) case objabi.R_POWER_TLS_LE: - ld.Thearch.Vput(ld.R_PPC64_TPREL16 | uint64(elfsym)<<32) - + ctxt.Out.Write64(uint64(elf.R_PPC64_TPREL16) | uint64(elfsym)<<32) case objabi.R_POWER_TLS_IE: - ld.Thearch.Vput(ld.R_PPC64_GOT_TPREL16_HA | uint64(elfsym)<<32) - ld.Thearch.Vput(uint64(r.Xadd)) - ld.Thearch.Vput(uint64(sectoff + 4)) - ld.Thearch.Vput(ld.R_PPC64_GOT_TPREL16_LO_DS | uint64(elfsym)<<32) - + ctxt.Out.Write64(uint64(elf.R_PPC64_GOT_TPREL16_HA) | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(r.Xadd)) + ctxt.Out.Write64(uint64(sectoff + 4)) + ctxt.Out.Write64(uint64(elf.R_PPC64_GOT_TPREL16_LO_DS) | uint64(elfsym)<<32) case objabi.R_ADDRPOWER: - ld.Thearch.Vput(ld.R_PPC64_ADDR16_HA | uint64(elfsym)<<32) - ld.Thearch.Vput(uint64(r.Xadd)) - ld.Thearch.Vput(uint64(sectoff + 4)) - ld.Thearch.Vput(ld.R_PPC64_ADDR16_LO | uint64(elfsym)<<32) - + ctxt.Out.Write64(uint64(elf.R_PPC64_ADDR16_HA) | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(r.Xadd)) + ctxt.Out.Write64(uint64(sectoff + 4)) + 
ctxt.Out.Write64(uint64(elf.R_PPC64_ADDR16_LO) | uint64(elfsym)<<32) case objabi.R_ADDRPOWER_DS: - ld.Thearch.Vput(ld.R_PPC64_ADDR16_HA | uint64(elfsym)<<32) - ld.Thearch.Vput(uint64(r.Xadd)) - ld.Thearch.Vput(uint64(sectoff + 4)) - ld.Thearch.Vput(ld.R_PPC64_ADDR16_LO_DS | uint64(elfsym)<<32) - + ctxt.Out.Write64(uint64(elf.R_PPC64_ADDR16_HA) | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(r.Xadd)) + ctxt.Out.Write64(uint64(sectoff + 4)) + ctxt.Out.Write64(uint64(elf.R_PPC64_ADDR16_LO_DS) | uint64(elfsym)<<32) case objabi.R_ADDRPOWER_GOT: - ld.Thearch.Vput(ld.R_PPC64_GOT16_HA | uint64(elfsym)<<32) - ld.Thearch.Vput(uint64(r.Xadd)) - ld.Thearch.Vput(uint64(sectoff + 4)) - ld.Thearch.Vput(ld.R_PPC64_GOT16_LO_DS | uint64(elfsym)<<32) - + ctxt.Out.Write64(uint64(elf.R_PPC64_GOT16_HA) | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(r.Xadd)) + ctxt.Out.Write64(uint64(sectoff + 4)) + ctxt.Out.Write64(uint64(elf.R_PPC64_GOT16_LO_DS) | uint64(elfsym)<<32) case objabi.R_ADDRPOWER_PCREL: - ld.Thearch.Vput(ld.R_PPC64_REL16_HA | uint64(elfsym)<<32) - ld.Thearch.Vput(uint64(r.Xadd)) - ld.Thearch.Vput(uint64(sectoff + 4)) - ld.Thearch.Vput(ld.R_PPC64_REL16_LO | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_PPC64_REL16_HA) | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(r.Xadd)) + ctxt.Out.Write64(uint64(sectoff + 4)) + ctxt.Out.Write64(uint64(elf.R_PPC64_REL16_LO) | uint64(elfsym)<<32) r.Xadd += 4 - case objabi.R_ADDRPOWER_TOCREL: - ld.Thearch.Vput(ld.R_PPC64_TOC16_HA | uint64(elfsym)<<32) - ld.Thearch.Vput(uint64(r.Xadd)) - ld.Thearch.Vput(uint64(sectoff + 4)) - ld.Thearch.Vput(ld.R_PPC64_TOC16_LO | uint64(elfsym)<<32) - + ctxt.Out.Write64(uint64(elf.R_PPC64_TOC16_HA) | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(r.Xadd)) + ctxt.Out.Write64(uint64(sectoff + 4)) + ctxt.Out.Write64(uint64(elf.R_PPC64_TOC16_LO) | uint64(elfsym)<<32) case objabi.R_ADDRPOWER_TOCREL_DS: - ld.Thearch.Vput(ld.R_PPC64_TOC16_HA | uint64(elfsym)<<32) - ld.Thearch.Vput(uint64(r.Xadd)) - 
ld.Thearch.Vput(uint64(sectoff + 4)) - ld.Thearch.Vput(ld.R_PPC64_TOC16_LO_DS | uint64(elfsym)<<32) - + ctxt.Out.Write64(uint64(elf.R_PPC64_TOC16_HA) | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(r.Xadd)) + ctxt.Out.Write64(uint64(sectoff + 4)) + ctxt.Out.Write64(uint64(elf.R_PPC64_TOC16_LO_DS) | uint64(elfsym)<<32) case objabi.R_CALLPOWER: if r.Siz != 4 { - return -1 + return false } - ld.Thearch.Vput(ld.R_PPC64_REL24 | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_PPC64_REL24) | uint64(elfsym)<<32) } - ld.Thearch.Vput(uint64(r.Xadd)) + ctxt.Out.Write64(uint64(r.Xadd)) - return 0 + return true } func elfsetupplt(ctxt *ld.Link) { @@ -448,13 +452,13 @@ func elfsetupplt(ctxt *ld.Link) { } } -func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { - return -1 +func machoreloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, sectoff int64) bool { + return false } // Return the value of .TOC. for symbol s -func symtoc(ctxt *ld.Link, s *ld.Symbol) int64 { - var toc *ld.Symbol +func symtoc(ctxt *ld.Link, s *sym.Symbol) int64 { + var toc *sym.Symbol if s.Outer != nil { toc = ctxt.Syms.ROLookup(".TOC.", int(s.Outer.Version)) @@ -470,7 +474,7 @@ func symtoc(ctxt *ld.Link, s *ld.Symbol) int64 { return toc.Value } -func archrelocaddr(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { +func archrelocaddr(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { var o1, o2 uint32 if ctxt.Arch.ByteOrder == binary.BigEndian { o1 = uint32(*val >> 32) @@ -499,16 +503,14 @@ func archrelocaddr(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { case objabi.R_ADDRPOWER: o1 |= (uint32(t) >> 16) & 0xffff o2 |= uint32(t) & 0xffff - case objabi.R_ADDRPOWER_DS: o1 |= (uint32(t) >> 16) & 0xffff if t&3 != 0 { ld.Errorf(s, "bad DS reloc for %s: %d", s.Name, ld.Symaddr(r.Sym)) } o2 |= uint32(t) & 0xfffc - default: - return -1 + return false } if ctxt.Arch.ByteOrder == binary.BigEndian { @@ -516,17 +518,17 @@ func archrelocaddr(ctxt *ld.Link, r 
*ld.Reloc, s *ld.Symbol, val *int64) int { } else { *val = int64(o2)<<32 | int64(o1) } - return 0 + return true } // resolve direct jump relocation r in s, and add trampoline if necessary -func trampoline(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol) { +func trampoline(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol) { // Trampolines are created if the branch offset is too large and the linker cannot insert a call stub to handle it. // For internal linking, trampolines are always created for long calls. // For external linking, the linker can insert a call stub to handle a long call, but depends on having the TOC address in // r2. For those build modes with external linking where the TOC address is not maintained in r2, trampolines must be created. - if ld.Linkmode == ld.LinkExternal && (ctxt.DynlinkingGo() || ld.Buildmode == ld.BuildmodeCArchive || ld.Buildmode == ld.BuildmodeCShared || ld.Buildmode == ld.BuildmodePIE) { + if ctxt.LinkMode == ld.LinkExternal && (ctxt.DynlinkingGo() || ctxt.BuildMode == ld.BuildModeCArchive || ctxt.BuildMode == ld.BuildModeCShared || ctxt.BuildMode == ld.BuildModePIE) { // No trampolines needed since r2 contains the TOC return } @@ -537,8 +539,8 @@ func trampoline(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol) { // If branch offset is too far then create a trampoline. - if (ld.Linkmode == ld.LinkExternal && s.Sect != r.Sym.Sect) || (ld.Linkmode == ld.LinkInternal && int64(int32(t<<6)>>6) != t) || (*ld.FlagDebugTramp > 1 && s.File != r.Sym.File) { - var tramp *ld.Symbol + if (ctxt.LinkMode == ld.LinkExternal && s.Sect != r.Sym.Sect) || (ctxt.LinkMode == ld.LinkInternal && int64(int32(t<<6)>>6) != t) || (*ld.FlagDebugTramp > 1 && s.File != r.Sym.File) { + var tramp *sym.Symbol for i := 0; ; i++ { // Using r.Add as part of the name is significant in functions like duffzero where the call @@ -563,29 +565,29 @@ func trampoline(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol) { // With internal linking, the trampoline can be used if it is not too far. 
// With external linking, the trampoline must be in this section for it to be reused. - if (ld.Linkmode == ld.LinkInternal && int64(int32(t<<6)>>6) == t) || (ld.Linkmode == ld.LinkExternal && s.Sect == tramp.Sect) { + if (ctxt.LinkMode == ld.LinkInternal && int64(int32(t<<6)>>6) == t) || (ctxt.LinkMode == ld.LinkExternal && s.Sect == tramp.Sect) { break } } if tramp.Type == 0 { - if ctxt.DynlinkingGo() || ld.Buildmode == ld.BuildmodeCArchive || ld.Buildmode == ld.BuildmodeCShared || ld.Buildmode == ld.BuildmodePIE { + if ctxt.DynlinkingGo() || ctxt.BuildMode == ld.BuildModeCArchive || ctxt.BuildMode == ld.BuildModeCShared || ctxt.BuildMode == ld.BuildModePIE { // Should have returned for above cases ld.Errorf(s, "unexpected trampoline for shared or dynamic linking\n") } else { ctxt.AddTramp(tramp) - gentramp(tramp, r.Sym, int64(r.Add)) + gentramp(ctxt.Arch, ctxt.LinkMode, tramp, r.Sym, int64(r.Add)) } } r.Sym = tramp r.Add = 0 // This was folded into the trampoline target address - r.Done = 0 + r.Done = false } default: - ld.Errorf(s, "trampoline called with non-jump reloc: %v", r.Type) + ld.Errorf(s, "trampoline called with non-jump reloc: %d (%s)", r.Type, sym.RelocName(ctxt.Arch, r.Type)) } } -func gentramp(tramp, target *ld.Symbol, offset int64) { +func gentramp(arch *sys.Arch, linkmode ld.LinkMode, tramp, target *sym.Symbol, offset int64) { // Used for default build mode for an executable // Address of the call target is generated using // relocation and doesn't depend on r2 (TOC). 
@@ -596,8 +598,8 @@ func gentramp(tramp, target *ld.Symbol, offset int64) { o2 := uint32(0x3bff0000) // addi r31,targetaddr lo // With external linking, the target address must be // relocated using LO and HA - if ld.Linkmode == ld.LinkExternal { - tr := ld.Addrel(tramp) + if linkmode == ld.LinkExternal { + tr := tramp.AddRel() tr.Off = 0 tr.Type = objabi.R_ADDRPOWER tr.Siz = 8 // generates 2 relocations: HA + LO @@ -615,32 +617,30 @@ func gentramp(tramp, target *ld.Symbol, offset int64) { } o3 := uint32(0x7fe903a6) // mtctr r31 o4 := uint32(0x4e800420) // bctr - ld.SysArch.ByteOrder.PutUint32(tramp.P, o1) - ld.SysArch.ByteOrder.PutUint32(tramp.P[4:], o2) - ld.SysArch.ByteOrder.PutUint32(tramp.P[8:], o3) - ld.SysArch.ByteOrder.PutUint32(tramp.P[12:], o4) + arch.ByteOrder.PutUint32(tramp.P, o1) + arch.ByteOrder.PutUint32(tramp.P[4:], o2) + arch.ByteOrder.PutUint32(tramp.P[8:], o3) + arch.ByteOrder.PutUint32(tramp.P[12:], o4) } -func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { - if ld.Linkmode == ld.LinkExternal { +func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { + if ctxt.LinkMode == ld.LinkExternal { switch r.Type { default: - return -1 - + return false case objabi.R_POWER_TLS, objabi.R_POWER_TLS_LE, objabi.R_POWER_TLS_IE: - r.Done = 0 + r.Done = false // check Outer is nil, Type is TLSBSS? r.Xadd = r.Add r.Xsym = r.Sym - return 0 - + return true case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS, objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS, objabi.R_ADDRPOWER_GOT, objabi.R_ADDRPOWER_PCREL: - r.Done = 0 + r.Done = false // set up addend for eventual relocation via outer symbol. 
rs := r.Sym @@ -650,33 +650,29 @@ func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { rs = rs.Outer } - if rs.Type != ld.SHOSTOBJ && rs.Type != ld.SDYNIMPORT && rs.Sect == nil { + if rs.Type != sym.SHOSTOBJ && rs.Type != sym.SDYNIMPORT && rs.Sect == nil { ld.Errorf(s, "missing section for %s", rs.Name) } r.Xsym = rs - return 0 - + return true case objabi.R_CALLPOWER: - r.Done = 0 + r.Done = false r.Xsym = r.Sym r.Xadd = r.Add - return 0 + return true } } switch r.Type { case objabi.R_CONST: *val = r.Add - return 0 - + return true case objabi.R_GOTOFF: *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)) - return 0 - + return true case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS: return archrelocaddr(ctxt, r, s, val) - case objabi.R_CALLPOWER: // Bits 6 through 29 = (S + A - P) >> 2 @@ -691,13 +687,11 @@ func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { ld.Errorf(s, "direct call too far: %s %x", r.Sym.Name, t) } *val |= int64(uint32(t) &^ 0xfc000003) - return 0 - + return true case objabi.R_POWER_TOC: // S + A - .TOC. 
*val = ld.Symaddr(r.Sym) + r.Add - symtoc(ctxt, s) - return 0 - + return true case objabi.R_POWER_TLS_LE: // The thread pointer points 0x7000 bytes after the start of the the // thread local storage area as documented in section "3.7.2 TLS @@ -708,23 +702,23 @@ func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { ld.Errorf(s, "TLS offset out of range %d", v) } *val = (*val &^ 0xffff) | (v & 0xffff) - return 0 + return true } - return -1 + return false } -func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 { - switch r.Variant & ld.RV_TYPE_MASK { +func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 { + switch r.Variant & sym.RV_TYPE_MASK { default: ld.Errorf(s, "unexpected relocation variant %d", r.Variant) fallthrough - case ld.RV_NONE: + case sym.RV_NONE: return t - case ld.RV_POWER_LO: - if r.Variant&ld.RV_CHECK_OVERFLOW != 0 { + case sym.RV_POWER_LO: + if r.Variant&sym.RV_CHECK_OVERFLOW != 0 { // Whether to check for signed or unsigned // overflow depends on the instruction var o1 uint32 @@ -750,15 +744,15 @@ func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 { return int64(int16(t)) - case ld.RV_POWER_HA: + case sym.RV_POWER_HA: t += 0x8000 fallthrough // Fallthrough - case ld.RV_POWER_HI: + case sym.RV_POWER_HI: t >>= 16 - if r.Variant&ld.RV_CHECK_OVERFLOW != 0 { + if r.Variant&sym.RV_CHECK_OVERFLOW != 0 { // Whether to check for signed or unsigned // overflow depends on the instruction var o1 uint32 @@ -784,7 +778,7 @@ func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 { return int64(int16(t)) - case ld.RV_POWER_DS: + case sym.RV_POWER_DS: var o1 uint32 if ctxt.Arch.ByteOrder == binary.BigEndian { o1 = uint32(ld.Be16(s.P[r.Off:])) @@ -794,7 +788,7 @@ func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 { if t&3 != 0 { ld.Errorf(s, "relocation for %s+%d is not aligned: %d", r.Sym.Name, r.Off, t) } - if 
(r.Variant&ld.RV_CHECK_OVERFLOW != 0) && int64(int16(t)) != t { + if (r.Variant&sym.RV_CHECK_OVERFLOW != 0) && int64(int16(t)) != t { goto overflow } return int64(o1)&0x3 | int64(int16(t)) @@ -805,14 +799,14 @@ overflow: return t } -func addpltsym(ctxt *ld.Link, s *ld.Symbol) { +func addpltsym(ctxt *ld.Link, s *sym.Symbol) { if s.Plt >= 0 { return } ld.Adddynsym(ctxt, s) - if ld.Iself { + if ctxt.IsELF { plt := ctxt.Syms.Lookup(".plt", 0) rela := ctxt.Syms.Lookup(".rela.plt", 0) if plt.Size == 0 { @@ -824,13 +818,13 @@ func addpltsym(ctxt *ld.Link, s *ld.Symbol) { // Write symbol resolver stub (just a branch to the // glink resolver stub) - r := ld.Addrel(glink) + r := glink.AddRel() r.Sym = glink r.Off = int32(glink.Size) r.Siz = 4 r.Type = objabi.R_CALLPOWER - ld.Adduint32(ctxt, glink, 0x48000000) // b .glink + glink.AddUint32(ctxt.Arch, 0x48000000) // b .glink // In the ppc64 ABI, the dynamic linker is responsible // for writing the entire PLT. We just need to @@ -842,16 +836,16 @@ func addpltsym(ctxt *ld.Link, s *ld.Symbol) { plt.Size += 8 - ld.Addaddrplus(ctxt, rela, plt, int64(s.Plt)) - ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_PPC64_JMP_SLOT)) - ld.Adduint64(ctxt, rela, 0) + rela.AddAddrPlus(ctxt.Arch, plt, int64(s.Plt)) + rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(s.Dynid), uint32(elf.R_PPC64_JMP_SLOT))) + rela.AddUint64(ctxt.Arch, 0) } else { ld.Errorf(s, "addpltsym: unsupported binary format") } } // Generate the glink resolver stub if necessary and return the .glink section -func ensureglinkresolver(ctxt *ld.Link) *ld.Symbol { +func ensureglinkresolver(ctxt *ld.Link) *sym.Symbol { glink := ctxt.Syms.Lookup(".glink", 0) if glink.Size != 0 { return glink @@ -864,39 +858,39 @@ func ensureglinkresolver(ctxt *ld.Link) *ld.Symbol { // // This stub is PIC, so first get the PC of label 1 into r11. // Other things will be relative to this. 
- ld.Adduint32(ctxt, glink, 0x7c0802a6) // mflr r0 - ld.Adduint32(ctxt, glink, 0x429f0005) // bcl 20,31,1f - ld.Adduint32(ctxt, glink, 0x7d6802a6) // 1: mflr r11 - ld.Adduint32(ctxt, glink, 0x7c0803a6) // mtlf r0 + glink.AddUint32(ctxt.Arch, 0x7c0802a6) // mflr r0 + glink.AddUint32(ctxt.Arch, 0x429f0005) // bcl 20,31,1f + glink.AddUint32(ctxt.Arch, 0x7d6802a6) // 1: mflr r11 + glink.AddUint32(ctxt.Arch, 0x7c0803a6) // mtlf r0 // Compute the .plt array index from the entry point address. // Because this is PIC, everything is relative to label 1b (in // r11): // r0 = ((r12 - r11) - (res_0 - r11)) / 4 = (r12 - res_0) / 4 - ld.Adduint32(ctxt, glink, 0x3800ffd0) // li r0,-(res_0-1b)=-48 - ld.Adduint32(ctxt, glink, 0x7c006214) // add r0,r0,r12 - ld.Adduint32(ctxt, glink, 0x7c0b0050) // sub r0,r0,r11 - ld.Adduint32(ctxt, glink, 0x7800f082) // srdi r0,r0,2 + glink.AddUint32(ctxt.Arch, 0x3800ffd0) // li r0,-(res_0-1b)=-48 + glink.AddUint32(ctxt.Arch, 0x7c006214) // add r0,r0,r12 + glink.AddUint32(ctxt.Arch, 0x7c0b0050) // sub r0,r0,r11 + glink.AddUint32(ctxt.Arch, 0x7800f082) // srdi r0,r0,2 // r11 = address of the first byte of the PLT - r := ld.Addrel(glink) + r := glink.AddRel() r.Off = int32(glink.Size) r.Sym = ctxt.Syms.Lookup(".plt", 0) r.Siz = 8 r.Type = objabi.R_ADDRPOWER - ld.Adduint32(ctxt, glink, 0x3d600000) // addis r11,0,.plt@ha - ld.Adduint32(ctxt, glink, 0x396b0000) // addi r11,r11,.plt@l + glink.AddUint32(ctxt.Arch, 0x3d600000) // addis r11,0,.plt@ha + glink.AddUint32(ctxt.Arch, 0x396b0000) // addi r11,r11,.plt@l // Load r12 = dynamic resolver address and r11 = DSO // identifier from the first two doublewords of the PLT. 
- ld.Adduint32(ctxt, glink, 0xe98b0000) // ld r12,0(r11) - ld.Adduint32(ctxt, glink, 0xe96b0008) // ld r11,8(r11) + glink.AddUint32(ctxt.Arch, 0xe98b0000) // ld r12,0(r11) + glink.AddUint32(ctxt.Arch, 0xe96b0008) // ld r11,8(r11) // Jump to the dynamic resolver - ld.Adduint32(ctxt, glink, 0x7d8903a6) // mtctr r12 - ld.Adduint32(ctxt, glink, 0x4e800420) // bctr + glink.AddUint32(ctxt.Arch, 0x7d8903a6) // mtctr r12 + glink.AddUint32(ctxt.Arch, 0x4e800420) // bctr // The symbol resolvers must immediately follow. // res_0: @@ -915,12 +909,12 @@ func asmb(ctxt *ld.Link) { ctxt.Logf("%5.2f asmb\n", ld.Cputime()) } - if ld.Iself { + if ctxt.IsELF { ld.Asmbelfsetup() } for _, sect := range ld.Segtext.Sections { - ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) + ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) // Handle additional text sections with Codeblk if sect.Name == ".text" { ld.Codeblk(ctxt, int64(sect.Vaddr), int64(sect.Length)) @@ -933,14 +927,14 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f rodatblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segrodata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segrodata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen)) } if ld.Segrelrodata.Filelen > 0 { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f relrodatblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segrelrodata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segrelrodata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segrelrodata.Vaddr), int64(ld.Segrelrodata.Filelen)) } @@ -948,10 +942,10 @@ func asmb(ctxt *ld.Link) { ctxt.Logf("%5.2f datblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segdata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segdata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen)) - ld.Cseek(int64(ld.Segdwarf.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segdwarf.Fileoff)) ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen)) /* output symbol table */ @@ -964,9 +958,9 
@@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f sym\n", ld.Cputime()) } - switch ld.Headtype { + switch ctxt.HeadType { default: - if ld.Iself { + if ctxt.IsELF { symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen) symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound))) } @@ -975,34 +969,31 @@ func asmb(ctxt *ld.Link) { symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen) } - ld.Cseek(int64(symo)) - switch ld.Headtype { + ctxt.Out.SeekSet(int64(symo)) + switch ctxt.HeadType { default: - if ld.Iself { + if ctxt.IsELF { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f elfsym\n", ld.Cputime()) } ld.Asmelfsym(ctxt) - ld.Cflush() - ld.Cwrite(ld.Elfstrdat) + ctxt.Out.Flush() + ctxt.Out.Write(ld.Elfstrdat) - if ld.Linkmode == ld.LinkExternal { + if ctxt.LinkMode == ld.LinkExternal { ld.Elfemitreloc(ctxt) } } case objabi.Hplan9: ld.Asmplan9sym(ctxt) - ld.Cflush() + ctxt.Out.Flush() sym := ctxt.Syms.Lookup("pclntab", 0) if sym != nil { ld.Lcsize = int32(len(sym.P)) - for i := 0; int32(i) < ld.Lcsize; i++ { - ld.Cput(sym.P[i]) - } - - ld.Cflush() + ctxt.Out.Write(sym.P) + ctxt.Out.Flush() } } } @@ -1010,18 +1001,18 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f header\n", ld.Cputime()) } - ld.Cseek(0) - switch ld.Headtype { + ctxt.Out.SeekSet(0) + switch ctxt.HeadType { default: case objabi.Hplan9: /* plan 9 */ - ld.Thearch.Lput(0x647) /* magic */ - ld.Thearch.Lput(uint32(ld.Segtext.Filelen)) /* sizes */ - ld.Thearch.Lput(uint32(ld.Segdata.Filelen)) - ld.Thearch.Lput(uint32(ld.Segdata.Length - ld.Segdata.Filelen)) - ld.Thearch.Lput(uint32(ld.Symsize)) /* nsyms */ - ld.Thearch.Lput(uint32(ld.Entryvalue(ctxt))) /* va of entry */ - ld.Thearch.Lput(0) - ld.Thearch.Lput(uint32(ld.Lcsize)) + ctxt.Out.Write32(0x647) /* magic */ + ctxt.Out.Write32(uint32(ld.Segtext.Filelen)) /* sizes */ + ctxt.Out.Write32(uint32(ld.Segdata.Filelen)) + ctxt.Out.Write32(uint32(ld.Segdata.Length - ld.Segdata.Filelen)) + 
ctxt.Out.Write32(uint32(ld.Symsize)) /* nsyms */ + ctxt.Out.Write32(uint32(ld.Entryvalue(ctxt))) /* va of entry */ + ctxt.Out.Write32(0) + ctxt.Out.Write32(uint32(ld.Lcsize)) case objabi.Hlinux, objabi.Hfreebsd, @@ -1031,7 +1022,7 @@ func asmb(ctxt *ld.Link) { ld.Asmbelf(ctxt, int64(symo)) } - ld.Cflush() + ctxt.Out.Flush() if *ld.FlagC { fmt.Printf("textsize=%d\n", ld.Segtext.Filelen) fmt.Printf("datsize=%d\n", ld.Segdata.Filelen) diff --git a/src/cmd/link/internal/ppc64/obj.go b/src/cmd/link/internal/ppc64/obj.go index 2d11eb5e498..273d9b42cb9 100644 --- a/src/cmd/link/internal/ppc64/obj.go +++ b/src/cmd/link/internal/ppc64/obj.go @@ -37,59 +37,47 @@ import ( "fmt" ) -func Init() { +func Init() (*sys.Arch, ld.Arch) { + arch := sys.ArchPPC64 if objabi.GOARCH == "ppc64le" { - ld.SysArch = sys.ArchPPC64LE - } else { - ld.SysArch = sys.ArchPPC64 + arch = sys.ArchPPC64LE } - ld.Thearch.Funcalign = funcAlign - ld.Thearch.Maxalign = maxAlign - ld.Thearch.Minalign = minAlign - ld.Thearch.Dwarfregsp = dwarfRegSP - ld.Thearch.Dwarfreglr = dwarfRegLR + theArch := ld.Arch{ + Funcalign: funcAlign, + Maxalign: maxAlign, + Minalign: minAlign, + Dwarfregsp: dwarfRegSP, + Dwarfreglr: dwarfRegLR, - ld.Thearch.Adddynrel = adddynrel - ld.Thearch.Archinit = archinit - ld.Thearch.Archreloc = archreloc - ld.Thearch.Archrelocvariant = archrelocvariant - ld.Thearch.Asmb = asmb - ld.Thearch.Elfreloc1 = elfreloc1 - ld.Thearch.Elfsetupplt = elfsetupplt - ld.Thearch.Gentext = gentext - ld.Thearch.Trampoline = trampoline - ld.Thearch.Machoreloc1 = machoreloc1 - if ld.SysArch == sys.ArchPPC64LE { - ld.Thearch.Lput = ld.Lputl - ld.Thearch.Wput = ld.Wputl - ld.Thearch.Vput = ld.Vputl - ld.Thearch.Append16 = ld.Append16l - ld.Thearch.Append32 = ld.Append32l - ld.Thearch.Append64 = ld.Append64l - } else { - ld.Thearch.Lput = ld.Lputb - ld.Thearch.Wput = ld.Wputb - ld.Thearch.Vput = ld.Vputb - ld.Thearch.Append16 = ld.Append16b - ld.Thearch.Append32 = ld.Append32b - ld.Thearch.Append64 = 
ld.Append64b + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Asmb: asmb, + Elfreloc1: elfreloc1, + Elfsetupplt: elfsetupplt, + Gentext: gentext, + Trampoline: trampoline, + Machoreloc1: machoreloc1, + + // TODO(austin): ABI v1 uses /usr/lib/ld.so.1, + Linuxdynld: "/lib64/ld64.so.1", + + Freebsddynld: "XXX", + Openbsddynld: "XXX", + Netbsddynld: "XXX", + Dragonflydynld: "XXX", + Solarisdynld: "XXX", } - // TODO(austin): ABI v1 uses /usr/lib/ld.so.1 - ld.Thearch.Linuxdynld = "/lib64/ld64.so.1" - - ld.Thearch.Freebsddynld = "XXX" - ld.Thearch.Openbsddynld = "XXX" - ld.Thearch.Netbsddynld = "XXX" - ld.Thearch.Dragonflydynld = "XXX" - ld.Thearch.Solarisdynld = "XXX" + return arch, theArch } func archinit(ctxt *ld.Link) { - switch ld.Headtype { + switch ctxt.HeadType { default: - ld.Exitf("unknown -H option: %v", ld.Headtype) + ld.Exitf("unknown -H option: %v", ctxt.HeadType) case objabi.Hplan9: /* plan 9 */ ld.HEADR = 32 @@ -105,7 +93,7 @@ func archinit(ctxt *ld.Link) { } case objabi.Hlinux: /* ppc64 elf */ - if ld.SysArch == sys.ArchPPC64 { + if ctxt.Arch == sys.ArchPPC64 { *ld.FlagD = true // TODO(austin): ELF ABI v1 not supported yet } ld.Elfinit(ctxt) diff --git a/src/cmd/link/internal/s390x/asm.go b/src/cmd/link/internal/s390x/asm.go index 7f120c7ef1d..634ba98dd3b 100644 --- a/src/cmd/link/internal/s390x/asm.go +++ b/src/cmd/link/internal/s390x/asm.go @@ -32,7 +32,9 @@ package s390x import ( "cmd/internal/objabi" + "cmd/internal/sys" "cmd/link/internal/ld" + "cmd/link/internal/sym" "debug/elf" "fmt" ) @@ -52,55 +54,55 @@ func gentext(ctxt *ld.Link) { return } addmoduledata := ctxt.Syms.Lookup("runtime.addmoduledata", 0) - if addmoduledata.Type == ld.STEXT && ld.Buildmode != ld.BuildmodePlugin { + if addmoduledata.Type == sym.STEXT && ctxt.BuildMode != ld.BuildModePlugin { // we're linking a module containing the runtime -> no need for // an init function return } - addmoduledata.Attr |= ld.AttrReachable 
+ addmoduledata.Attr |= sym.AttrReachable initfunc := ctxt.Syms.Lookup("go.link.addmoduledata", 0) - initfunc.Type = ld.STEXT - initfunc.Attr |= ld.AttrLocal - initfunc.Attr |= ld.AttrReachable + initfunc.Type = sym.STEXT + initfunc.Attr |= sym.AttrLocal + initfunc.Attr |= sym.AttrReachable // larl %r2, - ld.Adduint8(ctxt, initfunc, 0xc0) - ld.Adduint8(ctxt, initfunc, 0x20) - lmd := ld.Addrel(initfunc) + initfunc.AddUint8(0xc0) + initfunc.AddUint8(0x20) + lmd := initfunc.AddRel() lmd.Off = int32(initfunc.Size) lmd.Siz = 4 lmd.Sym = ctxt.Moduledata lmd.Type = objabi.R_PCREL - lmd.Variant = ld.RV_390_DBL + lmd.Variant = sym.RV_390_DBL lmd.Add = 2 + int64(lmd.Siz) - ld.Adduint32(ctxt, initfunc, 0) + initfunc.AddUint32(ctxt.Arch, 0) // jg - ld.Adduint8(ctxt, initfunc, 0xc0) - ld.Adduint8(ctxt, initfunc, 0xf4) - rel := ld.Addrel(initfunc) + initfunc.AddUint8(0xc0) + initfunc.AddUint8(0xf4) + rel := initfunc.AddRel() rel.Off = int32(initfunc.Size) rel.Siz = 4 rel.Sym = ctxt.Syms.Lookup("runtime.addmoduledata", 0) rel.Type = objabi.R_CALL - rel.Variant = ld.RV_390_DBL + rel.Variant = sym.RV_390_DBL rel.Add = 2 + int64(rel.Siz) - ld.Adduint32(ctxt, initfunc, 0) + initfunc.AddUint32(ctxt.Arch, 0) // undef (for debugging) - ld.Adduint32(ctxt, initfunc, 0) - if ld.Buildmode == ld.BuildmodePlugin { + initfunc.AddUint32(ctxt.Arch, 0) + if ctxt.BuildMode == ld.BuildModePlugin { ctxt.Textp = append(ctxt.Textp, addmoduledata) } ctxt.Textp = append(ctxt.Textp, initfunc) initarray_entry := ctxt.Syms.Lookup("go.link.addmoduledatainit", 0) - initarray_entry.Attr |= ld.AttrLocal - initarray_entry.Attr |= ld.AttrReachable - initarray_entry.Type = ld.SINITARR - ld.Addaddr(ctxt, initarray_entry, initfunc) + initarray_entry.Attr |= sym.AttrLocal + initarray_entry.Attr |= sym.AttrReachable + initarray_entry.Type = sym.SINITARR + initarray_entry.AddAddr(ctxt.Arch, initfunc) } -func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { +func adddynrel(ctxt *ld.Link, s *sym.Symbol, r 
*sym.Reloc) bool { targ := r.Sym switch r.Type { @@ -111,224 +113,221 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { } // Handle relocations found in ELF object files. - case 256 + ld.R_390_12, - 256 + ld.R_390_GOT12: + case 256 + objabi.RelocType(elf.R_390_12), + 256 + objabi.RelocType(elf.R_390_GOT12): ld.Errorf(s, "s390x 12-bit relocations have not been implemented (relocation type %d)", r.Type-256) return false - case 256 + ld.R_390_8, - 256 + ld.R_390_16, - 256 + ld.R_390_32, - 256 + ld.R_390_64: - if targ.Type == ld.SDYNIMPORT { + case 256 + objabi.RelocType(elf.R_390_8), + 256 + objabi.RelocType(elf.R_390_16), + 256 + objabi.RelocType(elf.R_390_32), + 256 + objabi.RelocType(elf.R_390_64): + if targ.Type == sym.SDYNIMPORT { ld.Errorf(s, "unexpected R_390_nn relocation for dynamic symbol %s", targ.Name) } r.Type = objabi.R_ADDR return true - case 256 + ld.R_390_PC16, - 256 + ld.R_390_PC32, - 256 + ld.R_390_PC64: - if targ.Type == ld.SDYNIMPORT { + case 256 + objabi.RelocType(elf.R_390_PC16), + 256 + objabi.RelocType(elf.R_390_PC32), + 256 + objabi.RelocType(elf.R_390_PC64): + if targ.Type == sym.SDYNIMPORT { ld.Errorf(s, "unexpected R_390_PCnn relocation for dynamic symbol %s", targ.Name) } - if targ.Type == 0 || targ.Type == ld.SXREF { + // TODO(mwhudson): the test of VisibilityHidden here probably doesn't make + // sense and should be removed when someone has thought about it properly. 
+ if (targ.Type == 0 || targ.Type == sym.SXREF) && !targ.Attr.VisibilityHidden() { ld.Errorf(s, "unknown symbol %s in pcrel", targ.Name) } r.Type = objabi.R_PCREL r.Add += int64(r.Siz) return true - case 256 + ld.R_390_GOT16, - 256 + ld.R_390_GOT32, - 256 + ld.R_390_GOT64: + case 256 + objabi.RelocType(elf.R_390_GOT16), + 256 + objabi.RelocType(elf.R_390_GOT32), + 256 + objabi.RelocType(elf.R_390_GOT64): ld.Errorf(s, "unimplemented S390x relocation: %v", r.Type-256) return true - case 256 + ld.R_390_PLT16DBL, - 256 + ld.R_390_PLT32DBL: + case 256 + objabi.RelocType(elf.R_390_PLT16DBL), + 256 + objabi.RelocType(elf.R_390_PLT32DBL): r.Type = objabi.R_PCREL - r.Variant = ld.RV_390_DBL + r.Variant = sym.RV_390_DBL r.Add += int64(r.Siz) - if targ.Type == ld.SDYNIMPORT { + if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) r.Add += int64(targ.Plt) } return true - case 256 + ld.R_390_PLT32, - 256 + ld.R_390_PLT64: + case 256 + objabi.RelocType(elf.R_390_PLT32), + 256 + objabi.RelocType(elf.R_390_PLT64): r.Type = objabi.R_PCREL r.Add += int64(r.Siz) - if targ.Type == ld.SDYNIMPORT { + if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) r.Add += int64(targ.Plt) } return true - case 256 + ld.R_390_COPY: + case 256 + objabi.RelocType(elf.R_390_COPY): ld.Errorf(s, "unimplemented S390x relocation: %v", r.Type-256) return false - case 256 + ld.R_390_GLOB_DAT: + case 256 + objabi.RelocType(elf.R_390_GLOB_DAT): ld.Errorf(s, "unimplemented S390x relocation: %v", r.Type-256) return false - case 256 + ld.R_390_JMP_SLOT: + case 256 + objabi.RelocType(elf.R_390_JMP_SLOT): ld.Errorf(s, "unimplemented S390x relocation: %v", r.Type-256) return false - case 256 + ld.R_390_RELATIVE: + case 256 + objabi.RelocType(elf.R_390_RELATIVE): ld.Errorf(s, "unimplemented S390x relocation: %v", r.Type-256) return false - case 256 + ld.R_390_GOTOFF: - if targ.Type == ld.SDYNIMPORT { + case 256 + 
objabi.RelocType(elf.R_390_GOTOFF): + if targ.Type == sym.SDYNIMPORT { ld.Errorf(s, "unexpected R_390_GOTOFF relocation for dynamic symbol %s", targ.Name) } r.Type = objabi.R_GOTOFF return true - case 256 + ld.R_390_GOTPC: + case 256 + objabi.RelocType(elf.R_390_GOTPC): r.Type = objabi.R_PCREL r.Sym = ctxt.Syms.Lookup(".got", 0) r.Add += int64(r.Siz) return true - case 256 + ld.R_390_PC16DBL, - 256 + ld.R_390_PC32DBL: + case 256 + objabi.RelocType(elf.R_390_PC16DBL), + 256 + objabi.RelocType(elf.R_390_PC32DBL): r.Type = objabi.R_PCREL - r.Variant = ld.RV_390_DBL + r.Variant = sym.RV_390_DBL r.Add += int64(r.Siz) - if targ.Type == ld.SDYNIMPORT { + if targ.Type == sym.SDYNIMPORT { ld.Errorf(s, "unexpected R_390_PCnnDBL relocation for dynamic symbol %s", targ.Name) } return true - case 256 + ld.R_390_GOTPCDBL: + case 256 + objabi.RelocType(elf.R_390_GOTPCDBL): r.Type = objabi.R_PCREL - r.Variant = ld.RV_390_DBL + r.Variant = sym.RV_390_DBL r.Sym = ctxt.Syms.Lookup(".got", 0) r.Add += int64(r.Siz) return true - case 256 + ld.R_390_GOTENT: + case 256 + objabi.RelocType(elf.R_390_GOTENT): addgotsym(ctxt, targ) r.Type = objabi.R_PCREL - r.Variant = ld.RV_390_DBL + r.Variant = sym.RV_390_DBL r.Sym = ctxt.Syms.Lookup(".got", 0) r.Add += int64(targ.Got) r.Add += int64(r.Siz) return true } // Handle references to ELF symbols from our own object files. 
- if targ.Type != ld.SDYNIMPORT { + if targ.Type != sym.SDYNIMPORT { return true } return false } -func elfreloc1(ctxt *ld.Link, r *ld.Reloc, sectoff int64) int { - ld.Thearch.Vput(uint64(sectoff)) +func elfreloc1(ctxt *ld.Link, r *sym.Reloc, sectoff int64) bool { + ctxt.Out.Write64(uint64(sectoff)) elfsym := r.Xsym.ElfsymForReloc() switch r.Type { default: - return -1 - + return false case objabi.R_TLS_LE: switch r.Siz { default: - return -1 + return false case 4: // WARNING - silently ignored by linker in ELF64 - ld.Thearch.Vput(ld.R_390_TLS_LE32 | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_390_TLS_LE32) | uint64(elfsym)<<32) case 8: // WARNING - silently ignored by linker in ELF32 - ld.Thearch.Vput(ld.R_390_TLS_LE64 | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_390_TLS_LE64) | uint64(elfsym)<<32) } - case objabi.R_TLS_IE: switch r.Siz { default: - return -1 + return false case 4: - ld.Thearch.Vput(ld.R_390_TLS_IEENT | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_390_TLS_IEENT) | uint64(elfsym)<<32) } - case objabi.R_ADDR: switch r.Siz { default: - return -1 + return false case 4: - ld.Thearch.Vput(ld.R_390_32 | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_390_32) | uint64(elfsym)<<32) case 8: - ld.Thearch.Vput(ld.R_390_64 | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_390_64) | uint64(elfsym)<<32) } - case objabi.R_GOTPCREL: if r.Siz == 4 { - ld.Thearch.Vput(ld.R_390_GOTENT | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elf.R_390_GOTENT) | uint64(elfsym)<<32) } else { - return -1 + return false } - case objabi.R_PCREL, objabi.R_PCRELDBL, objabi.R_CALL: - elfrel := ld.R_390_NONE - isdbl := r.Variant&ld.RV_TYPE_MASK == ld.RV_390_DBL + elfrel := elf.R_390_NONE + isdbl := r.Variant&sym.RV_TYPE_MASK == sym.RV_390_DBL // TODO(mundaym): all DBL style relocations should be // signalled using the variant - see issue 14218. 
switch r.Type { case objabi.R_PCRELDBL, objabi.R_CALL: isdbl = true } - if r.Xsym.Type == ld.SDYNIMPORT && (r.Xsym.ElfType == elf.STT_FUNC || r.Type == objabi.R_CALL) { + if r.Xsym.Type == sym.SDYNIMPORT && (r.Xsym.ElfType == elf.STT_FUNC || r.Type == objabi.R_CALL) { if isdbl { switch r.Siz { case 2: - elfrel = ld.R_390_PLT16DBL + elfrel = elf.R_390_PLT16DBL case 4: - elfrel = ld.R_390_PLT32DBL + elfrel = elf.R_390_PLT32DBL } } else { switch r.Siz { case 4: - elfrel = ld.R_390_PLT32 + elfrel = elf.R_390_PLT32 case 8: - elfrel = ld.R_390_PLT64 + elfrel = elf.R_390_PLT64 } } } else { if isdbl { switch r.Siz { case 2: - elfrel = ld.R_390_PC16DBL + elfrel = elf.R_390_PC16DBL case 4: - elfrel = ld.R_390_PC32DBL + elfrel = elf.R_390_PC32DBL } } else { switch r.Siz { case 2: - elfrel = ld.R_390_PC16 + elfrel = elf.R_390_PC16 case 4: - elfrel = ld.R_390_PC32 + elfrel = elf.R_390_PC32 case 8: - elfrel = ld.R_390_PC64 + elfrel = elf.R_390_PC64 } } } - if elfrel == ld.R_390_NONE { - return -1 // unsupported size/dbl combination + if elfrel == elf.R_390_NONE { + return false // unsupported size/dbl combination } - ld.Thearch.Vput(uint64(elfrel) | uint64(elfsym)<<32) + ctxt.Out.Write64(uint64(elfrel) | uint64(elfsym)<<32) } - ld.Thearch.Vput(uint64(r.Xadd)) - return 0 + ctxt.Out.Write64(uint64(r.Xadd)) + return true } func elfsetupplt(ctxt *ld.Link) { @@ -336,83 +335,82 @@ func elfsetupplt(ctxt *ld.Link) { got := ctxt.Syms.Lookup(".got", 0) if plt.Size == 0 { // stg %r1,56(%r15) - ld.Adduint8(ctxt, plt, 0xe3) - ld.Adduint8(ctxt, plt, 0x10) - ld.Adduint8(ctxt, plt, 0xf0) - ld.Adduint8(ctxt, plt, 0x38) - ld.Adduint8(ctxt, plt, 0x00) - ld.Adduint8(ctxt, plt, 0x24) + plt.AddUint8(0xe3) + plt.AddUint8(0x10) + plt.AddUint8(0xf0) + plt.AddUint8(0x38) + plt.AddUint8(0x00) + plt.AddUint8(0x24) // larl %r1,_GLOBAL_OFFSET_TABLE_ - ld.Adduint8(ctxt, plt, 0xc0) - ld.Adduint8(ctxt, plt, 0x10) - ld.Addpcrelplus(ctxt, plt, got, 6) + plt.AddUint8(0xc0) + plt.AddUint8(0x10) + 
plt.AddPCRelPlus(ctxt.Arch, got, 6) // mvc 48(8,%r15),8(%r1) - ld.Adduint8(ctxt, plt, 0xd2) - ld.Adduint8(ctxt, plt, 0x07) - ld.Adduint8(ctxt, plt, 0xf0) - ld.Adduint8(ctxt, plt, 0x30) - ld.Adduint8(ctxt, plt, 0x10) - ld.Adduint8(ctxt, plt, 0x08) + plt.AddUint8(0xd2) + plt.AddUint8(0x07) + plt.AddUint8(0xf0) + plt.AddUint8(0x30) + plt.AddUint8(0x10) + plt.AddUint8(0x08) // lg %r1,16(%r1) - ld.Adduint8(ctxt, plt, 0xe3) - ld.Adduint8(ctxt, plt, 0x10) - ld.Adduint8(ctxt, plt, 0x10) - ld.Adduint8(ctxt, plt, 0x10) - ld.Adduint8(ctxt, plt, 0x00) - ld.Adduint8(ctxt, plt, 0x04) + plt.AddUint8(0xe3) + plt.AddUint8(0x10) + plt.AddUint8(0x10) + plt.AddUint8(0x10) + plt.AddUint8(0x00) + plt.AddUint8(0x04) // br %r1 - ld.Adduint8(ctxt, plt, 0x07) - ld.Adduint8(ctxt, plt, 0xf1) + plt.AddUint8(0x07) + plt.AddUint8(0xf1) // nopr %r0 - ld.Adduint8(ctxt, plt, 0x07) - ld.Adduint8(ctxt, plt, 0x00) + plt.AddUint8(0x07) + plt.AddUint8(0x00) // nopr %r0 - ld.Adduint8(ctxt, plt, 0x07) - ld.Adduint8(ctxt, plt, 0x00) + plt.AddUint8(0x07) + plt.AddUint8(0x00) // nopr %r0 - ld.Adduint8(ctxt, plt, 0x07) - ld.Adduint8(ctxt, plt, 0x00) + plt.AddUint8(0x07) + plt.AddUint8(0x00) // assume got->size == 0 too - ld.Addaddrplus(ctxt, got, ctxt.Syms.Lookup(".dynamic", 0), 0) + got.AddAddrPlus(ctxt.Arch, ctxt.Syms.Lookup(".dynamic", 0), 0) - ld.Adduint64(ctxt, got, 0) - ld.Adduint64(ctxt, got, 0) + got.AddUint64(ctxt.Arch, 0) + got.AddUint64(ctxt.Arch, 0) } } -func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { - return -1 +func machoreloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, sectoff int64) bool { + return false } -func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { - if ld.Linkmode == ld.LinkExternal { - return -1 +func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { + if ctxt.LinkMode == ld.LinkExternal { + return false } switch r.Type { case objabi.R_CONST: *val = r.Add - return 0 - + return true case 
objabi.R_GOTOFF: *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)) - return 0 + return true } - return -1 + return false } -func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 { - switch r.Variant & ld.RV_TYPE_MASK { +func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 { + switch r.Variant & sym.RV_TYPE_MASK { default: ld.Errorf(s, "unexpected relocation variant %d", r.Variant) return t - case ld.RV_NONE: + case sym.RV_NONE: return t - case ld.RV_390_DBL: + case sym.RV_390_DBL: if (t & 1) != 0 { ld.Errorf(s, "%s+%v is not 2-byte aligned", r.Sym.Name, r.Sym.Value) } @@ -420,14 +418,14 @@ func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 { } } -func addpltsym(ctxt *ld.Link, s *ld.Symbol) { +func addpltsym(ctxt *ld.Link, s *sym.Symbol) { if s.Plt >= 0 { return } ld.Adddynsym(ctxt, s) - if ld.Iself { + if ctxt.IsELF { plt := ctxt.Syms.Lookup(".plt", 0) got := ctxt.Syms.Lookup(".got", 0) rela := ctxt.Syms.Lookup(".rela.plt", 0) @@ -436,45 +434,45 @@ func addpltsym(ctxt *ld.Link, s *ld.Symbol) { } // larl %r1,_GLOBAL_OFFSET_TABLE_+index - ld.Adduint8(ctxt, plt, 0xc0) - ld.Adduint8(ctxt, plt, 0x10) - ld.Addpcrelplus(ctxt, plt, got, got.Size+6) // need variant? + plt.AddUint8(0xc0) + plt.AddUint8(0x10) + plt.AddPCRelPlus(ctxt.Arch, got, got.Size+6) // need variant? 
// add to got: pointer to current pos in plt - ld.Addaddrplus(ctxt, got, plt, plt.Size+8) // weird but correct + got.AddAddrPlus(ctxt.Arch, plt, plt.Size+8) // weird but correct // lg %r1,0(%r1) - ld.Adduint8(ctxt, plt, 0xe3) - ld.Adduint8(ctxt, plt, 0x10) - ld.Adduint8(ctxt, plt, 0x10) - ld.Adduint8(ctxt, plt, 0x00) - ld.Adduint8(ctxt, plt, 0x00) - ld.Adduint8(ctxt, plt, 0x04) + plt.AddUint8(0xe3) + plt.AddUint8(0x10) + plt.AddUint8(0x10) + plt.AddUint8(0x00) + plt.AddUint8(0x00) + plt.AddUint8(0x04) // br %r1 - ld.Adduint8(ctxt, plt, 0x07) - ld.Adduint8(ctxt, plt, 0xf1) + plt.AddUint8(0x07) + plt.AddUint8(0xf1) // basr %r1,%r0 - ld.Adduint8(ctxt, plt, 0x0d) - ld.Adduint8(ctxt, plt, 0x10) + plt.AddUint8(0x0d) + plt.AddUint8(0x10) // lgf %r1,12(%r1) - ld.Adduint8(ctxt, plt, 0xe3) - ld.Adduint8(ctxt, plt, 0x10) - ld.Adduint8(ctxt, plt, 0x10) - ld.Adduint8(ctxt, plt, 0x0c) - ld.Adduint8(ctxt, plt, 0x00) - ld.Adduint8(ctxt, plt, 0x14) + plt.AddUint8(0xe3) + plt.AddUint8(0x10) + plt.AddUint8(0x10) + plt.AddUint8(0x0c) + plt.AddUint8(0x00) + plt.AddUint8(0x14) // jg .plt - ld.Adduint8(ctxt, plt, 0xc0) - ld.Adduint8(ctxt, plt, 0xf4) + plt.AddUint8(0xc0) + plt.AddUint8(0xf4) - ld.Adduint32(ctxt, plt, uint32(-((plt.Size - 2) >> 1))) // roll-your-own relocation + plt.AddUint32(ctxt.Arch, uint32(-((plt.Size - 2) >> 1))) // roll-your-own relocation //.plt index - ld.Adduint32(ctxt, plt, uint32(rela.Size)) // rela size before current entry + plt.AddUint32(ctxt.Arch, uint32(rela.Size)) // rela size before current entry // rela - ld.Addaddrplus(ctxt, rela, got, got.Size-8) + rela.AddAddrPlus(ctxt.Arch, got, got.Size-8) - ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_390_JMP_SLOT)) - ld.Adduint64(ctxt, rela, 0) + rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(s.Dynid), uint32(elf.R_390_JMP_SLOT))) + rela.AddUint64(ctxt.Arch, 0) s.Plt = int32(plt.Size - 32) @@ -483,7 +481,7 @@ func addpltsym(ctxt *ld.Link, s *ld.Symbol) { } } -func addgotsym(ctxt *ld.Link, s 
*ld.Symbol) { +func addgotsym(ctxt *ld.Link, s *sym.Symbol) { if s.Got >= 0 { return } @@ -491,13 +489,13 @@ func addgotsym(ctxt *ld.Link, s *ld.Symbol) { ld.Adddynsym(ctxt, s) got := ctxt.Syms.Lookup(".got", 0) s.Got = int32(got.Size) - ld.Adduint64(ctxt, got, 0) + got.AddUint64(ctxt.Arch, 0) - if ld.Iself { + if ctxt.IsELF { rela := ctxt.Syms.Lookup(".rela", 0) - ld.Addaddrplus(ctxt, rela, got, int64(s.Got)) - ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_390_GLOB_DAT)) - ld.Adduint64(ctxt, rela, 0) + rela.AddAddrPlus(ctxt.Arch, got, int64(s.Got)) + rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(s.Dynid), uint32(elf.R_390_GLOB_DAT))) + rela.AddUint64(ctxt.Arch, 0) } else { ld.Errorf(s, "addgotsym: unsupported binary format") } @@ -508,15 +506,15 @@ func asmb(ctxt *ld.Link) { ctxt.Logf("%5.2f asmb\n", ld.Cputime()) } - if ld.Iself { + if ctxt.IsELF { ld.Asmbelfsetup() } sect := ld.Segtext.Sections[0] - ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) + ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) ld.Codeblk(ctxt, int64(sect.Vaddr), int64(sect.Length)) for _, sect = range ld.Segtext.Sections[1:] { - ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) + ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length)) } @@ -524,14 +522,14 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f rodatblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segrodata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segrodata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen)) } if ld.Segrelrodata.Filelen > 0 { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f rodatblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segrelrodata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segrelrodata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segrelrodata.Vaddr), int64(ld.Segrelrodata.Filelen)) } @@ -539,10 +537,10 @@ func asmb(ctxt *ld.Link) { 
ctxt.Logf("%5.2f datblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segdata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segdata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen)) - ld.Cseek(int64(ld.Segdwarf.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segdwarf.Fileoff)) ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen)) /* output symbol table */ @@ -551,7 +549,7 @@ func asmb(ctxt *ld.Link) { ld.Lcsize = 0 symo := uint32(0) if !*ld.FlagS { - if !ld.Iself { + if !ctxt.IsELF { ld.Errorf(nil, "unsupported executable format") } if ctxt.Debugvlog != 0 { @@ -560,19 +558,19 @@ func asmb(ctxt *ld.Link) { symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen) symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound))) - ld.Cseek(int64(symo)) + ctxt.Out.SeekSet(int64(symo)) if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f elfsym\n", ld.Cputime()) } ld.Asmelfsym(ctxt) - ld.Cflush() - ld.Cwrite(ld.Elfstrdat) + ctxt.Out.Flush() + ctxt.Out.Write(ld.Elfstrdat) if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f dwarf\n", ld.Cputime()) } - if ld.Linkmode == ld.LinkExternal { + if ctxt.LinkMode == ld.LinkExternal { ld.Elfemitreloc(ctxt) } } @@ -580,15 +578,15 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f header\n", ld.Cputime()) } - ld.Cseek(0) - switch ld.Headtype { + ctxt.Out.SeekSet(0) + switch ctxt.HeadType { default: ld.Errorf(nil, "unsupported operating system") case objabi.Hlinux: ld.Asmbelf(ctxt, int64(symo)) } - ld.Cflush() + ctxt.Out.Flush() if *ld.FlagC { fmt.Printf("textsize=%d\n", ld.Segtext.Filelen) fmt.Printf("datsize=%d\n", ld.Segdata.Filelen) diff --git a/src/cmd/link/internal/s390x/obj.go b/src/cmd/link/internal/s390x/obj.go index cd5da6a42c5..9ac7eb82174 100644 --- a/src/cmd/link/internal/s390x/obj.go +++ b/src/cmd/link/internal/s390x/obj.go @@ -37,45 +37,43 @@ import ( "fmt" ) -func Init() { - ld.SysArch = sys.ArchS390X +func Init() (*sys.Arch, ld.Arch) { + arch := sys.ArchS390X - ld.Thearch.Funcalign = funcAlign - 
ld.Thearch.Maxalign = maxAlign - ld.Thearch.Minalign = minAlign - ld.Thearch.Dwarfregsp = dwarfRegSP - ld.Thearch.Dwarfreglr = dwarfRegLR + theArch := ld.Arch{ + Funcalign: funcAlign, + Maxalign: maxAlign, + Minalign: minAlign, + Dwarfregsp: dwarfRegSP, + Dwarfreglr: dwarfRegLR, - ld.Thearch.Adddynrel = adddynrel - ld.Thearch.Archinit = archinit - ld.Thearch.Archreloc = archreloc - ld.Thearch.Archrelocvariant = archrelocvariant - ld.Thearch.Asmb = asmb // in asm.go - ld.Thearch.Elfreloc1 = elfreloc1 - ld.Thearch.Elfsetupplt = elfsetupplt - ld.Thearch.Gentext = gentext - ld.Thearch.Machoreloc1 = machoreloc1 - ld.Thearch.Lput = ld.Lputb - ld.Thearch.Wput = ld.Wputb - ld.Thearch.Vput = ld.Vputb - ld.Thearch.Append16 = ld.Append16b - ld.Thearch.Append32 = ld.Append32b - ld.Thearch.Append64 = ld.Append64b + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Asmb: asmb, // in asm.go + Elfreloc1: elfreloc1, + Elfsetupplt: elfsetupplt, + Gentext: gentext, + Machoreloc1: machoreloc1, - ld.Thearch.Linuxdynld = "/lib64/ld64.so.1" + Linuxdynld: "/lib64/ld64.so.1", - // not relevant for s390x - ld.Thearch.Freebsddynld = "XXX" - ld.Thearch.Openbsddynld = "XXX" - ld.Thearch.Netbsddynld = "XXX" - ld.Thearch.Dragonflydynld = "XXX" - ld.Thearch.Solarisdynld = "XXX" + // not relevant for s390x + Freebsddynld: "XXX", + Openbsddynld: "XXX", + Netbsddynld: "XXX", + Dragonflydynld: "XXX", + Solarisdynld: "XXX", + } + + return arch, theArch } func archinit(ctxt *ld.Link) { - switch ld.Headtype { + switch ctxt.HeadType { default: - ld.Exitf("unknown -H option: %v", ld.Headtype) + ld.Exitf("unknown -H option: %v", ctxt.HeadType) case objabi.Hlinux: // s390x ELF ld.Elfinit(ctxt) diff --git a/src/cmd/link/internal/sym/attribute.go b/src/cmd/link/internal/sym/attribute.go new file mode 100644 index 00000000000..45a7939d677 --- /dev/null +++ b/src/cmd/link/internal/sym/attribute.go @@ -0,0 +1,109 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sym + +// Attribute is a set of common symbol attributes. +type Attribute int32 + +const ( + // AttrDuplicateOK marks a symbol that can be present in multiple object + // files. + AttrDuplicateOK Attribute = 1 << iota + // AttrExternal marks function symbols loaded from host object files. + AttrExternal + // AttrNoSplit marks functions that cannot split the stack; the linker + // cares because it checks that there are no call chains of nosplit + // functions that require more than StackLimit bytes (see + // lib.go:dostkcheck) + AttrNoSplit + // AttrReachable marks symbols that are transitively referenced from the + // entry points. Unreachable symbols are not written to the output. + AttrReachable + // AttrCgoExportDynamic and AttrCgoExportStatic mark symbols referenced + // by directives written by cgo (in response to //export directives in + // the source). + AttrCgoExportDynamic + AttrCgoExportStatic + // AttrSpecial marks symbols that do not have their address (i.e. Value) + // computed by the usual mechanism of data.go:dodata() & + // data.go:address(). + AttrSpecial + // AttrStackCheck is used by dostkcheck to only check each NoSplit + // function's stack usage once. + AttrStackCheck + // AttrNotInSymbolTable marks symbols that are not written to the symbol table. + AttrNotInSymbolTable + // AttrOnList marks symbols that are on some list (such as the list of + // all text symbols, or one of the lists of data symbols) and is + // consulted to avoid bugs where a symbol is put on a list twice. + AttrOnList + // AttrLocal marks symbols that are only visible within the module + // (executable or shared library) being linked. Only relevant when + // dynamically linking Go code. + AttrLocal + // AttrReflectMethod marks certain methods from the reflect package that + // can be used to call arbitrary methods. 
If no symbol with this bit set + // is marked as reachable, more dead code elimination can be done. + AttrReflectMethod + // AttrMakeTypelink Amarks types that should be added to the typelink + // table. See typelinks.go:typelinks(). + AttrMakeTypelink + // AttrShared marks symbols compiled with the -shared option. + AttrShared + // AttrVisibilityHidden symbols are ELF symbols with + // visibility set to STV_HIDDEN. They become local symbols in + // the final executable. Only relevant when internally linking + // on an ELF platform. + AttrVisibilityHidden + // AttrSubSymbol mostly means that the symbol appears on the Sub list of some + // other symbol. Unfortunately, it's not 100% reliable; at least, it's not set + // correctly for the .TOC. symbol in Link.dodata. Usually the Outer field of the + // symbol points to the symbol whose list it is on, but that it is not set for the + // symbols added to .windynamic in initdynimport in pe.go. + // + // TODO(mwhudson): fix the inconsistencies noticed above. + // + // Sub lists are used when loading host objects (sections from the host object + // become regular linker symbols and symbols go on the Sub list of their section) + // and for constructing the global offset table when internally linking a dynamic + // executable. + // + // TOOD(mwhudson): perhaps a better name for this is AttrNonGoSymbol. + AttrSubSymbol + // AttrContainer is set on text symbols that are present as the .Outer for some + // other symbol. + AttrContainer + // 17 attributes defined so far. 
+) + +func (a Attribute) DuplicateOK() bool { return a&AttrDuplicateOK != 0 } +func (a Attribute) External() bool { return a&AttrExternal != 0 } +func (a Attribute) NoSplit() bool { return a&AttrNoSplit != 0 } +func (a Attribute) Reachable() bool { return a&AttrReachable != 0 } +func (a Attribute) CgoExportDynamic() bool { return a&AttrCgoExportDynamic != 0 } +func (a Attribute) CgoExportStatic() bool { return a&AttrCgoExportStatic != 0 } +func (a Attribute) Special() bool { return a&AttrSpecial != 0 } +func (a Attribute) StackCheck() bool { return a&AttrStackCheck != 0 } +func (a Attribute) NotInSymbolTable() bool { return a&AttrNotInSymbolTable != 0 } +func (a Attribute) OnList() bool { return a&AttrOnList != 0 } +func (a Attribute) Local() bool { return a&AttrLocal != 0 } +func (a Attribute) ReflectMethod() bool { return a&AttrReflectMethod != 0 } +func (a Attribute) MakeTypelink() bool { return a&AttrMakeTypelink != 0 } +func (a Attribute) Shared() bool { return a&AttrShared != 0 } +func (a Attribute) VisibilityHidden() bool { return a&AttrVisibilityHidden != 0 } +func (a Attribute) SubSymbol() bool { return a&AttrSubSymbol != 0 } +func (a Attribute) Container() bool { return a&AttrContainer != 0 } + +func (a Attribute) CgoExport() bool { + return a.CgoExportDynamic() || a.CgoExportStatic() +} + +func (a *Attribute) Set(flag Attribute, value bool) { + if value { + *a |= flag + } else { + *a &^= flag + } +} diff --git a/src/cmd/link/internal/sym/library.go b/src/cmd/link/internal/sym/library.go new file mode 100644 index 00000000000..ee96f4aaa94 --- /dev/null +++ b/src/cmd/link/internal/sym/library.go @@ -0,0 +1,22 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package sym + +type Library struct { + Objref string + Srcref string + File string + Pkg string + Shlib string + Hash string + ImportStrings []string + Imports []*Library + Textp []*Symbol // text symbols defined in this library + DupTextSyms []*Symbol // dupok text symbols defined in this library +} + +func (l Library) String() string { + return l.Pkg +} diff --git a/src/cmd/link/internal/sym/reloc.go b/src/cmd/link/internal/sym/reloc.go new file mode 100644 index 00000000000..fc62c385f40 --- /dev/null +++ b/src/cmd/link/internal/sym/reloc.go @@ -0,0 +1,116 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sym + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "debug/elf" +) + +// Reloc is a relocation. +// +// The typical Reloc rewrites part of a symbol at offset Off to address Sym. +// A Reloc is stored in a slice on the Symbol it rewrites. +// +// Relocations are generated by the compiler as the type +// cmd/internal/obj.Reloc, which is encoded into the object file wire +// format and decoded by the linker into this type. A separate type is +// used to hold linker-specific state about the relocation. +// +// Some relocations are created by cmd/link. +type Reloc struct { + Off int32 // offset to rewrite + Siz uint8 // number of bytes to rewrite, 1, 2, or 4 + Done bool // set to true when relocation is complete + Variant RelocVariant // variation on Type + Type objabi.RelocType // the relocation type + Add int64 // addend + Xadd int64 // addend passed to external linker + Sym *Symbol // symbol the relocation addresses + Xsym *Symbol // symbol passed to external linker +} + +// RelocVariant is a linker-internal variation on a relocation. 
+type RelocVariant uint8 + +const ( + RV_NONE RelocVariant = iota + RV_POWER_LO + RV_POWER_HI + RV_POWER_HA + RV_POWER_DS + + // RV_390_DBL is a s390x-specific relocation variant that indicates that + // the value to be placed into the relocatable field should first be + // divided by 2. + RV_390_DBL + + RV_CHECK_OVERFLOW RelocVariant = 1 << 7 + RV_TYPE_MASK RelocVariant = RV_CHECK_OVERFLOW - 1 +) + +func RelocName(arch *sys.Arch, r objabi.RelocType) string { + // We didn't have some relocation types at Go1.4. + // Uncomment code when we include those in bootstrap code. + + switch { + case r >= 512: // Mach-O + // nr := (r - 512)>>1 + // switch ctxt.Arch.Family { + // case sys.AMD64: + // return macho.RelocTypeX86_64(nr).String() + // case sys.ARM: + // return macho.RelocTypeARM(nr).String() + // case sys.ARM64: + // return macho.RelocTypeARM64(nr).String() + // case sys.I386: + // return macho.RelocTypeGeneric(nr).String() + // default: + // panic("unreachable") + // } + case r >= 256: // ELF + nr := r - 256 + switch arch.Family { + case sys.AMD64: + return elf.R_X86_64(nr).String() + case sys.ARM: + return elf.R_ARM(nr).String() + case sys.ARM64: + return elf.R_AARCH64(nr).String() + case sys.I386: + return elf.R_386(nr).String() + case sys.MIPS, sys.MIPS64: + // return elf.R_MIPS(nr).String() + case sys.PPC64: + // return elf.R_PPC64(nr).String() + case sys.S390X: + // return elf.R_390(nr).String() + default: + panic("unreachable") + } + } + + return r.String() +} + +// RelocByOff implements sort.Interface for sorting relocations by offset. 
+type RelocByOff []Reloc + +func (x RelocByOff) Len() int { return len(x) } + +func (x RelocByOff) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x RelocByOff) Less(i, j int) bool { + a := &x[i] + b := &x[j] + if a.Off < b.Off { + return true + } + if a.Off > b.Off { + return false + } + return false +} diff --git a/src/cmd/link/internal/sym/segment.go b/src/cmd/link/internal/sym/segment.go new file mode 100644 index 00000000000..d5255bf1428 --- /dev/null +++ b/src/cmd/link/internal/sym/segment.go @@ -0,0 +1,58 @@ +// Inferno utils/8l/asm.c +// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/8l/asm.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) +// Portions Copyright © 1997-1999 Vita Nuova Limited +// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) +// Portions Copyright © 2004,2006 Bruce Ellis +// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) +// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others +// Portions Copyright © 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package sym + +// Terrible but standard terminology. +// A segment describes a block of file to load into memory. +// A section further describes the pieces of that block for +// use in debuggers and such. + +type Segment struct { + Rwx uint8 // permission as usual unix bits (5 = r-x etc) + Vaddr uint64 // virtual address + Length uint64 // length in memory + Fileoff uint64 // file offset + Filelen uint64 // length on disk + Sections []*Section +} + +type Section struct { + Rwx uint8 + Extnum int16 + Align int32 + Name string + Vaddr uint64 + Length uint64 + Seg *Segment + Elfsect interface{} // an *ld.ElfShdr + Reloff uint64 + Rellen uint64 +} diff --git a/src/cmd/link/internal/sym/symbol.go b/src/cmd/link/internal/sym/symbol.go new file mode 100644 index 00000000000..6faedf4fe27 --- /dev/null +++ b/src/cmd/link/internal/sym/symbol.go @@ -0,0 +1,382 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sym + +import ( + "cmd/internal/objabi" + "cmd/internal/sys" + "debug/elf" + "fmt" + "log" +) + +// Symbol is an entry in the symbol table. +type Symbol struct { + Name string + Extname string + Type SymKind + Version int16 + Attr Attribute + Localentry uint8 + Dynid int32 + Plt int32 + Got int32 + Align int32 + Elfsym int32 + LocalElfsym int32 + Value int64 + Size int64 + // ElfType is set for symbols read from shared libraries by ldshlibsyms. It + // is not set for symbols defined by the packages being linked or by symbols + // read by ldelf (and so is left as elf.STT_NOTYPE). 
+ ElfType elf.SymType + Sub *Symbol + Outer *Symbol + Gotype *Symbol + Reachparent *Symbol + File string + Dynimplib string + Dynimpvers string + Sect *Section + FuncInfo *FuncInfo + Lib *Library // Package defining this symbol + // P contains the raw symbol data. + P []byte + R []Reloc +} + +func (s *Symbol) String() string { + if s.Version == 0 { + return s.Name + } + return fmt.Sprintf("%s<%d>", s.Name, s.Version) +} + +func (s *Symbol) ElfsymForReloc() int32 { + // If putelfsym created a local version of this symbol, use that in all + // relocations. + if s.LocalElfsym != 0 { + return s.LocalElfsym + } else { + return s.Elfsym + } +} + +func (s *Symbol) Len() int64 { + return s.Size +} + +func (s *Symbol) Grow(siz int64) { + if int64(int(siz)) != siz { + log.Fatalf("symgrow size %d too long", siz) + } + if int64(len(s.P)) >= siz { + return + } + if cap(s.P) < int(siz) { + p := make([]byte, 2*(siz+1)) + s.P = append(p[:0], s.P...) + } + s.P = s.P[:siz] +} + +func (s *Symbol) AddBytes(bytes []byte) int64 { + if s.Type == 0 { + s.Type = SDATA + } + s.Attr |= AttrReachable + s.P = append(s.P, bytes...) 
+ s.Size = int64(len(s.P)) + + return s.Size +} + +func (s *Symbol) AddUint8(v uint8) int64 { + off := s.Size + if s.Type == 0 { + s.Type = SDATA + } + s.Attr |= AttrReachable + s.Size++ + s.P = append(s.P, v) + + return off +} + +func (s *Symbol) AddUint16(arch *sys.Arch, v uint16) int64 { + return s.AddUintXX(arch, uint64(v), 2) +} + +func (s *Symbol) AddUint32(arch *sys.Arch, v uint32) int64 { + return s.AddUintXX(arch, uint64(v), 4) +} + +func (s *Symbol) AddUint64(arch *sys.Arch, v uint64) int64 { + return s.AddUintXX(arch, v, 8) +} + +func (s *Symbol) AddUint(arch *sys.Arch, v uint64) int64 { + return s.AddUintXX(arch, v, arch.PtrSize) +} + +func (s *Symbol) SetUint8(arch *sys.Arch, r int64, v uint8) int64 { + return s.setUintXX(arch, r, uint64(v), 1) +} + +func (s *Symbol) SetUint32(arch *sys.Arch, r int64, v uint32) int64 { + return s.setUintXX(arch, r, uint64(v), 4) +} + +func (s *Symbol) SetUint(arch *sys.Arch, r int64, v uint64) int64 { + return s.setUintXX(arch, r, v, int64(arch.PtrSize)) +} + +func (s *Symbol) addAddrPlus(arch *sys.Arch, t *Symbol, add int64, typ objabi.RelocType) int64 { + if s.Type == 0 { + s.Type = SDATA + } + s.Attr |= AttrReachable + i := s.Size + s.Size += int64(arch.PtrSize) + s.Grow(s.Size) + r := s.AddRel() + r.Sym = t + r.Off = int32(i) + r.Siz = uint8(arch.PtrSize) + r.Type = typ + r.Add = add + return i + int64(r.Siz) +} + +func (s *Symbol) AddAddrPlus(arch *sys.Arch, t *Symbol, add int64) int64 { + return s.addAddrPlus(arch, t, add, objabi.R_ADDR) +} + +func (s *Symbol) AddCURelativeAddrPlus(arch *sys.Arch, t *Symbol, add int64) int64 { + return s.addAddrPlus(arch, t, add, objabi.R_ADDRCUOFF) +} + +func (s *Symbol) AddPCRelPlus(arch *sys.Arch, t *Symbol, add int64) int64 { + if s.Type == 0 { + s.Type = SDATA + } + s.Attr |= AttrReachable + i := s.Size + s.Size += 4 + s.Grow(s.Size) + r := s.AddRel() + r.Sym = t + r.Off = int32(i) + r.Add = add + r.Type = objabi.R_PCREL + r.Siz = 4 + if arch.Family == sys.S390X { + 
r.Variant = RV_390_DBL + } + return i + int64(r.Siz) +} + +func (s *Symbol) AddAddr(arch *sys.Arch, t *Symbol) int64 { + return s.AddAddrPlus(arch, t, 0) +} + +func (s *Symbol) SetAddrPlus(arch *sys.Arch, off int64, t *Symbol, add int64) int64 { + if s.Type == 0 { + s.Type = SDATA + } + s.Attr |= AttrReachable + if off+int64(arch.PtrSize) > s.Size { + s.Size = off + int64(arch.PtrSize) + s.Grow(s.Size) + } + + r := s.AddRel() + r.Sym = t + r.Off = int32(off) + r.Siz = uint8(arch.PtrSize) + r.Type = objabi.R_ADDR + r.Add = add + return off + int64(r.Siz) +} + +func (s *Symbol) SetAddr(arch *sys.Arch, off int64, t *Symbol) int64 { + return s.SetAddrPlus(arch, off, t, 0) +} + +func (s *Symbol) AddSize(arch *sys.Arch, t *Symbol) int64 { + if s.Type == 0 { + s.Type = SDATA + } + s.Attr |= AttrReachable + i := s.Size + s.Size += int64(arch.PtrSize) + s.Grow(s.Size) + r := s.AddRel() + r.Sym = t + r.Off = int32(i) + r.Siz = uint8(arch.PtrSize) + r.Type = objabi.R_SIZE + return i + int64(r.Siz) +} + +func (s *Symbol) AddAddrPlus4(t *Symbol, add int64) int64 { + if s.Type == 0 { + s.Type = SDATA + } + s.Attr |= AttrReachable + i := s.Size + s.Size += 4 + s.Grow(s.Size) + r := s.AddRel() + r.Sym = t + r.Off = int32(i) + r.Siz = 4 + r.Type = objabi.R_ADDR + r.Add = add + return i + int64(r.Siz) +} + +func (s *Symbol) AddRel() *Reloc { + s.R = append(s.R, Reloc{}) + return &s.R[len(s.R)-1] +} + +func (s *Symbol) AddUintXX(arch *sys.Arch, v uint64, wid int) int64 { + off := s.Size + s.setUintXX(arch, off, v, int64(wid)) + return off +} + +func (s *Symbol) setUintXX(arch *sys.Arch, off int64, v uint64, wid int64) int64 { + if s.Type == 0 { + s.Type = SDATA + } + s.Attr |= AttrReachable + if s.Size < off+wid { + s.Size = off + wid + s.Grow(s.Size) + } + + switch wid { + case 1: + s.P[off] = uint8(v) + case 2: + arch.ByteOrder.PutUint16(s.P[off:], uint16(v)) + case 4: + arch.ByteOrder.PutUint32(s.P[off:], uint32(v)) + case 8: + arch.ByteOrder.PutUint64(s.P[off:], v) + } + + return 
off + wid +} + +// SortSub sorts a linked-list (by Sub) of *Symbol by Value. +// Used for sub-symbols when loading host objects (see e.g. ldelf.go). +func SortSub(l *Symbol) *Symbol { + if l == nil || l.Sub == nil { + return l + } + + l1 := l + l2 := l + for { + l2 = l2.Sub + if l2 == nil { + break + } + l2 = l2.Sub + if l2 == nil { + break + } + l1 = l1.Sub + } + + l2 = l1.Sub + l1.Sub = nil + l1 = SortSub(l) + l2 = SortSub(l2) + + /* set up lead element */ + if l1.Value < l2.Value { + l = l1 + l1 = l1.Sub + } else { + l = l2 + l2 = l2.Sub + } + + le := l + + for { + if l1 == nil { + for l2 != nil { + le.Sub = l2 + le = l2 + l2 = l2.Sub + } + + le.Sub = nil + break + } + + if l2 == nil { + for l1 != nil { + le.Sub = l1 + le = l1 + l1 = l1.Sub + } + + break + } + + if l1.Value < l2.Value { + le.Sub = l1 + le = l1 + l1 = l1.Sub + } else { + le.Sub = l2 + le = l2 + l2 = l2.Sub + } + } + + le.Sub = nil + return l +} + +type FuncInfo struct { + Args int32 + Locals int32 + Autom []Auto + Pcsp Pcdata + Pcfile Pcdata + Pcline Pcdata + Pcinline Pcdata + Pcdata []Pcdata + Funcdata []*Symbol + Funcdataoff []int64 + File []*Symbol + InlTree []InlinedCall +} + +// InlinedCall is a node in a local inlining tree (FuncInfo.InlTree). 
+type InlinedCall struct { + Parent int32 // index of parent in InlTree + File *Symbol // file of the inlined call + Line int32 // line number of the inlined call + Func *Symbol // function that was inlined +} + +type Pcdata struct { + P []byte +} + +type Auto struct { + Asym *Symbol + Gotype *Symbol + Aoffset int32 + Name int16 +} diff --git a/src/cmd/link/internal/ld/symbols.go b/src/cmd/link/internal/sym/symbols.go similarity index 79% rename from src/cmd/link/internal/ld/symbols.go rename to src/cmd/link/internal/sym/symbols.go index 154507ddd75..98a5ae67b8b 100644 --- a/src/cmd/link/internal/ld/symbols.go +++ b/src/cmd/link/internal/sym/symbols.go @@ -28,7 +28,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package ld +package sym type Symbols struct { symbolBatch []Symbol @@ -39,7 +39,18 @@ type Symbols struct { Allsym []*Symbol } -func (syms *Symbols) newsym(name string, v int) *Symbol { +func NewSymbols() *Symbols { + return &Symbols{ + hash: []map[string]*Symbol{ + // preallocate about 2mb for hash of + // non static symbols + make(map[string]*Symbol, 100000), + }, + Allsym: make([]*Symbol, 0, 100000), + } +} + +func (syms *Symbols) Newsym(name string, v int) *Symbol { batch := syms.symbolBatch if len(batch) == 0 { batch = make([]Symbol, 1000) @@ -65,7 +76,7 @@ func (syms *Symbols) Lookup(name string, v int) *Symbol { if s != nil { return s } - s = syms.newsym(name, v) + s = syms.Newsym(name, v) s.Extname = s.Name m[name] = s return s @@ -82,3 +93,25 @@ func (syms *Symbols) IncVersion() int { syms.hash = append(syms.hash, make(map[string]*Symbol)) return len(syms.hash) - 1 } + +// Rename renames a symbol. 
+func (syms *Symbols) Rename(old, new string, v int) { + s := syms.hash[v][old] + s.Name = new + if s.Extname == old { + s.Extname = new + } + delete(syms.hash[v], old) + + dup := syms.hash[v][new] + if dup == nil { + syms.hash[v][new] = s + } else { + if s.Type == 0 { + *s = *dup + } else if dup.Type == 0 { + *dup = *s + syms.hash[v][new] = s + } + } +} diff --git a/src/cmd/link/internal/ld/symkind.go b/src/cmd/link/internal/sym/symkind.go similarity index 89% rename from src/cmd/link/internal/ld/symkind.go rename to src/cmd/link/internal/sym/symkind.go index c057f6cd0c9..1c409a673cf 100644 --- a/src/cmd/link/internal/ld/symkind.go +++ b/src/cmd/link/internal/sym/symkind.go @@ -28,10 +28,10 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package ld +package sym // A SymKind describes the kind of memory represented by a symbol. -type SymKind int16 +type SymKind uint8 // Defined SymKind values. // @@ -97,7 +97,6 @@ const ( SMACHOSYMTAB SMACHOINDIRECTPLT SMACHOINDIRECTGOT - SFILE SFILEPATH SCONST SDYNIMPORT @@ -105,15 +104,12 @@ const ( SDWARFSECT SDWARFINFO SDWARFRANGE - SSUB = SymKind(1 << 8) - SMASK = SymKind(SSUB - 1) - SHIDDEN = SymKind(1 << 9) - SCONTAINER = SymKind(1 << 10) // has a sub-symbol + SDWARFLOC ) -// abiSymKindToSymKind maps values read from object files (which are +// AbiSymKindToSymKind maps values read from object files (which are // of type cmd/internal/objabi.SymKind) to values of type SymKind. -var abiSymKindToSymKind = [...]SymKind{ +var AbiSymKindToSymKind = [...]SymKind{ Sxxx, STEXT, SRODATA, @@ -124,12 +120,13 @@ var abiSymKindToSymKind = [...]SymKind{ STLSBSS, SDWARFINFO, SDWARFRANGE, + SDWARFLOC, } -// readOnly are the symbol kinds that form read-only sections. In some +// ReadOnly are the symbol kinds that form read-only sections. In some // cases, if they will require relocations, they are transformed into // rel-ro sections using relROMap. 
-var readOnly = []SymKind{ +var ReadOnly = []SymKind{ STYPE, SSTRING, SGOSTRING, @@ -139,9 +136,9 @@ var readOnly = []SymKind{ SFUNCTAB, } -// relROMap describes the transformation of read-only symbols to rel-ro +// RelROMap describes the transformation of read-only symbols to rel-ro // symbols. -var relROMap = map[SymKind]SymKind{ +var RelROMap = map[SymKind]SymKind{ STYPE: STYPERELRO, SSTRING: SSTRINGRELRO, SGOSTRING: SGOSTRINGRELRO, diff --git a/src/cmd/link/internal/ld/symkind_string.go b/src/cmd/link/internal/sym/symkind_string.go similarity index 79% rename from src/cmd/link/internal/ld/symkind_string.go rename to src/cmd/link/internal/sym/symkind_string.go index 2178b50c366..716eabd850a 100644 --- a/src/cmd/link/internal/ld/symkind_string.go +++ b/src/cmd/link/internal/sym/symkind_string.go @@ -1,12 +1,12 @@ // Code generated by "stringer -type=SymKind"; DO NOT EDIT. -package ld +package sym import "fmt" -const _SymKind_name = "SxxxSTEXTSELFRXSECTSTYPESSTRINGSGOSTRINGSGOFUNCSGCBITSSRODATASFUNCTABSELFROSECTSMACHOPLTSTYPERELROSSTRINGRELROSGOSTRINGRELROSGOFUNCRELROSGCBITSRELROSRODATARELROSFUNCTABRELROSTYPELINKSITABLINKSSYMTABSPCLNTABSELFSECTSMACHOSMACHOGOTSWINDOWSSELFGOTSNOPTRDATASINITARRSDATASBSSSNOPTRBSSSTLSBSSSXREFSMACHOSYMSTRSMACHOSYMTABSMACHOINDIRECTPLTSMACHOINDIRECTGOTSFILESFILEPATHSCONSTSDYNIMPORTSHOSTOBJSDWARFSECTSDWARFINFOSDWARFRANGE" +const _SymKind_name = "SxxxSTEXTSELFRXSECTSTYPESSTRINGSGOSTRINGSGOFUNCSGCBITSSRODATASFUNCTABSELFROSECTSMACHOPLTSTYPERELROSSTRINGRELROSGOSTRINGRELROSGOFUNCRELROSGCBITSRELROSRODATARELROSFUNCTABRELROSTYPELINKSITABLINKSSYMTABSPCLNTABSELFSECTSMACHOSMACHOGOTSWINDOWSSELFGOTSNOPTRDATASINITARRSDATASBSSSNOPTRBSSSTLSBSSSXREFSMACHOSYMSTRSMACHOSYMTABSMACHOINDIRECTPLTSMACHOINDIRECTGOTSFILEPATHSCONSTSDYNIMPORTSHOSTOBJSDWARFSECTSDWARFINFOSDWARFRANGESDWARFLOC" -var _SymKind_index = [...]uint16{0, 4, 9, 19, 24, 31, 40, 47, 54, 61, 69, 79, 88, 98, 110, 124, 136, 148, 160, 173, 182, 191, 198, 206, 214, 220, 229, 237, 244, 254, 262, 267, 
271, 280, 287, 292, 304, 316, 333, 350, 355, 364, 370, 380, 388, 398, 408, 419} +var _SymKind_index = [...]uint16{0, 4, 9, 19, 24, 31, 40, 47, 54, 61, 69, 79, 88, 98, 110, 124, 136, 148, 160, 173, 182, 191, 198, 206, 214, 220, 229, 237, 244, 254, 262, 267, 271, 280, 287, 292, 304, 316, 333, 350, 359, 365, 375, 383, 393, 403, 414, 423} func (i SymKind) String() string { if i < 0 || i >= SymKind(len(_SymKind_index)-1) { diff --git a/src/cmd/link/internal/x86/asm.go b/src/cmd/link/internal/x86/asm.go index 3649e6a8f82..c76c2a5d0ef 100644 --- a/src/cmd/link/internal/x86/asm.go +++ b/src/cmd/link/internal/x86/asm.go @@ -32,17 +32,20 @@ package x86 import ( "cmd/internal/objabi" + "cmd/internal/sys" "cmd/link/internal/ld" + "cmd/link/internal/sym" + "debug/elf" "log" ) // Append 4 bytes to s and create a R_CALL relocation targeting t to fill them in. -func addcall(ctxt *ld.Link, s *ld.Symbol, t *ld.Symbol) { - s.Attr |= ld.AttrReachable +func addcall(ctxt *ld.Link, s *sym.Symbol, t *sym.Symbol) { + s.Attr |= sym.AttrReachable i := s.Size s.Size += 4 - ld.Symgrow(s, s.Size) - r := ld.Addrel(s) + s.Grow(s.Size) + r := s.AddRel() r.Sym = t r.Off = int32(i) r.Type = objabi.R_CALL @@ -53,12 +56,12 @@ func gentext(ctxt *ld.Link) { if ctxt.DynlinkingGo() { // We need get_pc_thunk. } else { - switch ld.Buildmode { - case ld.BuildmodeCArchive: - if !ld.Iself { + switch ctxt.BuildMode { + case ld.BuildModeCArchive: + if !ctxt.IsELF { return } - case ld.BuildmodePIE, ld.BuildmodeCShared, ld.BuildmodePlugin: + case ld.BuildModePIE, ld.BuildModeCShared, ld.BuildModePlugin: // We need get_pc_thunk. default: return @@ -66,7 +69,7 @@ func gentext(ctxt *ld.Link) { } // Generate little thunks that load the PC of the next instruction into a register. 
- thunks := make([]*ld.Symbol, 0, 7+len(ctxt.Textp)) + thunks := make([]*sym.Symbol, 0, 7+len(ctxt.Textp)) for _, r := range [...]struct { name string num uint8 @@ -81,12 +84,12 @@ func gentext(ctxt *ld.Link) { {"di", 7}, } { thunkfunc := ctxt.Syms.Lookup("__x86.get_pc_thunk."+r.name, 0) - thunkfunc.Type = ld.STEXT - thunkfunc.Attr |= ld.AttrLocal - thunkfunc.Attr |= ld.AttrReachable //TODO: remove? + thunkfunc.Type = sym.STEXT + thunkfunc.Attr |= sym.AttrLocal + thunkfunc.Attr |= sym.AttrReachable //TODO: remove? o := func(op ...uint8) { for _, op1 := range op { - ld.Adduint8(ctxt, thunkfunc, op1) + thunkfunc.AddUint8(op1) } } // 8b 04 24 mov (%esp),%eax @@ -100,21 +103,21 @@ func gentext(ctxt *ld.Link) { ctxt.Textp = append(thunks, ctxt.Textp...) // keep Textp in dependency order addmoduledata := ctxt.Syms.Lookup("runtime.addmoduledata", 0) - if addmoduledata.Type == ld.STEXT && ld.Buildmode != ld.BuildmodePlugin { + if addmoduledata.Type == sym.STEXT && ctxt.BuildMode != ld.BuildModePlugin { // we're linking a module containing the runtime -> no need for // an init function return } - addmoduledata.Attr |= ld.AttrReachable + addmoduledata.Attr |= sym.AttrReachable initfunc := ctxt.Syms.Lookup("go.link.addmoduledata", 0) - initfunc.Type = ld.STEXT - initfunc.Attr |= ld.AttrLocal - initfunc.Attr |= ld.AttrReachable + initfunc.Type = sym.STEXT + initfunc.Attr |= sym.AttrLocal + initfunc.Attr |= sym.AttrReachable o := func(op ...uint8) { for _, op1 := range op { - ld.Adduint8(ctxt, initfunc, op1) + initfunc.AddUint8(op1) } } @@ -133,13 +136,13 @@ func gentext(ctxt *ld.Link) { addcall(ctxt, initfunc, ctxt.Syms.Lookup("__x86.get_pc_thunk.cx", 0)) o(0x8d, 0x81) - ld.Addpcrelplus(ctxt, initfunc, ctxt.Moduledata, 6) + initfunc.AddPCRelPlus(ctxt.Arch, ctxt.Moduledata, 6) o(0x8d, 0x99) i := initfunc.Size initfunc.Size += 4 - ld.Symgrow(initfunc, initfunc.Size) - r := ld.Addrel(initfunc) + initfunc.Grow(initfunc.Size) + r := initfunc.AddRel() r.Sym = 
ctxt.Syms.Lookup("_GLOBAL_OFFSET_TABLE_", 0) r.Off = int32(i) r.Type = objabi.R_PCREL @@ -153,43 +156,45 @@ func gentext(ctxt *ld.Link) { o(0xc3) - if ld.Buildmode == ld.BuildmodePlugin { + if ctxt.BuildMode == ld.BuildModePlugin { ctxt.Textp = append(ctxt.Textp, addmoduledata) } ctxt.Textp = append(ctxt.Textp, initfunc) initarray_entry := ctxt.Syms.Lookup("go.link.addmoduledatainit", 0) - initarray_entry.Attr |= ld.AttrReachable - initarray_entry.Attr |= ld.AttrLocal - initarray_entry.Type = ld.SINITARR - ld.Addaddr(ctxt, initarray_entry, initfunc) + initarray_entry.Attr |= sym.AttrReachable + initarray_entry.Attr |= sym.AttrLocal + initarray_entry.Type = sym.SINITARR + initarray_entry.AddAddr(ctxt.Arch, initfunc) } -func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { +func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool { targ := r.Sym switch r.Type { default: if r.Type >= 256 { - ld.Errorf(s, "unexpected relocation type %d", r.Type) + ld.Errorf(s, "unexpected relocation type %d (%s)", r.Type, sym.RelocName(ctxt.Arch, r.Type)) return false } // Handle relocations found in ELF object files. - case 256 + ld.R_386_PC32: - if targ.Type == ld.SDYNIMPORT { + case 256 + objabi.RelocType(elf.R_386_PC32): + if targ.Type == sym.SDYNIMPORT { ld.Errorf(s, "unexpected R_386_PC32 relocation for dynamic symbol %s", targ.Name) } - if targ.Type == 0 || targ.Type == ld.SXREF { + // TODO(mwhudson): the test of VisibilityHidden here probably doesn't make + // sense and should be removed when someone has thought about it properly. 
+ if (targ.Type == 0 || targ.Type == sym.SXREF) && !targ.Attr.VisibilityHidden() { ld.Errorf(s, "unknown symbol %s in pcrel", targ.Name) } r.Type = objabi.R_PCREL r.Add += 4 return true - case 256 + ld.R_386_PLT32: + case 256 + objabi.RelocType(elf.R_386_PLT32): r.Type = objabi.R_PCREL r.Add += 4 - if targ.Type == ld.SDYNIMPORT { + if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = ctxt.Syms.Lookup(".plt", 0) r.Add += int64(targ.Plt) @@ -197,8 +202,8 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { return true - case 256 + ld.R_386_GOT32, 256 + ld.R_386_GOT32X: - if targ.Type != ld.SDYNIMPORT { + case 256 + objabi.RelocType(elf.R_386_GOT32), 256 + objabi.RelocType(elf.R_386_GOT32X): + if targ.Type != sym.SDYNIMPORT { // have symbol if r.Off >= 2 && s.P[r.Off-2] == 0x8b { // turn MOVL of GOT entry into LEAL of symbol address, relative to GOT. @@ -228,18 +233,18 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { r.Add += int64(targ.Got) return true - case 256 + ld.R_386_GOTOFF: + case 256 + objabi.RelocType(elf.R_386_GOTOFF): r.Type = objabi.R_GOTOFF return true - case 256 + ld.R_386_GOTPC: + case 256 + objabi.RelocType(elf.R_386_GOTPC): r.Type = objabi.R_PCREL r.Sym = ctxt.Syms.Lookup(".got", 0) r.Add += 4 return true - case 256 + ld.R_386_32: - if targ.Type == ld.SDYNIMPORT { + case 256 + objabi.RelocType(elf.R_386_32): + if targ.Type == sym.SDYNIMPORT { ld.Errorf(s, "unexpected R_386_32 relocation for dynamic symbol %s", targ.Name) } r.Type = objabi.R_ADDR @@ -247,13 +252,13 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { case 512 + ld.MACHO_GENERIC_RELOC_VANILLA*2 + 0: r.Type = objabi.R_ADDR - if targ.Type == ld.SDYNIMPORT { + if targ.Type == sym.SDYNIMPORT { ld.Errorf(s, "unexpected reloc for dynamic symbol %s", targ.Name) } return true case 512 + ld.MACHO_GENERIC_RELOC_VANILLA*2 + 1: - if targ.Type == ld.SDYNIMPORT { + if targ.Type == sym.SDYNIMPORT { addpltsym(ctxt, targ) r.Sym = 
ctxt.Syms.Lookup(".plt", 0) r.Add = int64(targ.Plt) @@ -265,7 +270,7 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { return true case 512 + ld.MACHO_FAKE_GOTPCREL: - if targ.Type != ld.SDYNIMPORT { + if targ.Type != sym.SDYNIMPORT { // have symbol // turn MOVL of GOT entry into LEAL of symbol itself if r.Off < 2 || s.P[r.Off-2] != 0x8b { @@ -286,7 +291,7 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { } // Handle references to ELF symbols from our own object files. - if targ.Type != ld.SDYNIMPORT { + if targ.Type != sym.SDYNIMPORT { return true } switch r.Type { @@ -298,20 +303,20 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { return true case objabi.R_ADDR: - if s.Type != ld.SDATA { + if s.Type != sym.SDATA { break } - if ld.Iself { + if ctxt.IsELF { ld.Adddynsym(ctxt, targ) rel := ctxt.Syms.Lookup(".rel", 0) - ld.Addaddrplus(ctxt, rel, s, int64(r.Off)) - ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(targ.Dynid), ld.R_386_32)) + rel.AddAddrPlus(ctxt.Arch, s, int64(r.Off)) + rel.AddUint32(ctxt.Arch, ld.ELF32_R_INFO(uint32(targ.Dynid), uint32(elf.R_386_32))) r.Type = objabi.R_CONST // write r->add during relocsym r.Sym = nil return true } - if ld.Headtype == objabi.Hdarwin && s.Size == int64(ld.SysArch.PtrSize) && r.Off == 0 { + if ctxt.HeadType == objabi.Hdarwin && s.Size == int64(ctxt.Arch.PtrSize) && r.Off == 0 { // Mach-O relocations are a royal pain to lay out. // They use a compact stateful bytecode representation // that is too much bother to deal with. 
@@ -325,99 +330,89 @@ func adddynrel(ctxt *ld.Link, s *ld.Symbol, r *ld.Reloc) bool { ld.Adddynsym(ctxt, targ) got := ctxt.Syms.Lookup(".got", 0) - s.Type = got.Type | ld.SSUB + s.Type = got.Type + s.Attr |= sym.AttrSubSymbol s.Outer = got s.Sub = got.Sub got.Sub = s s.Value = got.Size - ld.Adduint32(ctxt, got, 0) - ld.Adduint32(ctxt, ctxt.Syms.Lookup(".linkedit.got", 0), uint32(targ.Dynid)) + got.AddUint32(ctxt.Arch, 0) + ctxt.Syms.Lookup(".linkedit.got", 0).AddUint32(ctxt.Arch, uint32(targ.Dynid)) r.Type = 256 // ignore during relocsym return true } - - if ld.Headtype == objabi.Hwindows && s.Size == int64(ld.SysArch.PtrSize) { - // nothing to do, the relocation will be laid out in pereloc1 - return true - } } return false } -func elfreloc1(ctxt *ld.Link, r *ld.Reloc, sectoff int64) int { - ld.Thearch.Lput(uint32(sectoff)) +func elfreloc1(ctxt *ld.Link, r *sym.Reloc, sectoff int64) bool { + ctxt.Out.Write32(uint32(sectoff)) elfsym := r.Xsym.ElfsymForReloc() switch r.Type { default: - return -1 - + return false case objabi.R_ADDR: if r.Siz == 4 { - ld.Thearch.Lput(ld.R_386_32 | uint32(elfsym)<<8) + ctxt.Out.Write32(uint32(elf.R_386_32) | uint32(elfsym)<<8) } else { - return -1 + return false } - case objabi.R_GOTPCREL: if r.Siz == 4 { - ld.Thearch.Lput(ld.R_386_GOTPC) + ctxt.Out.Write32(uint32(elf.R_386_GOTPC)) if r.Xsym.Name != "_GLOBAL_OFFSET_TABLE_" { - ld.Thearch.Lput(uint32(sectoff)) - ld.Thearch.Lput(ld.R_386_GOT32 | uint32(elfsym)<<8) + ctxt.Out.Write32(uint32(sectoff)) + ctxt.Out.Write32(uint32(elf.R_386_GOT32) | uint32(elfsym)<<8) } } else { - return -1 + return false } - case objabi.R_CALL: if r.Siz == 4 { - if r.Xsym.Type == ld.SDYNIMPORT { - ld.Thearch.Lput(ld.R_386_PLT32 | uint32(elfsym)<<8) + if r.Xsym.Type == sym.SDYNIMPORT { + ctxt.Out.Write32(uint32(elf.R_386_PLT32) | uint32(elfsym)<<8) } else { - ld.Thearch.Lput(ld.R_386_PC32 | uint32(elfsym)<<8) + ctxt.Out.Write32(uint32(elf.R_386_PC32) | uint32(elfsym)<<8) } } else { - return -1 + return false } 
- case objabi.R_PCREL: if r.Siz == 4 { - ld.Thearch.Lput(ld.R_386_PC32 | uint32(elfsym)<<8) + ctxt.Out.Write32(uint32(elf.R_386_PC32) | uint32(elfsym)<<8) } else { - return -1 + return false } - case objabi.R_TLS_LE: if r.Siz == 4 { - ld.Thearch.Lput(ld.R_386_TLS_LE | uint32(elfsym)<<8) + ctxt.Out.Write32(uint32(elf.R_386_TLS_LE) | uint32(elfsym)<<8) } else { - return -1 + return false } - case objabi.R_TLS_IE: if r.Siz == 4 { - ld.Thearch.Lput(ld.R_386_GOTPC) - ld.Thearch.Lput(uint32(sectoff)) - ld.Thearch.Lput(ld.R_386_TLS_GOTIE | uint32(elfsym)<<8) + ctxt.Out.Write32(uint32(elf.R_386_GOTPC)) + ctxt.Out.Write32(uint32(sectoff)) + ctxt.Out.Write32(uint32(elf.R_386_TLS_GOTIE) | uint32(elfsym)<<8) } else { - return -1 + return false } } - return 0 + return true } -func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { +func machoreloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, sectoff int64) bool { var v uint32 rs := r.Xsym - if rs.Type == ld.SHOSTOBJ { + if rs.Type == sym.SHOSTOBJ { if rs.Dynid < 0 { - ld.Errorf(s, "reloc %d to non-macho symbol %s type=%d", r.Type, rs.Name, rs.Type) - return -1 + ld.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", r.Type, sym.RelocName(arch, r.Type), rs.Name, rs.Type, rs.Type) + return false } v = uint32(rs.Dynid) @@ -425,18 +420,16 @@ func machoreloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) int { } else { v = uint32(rs.Sect.Extnum) if v == 0 { - ld.Errorf(s, "reloc %d to symbol %s in non-macho section %s type=%d", r.Type, rs.Name, rs.Sect.Name, rs.Type) - return -1 + ld.Errorf(s, "reloc %d (%s) to symbol %s in non-macho section %s type=%d (%s)", r.Type, sym.RelocName(arch, r.Type), rs.Name, rs.Sect.Name, rs.Type, rs.Type) + return false } } switch r.Type { default: - return -1 - + return false case objabi.R_ADDR: v |= ld.MACHO_GENERIC_RELOC_VANILLA << 28 - case objabi.R_CALL, objabi.R_PCREL: v |= 1 << 24 // pc-relative bit @@ -445,44 +438,40 @@ func machoreloc1(s *ld.Symbol, r 
*ld.Reloc, sectoff int64) int { switch r.Siz { default: - return -1 - + return false case 1: v |= 0 << 25 - case 2: v |= 1 << 25 - case 4: v |= 2 << 25 - case 8: v |= 3 << 25 } - ld.Thearch.Lput(uint32(sectoff)) - ld.Thearch.Lput(v) - return 0 + out.Write32(uint32(sectoff)) + out.Write32(v) + return true } -func pereloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) bool { +func pereloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, sectoff int64) bool { var v uint32 rs := r.Xsym if rs.Dynid < 0 { - ld.Errorf(s, "reloc %d to non-coff symbol %s type=%d", r.Type, rs.Name, rs.Type) + ld.Errorf(s, "reloc %d (%s) to non-coff symbol %s type=%d (%s)", r.Type, sym.RelocName(arch, r.Type), rs.Name, rs.Type, rs.Type) return false } - ld.Thearch.Lput(uint32(sectoff)) - ld.Thearch.Lput(uint32(rs.Dynid)) + out.Write32(uint32(sectoff)) + out.Write32(uint32(rs.Dynid)) switch r.Type { default: return false - case objabi.R_DWARFREF: + case objabi.R_DWARFSECREF: v = ld.IMAGE_REL_I386_SECREL case objabi.R_ADDR: @@ -493,29 +482,28 @@ func pereloc1(s *ld.Symbol, r *ld.Reloc, sectoff int64) bool { v = ld.IMAGE_REL_I386_REL32 } - ld.Thearch.Wput(uint16(v)) + out.Write16(uint16(v)) return true } -func archreloc(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, val *int64) int { - if ld.Linkmode == ld.LinkExternal { - return -1 +func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val *int64) bool { + if ctxt.LinkMode == ld.LinkExternal { + return false } switch r.Type { case objabi.R_CONST: *val = r.Add - return 0 - + return true case objabi.R_GOTOFF: *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)) - return 0 + return true } - return -1 + return false } -func archrelocvariant(ctxt *ld.Link, r *ld.Reloc, s *ld.Symbol, t int64) int64 { +func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 { log.Fatalf("unexpected relocation variant") return t } @@ -525,36 +513,36 @@ func elfsetupplt(ctxt *ld.Link) { got := 
ctxt.Syms.Lookup(".got.plt", 0) if plt.Size == 0 { // pushl got+4 - ld.Adduint8(ctxt, plt, 0xff) + plt.AddUint8(0xff) - ld.Adduint8(ctxt, plt, 0x35) - ld.Addaddrplus(ctxt, plt, got, 4) + plt.AddUint8(0x35) + plt.AddAddrPlus(ctxt.Arch, got, 4) // jmp *got+8 - ld.Adduint8(ctxt, plt, 0xff) + plt.AddUint8(0xff) - ld.Adduint8(ctxt, plt, 0x25) - ld.Addaddrplus(ctxt, plt, got, 8) + plt.AddUint8(0x25) + plt.AddAddrPlus(ctxt.Arch, got, 8) // zero pad - ld.Adduint32(ctxt, plt, 0) + plt.AddUint32(ctxt.Arch, 0) // assume got->size == 0 too - ld.Addaddrplus(ctxt, got, ctxt.Syms.Lookup(".dynamic", 0), 0) + got.AddAddrPlus(ctxt.Arch, ctxt.Syms.Lookup(".dynamic", 0), 0) - ld.Adduint32(ctxt, got, 0) - ld.Adduint32(ctxt, got, 0) + got.AddUint32(ctxt.Arch, 0) + got.AddUint32(ctxt.Arch, 0) } } -func addpltsym(ctxt *ld.Link, s *ld.Symbol) { +func addpltsym(ctxt *ld.Link, s *sym.Symbol) { if s.Plt >= 0 { return } ld.Adddynsym(ctxt, s) - if ld.Iself { + if ctxt.IsELF { plt := ctxt.Syms.Lookup(".plt", 0) got := ctxt.Syms.Lookup(".got.plt", 0) rel := ctxt.Syms.Lookup(".rel.plt", 0) @@ -563,51 +551,51 @@ func addpltsym(ctxt *ld.Link, s *ld.Symbol) { } // jmpq *got+size - ld.Adduint8(ctxt, plt, 0xff) + plt.AddUint8(0xff) - ld.Adduint8(ctxt, plt, 0x25) - ld.Addaddrplus(ctxt, plt, got, got.Size) + plt.AddUint8(0x25) + plt.AddAddrPlus(ctxt.Arch, got, got.Size) // add to got: pointer to current pos in plt - ld.Addaddrplus(ctxt, got, plt, plt.Size) + got.AddAddrPlus(ctxt.Arch, plt, plt.Size) // pushl $x - ld.Adduint8(ctxt, plt, 0x68) + plt.AddUint8(0x68) - ld.Adduint32(ctxt, plt, uint32(rel.Size)) + plt.AddUint32(ctxt.Arch, uint32(rel.Size)) // jmp .plt - ld.Adduint8(ctxt, plt, 0xe9) + plt.AddUint8(0xe9) - ld.Adduint32(ctxt, plt, uint32(-(plt.Size + 4))) + plt.AddUint32(ctxt.Arch, uint32(-(plt.Size + 4))) // rel - ld.Addaddrplus(ctxt, rel, got, got.Size-4) + rel.AddAddrPlus(ctxt.Arch, got, got.Size-4) - ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_386_JMP_SLOT)) + 
rel.AddUint32(ctxt.Arch, ld.ELF32_R_INFO(uint32(s.Dynid), uint32(elf.R_386_JMP_SLOT))) s.Plt = int32(plt.Size - 16) - } else if ld.Headtype == objabi.Hdarwin { + } else if ctxt.HeadType == objabi.Hdarwin { // Same laziness as in 6l. plt := ctxt.Syms.Lookup(".plt", 0) addgotsym(ctxt, s) - ld.Adduint32(ctxt, ctxt.Syms.Lookup(".linkedit.plt", 0), uint32(s.Dynid)) + ctxt.Syms.Lookup(".linkedit.plt", 0).AddUint32(ctxt.Arch, uint32(s.Dynid)) // jmpq *got+size(IP) s.Plt = int32(plt.Size) - ld.Adduint8(ctxt, plt, 0xff) - ld.Adduint8(ctxt, plt, 0x25) - ld.Addaddrplus(ctxt, plt, ctxt.Syms.Lookup(".got", 0), int64(s.Got)) + plt.AddUint8(0xff) + plt.AddUint8(0x25) + plt.AddAddrPlus(ctxt.Arch, ctxt.Syms.Lookup(".got", 0), int64(s.Got)) } else { ld.Errorf(s, "addpltsym: unsupported binary format") } } -func addgotsym(ctxt *ld.Link, s *ld.Symbol) { +func addgotsym(ctxt *ld.Link, s *sym.Symbol) { if s.Got >= 0 { return } @@ -615,14 +603,14 @@ func addgotsym(ctxt *ld.Link, s *ld.Symbol) { ld.Adddynsym(ctxt, s) got := ctxt.Syms.Lookup(".got", 0) s.Got = int32(got.Size) - ld.Adduint32(ctxt, got, 0) + got.AddUint32(ctxt.Arch, 0) - if ld.Iself { + if ctxt.IsELF { rel := ctxt.Syms.Lookup(".rel", 0) - ld.Addaddrplus(ctxt, rel, got, int64(s.Got)) - ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_386_GLOB_DAT)) - } else if ld.Headtype == objabi.Hdarwin { - ld.Adduint32(ctxt, ctxt.Syms.Lookup(".linkedit.got", 0), uint32(s.Dynid)) + rel.AddAddrPlus(ctxt.Arch, got, int64(s.Got)) + rel.AddUint32(ctxt.Arch, ld.ELF32_R_INFO(uint32(s.Dynid), uint32(elf.R_386_GLOB_DAT))) + } else if ctxt.HeadType == objabi.Hdarwin { + ctxt.Syms.Lookup(".linkedit.got", 0).AddUint32(ctxt.Arch, uint32(s.Dynid)) } else { ld.Errorf(s, "addgotsym: unsupported binary format") } @@ -633,16 +621,16 @@ func asmb(ctxt *ld.Link) { ctxt.Logf("%5.2f asmb\n", ld.Cputime()) } - if ld.Iself { + if ctxt.IsELF { ld.Asmbelfsetup() } sect := ld.Segtext.Sections[0] - ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + 
ld.Segtext.Fileoff)) + ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) // 0xCC is INT $3 - breakpoint instruction ld.CodeblkPad(ctxt, int64(sect.Vaddr), int64(sect.Length), []byte{0xCC}) for _, sect = range ld.Segtext.Sections[1:] { - ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) + ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length)) } @@ -651,14 +639,14 @@ func asmb(ctxt *ld.Link) { ctxt.Logf("%5.2f rodatblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segrodata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segrodata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen)) } if ld.Segrelrodata.Filelen > 0 { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f relrodatblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segrelrodata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segrelrodata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segrelrodata.Vaddr), int64(ld.Segrelrodata.Filelen)) } @@ -666,14 +654,14 @@ func asmb(ctxt *ld.Link) { ctxt.Logf("%5.2f datblk\n", ld.Cputime()) } - ld.Cseek(int64(ld.Segdata.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segdata.Fileoff)) ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen)) - ld.Cseek(int64(ld.Segdwarf.Fileoff)) + ctxt.Out.SeekSet(int64(ld.Segdwarf.Fileoff)) ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen)) machlink := uint32(0) - if ld.Headtype == objabi.Hdarwin { + if ctxt.HeadType == objabi.Hdarwin { machlink = uint32(ld.Domacholink(ctxt)) } @@ -686,9 +674,9 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f sym\n", ld.Cputime()) } - switch ld.Headtype { + switch ctxt.HeadType { default: - if ld.Iself { + if ctxt.IsELF { symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen) symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound))) } @@ -704,34 +692,31 @@ func asmb(ctxt *ld.Link) { symo = uint32(ld.Rnd(int64(symo), ld.PEFILEALIGN)) } - ld.Cseek(int64(symo)) - 
switch ld.Headtype { + ctxt.Out.SeekSet(int64(symo)) + switch ctxt.HeadType { default: - if ld.Iself { + if ctxt.IsELF { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f elfsym\n", ld.Cputime()) } ld.Asmelfsym(ctxt) - ld.Cflush() - ld.Cwrite(ld.Elfstrdat) + ctxt.Out.Flush() + ctxt.Out.Write(ld.Elfstrdat) - if ld.Linkmode == ld.LinkExternal { + if ctxt.LinkMode == ld.LinkExternal { ld.Elfemitreloc(ctxt) } } case objabi.Hplan9: ld.Asmplan9sym(ctxt) - ld.Cflush() + ctxt.Out.Flush() sym := ctxt.Syms.Lookup("pclntab", 0) if sym != nil { ld.Lcsize = int32(len(sym.P)) - for i := 0; int32(i) < ld.Lcsize; i++ { - ld.Cput(sym.P[i]) - } - - ld.Cflush() + ctxt.Out.Write(sym.P) + ctxt.Out.Flush() } case objabi.Hwindows: @@ -740,7 +725,7 @@ func asmb(ctxt *ld.Link) { } case objabi.Hdarwin: - if ld.Linkmode == ld.LinkExternal { + if ctxt.LinkMode == ld.LinkExternal { ld.Machoemitreloc(ctxt) } } @@ -749,20 +734,20 @@ func asmb(ctxt *ld.Link) { if ctxt.Debugvlog != 0 { ctxt.Logf("%5.2f headr\n", ld.Cputime()) } - ld.Cseek(0) - switch ld.Headtype { + ctxt.Out.SeekSet(0) + switch ctxt.HeadType { default: case objabi.Hplan9: /* plan9 */ magic := int32(4*11*11 + 7) - ld.Lputb(uint32(magic)) /* magic */ - ld.Lputb(uint32(ld.Segtext.Filelen)) /* sizes */ - ld.Lputb(uint32(ld.Segdata.Filelen)) - ld.Lputb(uint32(ld.Segdata.Length - ld.Segdata.Filelen)) - ld.Lputb(uint32(ld.Symsize)) /* nsyms */ - ld.Lputb(uint32(ld.Entryvalue(ctxt))) /* va of entry */ - ld.Lputb(uint32(ld.Spsize)) /* sp offsets */ - ld.Lputb(uint32(ld.Lcsize)) /* line offsets */ + ctxt.Out.Write32b(uint32(magic)) /* magic */ + ctxt.Out.Write32b(uint32(ld.Segtext.Filelen)) /* sizes */ + ctxt.Out.Write32b(uint32(ld.Segdata.Filelen)) + ctxt.Out.Write32b(uint32(ld.Segdata.Length - ld.Segdata.Filelen)) + ctxt.Out.Write32b(uint32(ld.Symsize)) /* nsyms */ + ctxt.Out.Write32b(uint32(ld.Entryvalue(ctxt))) /* va of entry */ + ctxt.Out.Write32b(uint32(ld.Spsize)) /* sp offsets */ + ctxt.Out.Write32b(uint32(ld.Lcsize)) /* line offsets */ 
case objabi.Hdarwin: ld.Asmbmacho(ctxt) @@ -778,5 +763,5 @@ func asmb(ctxt *ld.Link) { ld.Asmbpe(ctxt) } - ld.Cflush() + ctxt.Out.Flush() } diff --git a/src/cmd/link/internal/x86/obj.go b/src/cmd/link/internal/x86/obj.go index fa925d1aec5..6a744dc04e1 100644 --- a/src/cmd/link/internal/x86/obj.go +++ b/src/cmd/link/internal/x86/obj.go @@ -37,43 +37,41 @@ import ( "fmt" ) -func Init() { - ld.SysArch = sys.Arch386 +func Init() (*sys.Arch, ld.Arch) { + arch := sys.Arch386 - ld.Thearch.Funcalign = funcAlign - ld.Thearch.Maxalign = maxAlign - ld.Thearch.Minalign = minAlign - ld.Thearch.Dwarfregsp = dwarfRegSP - ld.Thearch.Dwarfreglr = dwarfRegLR + theArch := ld.Arch{ + Funcalign: funcAlign, + Maxalign: maxAlign, + Minalign: minAlign, + Dwarfregsp: dwarfRegSP, + Dwarfreglr: dwarfRegLR, - ld.Thearch.Adddynrel = adddynrel - ld.Thearch.Archinit = archinit - ld.Thearch.Archreloc = archreloc - ld.Thearch.Archrelocvariant = archrelocvariant - ld.Thearch.Asmb = asmb - ld.Thearch.Elfreloc1 = elfreloc1 - ld.Thearch.Elfsetupplt = elfsetupplt - ld.Thearch.Gentext = gentext - ld.Thearch.Machoreloc1 = machoreloc1 - ld.Thearch.PEreloc1 = pereloc1 - ld.Thearch.Lput = ld.Lputl - ld.Thearch.Wput = ld.Wputl - ld.Thearch.Vput = ld.Vputl - ld.Thearch.Append16 = ld.Append16l - ld.Thearch.Append32 = ld.Append32l - ld.Thearch.Append64 = ld.Append64l + Adddynrel: adddynrel, + Archinit: archinit, + Archreloc: archreloc, + Archrelocvariant: archrelocvariant, + Asmb: asmb, + Elfreloc1: elfreloc1, + Elfsetupplt: elfsetupplt, + Gentext: gentext, + Machoreloc1: machoreloc1, + PEreloc1: pereloc1, - ld.Thearch.Linuxdynld = "/lib/ld-linux.so.2" - ld.Thearch.Freebsddynld = "/usr/libexec/ld-elf.so.1" - ld.Thearch.Openbsddynld = "/usr/libexec/ld.so" - ld.Thearch.Netbsddynld = "/usr/libexec/ld.elf_so" - ld.Thearch.Solarisdynld = "/lib/ld.so.1" + Linuxdynld: "/lib/ld-linux.so.2", + Freebsddynld: "/usr/libexec/ld-elf.so.1", + Openbsddynld: "/usr/libexec/ld.so", + Netbsddynld: "/usr/libexec/ld.elf_so", + 
Solarisdynld: "/lib/ld.so.1", + } + + return arch, theArch } func archinit(ctxt *ld.Link) { - switch ld.Headtype { + switch ctxt.HeadType { default: - ld.Exitf("unknown -H option: %v", ld.Headtype) + ld.Exitf("unknown -H option: %v", ctxt.HeadType) case objabi.Hplan9: /* plan 9 */ ld.HEADR = 32 @@ -89,8 +87,6 @@ func archinit(ctxt *ld.Link) { } case objabi.Hdarwin: /* apple MACH */ - ld.Machoinit() - ld.HEADR = ld.INITIAL_MACHO_HEADR if *ld.FlagTextAddr == -1 { *ld.FlagTextAddr = 4096 + int64(ld.HEADR) diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go index 4ef184518e6..4ec03abc858 100644 --- a/src/cmd/link/link_test.go +++ b/src/cmd/link/link_test.go @@ -1,6 +1,13 @@ package main -import "testing" +import ( + "internal/testenv" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" +) var AuthorPaidByTheColumnInch struct { fog int ` @@ -28,3 +35,38 @@ func TestLargeSymName(t *testing.T) { // the bufio buffer. Issue #15104. _ = AuthorPaidByTheColumnInch } + +func TestIssue21703(t *testing.T) { + testenv.MustHaveGoBuild(t) + + const source = ` +package main +const X = "\n!\n" +func main() {} +` + + tmpdir, err := ioutil.TempDir("", "issue21703") + if err != nil { + t.Fatalf("failed to create temp dir: %v\n", err) + } + defer os.RemoveAll(tmpdir) + + err = ioutil.WriteFile(filepath.Join(tmpdir, "main.go"), []byte(source), 0666) + if err != nil { + t.Fatalf("failed to write main.go: %v\n", err) + } + + cmd := exec.Command(testenv.GoToolPath(t), "tool", "compile", "main.go") + cmd.Dir = tmpdir + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("failed to compile main.go: %v, output: %s\n", err, out) + } + + cmd = exec.Command(testenv.GoToolPath(t), "tool", "link", "main.o") + cmd.Dir = tmpdir + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("failed to link main.o: %v, output: %s\n", err, out) + } +} diff --git a/src/cmd/link/main.go b/src/cmd/link/main.go index eab190d5b10..6bc9b5dcb60 100644 --- 
a/src/cmd/link/main.go +++ b/src/cmd/link/main.go @@ -6,6 +6,7 @@ package main import ( "cmd/internal/objabi" + "cmd/internal/sys" "cmd/link/internal/amd64" "cmd/link/internal/arm" "cmd/link/internal/arm64" @@ -26,7 +27,7 @@ import ( // // Before any argument parsing is done, the Init function of relevant // architecture package is called. The only job done in Init is -// configuration of the ld.Thearch and ld.SysArch variables. +// configuration of the architecture-specific variables. // // Then control flow passes to ld.Main, which parses flags, makes // some configuration decisions, and then gives the architecture @@ -34,26 +35,29 @@ import ( // via the ld.Thearch.Archinit function. func main() { + var arch *sys.Arch + var theArch ld.Arch + switch objabi.GOARCH { default: fmt.Fprintf(os.Stderr, "link: unknown architecture %q\n", objabi.GOARCH) os.Exit(2) case "386": - x86.Init() + arch, theArch = x86.Init() case "amd64", "amd64p32": - amd64.Init() + arch, theArch = amd64.Init() case "arm": - arm.Init() + arch, theArch = arm.Init() case "arm64": - arm64.Init() + arch, theArch = arm64.Init() case "mips", "mipsle": - mips.Init() + arch, theArch = mips.Init() case "mips64", "mips64le": - mips64.Init() + arch, theArch = mips64.Init() case "ppc64", "ppc64le": - ppc64.Init() + arch, theArch = ppc64.Init() case "s390x": - s390x.Init() + arch, theArch = s390x.Init() } - ld.Main() + ld.Main(arch, theArch) } diff --git a/src/cmd/nm/nm.go b/src/cmd/nm/nm.go index 2e2dd75018c..457239921bc 100644 --- a/src/cmd/nm/nm.go +++ b/src/cmd/nm/nm.go @@ -106,41 +106,62 @@ func nm(file string) { } defer f.Close() - syms, err := f.Symbols() - if err != nil { - errorf("reading %s: %v", file, err) + w := bufio.NewWriter(os.Stdout) + + entries := f.Entries() + + var found bool + + for _, e := range entries { + syms, err := e.Symbols() + if err != nil { + errorf("reading %s: %v", file, err) + } + if len(syms) == 0 { + continue + } + + found = true + + switch *sortOrder { + case "address": + 
sort.Slice(syms, func(i, j int) bool { return syms[i].Addr < syms[j].Addr }) + case "name": + sort.Slice(syms, func(i, j int) bool { return syms[i].Name < syms[j].Name }) + case "size": + sort.Slice(syms, func(i, j int) bool { return syms[i].Size > syms[j].Size }) + } + + for _, sym := range syms { + if len(entries) > 1 { + name := e.Name() + if name == "" { + fmt.Fprintf(w, "%s(%s):\t", file, "_go_.o") + } else { + fmt.Fprintf(w, "%s(%s):\t", file, name) + } + } else if filePrefix { + fmt.Fprintf(w, "%s:\t", file) + } + if sym.Code == 'U' { + fmt.Fprintf(w, "%8s", "") + } else { + fmt.Fprintf(w, "%8x", sym.Addr) + } + if *printSize { + fmt.Fprintf(w, " %10d", sym.Size) + } + fmt.Fprintf(w, " %c %s", sym.Code, sym.Name) + if *printType && sym.Type != "" { + fmt.Fprintf(w, " %s", sym.Type) + } + fmt.Fprintf(w, "\n") + } } - if len(syms) == 0 { + + if !found { errorf("reading %s: no symbols", file) } - switch *sortOrder { - case "address": - sort.Slice(syms, func(i, j int) bool { return syms[i].Addr < syms[j].Addr }) - case "name": - sort.Slice(syms, func(i, j int) bool { return syms[i].Name < syms[j].Name }) - case "size": - sort.Slice(syms, func(i, j int) bool { return syms[i].Size > syms[j].Size }) - } - - w := bufio.NewWriter(os.Stdout) - for _, sym := range syms { - if filePrefix { - fmt.Fprintf(w, "%s:\t", file) - } - if sym.Code == 'U' { - fmt.Fprintf(w, "%8s", "") - } else { - fmt.Fprintf(w, "%8x", sym.Addr) - } - if *printSize { - fmt.Fprintf(w, " %10d", sym.Size) - } - fmt.Fprintf(w, " %c %s", sym.Code, sym.Name) - if *printType && sym.Type != "" { - fmt.Fprintf(w, " %s", sym.Type) - } - fmt.Fprintf(w, "\n") - } w.Flush() } diff --git a/src/cmd/nm/nm_cgo_test.go b/src/cmd/nm/nm_cgo_test.go index de16f77ecc0..1dfdf7f21ac 100644 --- a/src/cmd/nm/nm_cgo_test.go +++ b/src/cmd/nm/nm_cgo_test.go @@ -11,26 +11,30 @@ import ( "testing" ) -func TestInternalLinkerCgoFile(t *testing.T) { - if !canInternalLink() { - t.Skip("skipping; internal linking is not supported") 
- } - testGoFile(t, true, false) -} - func canInternalLink() bool { switch runtime.GOOS { case "dragonfly": return false case "linux": switch runtime.GOARCH { - case "arm64", "mips64", "mips64le", "mips", "mipsle": + case "arm64", "mips64", "mips64le", "mips", "mipsle", "ppc64", "ppc64le": return false } } return true } -func TestExternalLinkerCgoFile(t *testing.T) { - testGoFile(t, true, true) +func TestInternalLinkerCgoExec(t *testing.T) { + if !canInternalLink() { + t.Skip("skipping; internal linking is not supported") + } + testGoExec(t, true, false) +} + +func TestExternalLinkerCgoExec(t *testing.T) { + testGoExec(t, true, true) +} + +func TestCgoLib(t *testing.T) { + testGoLib(t, true) } diff --git a/src/cmd/nm/nm_test.go b/src/cmd/nm/nm_test.go index 170d87a6901..4be5d0e74e2 100644 --- a/src/cmd/nm/nm_test.go +++ b/src/cmd/nm/nm_test.go @@ -54,7 +54,7 @@ func testMain(m *testing.M) int { return m.Run() } -func TestNonGoFiles(t *testing.T) { +func TestNonGoExecs(t *testing.T) { testfiles := []string{ "elf/testdata/gcc-386-freebsd-exec", "elf/testdata/gcc-amd64-linux-exec", @@ -75,8 +75,8 @@ func TestNonGoFiles(t *testing.T) { } } -func testGoFile(t *testing.T, iscgo, isexternallinker bool) { - tmpdir, err := ioutil.TempDir("", "TestGoFile") +func testGoExec(t *testing.T, iscgo, isexternallinker bool) { + tmpdir, err := ioutil.TempDir("", "TestGoExec") if err != nil { t.Fatal(err) } @@ -87,12 +87,13 @@ func testGoFile(t *testing.T, iscgo, isexternallinker bool) { if err != nil { t.Fatal(err) } - err = template.Must(template.New("main").Parse(testprog)).Execute(file, iscgo) + err = template.Must(template.New("main").Parse(testexec)).Execute(file, iscgo) + if e := file.Close(); err == nil { + err = e + } if err != nil { - file.Close() t.Fatal(err) } - file.Close() exe := filepath.Join(tmpdir, "a.exe") args := []string{"build", "-o", exe} @@ -156,11 +157,124 @@ func testGoFile(t *testing.T, iscgo, isexternallinker bool) { } } -func TestGoFile(t *testing.T) { - 
testGoFile(t, false, false) +func TestGoExec(t *testing.T) { + testGoExec(t, false, false) } -const testprog = ` +func testGoLib(t *testing.T, iscgo bool) { + tmpdir, err := ioutil.TempDir("", "TestGoLib") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + gopath := filepath.Join(tmpdir, "gopath") + libpath := filepath.Join(gopath, "src", "mylib") + + err = os.MkdirAll(libpath, 0777) + if err != nil { + t.Fatal(err) + } + src := filepath.Join(libpath, "a.go") + file, err := os.Create(src) + if err != nil { + t.Fatal(err) + } + err = template.Must(template.New("mylib").Parse(testlib)).Execute(file, iscgo) + if e := file.Close(); err == nil { + err = e + } + if err != nil { + t.Fatal(err) + } + + args := []string{"install", "mylib"} + cmd := exec.Command(testenv.GoToolPath(t), args...) + cmd.Env = append(os.Environ(), "GOPATH="+gopath) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("building test lib failed: %s %s", err, out) + } + pat := filepath.Join(gopath, "pkg", "*", "mylib.a") + ms, err := filepath.Glob(pat) + if err != nil { + t.Fatal(err) + } + if len(ms) == 0 { + t.Fatalf("cannot found paths for pattern %s", pat) + } + mylib := ms[0] + + out, err = exec.Command(testnmpath, mylib).CombinedOutput() + if err != nil { + t.Fatalf("go tool nm: %v\n%s", err, string(out)) + } + type symType struct { + Type string + Name string + CSym bool + Found bool + } + var syms = []symType{ + {"B", "%22%22.Testdata", false, false}, + {"T", "%22%22.Testfunc", false, false}, + } + if iscgo { + syms = append(syms, symType{"B", "%22%22.TestCgodata", false, false}) + syms = append(syms, symType{"T", "%22%22.TestCgofunc", false, false}) + if runtime.GOOS == "darwin" || (runtime.GOOS == "windows" && runtime.GOARCH == "386") { + syms = append(syms, symType{"D", "_cgodata", true, false}) + syms = append(syms, symType{"T", "_cgofunc", true, false}) + } else { + syms = append(syms, symType{"D", "cgodata", true, false}) + syms = append(syms, symType{"T", 
"cgofunc", true, false}) + } + } + scanner := bufio.NewScanner(bytes.NewBuffer(out)) + for scanner.Scan() { + f := strings.Fields(scanner.Text()) + var typ, name string + var csym bool + if iscgo { + if len(f) < 4 { + continue + } + csym = !strings.Contains(f[0], "_go_.o") + typ = f[2] + name = f[3] + } else { + if len(f) < 3 { + continue + } + typ = f[1] + name = f[2] + } + for i := range syms { + sym := &syms[i] + if sym.Type == typ && sym.Name == name && sym.CSym == csym { + if sym.Found { + t.Fatalf("duplicate symbol %s %s", sym.Type, sym.Name) + } + sym.Found = true + } + } + } + err = scanner.Err() + if err != nil { + t.Fatalf("error reading nm output: %v", err) + } + for _, sym := range syms { + if !sym.Found { + t.Errorf("cannot found symbol %s %s", sym.Type, sym.Name) + } + } +} + +func TestGoLib(t *testing.T) { + testGoLib(t, false) +} + +const testexec = ` package main import "fmt" @@ -179,3 +293,23 @@ func testfunc() { fmt.Printf("testdata=%p\n", &testdata) } ` + +const testlib = ` +package mylib + +{{if .}} +// int cgodata = 5; +// void cgofunc(void) {} +import "C" + +var TestCgodata = C.cgodata + +func TestCgofunc() { + C.cgofunc() +} +{{end}} + +var Testdata uint32 + +func Testfunc() {} +` diff --git a/src/cmd/objdump/objdump_test.go b/src/cmd/objdump/objdump_test.go index 47e51df3392..a2ca3296090 100644 --- a/src/cmd/objdump/objdump_test.go +++ b/src/cmd/objdump/objdump_test.go @@ -63,7 +63,7 @@ var x86Need = []string{ } var armNeed = []string{ - //"B.LS main.main(SB)", // TODO(rsc): restore; golang.org/issue/9021 + "B main.main(SB)", "BL main.Println(SB)", "RET", } @@ -148,6 +148,13 @@ func testDisasm(t *testing.T, printCode bool, flags ...string) { ok = false } } + if goarch == "386" { + if strings.Contains(text, "(IP)") { + t.Errorf("disassembly contains PC-Relative addressing on 386") + ok = false + } + } + if !ok { t.Logf("full disassembly:\n%s", text) } @@ -155,8 +162,6 @@ func testDisasm(t *testing.T, printCode bool, flags ...string) { func 
TestDisasm(t *testing.T) { switch runtime.GOARCH { - case "arm64": - t.Skipf("skipping on %s, issue 10106", runtime.GOARCH) case "mips", "mipsle", "mips64", "mips64le": t.Skipf("skipping on %s, issue 12559", runtime.GOARCH) case "s390x": @@ -167,8 +172,6 @@ func TestDisasm(t *testing.T) { func TestDisasmCode(t *testing.T) { switch runtime.GOARCH { - case "arm64": - t.Skipf("skipping on %s, issue 10106", runtime.GOARCH) case "mips", "mipsle", "mips64", "mips64le": t.Skipf("skipping on %s, issue 12559", runtime.GOARCH) case "s390x": @@ -185,8 +188,6 @@ func TestDisasmExtld(t *testing.T) { switch runtime.GOARCH { case "ppc64": t.Skipf("skipping on %s, no support for external linking, issue 9038", runtime.GOARCH) - case "arm64": - t.Skipf("skipping on %s, issue 10106", runtime.GOARCH) case "mips64", "mips64le", "mips", "mipsle": t.Skipf("skipping on %s, issue 12559 and 12560", runtime.GOARCH) case "s390x": @@ -204,10 +205,6 @@ func TestDisasmExtld(t *testing.T) { func TestDisasmGoobj(t *testing.T) { switch runtime.GOARCH { - case "arm": - t.Skipf("skipping on %s, issue 19811", runtime.GOARCH) - case "arm64": - t.Skipf("skipping on %s, issue 10106", runtime.GOARCH) case "mips", "mipsle", "mips64", "mips64le": t.Skipf("skipping on %s, issue 12559", runtime.GOARCH) case "s390x": @@ -244,6 +241,12 @@ func TestDisasmGoobj(t *testing.T) { ok = false } } + if runtime.GOARCH == "386" { + if strings.Contains(text, "(IP)") { + t.Errorf("disassembly contains PC-Relative addressing on 386") + ok = false + } + } if !ok { t.Logf("full disassembly:\n%s", text) } diff --git a/src/cmd/pack/pack.go b/src/cmd/pack/pack.go index 1c168f946bd..3abc83e0900 100644 --- a/src/cmd/pack/pack.go +++ b/src/cmd/pack/pack.go @@ -426,8 +426,15 @@ func readPkgdef(file string) (data []byte, err error) { // Read from file, collecting header for __.PKGDEF. // The header is from the beginning of the file until a line // containing just "!". The first line must begin with "go object ". 
+ // + // Note: It's possible for "\n!\n" to appear within the binary + // package export data format. To avoid truncating the package + // definition prematurely (issue 21703), we keep keep track of + // how many "$$" delimiters we've seen. + rbuf := bufio.NewReader(f) var wbuf bytes.Buffer + markers := 0 for { line, err := rbuf.ReadBytes('\n') if err != nil { @@ -436,9 +443,12 @@ func readPkgdef(file string) (data []byte, err error) { if wbuf.Len() == 0 && !bytes.HasPrefix(line, []byte("go object ")) { return nil, errors.New("not a Go object file") } - if bytes.Equal(line, []byte("!\n")) { + if markers%2 == 0 && bytes.Equal(line, []byte("!\n")) { break } + if bytes.HasPrefix(line, []byte("$$")) { + markers++ + } wbuf.Write(line) } return wbuf.Bytes(), nil diff --git a/src/cmd/pack/pack_test.go b/src/cmd/pack/pack_test.go index 79d9cde292a..b2217c090fd 100644 --- a/src/cmd/pack/pack_test.go +++ b/src/cmd/pack/pack_test.go @@ -295,6 +295,37 @@ func TestLargeDefs(t *testing.T) { } } +// Test that "\n!\n" inside export data doesn't result in a truncated +// package definition when creating a .a archive from a .o Go object. +func TestIssue21703(t *testing.T) { + testenv.MustHaveGoBuild(t) + + dir := tmpDir(t) + defer os.RemoveAll(dir) + + const aSrc = `package a; const X = "\n!\n"` + err := ioutil.WriteFile(filepath.Join(dir, "a.go"), []byte(aSrc), 0666) + if err != nil { + t.Fatal(err) + } + + const bSrc = `package b; import _ "a"` + err = ioutil.WriteFile(filepath.Join(dir, "b.go"), []byte(bSrc), 0666) + if err != nil { + t.Fatal(err) + } + + run := func(args ...string) string { + return doRun(t, dir, args...) + } + + goBin := testenv.GoToolPath(t) + run(goBin, "build", "cmd/pack") // writes pack binary to dir + run(goBin, "tool", "compile", "a.go") + run("./pack", "c", "a.a", "a.o") + run(goBin, "tool", "compile", "-I", ".", "b.go") +} + // doRun runs a program in a directory and returns the output. 
func doRun(t *testing.T, dir string, args ...string) string { cmd := exec.Command(args[0], args[1:]...) diff --git a/src/cmd/test2json/main.go b/src/cmd/test2json/main.go new file mode 100644 index 00000000000..7bdc867bbe5 --- /dev/null +++ b/src/cmd/test2json/main.go @@ -0,0 +1,131 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test2json converts go test output to a machine-readable JSON stream. +// +// Usage: +// +// go tool test2json [-p pkg] [-t] [./pkg.test -test.v] +// +// Test2json runs the given test command and converts its output to JSON; +// with no command specified, test2json expects test output on standard input. +// It writes a corresponding stream of JSON events to standard output. +// There is no unnecessary input or output buffering, so that +// the JSON stream can be read for “live updates” of test status. +// +// The -p flag sets the package reported in each test event. +// +// The -t flag requests that time stamps be added to each test event. +// +// Note that test2json is only intended for converting a single test +// binary's output. To convert the output of a "go test" command, +// use "go test -json" instead of invoking test2json directly. +// +// Output Format +// +// The JSON stream is a newline-separated sequence of TestEvent objects +// corresponding to the Go struct: +// +// type TestEvent struct { +// Time time.Time // encodes as an RFC3339-format string +// Action string +// Package string +// Test string +// Elapsed float64 // seconds +// Output string +// } +// +// The Time field holds the time the event happened. +// It is conventionally omitted for cached test results. 
+// +// The Action field is one of a fixed set of action descriptions: +// +// run - the test has started running +// pause - the test has been paused +// cont - the test has continued running +// pass - the test passed +// fail - the test failed +// output - the test printed output +// +// The Package field, if present, specifies the package being tested. +// When the go command runs parallel tests in -json mode, events from +// different tests are interlaced; the Package field allows readers to +// separate them. +// +// The Test field, if present, specifies the test or example, or benchmark +// function that caused the event. Events for the overall package test +// do not set Test. +// +// The Elapsed field is set for "pass" and "fail" events. It gives the time +// elapsed for the specific test or the overall package test that passed or failed. +// +// The Output field is set for Action == "output" and is a portion of the test's output +// (standard output and standard error merged together). The output is +// unmodified except that invalid UTF-8 output from a test is coerced +// into valid UTF-8 by use of replacement characters. With that one exception, +// the concatenation of the Output fields of all output events is the exact +// output of the test execution. 
+// +package main + +import ( + "flag" + "fmt" + "io" + "os" + "os/exec" + + "cmd/internal/test2json" +) + +var ( + flagP = flag.String("p", "", "report `pkg` as the package being tested in each event") + flagT = flag.Bool("t", false, "include timestamps in events") +) + +func usage() { + fmt.Fprintf(os.Stderr, "usage: go tool test2json [-p pkg] [-t] [./pkg.test -test.v]\n") + os.Exit(2) +} + +func main() { + flag.Usage = usage + flag.Parse() + + var mode test2json.Mode + if *flagT { + mode |= test2json.Timestamp + } + c := test2json.NewConverter(os.Stdout, *flagP, mode) + defer c.Close() + + if flag.NArg() == 0 { + io.Copy(c, os.Stdin) + } else { + args := flag.Args() + cmd := exec.Command(args[0], args[1:]...) + w := &countWriter{0, c} + cmd.Stdout = w + cmd.Stderr = w + if err := cmd.Run(); err != nil { + if w.n > 0 { + // Assume command printed why it failed. + } else { + fmt.Fprintf(c, "test2json: %v\n", err) + } + c.Close() + os.Exit(1) + } + } +} + +type countWriter struct { + n int64 + w io.Writer +} + +func (w *countWriter) Write(b []byte) (int, error) { + w.n += int64(len(b)) + return w.w.Write(b) +} diff --git a/src/cmd/trace/goroutines.go b/src/cmd/trace/goroutines.go index f5a4ddb0564..d0d428cbe2f 100644 --- a/src/cmd/trace/goroutines.go +++ b/src/cmd/trace/goroutines.go @@ -121,13 +121,16 @@ func httpGoroutine(w http.ResponseWriter, r *http.Request) { analyzeGoroutines(events) var glist gdescList for _, g := range gs { - if g.PC != pc || g.ExecTime == 0 { + if g.PC != pc { continue } glist = append(glist, g) } sort.Sort(glist) - err = templGoroutine.Execute(w, glist) + err = templGoroutine.Execute(w, struct { + PC uint64 + GList gdescList + }{pc, glist}) if err != nil { http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) return @@ -142,14 +145,14 @@ var templGoroutine = template.Must(template.New("").Parse(`
  • - - - - + + + + -{{range $}} +{{range .GList}} diff --git a/src/cmd/trace/main.go b/src/cmd/trace/main.go index 8ea0ba01615..32e16dfb476 100644 --- a/src/cmd/trace/main.go +++ b/src/cmd/trace/main.go @@ -42,6 +42,7 @@ Supported profile types are: Flags: -http=addr: HTTP service address (e.g., ':6060') -pprof=type: print a pprof-like profile instead + -d: print debug info such as parsed events Note that while the various profiles available when launching 'go tool trace' work on every browser, the trace viewer itself @@ -52,6 +53,7 @@ and is only actively tested on that browser. var ( httpFlag = flag.String("http", "localhost:0", "HTTP service address (e.g., ':6060')") pprofFlag = flag.String("pprof", "", "print a pprof-like profile instead") + debugFlag = flag.Bool("d", false, "print debug information such as parsed events list") // The binary file name, left here for serveSVGProfile. programBinary string @@ -77,7 +79,7 @@ func main() { flag.Usage() } - var pprofFunc func(io.Writer) error + var pprofFunc func(io.Writer, string) error switch *pprofFlag { case "net": pprofFunc = pprofIO @@ -89,7 +91,7 @@ func main() { pprofFunc = pprofSched } if pprofFunc != nil { - if err := pprofFunc(os.Stdout); err != nil { + if err := pprofFunc(os.Stdout, ""); err != nil { dief("failed to generate pprof: %v\n", err) } os.Exit(0) @@ -103,13 +105,18 @@ func main() { dief("failed to create server socket: %v\n", err) } - log.Printf("Parsing trace...") + log.Print("Parsing trace...") events, err := parseEvents() if err != nil { dief("%v\n", err) } - log.Printf("Serializing trace...") + if *debugFlag { + trace.Print(events) + os.Exit(0) + } + + log.Print("Serializing trace...") params := &traceParams{ events: events, endTime: int64(1<<63 - 1), @@ -119,13 +126,12 @@ func main() { dief("%v\n", err) } - log.Printf("Splitting trace...") + log.Print("Splitting trace...") ranges = splitTrace(data) - log.Printf("Opening browser") - if !browser.Open("http://" + ln.Addr().String()) { - 
fmt.Fprintf(os.Stderr, "Trace viewer is listening on http://%s\n", ln.Addr().String()) - } + addr := "http://" + ln.Addr().String() + log.Printf("Opening browser. Trace viewer is listening on %s", addr) + browser.Open(addr) // Start http server. http.HandleFunc("/", httpMain) @@ -181,10 +187,10 @@ var templMain = template.Must(template.New("").Parse(` View trace
    {{end}} Goroutine analysis
    -Network blocking profile
    -Synchronization blocking profile
    -Syscall blocking profile
    -Scheduler latency profile
    +Network blocking profile ()
    +Synchronization blocking profile ()
    +Syscall blocking profile ()
    +Scheduler latency profile ()
    `)) diff --git a/src/cmd/trace/pprof.go b/src/cmd/trace/pprof.go index 40803ac5f99..cac36e80104 100644 --- a/src/cmd/trace/pprof.go +++ b/src/cmd/trace/pprof.go @@ -15,10 +15,25 @@ import ( "net/http" "os" "os/exec" + "path/filepath" + "runtime" + "strconv" "github.com/google/pprof/profile" ) +func goCmd() string { + var exeSuffix string + if runtime.GOOS == "windows" { + exeSuffix = ".exe" + } + path := filepath.Join(runtime.GOROOT(), "bin", "go"+exeSuffix) + if _, err := os.Stat(path); err == nil { + return path + } + return "go" +} + func init() { http.HandleFunc("/io", serveSVGProfile(pprofIO)) http.HandleFunc("/block", serveSVGProfile(pprofBlock)) @@ -33,17 +48,54 @@ type Record struct { time int64 } -// pprofIO generates IO pprof-like profile (time spent in IO wait). -func pprofIO(w io.Writer) error { +// pprofMatchingGoroutines parses the goroutine type id string (i.e. pc) +// and returns the ids of goroutines of the matching type. +// If the id string is empty, returns nil without an error. +func pprofMatchingGoroutines(id string, events []*trace.Event) (map[uint64]bool, error) { + if id == "" { + return nil, nil + } + pc, err := strconv.ParseUint(id, 10, 64) // id is string + if err != nil { + return nil, fmt.Errorf("invalid goroutine type: %v", id) + } + analyzeGoroutines(events) + var res map[uint64]bool + for _, g := range gs { + if g.PC != pc { + continue + } + if res == nil { + res = make(map[uint64]bool) + } + res[g.ID] = true + } + if len(res) == 0 && id != "" { + return nil, fmt.Errorf("failed to find matching goroutines for id: %s", id) + } + return res, nil +} + +// pprofIO generates IO pprof-like profile (time spent in IO wait, +// currently only network blocking event). 
+func pprofIO(w io.Writer, id string) error { events, err := parseEvents() if err != nil { return err } + goroutines, err := pprofMatchingGoroutines(id, events) + if err != nil { + return err + } + prof := make(map[uint64]Record) for _, ev := range events { if ev.Type != trace.EvGoBlockNet || ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 { continue } + if goroutines != nil && !goroutines[ev.G] { + continue + } rec := prof[ev.StkID] rec.stk = ev.Stk rec.n++ @@ -54,22 +106,33 @@ func pprofIO(w io.Writer) error { } // pprofBlock generates blocking pprof-like profile (time spent blocked on synchronization primitives). -func pprofBlock(w io.Writer) error { +func pprofBlock(w io.Writer, id string) error { events, err := parseEvents() if err != nil { return err } + goroutines, err := pprofMatchingGoroutines(id, events) + if err != nil { + return err + } + prof := make(map[uint64]Record) for _, ev := range events { switch ev.Type { case trace.EvGoBlockSend, trace.EvGoBlockRecv, trace.EvGoBlockSelect, trace.EvGoBlockSync, trace.EvGoBlockCond, trace.EvGoBlockGC: + // TODO(hyangah): figure out why EvGoBlockGC should be here. + // EvGoBlockGC indicates the goroutine blocks on GC assist, not + // on synchronization primitives. default: continue } if ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 { continue } + if goroutines != nil && !goroutines[ev.G] { + continue + } rec := prof[ev.StkID] rec.stk = ev.Stk rec.n++ @@ -80,16 +143,25 @@ func pprofBlock(w io.Writer) error { } // pprofSyscall generates syscall pprof-like profile (time spent blocked in syscalls). 
-func pprofSyscall(w io.Writer) error { +func pprofSyscall(w io.Writer, id string) error { + events, err := parseEvents() if err != nil { return err } + goroutines, err := pprofMatchingGoroutines(id, events) + if err != nil { + return err + } + prof := make(map[uint64]Record) for _, ev := range events { if ev.Type != trace.EvGoSysCall || ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 { continue } + if goroutines != nil && !goroutines[ev.G] { + continue + } rec := prof[ev.StkID] rec.stk = ev.Stk rec.n++ @@ -101,17 +173,25 @@ func pprofSyscall(w io.Writer) error { // pprofSched generates scheduler latency pprof-like profile // (time between a goroutine become runnable and actually scheduled for execution). -func pprofSched(w io.Writer) error { +func pprofSched(w io.Writer, id string) error { events, err := parseEvents() if err != nil { return err } + goroutines, err := pprofMatchingGoroutines(id, events) + if err != nil { + return err + } + prof := make(map[uint64]Record) for _, ev := range events { if (ev.Type != trace.EvGoUnblock && ev.Type != trace.EvGoCreate) || ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 { continue } + if goroutines != nil && !goroutines[ev.G] { + continue + } rec := prof[ev.StkID] rec.stk = ev.Stk rec.n++ @@ -122,8 +202,20 @@ func pprofSched(w io.Writer) error { } // serveSVGProfile serves pprof-like profile generated by prof as svg. 
-func serveSVGProfile(prof func(w io.Writer) error) http.HandlerFunc { +func serveSVGProfile(prof func(w io.Writer, id string) error) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { + + if r.FormValue("raw") != "" { + w.Header().Set("Content-Type", "application/octet-stream") + if err := prof(w, r.FormValue("id")); err != nil { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.Header().Set("X-Go-Pprof", "1") + http.Error(w, fmt.Sprintf("failed to get profile: %v", err), http.StatusInternalServerError) + return + } + return + } + blockf, err := ioutil.TempFile("", "block") if err != nil { http.Error(w, fmt.Sprintf("failed to create temp file: %v", err), http.StatusInternalServerError) @@ -134,7 +226,7 @@ func serveSVGProfile(prof func(w io.Writer) error) http.HandlerFunc { os.Remove(blockf.Name()) }() blockb := bufio.NewWriter(blockf) - if err := prof(blockb); err != nil { + if err := prof(blockb, r.FormValue("id")); err != nil { http.Error(w, fmt.Sprintf("failed to generate profile: %v", err), http.StatusInternalServerError) return } @@ -147,7 +239,7 @@ func serveSVGProfile(prof func(w io.Writer) error) http.HandlerFunc { return } svgFilename := blockf.Name() + ".svg" - if output, err := exec.Command("go", "tool", "pprof", "-svg", "-output", svgFilename, blockf.Name()).CombinedOutput(); err != nil { + if output, err := exec.Command(goCmd(), "tool", "pprof", "-svg", "-output", svgFilename, blockf.Name()).CombinedOutput(); err != nil { http.Error(w, fmt.Sprintf("failed to execute go tool pprof: %v\n%s", err, output), http.StatusInternalServerError) return } diff --git a/src/cmd/trace/trace.go b/src/cmd/trace/trace.go index 7db2188861f..45d14f873d4 100644 --- a/src/cmd/trace/trace.go +++ b/src/cmd/trace/trace.go @@ -42,12 +42,30 @@ func httpTrace(w http.ResponseWriter, r *http.Request) { // See https://github.com/catapult-project/catapult/blob/master/tracing/docs/embedding-trace-viewer.md // This is almost verbatim copy of: // 
https://github.com/catapult-project/catapult/blob/master/tracing/bin/index.html -// on revision 623a005a3ffa9de13c4b92bc72290e7bcd1ca591. +// on revision 5f9e4c3eaa555bdef18218a89f38c768303b7b6e. var templTrace = ` + + + +{{end}} + +{{define "script"}} + +{{end}} + +{{define "top" -}} + + + + +{{.Title}} +{{template "css" .}} + + + + +{{template "header" .}} + +
    +
    linux mips64le
    linux s390x
    netbsd 386
    Notes

    FreeBSD 9.3 or later amd64, 386 Debian GNU/kFreeBSD not supported
    FreeBSD 10.3 or later amd64, 386 Debian GNU/kFreeBSD not supported
    Linux 2.6.23 or later with glibc amd64, 386, arm, arm64,
    s390x, ppc64le
    CentOS/RHEL 5.x not supported.
    Install from source for other libc.
    macOS 10.8 or later amd64 use the clang or gcc that comes with Xcode for cgo support
    Windows XP SP2 or later amd64, 386 use MinGW gcc. No need for cygwin or msys.
    ") + if class == "" { + w.WriteString("") + } else { + w.WriteString("") + } w.WriteString("

    " + title + "

    ") w.WriteString(html) w.WriteString("
    Goroutine Total time, ns Execution time, ns Network wait time, ns Sync block time, ns Blocking syscall time, ns Scheduler wait time, ns Network wait time, ns Sync block time, ns Blocking syscall time, ns Scheduler wait time, ns GC sweeping time, ns GC pause time, ns
    {{.ID}} {{.TotalTime}}
    + + + + +
    Flat +Flat% +Sum% +Cum +Cum% +Name +Inlined?
    + + +{{template "script" .}} + + + +{{end}} + +{{define "sourcelisting" -}} + + + + +{{.Title}} +{{template "css" .}} +{{template "weblistcss" .}} +{{template "weblistjs" .}} + + + +{{template "header" .}} + +
    +{{.HTMLBody}} +
    + +{{template "script" .}} + + + +{{end}} + +{{define "plaintext" -}} + + + + +{{.Title}} +{{template "css" .}} + + + +{{template "header" .}} + +
    +
    +{{.TextBody}}
    +
    +
    + +{{template "script" .}} + + + +{{end}} +`)) +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go new file mode 100644 index 00000000000..67ae2628826 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go @@ -0,0 +1,393 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "bytes" + "fmt" + "html/template" + "net" + "net/http" + gourl "net/url" + "os" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/google/pprof/internal/graph" + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/internal/report" + "github.com/google/pprof/profile" +) + +// webInterface holds the state needed for serving a browser based interface. +type webInterface struct { + prof *profile.Profile + options *plugin.Options + help map[string]string + templates *template.Template +} + +func makeWebInterface(p *profile.Profile, opt *plugin.Options) *webInterface { + templates := template.New("templategroup") + addTemplates(templates) + report.AddSourceTemplates(templates) + return &webInterface{ + prof: p, + options: opt, + help: make(map[string]string), + templates: templates, + } +} + +// maxEntries is the maximum number of entries to print for text interfaces. 
+const maxEntries = 50 + +// errorCatcher is a UI that captures errors for reporting to the browser. +type errorCatcher struct { + plugin.UI + errors []string +} + +func (ec *errorCatcher) PrintErr(args ...interface{}) { + ec.errors = append(ec.errors, strings.TrimSuffix(fmt.Sprintln(args...), "\n")) + ec.UI.PrintErr(args...) +} + +// webArgs contains arguments passed to templates in webhtml.go. +type webArgs struct { + BaseURL string + Title string + Errors []string + Total int64 + Legend []string + Help map[string]string + Nodes []string + HTMLBody template.HTML + TextBody string + Top []report.TextItem +} + +func serveWebInterface(hostport string, p *profile.Profile, o *plugin.Options) error { + host, portStr, err := net.SplitHostPort(hostport) + if err != nil { + return fmt.Errorf("could not split http address: %v", err) + } + port, err := strconv.Atoi(portStr) + if err != nil { + return fmt.Errorf("invalid port number: %v", err) + } + if host == "" { + host = "localhost" + } + + interactiveMode = true + ui := makeWebInterface(p, o) + for n, c := range pprofCommands { + ui.help[n] = c.description + } + for n, v := range pprofVariables { + ui.help[n] = v.help + } + ui.help["details"] = "Show information about the profile and this view" + ui.help["graph"] = "Display profile as a directed graph" + ui.help["reset"] = "Show the entire profile" + + server := o.HTTPServer + if server == nil { + server = defaultWebServer + } + args := &plugin.HTTPServerArgs{ + Hostport: net.JoinHostPort(host, portStr), + Host: host, + Port: port, + Handlers: map[string]http.Handler{ + "/": http.HandlerFunc(ui.dot), + "/top": http.HandlerFunc(ui.top), + "/disasm": http.HandlerFunc(ui.disasm), + "/source": http.HandlerFunc(ui.source), + "/peek": http.HandlerFunc(ui.peek), + }, + } + + go openBrowser("http://"+args.Hostport, o) + return server(args) +} + +func defaultWebServer(args *plugin.HTTPServerArgs) error { + ln, err := net.Listen("tcp", args.Hostport) + if err != nil { + return err 
+ } + isLocal := isLocalhost(args.Host) + handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if isLocal { + // Only allow local clients + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil || !isLocalhost(host) { + http.Error(w, "permission denied", http.StatusForbidden) + return + } + } + h := args.Handlers[req.URL.Path] + if h == nil { + // Fall back to default behavior + h = http.DefaultServeMux + } + h.ServeHTTP(w, req) + }) + s := &http.Server{Handler: handler} + return s.Serve(ln) +} + +func isLocalhost(host string) bool { + for _, v := range []string{"localhost", "127.0.0.1", "[::1]", "::1"} { + if host == v { + return true + } + } + return false +} + +func openBrowser(url string, o *plugin.Options) { + // Construct URL. + u, _ := gourl.Parse(url) + q := u.Query() + for _, p := range []struct{ param, key string }{ + {"f", "focus"}, + {"s", "show"}, + {"i", "ignore"}, + {"h", "hide"}, + } { + if v := pprofVariables[p.key].value; v != "" { + q.Set(p.param, v) + } + } + u.RawQuery = q.Encode() + + // Give server a little time to get ready. + time.Sleep(time.Millisecond * 500) + + for _, b := range browsers() { + args := strings.Split(b, " ") + if len(args) == 0 { + continue + } + viewer := exec.Command(args[0], append(args[1:], u.String())...) + viewer.Stderr = os.Stderr + if err := viewer.Start(); err == nil { + return + } + } + // No visualizer succeeded, so just print URL. + o.UI.PrintErr(u.String()) +} + +func varsFromURL(u *gourl.URL) variables { + vars := pprofVariables.makeCopy() + vars["focus"].value = u.Query().Get("f") + vars["show"].value = u.Query().Get("s") + vars["ignore"].value = u.Query().Get("i") + vars["hide"].value = u.Query().Get("h") + return vars +} + +// makeReport generates a report for the specified command. 
+func (ui *webInterface) makeReport(w http.ResponseWriter, req *http.Request, + cmd []string, vars ...string) (*report.Report, []string) { + v := varsFromURL(req.URL) + for i := 0; i+1 < len(vars); i += 2 { + v[vars[i]].value = vars[i+1] + } + catcher := &errorCatcher{UI: ui.options.UI} + options := *ui.options + options.UI = catcher + _, rpt, err := generateRawReport(ui.prof, cmd, v, &options) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return nil, nil + } + return rpt, catcher.errors +} + +// render generates html using the named template based on the contents of data. +func (ui *webInterface) render(w http.ResponseWriter, baseURL, tmpl string, + rpt *report.Report, errList, legend []string, data webArgs) { + file := getFromLegend(legend, "File: ", "unknown") + profile := getFromLegend(legend, "Type: ", "unknown") + data.BaseURL = baseURL + data.Title = file + " " + profile + data.Errors = errList + data.Total = rpt.Total() + data.Legend = legend + data.Help = ui.help + html := &bytes.Buffer{} + if err := ui.templates.ExecuteTemplate(html, tmpl, data); err != nil { + http.Error(w, "internal template error", http.StatusInternalServerError) + ui.options.UI.PrintErr(err) + return + } + w.Header().Set("Content-Type", "text/html") + w.Write(html.Bytes()) +} + +// dot generates a web page containing an svg diagram. +func (ui *webInterface) dot(w http.ResponseWriter, req *http.Request) { + rpt, errList := ui.makeReport(w, req, []string{"svg"}) + if rpt == nil { + return // error already reported + } + + // Generate dot graph. + g, config := report.GetDOT(rpt) + legend := config.Labels + config.Labels = nil + dot := &bytes.Buffer{} + graph.ComposeDot(dot, g, &graph.DotAttributes{}, config) + + // Convert to svg. + svg, err := dotToSvg(dot.Bytes()) + if err != nil { + http.Error(w, "Could not execute dot; may need to install graphviz.", + http.StatusNotImplemented) + ui.options.UI.PrintErr("Failed to execute dot. 
Is Graphviz installed?\n", err) + return + } + + // Get all node names into an array. + nodes := []string{""} // dot starts with node numbered 1 + for _, n := range g.Nodes { + nodes = append(nodes, n.Info.Name) + } + + ui.render(w, "/", "graph", rpt, errList, legend, webArgs{ + HTMLBody: template.HTML(string(svg)), + Nodes: nodes, + }) +} + +func dotToSvg(dot []byte) ([]byte, error) { + cmd := exec.Command("dot", "-Tsvg") + out := &bytes.Buffer{} + cmd.Stdin, cmd.Stdout, cmd.Stderr = bytes.NewBuffer(dot), out, os.Stderr + if err := cmd.Run(); err != nil { + return nil, err + } + + // Fix dot bug related to unquoted amperands. + svg := bytes.Replace(out.Bytes(), []byte("&;"), []byte("&;"), -1) + + // Cleanup for embedding by dropping stuff before the start. + if pos := bytes.Index(svg, []byte("= 0 { + svg = svg[pos:] + } + return svg, nil +} + +func (ui *webInterface) top(w http.ResponseWriter, req *http.Request) { + rpt, errList := ui.makeReport(w, req, []string{"top"}, "nodecount", "500") + if rpt == nil { + return // error already reported + } + top, legend := report.TextItems(rpt) + var nodes []string + for _, item := range top { + nodes = append(nodes, item.Name) + } + + ui.render(w, "/top", "top", rpt, errList, legend, webArgs{ + Top: top, + Nodes: nodes, + }) +} + +// disasm generates a web page containing disassembly. 
+func (ui *webInterface) disasm(w http.ResponseWriter, req *http.Request) { + args := []string{"disasm", req.URL.Query().Get("f")} + rpt, errList := ui.makeReport(w, req, args) + if rpt == nil { + return // error already reported + } + + out := &bytes.Buffer{} + if err := report.PrintAssembly(out, rpt, ui.options.Obj, maxEntries); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } + + legend := report.ProfileLabels(rpt) + ui.render(w, "/disasm", "plaintext", rpt, errList, legend, webArgs{ + TextBody: out.String(), + }) + +} + +// source generates a web page containing source code annotated with profile +// data. +func (ui *webInterface) source(w http.ResponseWriter, req *http.Request) { + args := []string{"weblist", req.URL.Query().Get("f")} + rpt, errList := ui.makeReport(w, req, args) + if rpt == nil { + return // error already reported + } + + // Generate source listing. + var body bytes.Buffer + if err := report.PrintWebList(&body, rpt, ui.options.Obj, maxEntries); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } + + legend := report.ProfileLabels(rpt) + ui.render(w, "/source", "sourcelisting", rpt, errList, legend, webArgs{ + HTMLBody: template.HTML(body.String()), + }) +} + +// peek generates a web page listing callers/callers. 
+func (ui *webInterface) peek(w http.ResponseWriter, req *http.Request) { + args := []string{"peek", req.URL.Query().Get("f")} + rpt, errList := ui.makeReport(w, req, args, "lines", "t") + if rpt == nil { + return // error already reported + } + + out := &bytes.Buffer{} + if err := report.Generate(out, rpt, ui.options.Obj); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } + + legend := report.ProfileLabels(rpt) + ui.render(w, "/peek", "plaintext", rpt, errList, legend, webArgs{ + TextBody: out.String(), + }) +} + +// getFromLegend returns the suffix of an entry in legend that starts +// with param. It returns def if no such entry is found. +func getFromLegend(legend []string, param, def string) string { + for _, s := range legend { + if strings.HasPrefix(s, param) { + return s[len(param):] + } + } + return def +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/webui_test.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/webui_test.go new file mode 100644 index 00000000000..96380a01b39 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/webui_test.go @@ -0,0 +1,243 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package driver + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os/exec" + "regexp" + "sync" + "testing" + "time" + + "runtime" + + "github.com/google/pprof/internal/plugin" + "github.com/google/pprof/profile" +) + +func TestWebInterface(t *testing.T) { + // This test starts a web browser in a background goroutine + // after a 500ms delay. Sometimes the test exits before it + // can run the browser, but sometimes the browser does open. + // That's obviously unacceptable. + defer time.Sleep(2 * time.Second) // to see the browser open + t.Skip("golang.org/issue/22651") + + if runtime.GOOS == "nacl" { + t.Skip("test assumes tcp available") + } + + prof := makeFakeProfile() + + // Custom http server creator + var server *httptest.Server + serverCreated := make(chan bool) + creator := func(a *plugin.HTTPServerArgs) error { + server = httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + if h := a.Handlers[r.URL.Path]; h != nil { + h.ServeHTTP(w, r) + } + })) + serverCreated <- true + return nil + } + + // Start server and wait for it to be initialized + go serveWebInterface("unused:1234", prof, &plugin.Options{ + Obj: fakeObjTool{}, + UI: &stdUI{}, + HTTPServer: creator, + }) + <-serverCreated + defer server.Close() + + haveDot := false + if _, err := exec.LookPath("dot"); err == nil { + haveDot = true + } + + type testCase struct { + path string + want []string + needDot bool + } + testcases := []testCase{ + {"/", []string{"F1", "F2", "F3", "testbin", "cpu"}, true}, + {"/top", []string{`"Name":"F2","InlineLabel":"","Flat":200,"Cum":300,"FlatFormat":"200ms","CumFormat":"300ms"}`}, false}, + {"/source?f=" + url.QueryEscape("F[12]"), + []string{"F1", "F2", "300ms +line1"}, false}, + {"/peek?f=" + url.QueryEscape("F[12]"), + []string{"300ms.*F1", "200ms.*300ms.*F2"}, false}, + {"/disasm?f=" + url.QueryEscape("F[12]"), + []string{"f1:asm", "f2:asm"}, false}, + } + for _, c := range testcases { + 
if c.needDot && !haveDot { + t.Log("skipping", c.path, "since dot (graphviz) does not seem to be installed") + continue + } + + res, err := http.Get(server.URL + c.path) + if err != nil { + t.Error("could not fetch", c.path, err) + continue + } + data, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Error("could not read response", c.path, err) + continue + } + result := string(data) + for _, w := range c.want { + if match, _ := regexp.MatchString(w, result); !match { + t.Errorf("response for %s does not match "+ + "expected pattern '%s'; "+ + "actual result:\n%s", c.path, w, result) + } + } + } + + // Also fetch all the test case URLs in parallel to test thread + // safety when run under the race detector. + var wg sync.WaitGroup + for _, c := range testcases { + if c.needDot && !haveDot { + continue + } + path := server.URL + c.path + for count := 0; count < 2; count++ { + wg.Add(1) + go func() { + http.Get(path) + wg.Done() + }() + } + } + wg.Wait() + + time.Sleep(5 * time.Second) +} + +// Implement fake object file support. 
+ +const addrBase = 0x1000 +const fakeSource = "testdata/file1000.src" + +type fakeObj struct{} + +func (f fakeObj) Close() error { return nil } +func (f fakeObj) Name() string { return "testbin" } +func (f fakeObj) Base() uint64 { return 0 } +func (f fakeObj) BuildID() string { return "" } +func (f fakeObj) SourceLine(addr uint64) ([]plugin.Frame, error) { + return nil, fmt.Errorf("SourceLine unimplemented") +} +func (f fakeObj) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) { + return []*plugin.Sym{ + {[]string{"F1"}, fakeSource, addrBase, addrBase + 10}, + {[]string{"F2"}, fakeSource, addrBase + 10, addrBase + 20}, + {[]string{"F3"}, fakeSource, addrBase + 20, addrBase + 30}, + }, nil +} + +type fakeObjTool struct{} + +func (obj fakeObjTool) Open(file string, start, limit, offset uint64) (plugin.ObjFile, error) { + return fakeObj{}, nil +} + +func (obj fakeObjTool) Disasm(file string, start, end uint64) ([]plugin.Inst, error) { + return []plugin.Inst{ + {Addr: addrBase + 0, Text: "f1:asm", Function: "F1"}, + {Addr: addrBase + 10, Text: "f2:asm", Function: "F2"}, + {Addr: addrBase + 20, Text: "d3:asm", Function: "F3"}, + }, nil +} + +func makeFakeProfile() *profile.Profile { + // Three functions: F1, F2, F3 with three lines, 11, 22, 33. 
+ funcs := []*profile.Function{ + {ID: 1, Name: "F1", Filename: fakeSource, StartLine: 3}, + {ID: 2, Name: "F2", Filename: fakeSource, StartLine: 5}, + {ID: 3, Name: "F3", Filename: fakeSource, StartLine: 7}, + } + lines := []profile.Line{ + {Function: funcs[0], Line: 11}, + {Function: funcs[1], Line: 22}, + {Function: funcs[2], Line: 33}, + } + mapping := []*profile.Mapping{ + { + ID: 1, + Start: addrBase, + Limit: addrBase + 10, + Offset: 0, + File: "testbin", + HasFunctions: true, + HasFilenames: true, + HasLineNumbers: true, + }, + } + + // Three interesting addresses: base+{10,20,30} + locs := []*profile.Location{ + {ID: 1, Address: addrBase + 10, Line: lines[0:1], Mapping: mapping[0]}, + {ID: 2, Address: addrBase + 20, Line: lines[1:2], Mapping: mapping[0]}, + {ID: 3, Address: addrBase + 30, Line: lines[2:3], Mapping: mapping[0]}, + } + + // Two stack traces. + return &profile.Profile{ + PeriodType: &profile.ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*profile.ValueType{ + {Type: "cpu", Unit: "milliseconds"}, + }, + Sample: []*profile.Sample{ + { + Location: []*profile.Location{locs[2], locs[1], locs[0]}, + Value: []int64{100}, + }, + { + Location: []*profile.Location{locs[1], locs[0]}, + Value: []int64{200}, + }, + }, + Location: locs, + Function: funcs, + Mapping: mapping, + } +} + +func TestIsLocalHost(t *testing.T) { + for _, s := range []string{"localhost:10000", "[::1]:10000", "127.0.0.1:10000"} { + host, _, err := net.SplitHostPort(s) + if err != nil { + t.Error("unexpected error when splitting", s) + continue + } + if !isLocalhost(host) { + t.Errorf("host %s from %s not considered local", host, s) + } + } +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go b/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go index c46272e8fca..9b238c5b87e 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go +++ 
b/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec.go @@ -131,7 +131,7 @@ func GetBuildID(binary io.ReaderAt) ([]byte, error) { if buildID == nil { buildID = note.Desc } else { - return nil, fmt.Errorf("multiple build ids found, don't know which to use!") + return nil, fmt.Errorf("multiple build ids found, don't know which to use") } } } @@ -240,17 +240,22 @@ func GetBase(fh *elf.FileHeader, loadSegment *elf.ProgHeader, stextOffset *uint6 } return start, nil case elf.ET_DYN: - if offset != 0 { - if loadSegment == nil || loadSegment.Vaddr == 0 { - return start - offset, nil - } - return 0, fmt.Errorf("Don't know how to handle mapping. Offset=%x, vaddr=%x", - offset, loadSegment.Vaddr) - } + // The process mapping information, start = start of virtual address range, + // and offset = offset in the executable file of the start address, tells us + // that a runtime virtual address x maps to a file offset + // fx = x - start + offset. if loadSegment == nil { - return start, nil + return start - offset, nil } - return start - loadSegment.Vaddr, nil + // The program header, if not nil, indicates the offset in the file where + // the executable segment is located (loadSegment.Off), and the base virtual + // address where the first byte of the segment is loaded + // (loadSegment.Vaddr). A file offset fx maps to a virtual (symbol) address + // sx = fx - loadSegment.Off + loadSegment.Vaddr. + // + // Thus, a runtime virtual address x maps to a symbol address + // sx = x - start + offset - loadSegment.Off + loadSegment.Vaddr. 
+ return start - offset + loadSegment.Off - loadSegment.Vaddr, nil } return 0, fmt.Errorf("Don't know how to handle FileHeader.Type %v", fh.Type) } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec_test.go b/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec_test.go index b9f2a841a6d..c6b8fe4c22e 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec_test.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/elfexec/elfexec_test.go @@ -62,8 +62,9 @@ func TestGetBase(t *testing.T) { {"exec chromeos kernel 4", fhExec, kernelHeader, uint64p(0xffffffff81200198), 0x198, 0x100000, 0, 0x7ee00000, false}, {"exec chromeos kernel unremapped", fhExec, kernelHeader, uint64p(0xffffffff810001c8), 0xffffffff834001c8, 0xffffffffc0000000, 0xffffffff834001c8, 0x2400000, false}, {"dyn", fhDyn, nil, nil, 0x200000, 0x300000, 0, 0x200000, false}, - {"dyn offset", fhDyn, lsOffset, nil, 0x0, 0x300000, 0, 0xFFFFFFFFFFC00000, false}, + {"dyn map", fhDyn, lsOffset, nil, 0x0, 0x300000, 0, 0xFFFFFFFFFFE00000, false}, {"dyn nomap", fhDyn, nil, nil, 0x0, 0x0, 0, 0, false}, + {"dyn map+offset", fhDyn, lsOffset, nil, 0x900000, 0xa00000, 0x200000, 0x500000, false}, {"rel", fhRel, nil, nil, 0x2000000, 0x3000000, 0, 0x2000000, false}, {"rel nomap", fhRel, nil, nil, 0x0, ^uint64(0), 0, 0, false}, {"rel offset", fhRel, nil, nil, 0x100000, 0x200000, 0x1, 0, true}, diff --git a/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go b/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go index c99e8992ded..4e5d12f6cdb 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph.go @@ -42,15 +42,17 @@ type DotNodeAttributes struct { // DotConfig contains attributes about how a graph should be // constructed and how it should look. 
type DotConfig struct { - Title string // The title of the DOT graph - Labels []string // The labels for the DOT's legend + Title string // The title of the DOT graph + LegendURL string // The URL to link to from the legend. + Labels []string // The labels for the DOT's legend - FormatValue func(int64) string // A formatting function for values - FormatTag func(int64, string) string // A formatting function for numeric tags - Total int64 // The total weight of the graph, used to compute percentages + FormatValue func(int64) string // A formatting function for values + Total int64 // The total weight of the graph, used to compute percentages } -// Compose creates and writes a in the DOT format to the writer, using +const maxNodelets = 4 // Number of nodelets for labels (both numeric and non) + +// ComposeDot creates and writes a graph in the DOT format to the writer, using // the configurations given. func ComposeDot(w io.Writer, g *Graph, a *DotAttributes, c *DotConfig) { builder := &builder{w, a, c} @@ -120,11 +122,19 @@ func (b *builder) finish() { // addLegend generates a legend in DOT format. func (b *builder) addLegend() { labels := b.config.Labels - var title string - if len(labels) > 0 { - title = labels[0] + if len(labels) == 0 { + return } - fmt.Fprintf(b, `subgraph cluster_L { "%s" [shape=box fontsize=16 label="%s\l"] }`+"\n", title, strings.Join(labels, `\l`)) + title := labels[0] + fmt.Fprintf(b, `subgraph cluster_L { "%s" [shape=box fontsize=16`, title) + fmt.Fprintf(b, ` label="%s\l"`, strings.Join(labels, `\l`)) + if b.config.LegendURL != "" { + fmt.Fprintf(b, ` URL="%s" target="_blank"`, b.config.LegendURL) + } + if b.config.Title != "" { + fmt.Fprintf(b, ` tooltip="%s"`, b.config.Title) + } + fmt.Fprintf(b, "] }\n") } // addNode generates a graph node in DOT format. @@ -176,8 +186,8 @@ func (b *builder) addNode(node *Node, nodeID int, maxFlat float64) { } // Create DOT attribute for node. 
- attr := fmt.Sprintf(`label="%s" fontsize=%d shape=%s tooltip="%s (%s)" color="%s" fillcolor="%s"`, - label, fontSize, shape, node.Info.PrintableName(), cumValue, + attr := fmt.Sprintf(`label="%s" id="node%d" fontsize=%d shape=%s tooltip="%s (%s)" color="%s" fillcolor="%s"`, + label, nodeID, fontSize, shape, node.Info.PrintableName(), cumValue, dotColor(float64(node.CumValue())/float64(abs64(b.config.Total)), false), dotColor(float64(node.CumValue())/float64(abs64(b.config.Total)), true)) @@ -204,13 +214,11 @@ func (b *builder) addNode(node *Node, nodeID int, maxFlat float64) { // addNodelets generates the DOT boxes for the node tags if they exist. func (b *builder) addNodelets(node *Node, nodeID int) bool { - const maxNodelets = 4 // Number of nodelets for alphanumeric labels - const maxNumNodelets = 4 // Number of nodelets for numeric labels var nodelets string // Populate two Tag slices, one for LabelTags and one for NumericTags. var ts []*Tag - lnts := make(map[string][]*Tag, 0) + lnts := make(map[string][]*Tag) for _, t := range node.LabelTags { ts = append(ts, t) } @@ -239,15 +247,15 @@ func (b *builder) addNodelets(node *Node, nodeID int) bool { continue } weight := b.config.FormatValue(w) - nodelets += fmt.Sprintf(`N%d_%d [label = "%s" fontsize=8 shape=box3d tooltip="%s"]`+"\n", nodeID, i, t.Name, weight) + nodelets += fmt.Sprintf(`N%d_%d [label = "%s" id="N%d_%d" fontsize=8 shape=box3d tooltip="%s"]`+"\n", nodeID, i, t.Name, nodeID, i, weight) nodelets += fmt.Sprintf(`N%d -> N%d_%d [label=" %s" weight=100 tooltip="%s" labeltooltip="%s"]`+"\n", nodeID, nodeID, i, weight, weight, weight) if nts := lnts[t.Name]; nts != nil { - nodelets += b.numericNodelets(nts, maxNumNodelets, flatTags, fmt.Sprintf(`N%d_%d`, nodeID, i)) + nodelets += b.numericNodelets(nts, maxNodelets, flatTags, fmt.Sprintf(`N%d_%d`, nodeID, i)) } } if nts := lnts[""]; nts != nil { - nodelets += b.numericNodelets(nts, maxNumNodelets, flatTags, fmt.Sprintf(`N%d`, nodeID)) + nodelets += 
b.numericNodelets(nts, maxNodelets, flatTags, fmt.Sprintf(`N%d`, nodeID)) } fmt.Fprint(b, nodelets) @@ -266,7 +274,7 @@ func (b *builder) numericNodelets(nts []*Tag, maxNumNodelets int, flatTags bool, } if w != 0 { weight := b.config.FormatValue(w) - nodelets += fmt.Sprintf(`N%s_%d [label = "%s" fontsize=8 shape=box3d tooltip="%s"]`+"\n", source, j, t.Name, weight) + nodelets += fmt.Sprintf(`N%s_%d [label = "%s" id="N%s_%d" fontsize=8 shape=box3d tooltip="%s"]`+"\n", source, j, t.Name, source, j, weight) nodelets += fmt.Sprintf(`%s -> N%s_%d [label=" %s" weight=100 tooltip="%s" labeltooltip="%s"%s]`+"\n", source, source, j, weight, weight, weight, attr) } } @@ -441,14 +449,9 @@ func tagDistance(t, u *Tag) float64 { } func (b *builder) tagGroupLabel(g []*Tag) (label string, flat, cum int64) { - formatTag := b.config.FormatTag - if formatTag == nil { - formatTag = measurement.Label - } - if len(g) == 1 { t := g[0] - return formatTag(t.Value, t.Unit), t.FlatValue(), t.CumValue() + return measurement.Label(t.Value, t.Unit), t.FlatValue(), t.CumValue() } min := g[0] max := g[0] @@ -472,7 +475,11 @@ func (b *builder) tagGroupLabel(g []*Tag) (label string, flat, cum int64) { if dc != 0 { c = c / dc } - return formatTag(min.Value, min.Unit) + ".." + formatTag(max.Value, max.Unit), f, c + + // Tags are not scaled with the selected output unit because tags are often + // much smaller than other values which appear, so the range of tag sizes + // sometimes would appear to be "0..0" when scaled to the selected output unit. + return measurement.Label(min.Value, min.Unit) + ".." 
+ measurement.Label(max.Value, max.Unit), f, c } func min64(a, b int64) int64 { diff --git a/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph_test.go b/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph_test.go index 7f512697692..b8368b8fa4f 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph_test.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/graph/dotgraph_test.go @@ -16,8 +16,10 @@ package graph import ( "bytes" + "flag" "fmt" "io/ioutil" + "path/filepath" "reflect" "strconv" "strings" @@ -26,7 +28,7 @@ import ( "github.com/google/pprof/internal/proftest" ) -const path = "testdata/" +var updateFlag = flag.Bool("update", false, "Update the golden files") func TestComposeWithStandardGraph(t *testing.T) { g := baseGraph() @@ -35,12 +37,7 @@ func TestComposeWithStandardGraph(t *testing.T) { var buf bytes.Buffer ComposeDot(&buf, g, a, c) - want, err := ioutil.ReadFile(path + "compose1.dot") - if err != nil { - t.Fatalf("error reading test file: %v", err) - } - - compareGraphs(t, buf.Bytes(), want) + compareGraphs(t, buf.Bytes(), "compose1.dot") } func TestComposeWithNodeAttributesAndZeroFlat(t *testing.T) { @@ -64,12 +61,7 @@ func TestComposeWithNodeAttributesAndZeroFlat(t *testing.T) { var buf bytes.Buffer ComposeDot(&buf, g, a, c) - want, err := ioutil.ReadFile(path + "compose2.dot") - if err != nil { - t.Fatalf("error reading test file: %v", err) - } - - compareGraphs(t, buf.Bytes(), want) + compareGraphs(t, buf.Bytes(), "compose2.dot") } func TestComposeWithTagsAndResidualEdge(t *testing.T) { @@ -97,12 +89,7 @@ func TestComposeWithTagsAndResidualEdge(t *testing.T) { var buf bytes.Buffer ComposeDot(&buf, g, a, c) - want, err := ioutil.ReadFile(path + "compose3.dot") - if err != nil { - t.Fatalf("error reading test file: %v", err) - } - - compareGraphs(t, buf.Bytes(), want) + compareGraphs(t, buf.Bytes(), "compose3.dot") } func TestComposeWithNestedTags(t *testing.T) { @@ -127,12 +114,7 @@ func 
TestComposeWithNestedTags(t *testing.T) { var buf bytes.Buffer ComposeDot(&buf, g, a, c) - want, err := ioutil.ReadFile(path + "compose5.dot") - if err != nil { - t.Fatalf("error reading test file: %v", err) - } - - compareGraphs(t, buf.Bytes(), want) + compareGraphs(t, buf.Bytes(), "compose5.dot") } func TestComposeWithEmptyGraph(t *testing.T) { @@ -142,12 +124,18 @@ func TestComposeWithEmptyGraph(t *testing.T) { var buf bytes.Buffer ComposeDot(&buf, g, a, c) - want, err := ioutil.ReadFile(path + "compose4.dot") - if err != nil { - t.Fatalf("error reading test file: %v", err) - } + compareGraphs(t, buf.Bytes(), "compose4.dot") +} - compareGraphs(t, buf.Bytes(), want) +func TestComposeWithStandardGraphAndURL(t *testing.T) { + g := baseGraph() + a, c := baseAttrsAndConfig() + c.LegendURL = "http://example.com" + + var buf bytes.Buffer + ComposeDot(&buf, g, a, c) + + compareGraphs(t, buf.Bytes(), "compose6.dot") } func baseGraph() *Graph { @@ -199,13 +187,78 @@ func baseAttrsAndConfig() (*DotAttributes, *DotConfig) { return a, c } -func compareGraphs(t *testing.T, got, want []byte) { +func compareGraphs(t *testing.T, got []byte, wantFile string) { + wantFile = filepath.Join("testdata", wantFile) + want, err := ioutil.ReadFile(wantFile) + if err != nil { + t.Fatalf("error reading test file %s: %v", wantFile, err) + } + if string(got) != string(want) { d, err := proftest.Diff(got, want) if err != nil { t.Fatalf("error finding diff: %v", err) } t.Errorf("Compose incorrectly wrote %s", string(d)) + if *updateFlag { + err := ioutil.WriteFile(wantFile, got, 0644) + if err != nil { + t.Errorf("failed to update the golden file %q: %v", wantFile, err) + } + } + } +} + +func TestNodeletCountCapping(t *testing.T) { + labelTags := make(TagMap) + for i := 0; i < 10; i++ { + name := fmt.Sprintf("tag-%d", i) + labelTags[name] = &Tag{ + Name: name, + Flat: 10, + Cum: 10, + } + } + numTags := make(TagMap) + for i := 0; i < 10; i++ { + name := fmt.Sprintf("num-tag-%d", i) + 
numTags[name] = &Tag{ + Name: name, + Unit: "mb", + Value: 16, + Flat: 10, + Cum: 10, + } + } + node1 := &Node{ + Info: NodeInfo{Name: "node1-with-tags"}, + Flat: 10, + Cum: 10, + NumericTags: map[string]TagMap{"": numTags}, + LabelTags: labelTags, + } + node2 := &Node{ + Info: NodeInfo{Name: "node2"}, + Flat: 15, + Cum: 15, + } + node3 := &Node{ + Info: NodeInfo{Name: "node3"}, + Flat: 15, + Cum: 15, + } + g := &Graph{ + Nodes: Nodes{ + node1, + node2, + node3, + }, + } + for n := 1; n <= 3; n++ { + input := maxNodelets + n + if got, want := len(g.SelectTopNodes(input, true)), n; got != want { + t.Errorf("SelectTopNodes(%d): got %d nodes, want %d", input, got, want) + } } } @@ -240,19 +293,19 @@ func TestTagCollapse(t *testing.T) { } tagWant := [][]*Tag{ - []*Tag{ + { makeTag("1B..2GB", "", 0, 2401, 2401), }, - []*Tag{ + { makeTag("2GB", "", 0, 1000, 1000), makeTag("1B..12MB", "", 0, 1401, 1401), }, - []*Tag{ + { makeTag("2GB", "", 0, 1000, 1000), makeTag("12MB", "", 0, 100, 100), makeTag("1B..1MB", "", 0, 1301, 1301), }, - []*Tag{ + { makeTag("2GB", "", 0, 1000, 1000), makeTag("1MB", "", 0, 1000, 1000), makeTag("2B..1kB", "", 0, 201, 201), diff --git a/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go b/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go index 428e6257c76..cd72bf2ab13 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go @@ -240,6 +240,8 @@ type Edge struct { Inline bool } +// WeightValue returns the weight value for this edge, normalizing if a +// divisor is available. func (e *Edge) WeightValue() int64 { if e.WeightDiv == 0 { return e.Weight @@ -327,7 +329,7 @@ func newGraph(prof *profile.Profile, o *Options) (*Graph, map[uint64]Nodes) { // Add cum weight to all nodes in stack, avoiding double counting. 
if _, ok := seenNode[n]; !ok { seenNode[n] = true - n.addSample(dw, w, labels, sample.NumLabel, o.FormatTag, false) + n.addSample(dw, w, labels, sample.NumLabel, sample.NumUnit, o.FormatTag, false) } // Update edge weights for all edges in stack, avoiding double counting. if _, ok := seenEdge[nodePair{n, parent}]; !ok && parent != nil && n != parent { @@ -340,7 +342,7 @@ func newGraph(prof *profile.Profile, o *Options) (*Graph, map[uint64]Nodes) { } if parent != nil && !residual { // Add flat weight to leaf node. - parent.addSample(dw, w, labels, sample.NumLabel, o.FormatTag, true) + parent.addSample(dw, w, labels, sample.NumLabel, sample.NumUnit, o.FormatTag, true) } } @@ -399,7 +401,7 @@ func newTree(prof *profile.Profile, o *Options) (g *Graph) { if n == nil { continue } - n.addSample(dw, w, labels, sample.NumLabel, o.FormatTag, false) + n.addSample(dw, w, labels, sample.NumLabel, sample.NumUnit, o.FormatTag, false) if parent != nil { parent.AddToEdgeDiv(n, dw, w, false, lidx != len(lines)-1) } @@ -407,7 +409,7 @@ func newTree(prof *profile.Profile, o *Options) (g *Graph) { } } if parent != nil { - parent.addSample(dw, w, labels, sample.NumLabel, o.FormatTag, true) + parent.addSample(dw, w, labels, sample.NumLabel, sample.NumUnit, o.FormatTag, true) } } @@ -600,7 +602,7 @@ func (ns Nodes) Sum() (flat int64, cum int64) { return } -func (n *Node) addSample(dw, w int64, labels string, numLabel map[string][]int64, format func(int64, string) string, flat bool) { +func (n *Node) addSample(dw, w int64, labels string, numLabel map[string][]int64, numUnit map[string][]string, format func(int64, string) string, flat bool) { // Update sample value if flat { n.FlatDiv += dw @@ -631,9 +633,15 @@ func (n *Node) addSample(dw, w int64, labels string, numLabel map[string][]int64 if format == nil { format = defaultLabelFormat } - for key, nvals := range numLabel { - for _, v := range nvals { - t := numericTags.findOrAddTag(format(v, key), key, v) + for k, nvals := range numLabel 
{ + units := numUnit[k] + for i, v := range nvals { + var t *Tag + if len(units) > 0 { + t = numericTags.findOrAddTag(format(v, units[i]), units[i], v) + } else { + t = numericTags.findOrAddTag(format(v, k), k, v) + } if flat { t.FlatDiv += dw t.Flat += w @@ -800,7 +808,11 @@ func (g *Graph) selectTopNodes(maxNodes int, visualMode bool) Nodes { // If generating a visual graph, count tags as nodes. Update // maxNodes to account for them. for i, n := range g.Nodes { - if count += countTags(n) + 1; count >= maxNodes { + tags := countTags(n) + if tags > maxNodelets { + tags = maxNodelets + } + if count += tags + 1; count >= maxNodes { maxNodes = i + 1 break } @@ -832,17 +844,6 @@ func countTags(n *Node) int { return count } -// countEdges counts the number of edges below the specified cutoff. -func countEdges(el EdgeMap, cutoff int64) int { - count := 0 - for _, e := range el { - if e.Weight > cutoff { - count++ - } - } - return count -} - // RemoveRedundantEdges removes residual edges if the destination can // be reached through another path. This is done to simplify the graph // while preserving connectivity. diff --git a/src/cmd/vendor/github.com/google/pprof/internal/graph/graph_test.go b/src/cmd/vendor/github.com/google/pprof/internal/graph/graph_test.go index c2848f8cf20..5657084cac0 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/graph/graph_test.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/graph/graph_test.go @@ -171,7 +171,7 @@ func createExpectedEdges(parent expectedNode, children ...expectedNode) { } } -// createTestCase1 creates a test case that initally looks like: +// createTestCase1 creates a test case that initially looks like: // 0 // |(5) // 1 @@ -255,7 +255,7 @@ func createTestCase2() trimTreeTestcase { } } -// createTestCase3 creates an initally empty graph and expects an empty graph +// createTestCase3 creates an initially empty graph and expects an empty graph // after trimming. 
func createTestCase3() trimTreeTestcase { graph := &Graph{make(Nodes, 0)} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose1.dot b/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose1.dot index ceed025318a..da349a40a8b 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose1.dot +++ b/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose1.dot @@ -1,7 +1,7 @@ digraph "testtitle" { node [style=filled fillcolor="#f8f8f8"] -subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\l"] } -N1 [label="src\n10 (10.00%)\nof 25 (25.00%)" fontsize=22 shape=box tooltip="src (25)" color="#b23c00" fillcolor="#edddd5"] -N2 [label="dest\n15 (15.00%)\nof 25 (25.00%)" fontsize=24 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"] +subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\l" tooltip="testtitle"] } +N1 [label="src\n10 (10.00%)\nof 25 (25.00%)" id="node1" fontsize=22 shape=box tooltip="src (25)" color="#b23c00" fillcolor="#edddd5"] +N2 [label="dest\n15 (15.00%)\nof 25 (25.00%)" id="node2" fontsize=24 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"] N1 -> N2 [label=" 10" weight=11 color="#b28559" tooltip="src -> dest (10)" labeltooltip="src -> dest (10)"] } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose2.dot b/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose2.dot index ee951fe3b1c..0c1a6ebaf12 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose2.dot +++ b/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose2.dot @@ -1,7 +1,7 @@ digraph "testtitle" { node [style=filled fillcolor="#f8f8f8"] -subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\l"] } -N1 [label="SRC10 (10.00%)\nof 25 (25.00%)" fontsize=24 shape=folder tooltip="src (25)" color="#b23c00" fillcolor="#edddd5" 
style="bold,filled" peripheries=2 URL="www.google.com" target="_blank"] -N2 [label="dest\n0 of 25 (25.00%)" fontsize=8 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"] +subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\l" tooltip="testtitle"] } +N1 [label="SRC10 (10.00%)\nof 25 (25.00%)" id="node1" fontsize=24 shape=folder tooltip="src (25)" color="#b23c00" fillcolor="#edddd5" style="bold,filled" peripheries=2 URL="www.google.com" target="_blank"] +N2 [label="dest\n0 of 25 (25.00%)" id="node2" fontsize=8 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"] N1 -> N2 [label=" 10" weight=11 color="#b28559" tooltip="src -> dest (10)" labeltooltip="src -> dest (10)"] } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose3.dot b/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose3.dot index 99a3119b82b..1b878b79df9 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose3.dot +++ b/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose3.dot @@ -1,11 +1,11 @@ digraph "testtitle" { node [style=filled fillcolor="#f8f8f8"] -subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\l"] } -N1 [label="src\n10 (10.00%)\nof 25 (25.00%)" fontsize=22 shape=box tooltip="src (25)" color="#b23c00" fillcolor="#edddd5"] -N1_0 [label = "tag1" fontsize=8 shape=box3d tooltip="10"] +subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\l" tooltip="testtitle"] } +N1 [label="src\n10 (10.00%)\nof 25 (25.00%)" id="node1" fontsize=22 shape=box tooltip="src (25)" color="#b23c00" fillcolor="#edddd5"] +N1_0 [label = "tag1" id="N1_0" fontsize=8 shape=box3d tooltip="10"] N1 -> N1_0 [label=" 10" weight=100 tooltip="10" labeltooltip="10"] -NN1_0 [label = "tag2" fontsize=8 shape=box3d tooltip="20"] +NN1_0 [label = "tag2" id="NN1_0" fontsize=8 shape=box3d tooltip="20"] N1 -> NN1_0 [label=" 20" weight=100 tooltip="20" 
labeltooltip="20"] -N2 [label="dest\n15 (15.00%)\nof 25 (25.00%)" fontsize=24 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"] +N2 [label="dest\n15 (15.00%)\nof 25 (25.00%)" id="node2" fontsize=24 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"] N1 -> N2 [label=" 10" weight=11 color="#b28559" tooltip="src ... dest (10)" labeltooltip="src ... dest (10)" style="dotted" minlen=2] } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose4.dot b/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose4.dot index adc9cc6f68a..302da8ce948 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose4.dot +++ b/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose4.dot @@ -1,4 +1,4 @@ digraph "testtitle" { node [style=filled fillcolor="#f8f8f8"] -subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\l"] } +subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\l" tooltip="testtitle"] } } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose5.dot b/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose5.dot index 352975f5878..8876e337e66 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose5.dot +++ b/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose5.dot @@ -1,11 +1,11 @@ digraph "testtitle" { node [style=filled fillcolor="#f8f8f8"] -subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\l"] } -N1 [label="src\n10 (10.00%)\nof 25 (25.00%)" fontsize=22 shape=box tooltip="src (25)" color="#b23c00" fillcolor="#edddd5"] -N1_0 [label = "tag1" fontsize=8 shape=box3d tooltip="10"] +subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\l" tooltip="testtitle"] } +N1 [label="src\n10 (10.00%)\nof 25 (25.00%)" id="node1" fontsize=22 shape=box tooltip="src (25)" color="#b23c00" 
fillcolor="#edddd5"] +N1_0 [label = "tag1" id="N1_0" fontsize=8 shape=box3d tooltip="10"] N1 -> N1_0 [label=" 10" weight=100 tooltip="10" labeltooltip="10"] -NN1_0_0 [label = "tag2" fontsize=8 shape=box3d tooltip="20"] +NN1_0_0 [label = "tag2" id="NN1_0_0" fontsize=8 shape=box3d tooltip="20"] N1_0 -> NN1_0_0 [label=" 20" weight=100 tooltip="20" labeltooltip="20"] -N2 [label="dest\n15 (15.00%)\nof 25 (25.00%)" fontsize=24 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"] +N2 [label="dest\n15 (15.00%)\nof 25 (25.00%)" id="node2" fontsize=24 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"] N1 -> N2 [label=" 10" weight=11 color="#b28559" tooltip="src -> dest (10)" labeltooltip="src -> dest (10)" minlen=2] } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose6.dot b/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose6.dot new file mode 100644 index 00000000000..cf884394c7e --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/graph/testdata/compose6.dot @@ -0,0 +1,7 @@ +digraph "testtitle" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\l" URL="http://example.com" target="_blank" tooltip="testtitle"] } +N1 [label="src\n10 (10.00%)\nof 25 (25.00%)" id="node1" fontsize=22 shape=box tooltip="src (25)" color="#b23c00" fillcolor="#edddd5"] +N2 [label="dest\n15 (15.00%)\nof 25 (25.00%)" id="node2" fontsize=24 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"] +N1 -> N2 [label=" 10" weight=11 color="#b28559" tooltip="src -> dest (10)" labeltooltip="src -> dest (10)"] +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go b/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go index 0f7a21d26a8..0a60435644b 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go +++ 
b/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement.go @@ -170,12 +170,16 @@ func memoryLabel(value int64, fromUnit, toUnit string) (v float64, u string, ok switch fromUnit { case "byte", "b": - case "kilobyte", "kb": + case "kb", "kbyte", "kilobyte": value *= 1024 - case "megabyte", "mb": + case "mb", "mbyte", "megabyte": value *= 1024 * 1024 - case "gigabyte", "gb": + case "gb", "gbyte", "gigabyte": value *= 1024 * 1024 * 1024 + case "tb", "tbyte", "terabyte": + value *= 1024 * 1024 * 1024 * 1024 + case "pb", "pbyte", "petabyte": + value *= 1024 * 1024 * 1024 * 1024 * 1024 default: return 0, "", false } @@ -188,8 +192,12 @@ func memoryLabel(value int64, fromUnit, toUnit string) (v float64, u string, ok toUnit = "kb" case value < 1024*1024*1024: toUnit = "mb" - default: + case value < 1024*1024*1024*1024: toUnit = "gb" + case value < 1024*1024*1024*1024*1024: + toUnit = "tb" + default: + toUnit = "pb" } } @@ -203,6 +211,10 @@ func memoryLabel(value int64, fromUnit, toUnit string) (v float64, u string, ok output, toUnit = float64(value)/(1024*1024), "MB" case "gb", "gbyte", "gigabyte": output, toUnit = float64(value)/(1024*1024*1024), "GB" + case "tb", "tbyte", "terabyte": + output, toUnit = float64(value)/(1024*1024*1024*1024), "TB" + case "pb", "pbyte", "petabyte": + output, toUnit = float64(value)/(1024*1024*1024*1024*1024), "PB" } return output, toUnit, true } @@ -289,7 +301,7 @@ func timeLabel(value int64, fromUnit, toUnit string) (v float64, u string, ok bo case "week", "wk": output, toUnit = dd/float64(7*24*time.Hour), "wks" case "year", "yr": - output, toUnit = dd/float64(365*7*24*time.Hour), "yrs" + output, toUnit = dd/float64(365*24*time.Hour), "yrs" default: fallthrough case "sec", "second", "s": diff --git a/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement_test.go b/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement_test.go new file mode 100644 index 00000000000..155cafa1982 --- /dev/null 
+++ b/src/cmd/vendor/github.com/google/pprof/internal/measurement/measurement_test.go @@ -0,0 +1,47 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package measurement + +import ( + "testing" +) + +func TestScale(t *testing.T) { + for _, tc := range []struct { + value int64 + fromUnit, toUnit string + wantValue float64 + wantUnit string + }{ + {1, "s", "ms", 1000, "ms"}, + {1, "kb", "b", 1024, "B"}, + {1, "kbyte", "b", 1024, "B"}, + {1, "kilobyte", "b", 1024, "B"}, + {1, "mb", "kb", 1024, "kB"}, + {1, "gb", "mb", 1024, "MB"}, + {1024, "gb", "tb", 1, "TB"}, + {1024, "tb", "pb", 1, "PB"}, + {2048, "mb", "auto", 2, "GB"}, + {3.1536e7, "s", "auto", 1, "yrs"}, + {-1, "s", "ms", -1000, "ms"}, + {1, "foo", "count", 1, ""}, + {1, "foo", "bar", 1, "bar"}, + } { + if gotValue, gotUnit := Scale(tc.value, tc.fromUnit, tc.toUnit); gotValue != tc.wantValue || gotUnit != tc.wantUnit { + t.Errorf("Scale(%d, %q, %q) = (%f, %q), want (%f, %q)", + tc.value, tc.fromUnit, tc.toUnit, gotValue, gotUnit, tc.wantValue, tc.wantUnit) + } + } +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go b/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go index d14ac2c215d..e5878aed704 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go @@ -17,6 +17,7 @@ package plugin import ( "io" + "net/http" 
"regexp" "time" @@ -31,6 +32,16 @@ type Options struct { Sym Symbolizer Obj ObjTool UI UI + + // HTTPServer is a function that should block serving http requests, + // including the handlers specfied in args. If non-nil, pprof will + // invoke this function if necessary to provide a web interface. + // + // If HTTPServer is nil, pprof will use its own internal HTTP server. + // + // A common use for a custom HTTPServer is to provide custom + // authentication checks. + HTTPServer func(args *HTTPServerArgs) error } // Writer provides a mechanism to write data under a certain name, @@ -185,3 +196,17 @@ type UI interface { // the auto-completion of cmd, if the UI supports auto-completion at all. SetAutoComplete(complete func(string) string) } + +// HTTPServerArgs contains arguments needed by an HTTP server that +// is exporting a pprof web interface. +type HTTPServerArgs struct { + // Hostport contains the http server address (derived from flags). + Hostport string + + Host string // Host portion of Hostport + Port int // Port portion of Hostport + + // Handlers maps from URL paths to the handler to invoke to + // serve that path. + Handlers map[string]http.Handler +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/proftest/proftest.go b/src/cmd/vendor/github.com/google/pprof/internal/proftest/proftest.go index 9767b2eedb6..7f9dcab61a6 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/proftest/proftest.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/proftest/proftest.go @@ -22,6 +22,7 @@ import ( "io/ioutil" "os" "os/exec" + "regexp" "testing" ) @@ -71,10 +72,14 @@ func EncodeJSON(x interface{}) []byte { } // TestUI implements the plugin.UI interface, triggering test failures -// if more than Ignore errors are printed. +// if more than Ignore errors not matching AllowRx are printed. +// Also tracks the number of times the error matches AllowRx in +// NumAllowRxMatches. 
type TestUI struct { - T *testing.T - Ignore int + T *testing.T + Ignore int + AllowRx string + NumAllowRxMatches int } // ReadLine returns no input, as no input is expected during testing. @@ -89,11 +94,24 @@ func (ui *TestUI) Print(args ...interface{}) { // PrintErr messages may trigger an error failure. A fixed number of // error messages are permitted when appropriate. func (ui *TestUI) PrintErr(args ...interface{}) { + if ui.AllowRx != "" { + if matched, err := regexp.MatchString(ui.AllowRx, fmt.Sprint(args...)); matched || err != nil { + if err != nil { + ui.T.Errorf("failed to match against regex %q: %v", ui.AllowRx, err) + } + ui.NumAllowRxMatches++ + return + } + } if ui.Ignore > 0 { ui.Ignore-- return } - ui.T.Error(args) + // Stringify arguments with fmt.Sprint() to match what default UI + // implementation does. Without this Error() calls fmt.Sprintln() which + // _always_ adds spaces between arguments, unlike fmt.Sprint() which only + // adds them between arguments if neither is string. + ui.T.Error(fmt.Sprint(args...)) } // IsTerminal indicates if the UI is an interactive terminal. diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/report.go b/src/cmd/vendor/github.com/google/pprof/internal/report/report.go index ecfd6982b14..f434554dd96 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/report/report.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/report.go @@ -25,6 +25,7 @@ import ( "sort" "strconv" "strings" + "text/tabwriter" "time" "github.com/google/pprof/internal/graph" @@ -63,6 +64,8 @@ type Options struct { Ratio float64 Title string ProfileLabels []string + ActiveFilters []string + NumLabelUnits map[string]string NodeCount int NodeFraction float64 @@ -125,6 +128,9 @@ func (rpt *Report) newTrimmedGraph() (g *graph.Graph, origCount, droppedNodes, d visualMode := o.OutputFormat == Dot cumSort := o.CumSort + // The call_tree option is only honored when generating visual representations of the callgraph. 
+ callTree := o.CallTree && (o.OutputFormat == Dot || o.OutputFormat == Callgrind) + // First step: Build complete graph to identify low frequency nodes, based on their cum weight. g = rpt.newGraph(nil) totalValue, _ := g.Nodes.Sum() @@ -133,7 +139,7 @@ func (rpt *Report) newTrimmedGraph() (g *graph.Graph, origCount, droppedNodes, d // Filter out nodes with cum value below nodeCutoff. if nodeCutoff > 0 { - if o.CallTree { + if callTree { if nodesKept := g.DiscardLowFrequencyNodePtrs(nodeCutoff); len(g.Nodes) != len(nodesKept) { droppedNodes = len(g.Nodes) - len(nodesKept) g.TrimTree(nodesKept) @@ -154,7 +160,7 @@ func (rpt *Report) newTrimmedGraph() (g *graph.Graph, origCount, droppedNodes, d // Remove low frequency tags and edges as they affect selection. g.TrimLowFrequencyTags(nodeCutoff) g.TrimLowFrequencyEdges(edgeCutoff) - if o.CallTree { + if callTree { if nodesKept := g.SelectTopNodePtrs(nodeCount, visualMode); len(g.Nodes) != len(nodesKept) { g.TrimTree(nodesKept) g.SortNodes(cumSort, visualMode) @@ -236,15 +242,27 @@ func (rpt *Report) newGraph(nodes graph.NodeSet) *graph.Graph { for _, f := range prof.Function { f.Filename = trimPath(f.Filename) } - // Remove numeric tags not recognized by pprof. + // Removes all numeric tags except for the bytes tag prior + // to making graph. + // TODO: modify to select first numeric tag if no bytes tag for _, s := range prof.Sample { numLabels := make(map[string][]int64, len(s.NumLabel)) - for k, v := range s.NumLabel { + numUnits := make(map[string][]string, len(s.NumLabel)) + for k, vs := range s.NumLabel { if k == "bytes" { - numLabels[k] = append(numLabels[k], v...) + unit := o.NumLabelUnits[k] + numValues := make([]int64, len(vs)) + numUnit := make([]string, len(vs)) + for i, v := range vs { + numValues[i] = v + numUnit[i] = unit + } + numLabels[k] = append(numLabels[k], numValues...) + numUnits[k] = append(numUnits[k], numUnit...) 
} } s.NumLabel = numLabels + s.NumUnit = numUnits } formatTag := func(v int64, key string) string { @@ -337,6 +355,11 @@ func (fm functionMap) FindOrAdd(ni graph.NodeInfo) *profile.Function { // printAssembly prints an annotated assembly listing. func printAssembly(w io.Writer, rpt *Report, obj plugin.ObjTool) error { + return PrintAssembly(w, rpt, obj, -1) +} + +// PrintAssembly prints annotated disasssembly of rpt to w. +func PrintAssembly(w io.Writer, rpt *Report, obj plugin.ObjTool, maxFuncs int) error { o := rpt.options prof := rpt.prof @@ -352,12 +375,34 @@ func printAssembly(w io.Writer, rpt *Report, obj plugin.ObjTool) error { fmt.Fprintln(w, "Total:", rpt.formatValue(rpt.total)) symbols := symbolsFromBinaries(prof, g, o.Symbol, address, obj) symNodes := nodesPerSymbol(g.Nodes, symbols) - // Sort function names for printing. - var syms objSymbols + + // Sort for printing. + var syms []*objSymbol for s := range symNodes { syms = append(syms, s) } - sort.Sort(syms) + byName := func(a, b *objSymbol) bool { + if na, nb := a.sym.Name[0], b.sym.Name[0]; na != nb { + return na < nb + } + return a.sym.Start < b.sym.Start + } + if maxFuncs < 0 { + sort.Sort(orderSyms{syms, byName}) + } else { + byFlatSum := func(a, b *objSymbol) bool { + suma, _ := symNodes[a].Sum() + sumb, _ := symNodes[b].Sum() + if suma != sumb { + return suma > sumb + } + return byName(a, b) + } + sort.Sort(orderSyms{syms, byFlatSum}) + if len(syms) > maxFuncs { + syms = syms[:maxFuncs] + } + } // Correlate the symbols from the binary with the profile samples. for _, s := range syms { @@ -471,6 +516,7 @@ func symbolsFromBinaries(prof *profile.Profile, g *graph.Graph, rx *regexp.Regex &objSymbol{ sym: ms, base: base, + file: f, }, ) } @@ -485,25 +531,18 @@ func symbolsFromBinaries(prof *profile.Profile, g *graph.Graph, rx *regexp.Regex type objSymbol struct { sym *plugin.Sym base uint64 + file plugin.ObjFile } -// objSymbols is a wrapper type to enable sorting of []*objSymbol. 
-type objSymbols []*objSymbol - -func (o objSymbols) Len() int { - return len(o) +// orderSyms is a wrapper type to sort []*objSymbol by a supplied comparator. +type orderSyms struct { + v []*objSymbol + less func(a, b *objSymbol) bool } -func (o objSymbols) Less(i, j int) bool { - if namei, namej := o[i].sym.Name[0], o[j].sym.Name[0]; namei != namej { - return namei < namej - } - return o[i].sym.Start < o[j].sym.Start -} - -func (o objSymbols) Swap(i, j int) { - o[i], o[j] = o[j], o[i] -} +func (o orderSyms) Len() int { return len(o.v) } +func (o orderSyms) Less(i, j int) bool { return o.less(o.v[i], o.v[j]) } +func (o orderSyms) Swap(i, j int) { o.v[i], o.v[j] = o.v[j], o.v[i] } // nodesPerSymbol classifies nodes into a group of symbols. func nodesPerSymbol(ns graph.Nodes, symbols []*objSymbol) map[*objSymbol]graph.Nodes { @@ -528,6 +567,13 @@ type assemblyInstruction struct { line int flat, cum int64 flatDiv, cumDiv int64 + startsBlock bool + inlineCalls []callID +} + +type callID struct { + file string + line int } func (a *assemblyInstruction) flatValue() int64 { @@ -617,25 +663,24 @@ func printTags(w io.Writer, rpt *Report) error { for _, s := range p.Sample { for key, vals := range s.Label { for _, val := range vals { - if valueMap, ok := tagMap[key]; ok { - valueMap[val] = valueMap[val] + s.Value[0] - continue + valueMap, ok := tagMap[key] + if !ok { + valueMap = make(map[string]int64) + tagMap[key] = valueMap } - valueMap := make(map[string]int64) - valueMap[val] = s.Value[0] - tagMap[key] = valueMap + valueMap[val] += o.SampleValue(s.Value) } } for key, vals := range s.NumLabel { + unit := o.NumLabelUnits[key] for _, nval := range vals { - val := formatTag(nval, key) - if valueMap, ok := tagMap[key]; ok { - valueMap[val] = valueMap[val] + s.Value[0] - continue + val := formatTag(nval, unit) + valueMap, ok := tagMap[key] + if !ok { + valueMap = make(map[string]int64) + tagMap[key] = valueMap } - valueMap := make(map[string]int64) - valueMap[val] = 
s.Value[0] - tagMap[key] = valueMap + valueMap[val] += o.SampleValue(s.Value) } } } @@ -644,6 +689,7 @@ func printTags(w io.Writer, rpt *Report) error { for key := range tagMap { tagKeys = append(tagKeys, &graph.Tag{Name: key}) } + tabw := tabwriter.NewWriter(w, 0, 0, 1, ' ', tabwriter.AlignRight) for _, tagKey := range graph.SortTags(tagKeys, true) { var total int64 key := tagKey.Name @@ -653,18 +699,19 @@ func printTags(w io.Writer, rpt *Report) error { tags = append(tags, &graph.Tag{Name: t, Flat: c}) } - fmt.Fprintf(w, "%s: Total %d\n", key, total) + f, u := measurement.Scale(total, o.SampleUnit, o.OutputUnit) + fmt.Fprintf(tabw, "%s:\t Total %.1f%s\n", key, f, u) for _, t := range graph.SortTags(tags, true) { + f, u := measurement.Scale(t.FlatValue(), o.SampleUnit, o.OutputUnit) if total > 0 { - fmt.Fprintf(w, " %8d (%s): %s\n", t.FlatValue(), - percentage(t.FlatValue(), total), t.Name) + fmt.Fprintf(tabw, " \t%.1f%s (%s):\t %s\n", f, u, percentage(t.FlatValue(), total), t.Name) } else { - fmt.Fprintf(w, " %8d: %s\n", t.FlatValue(), t.Name) + fmt.Fprintf(tabw, " \t%.1f%s:\t %s\n", f, u, t.Name) } } - fmt.Fprintln(w) + fmt.Fprintln(tabw) } - return nil + return tabw.Flush() } // printComments prints all freeform comments in the profile. @@ -677,16 +724,22 @@ func printComments(w io.Writer, rpt *Report) error { return nil } -// printText prints a flat text report for a profile. -func printText(w io.Writer, rpt *Report) error { +// TextItem holds a single text report entry. +type TextItem struct { + Name string + InlineLabel string // Not empty if inlined + Flat, Cum int64 // Raw values + FlatFormat, CumFormat string // Formatted values +} + +// TextItems returns a list of text items from the report and a list +// of labels that describe the report. 
+func TextItems(rpt *Report) ([]TextItem, []string) { g, origCount, droppedNodes, _ := rpt.newTrimmedGraph() rpt.selectOutputUnit(g) + labels := reportLabels(rpt, g, origCount, droppedNodes, 0, false) - fmt.Fprintln(w, strings.Join(reportLabels(rpt, g, origCount, droppedNodes, 0, false), "\n")) - - fmt.Fprintf(w, "%10s %5s%% %5s%% %10s %5s%%\n", - "flat", "flat", "sum", "cum", "cum") - + var items []TextItem var flatSum int64 for _, n := range g.Nodes { name, flat, cum := n.Info.PrintableName(), n.FlatValue(), n.CumValue() @@ -700,22 +753,46 @@ func printText(w io.Writer, rpt *Report) error { } } + var inl string if inline { if noinline { - name = name + " (partial-inline)" + inl = "(partial-inline)" } else { - name = name + " (inline)" + inl = "(inline)" } } flatSum += flat - fmt.Fprintf(w, "%10s %s %s %10s %s %s\n", - rpt.formatValue(flat), - percentage(flat, rpt.total), + items = append(items, TextItem{ + Name: name, + InlineLabel: inl, + Flat: flat, + Cum: cum, + FlatFormat: rpt.formatValue(flat), + CumFormat: rpt.formatValue(cum), + }) + } + return items, labels +} + +// printText prints a flat text report for a profile. 
+func printText(w io.Writer, rpt *Report) error { + items, labels := TextItems(rpt) + fmt.Fprintln(w, strings.Join(labels, "\n")) + fmt.Fprintf(w, "%10s %5s%% %5s%% %10s %5s%%\n", + "flat", "flat", "sum", "cum", "cum") + var flatSum int64 + for _, item := range items { + inl := item.InlineLabel + if inl != "" { + inl = " " + inl + } + flatSum += item.Flat + fmt.Fprintf(w, "%10s %s %s %10s %s %s%s\n", + item.FlatFormat, percentage(item.Flat, rpt.total), percentage(flatSum, rpt.total), - rpt.formatValue(cum), - percentage(cum, rpt.total), - name) + item.CumFormat, percentage(item.Cum, rpt.total), + item.Name, inl) } return nil } @@ -749,6 +826,20 @@ func printTraces(w io.Writer, rpt *Report) error { } sort.Strings(labels) fmt.Fprint(w, strings.Join(labels, "")) + + // Print any numeric labels for the sample + var numLabels []string + for key, vals := range sample.NumLabel { + unit := o.NumLabelUnits[key] + numValues := make([]string, len(vals)) + for i, vv := range vals { + numValues[i] = measurement.Label(vv, unit) + } + numLabels = append(numLabels, fmt.Sprintf("%10s: %s\n", key, strings.Join(numValues, " "))) + } + sort.Strings(numLabels) + fmt.Fprint(w, strings.Join(numLabels, "")) + var d, v int64 v = o.SampleValue(sample.Value) if o.SampleMeanDivisor != nil { @@ -969,24 +1060,25 @@ func printTree(w io.Writer, rpt *Report) error { return nil } -// printDOT prints an annotated callgraph in DOT format. -func printDOT(w io.Writer, rpt *Report) error { +// GetDOT returns a graph suitable for dot processing along with some +// configuration information. 
+func GetDOT(rpt *Report) (*graph.Graph, *graph.DotConfig) { g, origCount, droppedNodes, droppedEdges := rpt.newTrimmedGraph() rpt.selectOutputUnit(g) labels := reportLabels(rpt, g, origCount, droppedNodes, droppedEdges, true) - o := rpt.options - formatTag := func(v int64, key string) string { - return measurement.ScaledLabel(v, key, o.OutputUnit) - } - c := &graph.DotConfig{ Title: rpt.options.Title, Labels: labels, FormatValue: rpt.formatValue, - FormatTag: formatTag, Total: rpt.total, } + return g, c +} + +// printDOT prints an annotated callgraph in DOT format. +func printDOT(w io.Writer, rpt *Report) error { + g, c := GetDOT(rpt) graph.ComposeDot(w, g, &graph.DotAttributes{}, c) return nil } @@ -1055,9 +1147,7 @@ func reportLabels(rpt *Report, g *graph.Graph, origCount, droppedNodes, droppedE var label []string if len(rpt.options.ProfileLabels) > 0 { - for _, l := range rpt.options.ProfileLabels { - label = append(label, l) - } + label = append(label, rpt.options.ProfileLabels...) } else if fullHeaders || !rpt.options.CompactLabels { label = ProfileLabels(rpt) } @@ -1067,6 +1157,11 @@ func reportLabels(rpt *Report, g *graph.Graph, origCount, droppedNodes, droppedE flatSum = flatSum + n.FlatValue() } + if len(rpt.options.ActiveFilters) > 0 { + activeFilters := legendActiveFilters(rpt.options.ActiveFilters) + label = append(label, activeFilters...) 
+ } + label = append(label, fmt.Sprintf("Showing nodes accounting for %s, %s of %s total", rpt.formatValue(flatSum), strings.TrimSpace(percentage(flatSum, rpt.total)), rpt.formatValue(rpt.total))) if rpt.total != 0 { @@ -1086,6 +1181,18 @@ func reportLabels(rpt *Report, g *graph.Graph, origCount, droppedNodes, droppedE return label } +func legendActiveFilters(activeFilters []string) []string { + legendActiveFilters := make([]string, len(activeFilters)+1) + legendActiveFilters[0] = "Active filters:" + for i, s := range activeFilters { + if len(s) > 80 { + s = s[:80] + "…" + } + legendActiveFilters[i+1] = " " + s + } + return legendActiveFilters +} + func genLabel(d int, n, l, f string) string { if d > 1 { n = n + "s" @@ -1159,6 +1266,9 @@ type Report struct { formatValue func(int64) string } +// Total returns the total number of samples in a report. +func (rpt *Report) Total() int64 { return rpt.total } + func abs64(i int64) int64 { if i < 0 { return -i diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/report_test.go b/src/cmd/vendor/github.com/google/pprof/internal/report/report_test.go index 28cf6b4ce38..e05cf5ad08d 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/report/report_test.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/report_test.go @@ -264,3 +264,24 @@ func TestFunctionMap(t *testing.T) { } } } + +func TestLegendActiveFilters(t *testing.T) { + activeFilterInput := []string{ + "focus=123|456|789|101112|131415|161718|192021|222324|252627|282930|313233|343536|363738|acbdefghijklmnop", + "show=short filter", + } + expectedLegendActiveFilter := []string{ + "Active filters:", + " focus=123|456|789|101112|131415|161718|192021|222324|252627|282930|313233|343536…", + " show=short filter", + } + legendActiveFilter := legendActiveFilters(activeFilterInput) + if len(legendActiveFilter) != len(expectedLegendActiveFilter) { + t.Errorf("wanted length %v got length %v", len(expectedLegendActiveFilter), 
len(legendActiveFilter)) + } + for i := range legendActiveFilter { + if legendActiveFilter[i] != expectedLegendActiveFilter[i] { + t.Errorf("%d: want \"%v\", got \"%v\"", i, expectedLegendActiveFilter[i], legendActiveFilter[i]) + } + } +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/source.go b/src/cmd/vendor/github.com/google/pprof/internal/report/source.go index f5e3b6b9d71..ce82ae55c47 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/report/source.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/source.go @@ -62,6 +62,7 @@ func printSource(w io.Writer, rpt *Report) error { } sourcePath = wd } + reader := newSourceReader(sourcePath) fmt.Fprintf(w, "Total: %s\n", rpt.formatValue(rpt.total)) for _, fn := range functions { @@ -94,7 +95,7 @@ func printSource(w io.Writer, rpt *Report) error { fns := fileNodes[filename] flatSum, cumSum := fns.Sum() - fnodes, _, err := getSourceFromFile(filename, sourcePath, fns, 0, 0) + fnodes, _, err := getSourceFromFile(filename, reader, fns, 0, 0) fmt.Fprintf(w, "ROUTINE ======================== %s in %s\n", name, filename) fmt.Fprintf(w, "%10s %10s (flat, cum) %s of Total\n", rpt.formatValue(flatSum), rpt.formatValue(cumSum), @@ -116,6 +117,16 @@ func printSource(w io.Writer, rpt *Report) error { // printWebSource prints an annotated source listing, include all // functions with samples that match the regexp rpt.options.symbol. func printWebSource(w io.Writer, rpt *Report, obj plugin.ObjTool) error { + printHeader(w, rpt) + if err := PrintWebList(w, rpt, obj, -1); err != nil { + return err + } + printPageClosing(w) + return nil +} + +// PrintWebList prints annotated source listing of rpt to w. 
+func PrintWebList(w io.Writer, rpt *Report, obj plugin.ObjTool, maxFiles int) error { o := rpt.options g := rpt.newGraph(nil) @@ -134,6 +145,7 @@ func printWebSource(w io.Writer, rpt *Report, obj plugin.ObjTool) error { } sourcePath = wd } + reader := newSourceReader(sourcePath) type fileFunction struct { fileName, functionName string @@ -167,7 +179,7 @@ func printWebSource(w io.Writer, rpt *Report, obj plugin.ObjTool) error { } if len(fileNodes) == 0 { - return fmt.Errorf("No source information for %s\n", o.Symbol.String()) + return fmt.Errorf("No source information for %s", o.Symbol.String()) } sourceFiles := make(graph.Nodes, 0, len(fileNodes)) @@ -176,10 +188,18 @@ func printWebSource(w io.Writer, rpt *Report, obj plugin.ObjTool) error { sNode.Flat, sNode.Cum = nodes.Sum() sourceFiles = append(sourceFiles, &sNode) } - sourceFiles.Sort(graph.FileOrder) + + // Limit number of files printed? + if maxFiles < 0 { + sourceFiles.Sort(graph.FileOrder) + } else { + sourceFiles.Sort(graph.FlatNameOrder) + if maxFiles < len(sourceFiles) { + sourceFiles = sourceFiles[:maxFiles] + } + } // Print each file associated with this function. 
- printHeader(w, rpt) for _, n := range sourceFiles { ff := fileFunction{n.Info.File, n.Info.Name} fns := fileNodes[ff] @@ -187,18 +207,17 @@ func printWebSource(w io.Writer, rpt *Report, obj plugin.ObjTool) error { asm := assemblyPerSourceLine(symbols, fns, ff.fileName, obj) start, end := sourceCoordinates(asm) - fnodes, path, err := getSourceFromFile(ff.fileName, sourcePath, fns, start, end) + fnodes, path, err := getSourceFromFile(ff.fileName, reader, fns, start, end) if err != nil { fnodes, path = getMissingFunctionSource(ff.fileName, asm, start, end) } printFunctionHeader(w, ff.functionName, path, n.Flat, n.Cum, rpt) for _, fn := range fnodes { - printFunctionSourceLine(w, fn, asm[fn.Info.Lineno], rpt) + printFunctionSourceLine(w, fn, asm[fn.Info.Lineno], reader, rpt) } printFunctionClosing(w) } - printPageClosing(w) return nil } @@ -236,11 +255,41 @@ func assemblyPerSourceLine(objSyms []*objSymbol, rs graph.Nodes, src string, obj srcBase := filepath.Base(src) anodes := annotateAssembly(insts, rs, o.base) var lineno = 0 + var prevline = 0 for _, an := range anodes { - if filepath.Base(an.file) == srcBase { + // Do not rely solely on the line number produced by Disasm + // since it is not what we want in the presence of inlining. + // + // E.g., suppose we are printing source code for F and this + // instruction is from H where F called G called H and both + // of those calls were inlined. We want to use the line + // number from F, not from H (which is what Disasm gives us). + // + // So find the outer-most linenumber in the source file. 
+ found := false + if frames, err := o.file.SourceLine(an.address + o.base); err == nil { + for i := len(frames) - 1; i >= 0; i-- { + if filepath.Base(frames[i].File) == srcBase { + for j := i - 1; j >= 0; j-- { + an.inlineCalls = append(an.inlineCalls, callID{frames[j].File, frames[j].Line}) + } + lineno = frames[i].Line + found = true + break + } + } + } + if !found && filepath.Base(an.file) == srcBase { lineno = an.line } + if lineno != 0 { + if lineno != prevline { + // This instruction starts a new block + // of contiguous instructions on this line. + an.startsBlock = true + } + prevline = lineno assembly[lineno] = append(assembly[lineno], an) } } @@ -265,7 +314,15 @@ func findMatchingSymbol(objSyms []*objSymbol, ns graph.Nodes) *objSymbol { // printHeader prints the page header for a weblist report. func printHeader(w io.Writer, rpt *Report) { - fmt.Fprintln(w, weblistPageHeader) + fmt.Fprintln(w, ` + + + + +Pprof listing`) + fmt.Fprintln(w, weblistPageCSS) + fmt.Fprintln(w, weblistPageScript) + fmt.Fprint(w, "\n\n\n") var labels []string for _, l := range ProfileLabels(rpt) { @@ -290,30 +347,33 @@ func printFunctionHeader(w io.Writer, name, path string, flatSum, cumSum int64, } // printFunctionSourceLine prints a source line and the corresponding assembly. 
-func printFunctionSourceLine(w io.Writer, fn *graph.Node, assembly []assemblyInstruction, rpt *Report) { +func printFunctionSourceLine(w io.Writer, fn *graph.Node, assembly []assemblyInstruction, reader *sourceReader, rpt *Report) { if len(assembly) == 0 { fmt.Fprintf(w, - " %6d %10s %10s %s \n", + " %6d %10s %10s %8s %s \n", fn.Info.Lineno, valueOrDot(fn.Flat, rpt), valueOrDot(fn.Cum, rpt), - template.HTMLEscapeString(fn.Info.Name)) + "", template.HTMLEscapeString(fn.Info.Name)) return } fmt.Fprintf(w, - " %6d %10s %10s %s ", + " %6d %10s %10s %8s %s ", fn.Info.Lineno, valueOrDot(fn.Flat, rpt), valueOrDot(fn.Cum, rpt), - template.HTMLEscapeString(fn.Info.Name)) + "", template.HTMLEscapeString(fn.Info.Name)) + srcIndent := indentation(fn.Info.Name) fmt.Fprint(w, "") - for _, an := range assembly { + var curCalls []callID + for i, an := range assembly { + if an.startsBlock && i != 0 { + // Insert a separator between discontiguous blocks. + fmt.Fprintf(w, " %8s %28s\n", "", "⋮") + } + var fileline string - class := "disasmloc" if an.file != "" { fileline = fmt.Sprintf("%s:%d", template.HTMLEscapeString(an.file), an.line) - if an.line != fn.Info.Lineno { - class = "unimportant" - } } flat, cum := an.flat, an.cum if an.flatDiv != 0 { @@ -322,11 +382,30 @@ func printFunctionSourceLine(w io.Writer, fn *graph.Node, assembly []assemblyIns if an.cumDiv != 0 { cum = cum / an.cumDiv } - fmt.Fprintf(w, " %8s %10s %10s %8x: %-48s %s\n", "", - valueOrDot(flat, rpt), valueOrDot(cum, rpt), - an.address, - template.HTMLEscapeString(an.instruction), - class, + + // Print inlined call context. + for j, c := range an.inlineCalls { + if j < len(curCalls) && curCalls[j] == c { + // Skip if same as previous instruction. 
+ continue + } + curCalls = nil + fname := trimPath(c.file) + fline, ok := reader.line(fname, c.line) + if !ok { + fline = "" + } + text := strings.Repeat(" ", srcIndent+4+4*j) + strings.TrimSpace(fline) + fmt.Fprintf(w, " %8s %10s %10s %8s %s %s:%d\n", + "", "", "", "", + template.HTMLEscapeString(fmt.Sprintf("%-80s", text)), + template.HTMLEscapeString(filepath.Base(fname)), c.line) + } + curCalls = an.inlineCalls + text := strings.Repeat(" ", srcIndent+4+4*len(curCalls)) + an.instruction + fmt.Fprintf(w, " %8s %10s %10s %8x: %s %s\n", + "", valueOrDot(flat, rpt), valueOrDot(cum, rpt), an.address, + template.HTMLEscapeString(fmt.Sprintf("%-80s", text)), template.HTMLEscapeString(fileline)) } fmt.Fprintln(w, "") @@ -345,14 +424,10 @@ func printPageClosing(w io.Writer) { // getSourceFromFile collects the sources of a function from a source // file and annotates it with the samples in fns. Returns the sources // as nodes, using the info.name field to hold the source code. -func getSourceFromFile(file, sourcePath string, fns graph.Nodes, start, end int) (graph.Nodes, string, error) { +func getSourceFromFile(file string, reader *sourceReader, fns graph.Nodes, start, end int) (graph.Nodes, string, error) { file = trimPath(file) - f, err := openSourceFile(file, sourcePath) - if err != nil { - return nil, file, err - } - lineNodes := make(map[int]graph.Nodes) + // Collect source coordinates from profile. const margin = 5 // Lines before first/after last sample. 
if start == 0 { @@ -382,36 +457,28 @@ func getSourceFromFile(file, sourcePath string, fns graph.Nodes, start, end int) } lineNodes[lineno] = append(lineNodes[lineno], n) } + if start < 1 { + start = 1 + } var src graph.Nodes - buf := bufio.NewReader(f) - lineno := 1 - for { - line, err := buf.ReadString('\n') - if err != nil { - if err != io.EOF { - return nil, file, err - } - if line == "" { - break - } - } - if lineno >= start { - flat, cum := lineNodes[lineno].Sum() - - src = append(src, &graph.Node{ - Info: graph.NodeInfo{ - Name: strings.TrimRight(line, "\n"), - Lineno: lineno, - }, - Flat: flat, - Cum: cum, - }) - } - lineno++ - if lineno > end { + for lineno := start; lineno <= end; lineno++ { + line, ok := reader.line(file, lineno) + if !ok { break } + flat, cum := lineNodes[lineno].Sum() + src = append(src, &graph.Node{ + Info: graph.NodeInfo{ + Name: strings.TrimRight(line, "\n"), + Lineno: lineno, + }, + Flat: flat, + Cum: cum, + }) + } + if err := reader.fileError(file); err != nil { + return nil, file, err } return src, file, nil } @@ -446,6 +513,57 @@ func getMissingFunctionSource(filename string, asm map[int][]assemblyInstruction return fnodes, filename } +// sourceReader provides access to source code with caching of file contents. +type sourceReader struct { + searchPath string + + // files maps from path name to a list of lines. + // files[*][0] is unused since line numbering starts at 1. + files map[string][]string + + // errors collects errors encountered per file. These errors are + // consulted before returning out of these module. 
+ errors map[string]error +} + +func newSourceReader(searchPath string) *sourceReader { + return &sourceReader{ + searchPath, + make(map[string][]string), + make(map[string]error), + } +} + +func (reader *sourceReader) fileError(path string) error { + return reader.errors[path] +} + +func (reader *sourceReader) line(path string, lineno int) (string, bool) { + lines, ok := reader.files[path] + if !ok { + // Read and cache file contents. + lines = []string{""} // Skip 0th line + f, err := openSourceFile(path, reader.searchPath) + if err != nil { + reader.errors[path] = err + } else { + s := bufio.NewScanner(f) + for s.Scan() { + lines = append(lines, s.Text()) + } + f.Close() + if s.Err() != nil { + reader.errors[path] = err + } + } + reader.files[path] = lines + } + if lineno <= 0 || lineno >= len(lines) { + return "", false + } + return lines[lineno], true +} + // openSourceFile opens a source file from a name encoded in a // profile. File names in a profile after often relative paths, so // search them in each of the paths in searchPath (or CWD by default), @@ -492,3 +610,20 @@ func trimPath(path string) string { } return path } + +func indentation(line string) int { + column := 0 + for _, c := range line { + if c == ' ' { + column++ + } else if c == '\t' { + column++ + for column%8 != 0 { + column++ + } + } else { + break + } + } + return column +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go b/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go index 2bb81f20257..02a6d772487 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/source_html.go @@ -14,12 +14,17 @@ package report -const weblistPageHeader = ` - - - -Pprof listing - - - - -` +` const weblistPageClosing = ` diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/source_test.go 
b/src/cmd/vendor/github.com/google/pprof/internal/report/source_test.go new file mode 100644 index 00000000000..9a2b5a21c44 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/source_test.go @@ -0,0 +1,89 @@ +package report + +import ( + "bytes" + "os" + "path/filepath" + "regexp" + "runtime" + "strings" + "testing" + + "github.com/google/pprof/internal/binutils" + "github.com/google/pprof/profile" +) + +func TestWebList(t *testing.T) { + if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" { + t.Skip("weblist only tested on x86-64 linux") + } + + cpu := readProfile(filepath.Join("testdata", "sample.cpu"), t) + rpt := New(cpu, &Options{ + OutputFormat: WebList, + Symbol: regexp.MustCompile("busyLoop"), + SampleValue: func(v []int64) int64 { return v[1] }, + SampleUnit: cpu.SampleType[1].Unit, + }) + buf := bytes.NewBuffer(nil) + if err := Generate(buf, rpt, &binutils.Binutils{}); err != nil { + t.Fatalf("could not generate weblist: %v", err) + } + output := buf.String() + + for _, expect := range []string{"func busyLoop", "callq", "math.Abs"} { + if !strings.Contains(output, expect) { + t.Errorf("weblist output does not contain '%s':\n%s", expect, output) + } + } +} + +func TestIndentation(t *testing.T) { + for _, c := range []struct { + str string + wantIndent int + }{ + {"", 0}, + {"foobar", 0}, + {" foo", 2}, + {"\tfoo", 8}, + {"\t foo", 9}, + {" \tfoo", 8}, + {" \tfoo", 8}, + {" \tfoo", 16}, + } { + if n := indentation(c.str); n != c.wantIndent { + t.Errorf("indentation(%v): got %d, want %d", c.str, n, c.wantIndent) + } + } +} + +func readProfile(fname string, t *testing.T) *profile.Profile { + file, err := os.Open(fname) + if err != nil { + t.Fatalf("%s: could not open profile: %v", fname, err) + } + defer file.Close() + p, err := profile.Parse(file) + if err != nil { + t.Fatalf("%s: could not parse profile: %v", fname, err) + } + + // Fix file names so they do not include absolute path names. 
+ fix := func(s string) string { + const testdir = "/internal/report/" + pos := strings.Index(s, testdir) + if pos == -1 { + return s + } + return s[pos+len(testdir):] + } + for _, m := range p.Mapping { + m.File = fix(m.File) + } + for _, f := range p.Function { + f.Filename = fix(f.Filename) + } + + return p +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/testdata/README.md b/src/cmd/vendor/github.com/google/pprof/internal/report/testdata/README.md new file mode 100644 index 00000000000..2b60fcca6c7 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/testdata/README.md @@ -0,0 +1,10 @@ +sample/ contains a sample program that can be profiled. +sample.bin is its x86-64 binary. +sample.cpu is a profile generated by sample.bin. + +To update the binary and profile: + +```shell +go build -o sample.bin ./sample +./sample.bin -cpuprofile sample.cpu +``` diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/testdata/sample.bin b/src/cmd/vendor/github.com/google/pprof/internal/report/testdata/sample.bin new file mode 100755 index 00000000000..25929e64601 Binary files /dev/null and b/src/cmd/vendor/github.com/google/pprof/internal/report/testdata/sample.bin differ diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/testdata/sample.cpu b/src/cmd/vendor/github.com/google/pprof/internal/report/testdata/sample.cpu new file mode 100644 index 00000000000..50eea72ea2d Binary files /dev/null and b/src/cmd/vendor/github.com/google/pprof/internal/report/testdata/sample.cpu differ diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/testdata/sample/sample.go b/src/cmd/vendor/github.com/google/pprof/internal/report/testdata/sample/sample.go new file mode 100644 index 00000000000..3c812dd5fd0 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/testdata/sample/sample.go @@ -0,0 +1,41 @@ +// sample program that is used to produce some of the files in +// 
pprof/internal/report/testdata. +package main + +import ( + "flag" + "fmt" + "log" + "math" + "os" + "runtime/pprof" +) + +var cpuProfile = flag.String("cpuprofile", "", "where to write cpu profile") + +func main() { + flag.Parse() + f, err := os.Create(*cpuProfile) + if err != nil { + log.Fatal("could not create CPU profile: ", err) + } + if err := pprof.StartCPUProfile(f); err != nil { + log.Fatal("could not start CPU profile: ", err) + } + defer pprof.StopCPUProfile() + busyLoop() +} + +func busyLoop() { + m := make(map[int]int) + for i := 0; i < 1000000; i++ { + m[i] = i + 10 + } + var sum float64 + for i := 0; i < 100; i++ { + for _, v := range m { + sum += math.Abs(float64(v)) + } + } + fmt.Println("Sum", sum) +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/testdata/source.dot b/src/cmd/vendor/github.com/google/pprof/internal/report/testdata/source.dot index 19746a4bafd..b67ca168c5c 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/report/testdata/source.dot +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/testdata/source.dot @@ -1,13 +1,13 @@ digraph "unnamed" { node [style=filled fillcolor="#f8f8f8"] subgraph cluster_L { "Duration: 10s, Total samples = 11111 " [shape=box fontsize=16 label="Duration: 10s, Total samples = 11111 \lShowing nodes accounting for 11111, 100% of 11111 total\l"] } -N1 [label="tee\nsource2:8\n10000 (90.00%)" fontsize=24 shape=box tooltip="tee testdata/source2:8 (10000)" color="#b20500" fillcolor="#edd6d5"] -N2 [label="main\nsource1:2\n1 (0.009%)\nof 11111 (100%)" fontsize=9 shape=box tooltip="main testdata/source1:2 (11111)" color="#b20000" fillcolor="#edd5d5"] -N3 [label="tee\nsource2:2\n1000 (9.00%)\nof 11000 (99.00%)" fontsize=14 shape=box tooltip="tee testdata/source2:2 (11000)" color="#b20000" fillcolor="#edd5d5"] -N4 [label="tee\nsource2:8\n100 (0.9%)" fontsize=10 shape=box tooltip="tee testdata/source2:8 (100)" color="#b2b0aa" fillcolor="#edecec"] -N5 [label="bar\nsource1:10\n10 
(0.09%)" fontsize=9 shape=box tooltip="bar testdata/source1:10 (10)" color="#b2b2b1" fillcolor="#ededed"] -N6 [label="bar\nsource1:10\n0 of 100 (0.9%)" fontsize=8 shape=box tooltip="bar testdata/source1:10 (100)" color="#b2b0aa" fillcolor="#edecec"] -N7 [label="foo\nsource1:4\n0 of 10 (0.09%)" fontsize=8 shape=box tooltip="foo testdata/source1:4 (10)" color="#b2b2b1" fillcolor="#ededed"] +N1 [label="tee\nsource2:8\n10000 (90.00%)" id="node1" fontsize=24 shape=box tooltip="tee testdata/source2:8 (10000)" color="#b20500" fillcolor="#edd6d5"] +N2 [label="main\nsource1:2\n1 (0.009%)\nof 11111 (100%)" id="node2" fontsize=9 shape=box tooltip="main testdata/source1:2 (11111)" color="#b20000" fillcolor="#edd5d5"] +N3 [label="tee\nsource2:2\n1000 (9.00%)\nof 11000 (99.00%)" id="node3" fontsize=14 shape=box tooltip="tee testdata/source2:2 (11000)" color="#b20000" fillcolor="#edd5d5"] +N4 [label="tee\nsource2:8\n100 (0.9%)" id="node4" fontsize=10 shape=box tooltip="tee testdata/source2:8 (100)" color="#b2b0aa" fillcolor="#edecec"] +N5 [label="bar\nsource1:10\n10 (0.09%)" id="node5" fontsize=9 shape=box tooltip="bar testdata/source1:10 (10)" color="#b2b2b1" fillcolor="#ededed"] +N6 [label="bar\nsource1:10\n0 of 100 (0.9%)" id="node6" fontsize=8 shape=box tooltip="bar testdata/source1:10 (100)" color="#b2b0aa" fillcolor="#edecec"] +N7 [label="foo\nsource1:4\n0 of 10 (0.09%)" id="node7" fontsize=8 shape=box tooltip="foo testdata/source1:4 (10)" color="#b2b2b1" fillcolor="#ededed"] N2 -> N3 [label=" 11000" weight=100 penwidth=5 color="#b20000" tooltip="main testdata/source1:2 -> tee testdata/source2:2 (11000)" labeltooltip="main testdata/source1:2 -> tee testdata/source2:2 (11000)"] N3 -> N1 [label=" 10000" weight=91 penwidth=5 color="#b20500" tooltip="tee testdata/source2:2 -> tee testdata/source2:8 (10000)" labeltooltip="tee testdata/source2:2 -> tee testdata/source2:8 (10000)"] N6 -> N4 [label=" 100" color="#b2b0aa" tooltip="bar testdata/source1:10 -> tee testdata/source2:8 
(100)" labeltooltip="bar testdata/source1:10 -> tee testdata/source2:8 (100)"] diff --git a/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go b/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go index 2c1c729ddfe..727b7e8c56c 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go @@ -18,6 +18,7 @@ package symbolizer import ( + "crypto/tls" "fmt" "io/ioutil" "net/http" @@ -41,21 +42,26 @@ type Symbolizer struct { // test taps for dependency injection var symbolzSymbolize = symbolz.Symbolize var localSymbolize = doLocalSymbolize +var demangleFunction = Demangle // Symbolize attempts to symbolize profile p. First uses binutils on // local binaries; if the source is a URL it attempts to get any // missed entries using symbolz. func (s *Symbolizer) Symbolize(mode string, sources plugin.MappingSources, p *profile.Profile) error { - remote, local, force, demanglerMode := true, true, false, "" + remote, local, fast, force, demanglerMode := true, true, false, false, "" for _, o := range strings.Split(strings.ToLower(mode), ":") { switch o { + case "": + continue case "none", "no": return nil - case "local", "fastlocal": + case "local": remote, local = false, true + case "fastlocal": + remote, local, fast = false, true, true case "remote": remote, local = true, false - case "", "force": + case "force": force = true default: switch d := strings.TrimPrefix(o, "demangle="); d { @@ -74,29 +80,48 @@ func (s *Symbolizer) Symbolize(mode string, sources plugin.MappingSources, p *pr var err error if local { // Symbolize locally using binutils. 
- if err = localSymbolize(mode, p, s.Obj, s.UI); err != nil { + if err = localSymbolize(p, fast, force, s.Obj, s.UI); err != nil { s.UI.PrintErr("local symbolization: " + err.Error()) } } if remote { - if err = symbolzSymbolize(sources, postURL, p, s.UI); err != nil { + if err = symbolzSymbolize(p, force, sources, postURL, s.UI); err != nil { return err // Ran out of options. } } - Demangle(p, force, demanglerMode) + demangleFunction(p, force, demanglerMode) return nil } // postURL issues a POST to a URL over HTTP. func postURL(source, post string) ([]byte, error) { - resp, err := http.Post(source, "application/octet-stream", strings.NewReader(post)) + url, err := url.Parse(source) + if err != nil { + return nil, err + } + + var tlsConfig *tls.Config + if url.Scheme == "https+insecure" { + tlsConfig = &tls.Config{ + InsecureSkipVerify: true, + } + url.Scheme = "https" + source = url.String() + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + } + resp, err := client.Post(source, "application/octet-stream", strings.NewReader(post)) if err != nil { return nil, fmt.Errorf("http post %s: %v", source, err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return nil, statusCodeError(resp) + return nil, fmt.Errorf("http post %s: %v", source, statusCodeError(resp)) } return ioutil.ReadAll(resp.Body) } @@ -114,18 +139,10 @@ func statusCodeError(resp *http.Response) error { // doLocalSymbolize adds symbol and line number information to all locations // in a profile. mode enables some options to control // symbolization. -func doLocalSymbolize(mode string, prof *profile.Profile, obj plugin.ObjTool, ui plugin.UI) error { - force := false - // Disable some mechanisms based on mode string. 
- for _, o := range strings.Split(strings.ToLower(mode), ":") { - switch { - case o == "force": - force = true - case o == "fastlocal": - if bu, ok := obj.(*binutils.Binutils); ok { - bu.SetFastSymbolization(true) - } - default: +func doLocalSymbolize(prof *profile.Profile, fast, force bool, obj plugin.ObjTool, ui plugin.UI) error { + if fast { + if bu, ok := obj.(*binutils.Binutils); ok { + bu.SetFastSymbolization(true) } } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer_test.go b/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer_test.go index 66cad3eaa10..956519d37ca 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer_test.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer_test.go @@ -17,6 +17,7 @@ package symbolizer import ( "fmt" "regexp" + "sort" "strings" "testing" @@ -101,9 +102,11 @@ func TestSymbolization(t *testing.T) { defer func() { symbolzSymbolize = sSym localSymbolize = lSym + demangleFunction = Demangle }() symbolzSymbolize = symbolzMock localSymbolize = localMock + demangleFunction = demangleMock type testcase struct { mode string @@ -117,19 +120,35 @@ func TestSymbolization(t *testing.T) { for i, tc := range []testcase{ { "local", - "local=local", + "local=[]", }, { "fastlocal", - "local=fastlocal", + "local=[fast]", }, { "remote", - "symbolz", + "symbolz=[]", }, { "", - "local=:symbolz", + "local=[]:symbolz=[]", + }, + { + "demangle=none", + "demangle=[none]:force:local=[force]:symbolz=[force]", + }, + { + "remote:demangle=full", + "demangle=[full]:force:symbolz=[force]", + }, + { + "local:demangle=templates", + "demangle=[templates]:force:local=[force]", + }, + { + "force:remote", + "force:symbolz=[force]", }, } { prof := testProfile.Copy() @@ -137,23 +156,44 @@ func TestSymbolization(t *testing.T) { t.Errorf("symbolize #%d: %v", i, err) continue } + sort.Strings(prof.Comments) if got, want := strings.Join(prof.Comments, ":"), 
tc.wantComment; got != want { - t.Errorf("got %s, want %s", got, want) + t.Errorf("%q: got %s, want %s", tc.mode, got, want) continue } } } -func symbolzMock(sources plugin.MappingSources, syms func(string, string) ([]byte, error), p *profile.Profile, ui plugin.UI) error { - p.Comments = append(p.Comments, "symbolz") +func symbolzMock(p *profile.Profile, force bool, sources plugin.MappingSources, syms func(string, string) ([]byte, error), ui plugin.UI) error { + var args []string + if force { + args = append(args, "force") + } + p.Comments = append(p.Comments, "symbolz=["+strings.Join(args, ",")+"]") return nil } -func localMock(mode string, p *profile.Profile, obj plugin.ObjTool, ui plugin.UI) error { - p.Comments = append(p.Comments, "local="+mode) +func localMock(p *profile.Profile, fast, force bool, obj plugin.ObjTool, ui plugin.UI) error { + var args []string + if fast { + args = append(args, "fast") + } + if force { + args = append(args, "force") + } + p.Comments = append(p.Comments, "local=["+strings.Join(args, ",")+"]") return nil } +func demangleMock(p *profile.Profile, force bool, mode string) { + if force { + p.Comments = append(p.Comments, "force") + } + if mode != "" { + p.Comments = append(p.Comments, "demangle=["+mode+"]") + } +} + func TestLocalSymbolization(t *testing.T) { prof := testProfile.Copy() @@ -165,7 +205,7 @@ func TestLocalSymbolization(t *testing.T) { } b := mockObjTool{} - if err := localSymbolize("", prof, b, &proftest.TestUI{T: t}); err != nil { + if err := localSymbolize(prof, false, false, b, &proftest.TestUI{T: t}); err != nil { t.Fatalf("localSymbolize(): %v", err) } @@ -207,11 +247,11 @@ func checkSymbolizedLocation(a uint64, got []profile.Line) error { } var mockAddresses = map[uint64][]plugin.Frame{ - 1000: []plugin.Frame{frame("fun11", "file11.src", 10)}, - 2000: []plugin.Frame{frame("fun21", "file21.src", 20), frame("fun22", "file22.src", 20)}, - 3000: []plugin.Frame{frame("fun31", "file31.src", 30), frame("fun32", 
"file32.src", 30), frame("fun33", "file33.src", 30)}, - 4000: []plugin.Frame{frame("fun41", "file41.src", 40), frame("fun42", "file42.src", 40), frame("fun43", "file43.src", 40), frame("fun44", "file44.src", 40)}, - 5000: []plugin.Frame{frame("fun51", "file51.src", 50), frame("fun52", "file52.src", 50), frame("fun53", "file53.src", 50), frame("fun54", "file54.src", 50), frame("fun55", "file55.src", 50)}, + 1000: {frame("fun11", "file11.src", 10)}, + 2000: {frame("fun21", "file21.src", 20), frame("fun22", "file22.src", 20)}, + 3000: {frame("fun31", "file31.src", 30), frame("fun32", "file32.src", 30), frame("fun33", "file33.src", 30)}, + 4000: {frame("fun41", "file41.src", 40), frame("fun42", "file42.src", 40), frame("fun43", "file43.src", 40), frame("fun44", "file44.src", 40)}, + 5000: {frame("fun51", "file51.src", 50), frame("fun52", "file52.src", 50), frame("fun53", "file53.src", 50), frame("fun54", "file54.src", 50), frame("fun55", "file55.src", 50)}, } func frame(fname, file string, line int) plugin.Frame { diff --git a/src/cmd/vendor/github.com/google/pprof/internal/symbolz/symbolz.go b/src/cmd/vendor/github.com/google/pprof/internal/symbolz/symbolz.go index e84765bb201..34c119c4c2f 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/symbolz/symbolz.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/symbolz/symbolz.go @@ -36,12 +36,13 @@ var ( // Symbolize symbolizes profile p by parsing data returned by a // symbolz handler. syms receives the symbolz query (hex addresses -// separated by '+') and returns the symbolz output in a string. It -// symbolizes all locations based on their addresses, regardless of -// mapping. -func Symbolize(sources plugin.MappingSources, syms func(string, string) ([]byte, error), p *profile.Profile, ui plugin.UI) error { +// separated by '+') and returns the symbolz output in a string. If +// force is false, it will only symbolize locations from mappings +// not already marked as HasFunctions. 
+func Symbolize(p *profile.Profile, force bool, sources plugin.MappingSources, syms func(string, string) ([]byte, error), ui plugin.UI) error { for _, m := range p.Mapping { - if m.HasFunctions { + if !force && m.HasFunctions { + // Only check for HasFunctions as symbolz only populates function names. continue } mappingSources := sources[m.File] @@ -65,16 +66,13 @@ func Symbolize(sources plugin.MappingSources, syms func(string, string) ([]byte, // symbolz returns the corresponding symbolz source for a profile URL. func symbolz(source string) string { if url, err := url.Parse(source); err == nil && url.Host != "" { - if strings.Contains(url.Path, "/") { - if dir := path.Dir(url.Path); dir == "/debug/pprof" { - // For Go language profile handlers in net/http/pprof package. - url.Path = "/debug/pprof/symbol" - } else { - url.Path = "/symbolz" - } - url.RawQuery = "" - return url.String() + if strings.Contains(url.Path, "/debug/pprof/") { + url.Path = path.Clean(url.Path + "/../symbol") + } else { + url.Path = "/symbolz" } + url.RawQuery = "" + return url.String() } return "" @@ -82,7 +80,7 @@ func symbolz(source string) string { // symbolizeMapping symbolizes locations belonging to a Mapping by querying // a symbolz handler. An offset is applied to all addresses to take care of -// normalization occured for merged Mappings. +// normalization occurred for merged Mappings. func symbolizeMapping(source string, offset int64, syms func(string, string) ([]byte, error), m *profile.Mapping, p *profile.Profile) error { // Construct query of addresses to symbolize. 
var a []string diff --git a/src/cmd/vendor/github.com/google/pprof/internal/symbolz/symbolz_test.go b/src/cmd/vendor/github.com/google/pprof/internal/symbolz/symbolz_test.go index 641b5ca6b77..270a6198c91 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/symbolz/symbolz_test.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/symbolz/symbolz_test.go @@ -26,13 +26,14 @@ import ( func TestSymbolzURL(t *testing.T) { for try, want := range map[string]string{ - "http://host:8000/profilez": "http://host:8000/symbolz", - "http://host:8000/profilez?seconds=5": "http://host:8000/symbolz", - "http://host:8000/profilez?seconds=5&format=proto": "http://host:8000/symbolz", - "http://host:8000/heapz?format=legacy": "http://host:8000/symbolz", - "http://host:8000/debug/pprof/profile": "http://host:8000/debug/pprof/symbol", - "http://host:8000/debug/pprof/profile?seconds=10": "http://host:8000/debug/pprof/symbol", - "http://host:8000/debug/pprof/heap": "http://host:8000/debug/pprof/symbol", + "http://host:8000/profilez": "http://host:8000/symbolz", + "http://host:8000/profilez?seconds=5": "http://host:8000/symbolz", + "http://host:8000/profilez?seconds=5&format=proto": "http://host:8000/symbolz", + "http://host:8000/heapz?format=legacy": "http://host:8000/symbolz", + "http://host:8000/debug/pprof/profile": "http://host:8000/debug/pprof/symbol", + "http://host:8000/debug/pprof/profile?seconds=10": "http://host:8000/debug/pprof/symbol", + "http://host:8000/debug/pprof/heap": "http://host:8000/debug/pprof/symbol", + "http://some.host:8080/some/deeper/path/debug/pprof/endpoint?param=value": "http://some.host:8080/some/deeper/path/debug/pprof/symbol", } { if got := symbolz(try); got != want { t.Errorf(`symbolz(%s)=%s, want "%s"`, try, got, want) @@ -41,12 +42,49 @@ func TestSymbolzURL(t *testing.T) { } func TestSymbolize(t *testing.T) { + s := plugin.MappingSources{ + "buildid": []struct { + Source string + Start uint64 + }{ + {Source: "http://localhost:80/profilez"}, + 
}, + } + + for _, hasFunctions := range []bool{false, true} { + for _, force := range []bool{false, true} { + p := testProfile(hasFunctions) + + if err := Symbolize(p, force, s, fetchSymbols, &proftest.TestUI{T: t}); err != nil { + t.Errorf("symbolz: %v", err) + continue + } + var wantSym, wantNoSym []*profile.Location + if force || !hasFunctions { + wantNoSym = p.Location[:1] + wantSym = p.Location[1:] + } else { + wantNoSym = p.Location + } + + if err := checkSymbolized(wantSym, true); err != nil { + t.Errorf("symbolz hasFns=%v force=%v: %v", hasFunctions, force, err) + } + if err := checkSymbolized(wantNoSym, false); err != nil { + t.Errorf("symbolz hasFns=%v force=%v: %v", hasFunctions, force, err) + } + } + } +} + +func testProfile(hasFunctions bool) *profile.Profile { m := []*profile.Mapping{ { - ID: 1, - Start: 0x1000, - Limit: 0x5000, - BuildID: "buildid", + ID: 1, + Start: 0x1000, + Limit: 0x5000, + BuildID: "buildid", + HasFunctions: hasFunctions, }, } p := &profile.Profile{ @@ -59,33 +97,25 @@ func TestSymbolize(t *testing.T) { Mapping: m, } - s := plugin.MappingSources{ - "buildid": []struct { - Source string - Start uint64 - }{ - {Source: "http://localhost:80/profilez"}, - }, - } + return p +} - if err := Symbolize(s, fetchSymbols, p, &proftest.TestUI{T: t}); err != nil { - t.Errorf("symbolz: %v", err) - } - - if l := p.Location[0]; len(l.Line) != 0 { - t.Errorf("unexpected symbolization for %#x: %v", l.Address, l.Line) - } - - for _, l := range p.Location[1:] { - if len(l.Line) != 1 { - t.Errorf("failed to symbolize %#x", l.Address) - continue +func checkSymbolized(locs []*profile.Location, wantSymbolized bool) error { + for _, loc := range locs { + if !wantSymbolized && len(loc.Line) != 0 { + return fmt.Errorf("unexpected symbolization for %#x: %v", loc.Address, loc.Line) } - address := l.Address - l.Mapping.Start - if got, want := l.Line[0].Function.Name, fmt.Sprintf("%#x", address); got != want { - t.Errorf("symbolz %#x, got %s, want %s", address, 
got, want) + if wantSymbolized { + if len(loc.Line) != 1 { + return fmt.Errorf("expected symbolization for %#x: %v", loc.Address, loc.Line) + } + address := loc.Address - loc.Mapping.Start + if got, want := loc.Line[0].Function.Name, fmt.Sprintf("%#x", address); got != want { + return fmt.Errorf("symbolz %#x, got %s, want %s", address, got, want) + } } } + return nil } func fetchSymbols(source, post string) ([]byte, error) { diff --git a/src/cmd/vendor/github.com/google/pprof/profile/encode.go b/src/cmd/vendor/github.com/google/pprof/profile/encode.go index c64083a4009..622319484a6 100644 --- a/src/cmd/vendor/github.com/google/pprof/profile/encode.go +++ b/src/cmd/vendor/github.com/google/pprof/profile/encode.go @@ -59,12 +59,19 @@ func (p *Profile) preEncode() { } sort.Strings(numKeys) for _, k := range numKeys { + keyX := addString(strings, k) vs := s.NumLabel[k] - for _, v := range vs { + units := s.NumUnit[k] + for i, v := range vs { + var unitX int64 + if len(units) != 0 { + unitX = addString(strings, units[i]) + } s.labelX = append(s.labelX, label{ - keyX: addString(strings, k), - numX: v, + keyX: keyX, + numX: v, + unitX: unitX, }, ) } @@ -289,6 +296,7 @@ func (p *Profile) postDecode() error { for _, s := range p.Sample { labels := make(map[string][]string, len(s.labelX)) numLabels := make(map[string][]int64, len(s.labelX)) + numUnits := make(map[string][]string, len(s.labelX)) for _, l := range s.labelX { var key, value string key, err = getString(p.stringTable, &l.keyX, err) @@ -296,6 +304,14 @@ func (p *Profile) postDecode() error { value, err = getString(p.stringTable, &l.strX, err) labels[key] = append(labels[key], value) } else if l.numX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) + } numLabels[key] = append(numLabels[key], l.numX) } } @@ -304,6 +320,12 @@ 
func (p *Profile) postDecode() error { } if len(numLabels) > 0 { s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } + } + s.NumUnit = numUnits } s.Location = make([]*Location, len(s.locationIDX)) for i, lid := range s.locationIDX { @@ -340,6 +362,15 @@ func (p *Profile) postDecode() error { return err } +// padStringArray pads arr with enough empty strings to make arr +// length l when arr's length is less than l. +func padStringArray(arr []string, l int) []string { + if l <= len(arr) { + return arr + } + return append(arr, make([]string, l-len(arr))...) +} + func (p *ValueType) decoder() []decoder { return valueTypeDecoder } @@ -392,6 +423,7 @@ func (p label) encode(b *buffer) { encodeInt64Opt(b, 1, p.keyX) encodeInt64Opt(b, 2, p.strX) encodeInt64Opt(b, 3, p.numX) + encodeInt64Opt(b, 4, p.unitX) } var labelDecoder = []decoder{ @@ -402,6 +434,8 @@ var labelDecoder = []decoder{ func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) }, // optional int64 num = 3 func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) }, + // optional int64 num = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) }, } func (p *Mapping) decoder() []decoder { diff --git a/src/cmd/vendor/github.com/google/pprof/profile/filter.go b/src/cmd/vendor/github.com/google/pprof/profile/filter.go index 85361e87995..f857fdf8f86 100644 --- a/src/cmd/vendor/github.com/google/pprof/profile/filter.go +++ b/src/cmd/vendor/github.com/google/pprof/profile/filter.go @@ -41,10 +41,11 @@ func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) } } if show != nil { - hnm = true l.Line = l.matchedLines(show) if len(l.Line) == 0 { hidden[l.ID] = true + } else { + hnm = true } } } diff --git a/src/cmd/vendor/github.com/google/pprof/profile/legacy_java_profile.go 
b/src/cmd/vendor/github.com/google/pprof/profile/legacy_java_profile.go index 7b40f5d24c6..06322e5d2c8 100644 --- a/src/cmd/vendor/github.com/google/pprof/profile/legacy_java_profile.go +++ b/src/cmd/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -212,7 +212,10 @@ func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*L switch pType { case "heap": const javaHeapzSamplingRate = 524288 // 512K - s.NumLabel = map[string][]int64{"bytes": []int64{s.Value[1] / s.Value[0]}} + if s.Value[0] == 0 { + return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line) + } + s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}} s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate) case "contention": if period := p.Period; period != 0 { diff --git a/src/cmd/vendor/github.com/google/pprof/profile/merge.go b/src/cmd/vendor/github.com/google/pprof/profile/merge.go index 2e9c2cd8af8..e00829cc343 100644 --- a/src/cmd/vendor/github.com/google/pprof/profile/merge.go +++ b/src/cmd/vendor/github.com/google/pprof/profile/merge.go @@ -85,6 +85,41 @@ func Merge(srcs []*Profile) (*Profile, error) { return p, nil } +// Normalize normalizes the source profile by multiplying each value in profile by the +// ratio of the sum of the base profile's values of that sample type to the sum of the +// source profile's value of that sample type. 
+func (p *Profile) Normalize(pb *Profile) error { + + if err := p.compatible(pb); err != nil { + return err + } + + baseVals := make([]int64, len(p.SampleType)) + for _, s := range pb.Sample { + for i, v := range s.Value { + baseVals[i] += v + } + } + + srcVals := make([]int64, len(p.SampleType)) + for _, s := range p.Sample { + for i, v := range s.Value { + srcVals[i] += v + } + } + + normScale := make([]float64, len(baseVals)) + for i := range baseVals { + if srcVals[i] == 0 { + normScale[i] = 0.0 + } else { + normScale[i] = float64(baseVals[i]) / float64(srcVals[i]) + } + } + p.ScaleN(normScale) + return nil +} + func isZeroSample(s *Sample) bool { for _, v := range s.Value { if v != 0 { @@ -120,6 +155,7 @@ func (pm *profileMerger) mapSample(src *Sample) *Sample { Value: make([]int64, len(src.Value)), Label: make(map[string][]string, len(src.Label)), NumLabel: make(map[string][]int64, len(src.NumLabel)), + NumUnit: make(map[string][]string, len(src.NumLabel)), } for i, l := range src.Location { s.Location[i] = pm.mapLocation(l) @@ -130,9 +166,13 @@ func (pm *profileMerger) mapSample(src *Sample) *Sample { s.Label[k] = vv } for k, v := range src.NumLabel { + u := src.NumUnit[k] vv := make([]int64, len(v)) + uu := make([]string, len(u)) copy(vv, v) + copy(uu, u) s.NumLabel[k] = vv + s.NumUnit[k] = uu } // Check memoization table. Must be done on the remapped location to // account for the remapped mapping. 
Add current values to the @@ -165,7 +205,7 @@ func (sample *Sample) key() sampleKey { numlabels := make([]string, 0, len(sample.NumLabel)) for k, v := range sample.NumLabel { - numlabels = append(numlabels, fmt.Sprintf("%q%x", k, v)) + numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k])) } sort.Strings(numlabels) @@ -432,7 +472,6 @@ func (p *Profile) compatible(pb *Profile) error { return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) } } - return nil } diff --git a/src/cmd/vendor/github.com/google/pprof/profile/profile.go b/src/cmd/vendor/github.com/google/pprof/profile/profile.go index fb3d4fd4fbc..a0f53efe3ec 100644 --- a/src/cmd/vendor/github.com/google/pprof/profile/profile.go +++ b/src/cmd/vendor/github.com/google/pprof/profile/profile.go @@ -26,6 +26,7 @@ import ( "regexp" "sort" "strings" + "sync" "time" ) @@ -47,6 +48,10 @@ type Profile struct { PeriodType *ValueType Period int64 + // The following fields are modified during encoding and copying, + // so are protected by a Mutex. + encodeMu sync.Mutex + commentX []int64 dropFramesX int64 keepFramesX int64 @@ -69,6 +74,7 @@ type Sample struct { Value []int64 Label map[string][]string NumLabel map[string][]int64 + NumUnit map[string][]string locationIDX []uint64 labelX []label @@ -80,6 +86,8 @@ type label struct { // Exactly one of the two following values must be set strX int64 numX int64 // Integer value for this label + // can be set if numX has value + unitX int64 } // Mapping corresponds to Profile.Mapping @@ -296,21 +304,25 @@ func (p *Profile) updateLocationMapping(from, to *Mapping) { } } -// Write writes the profile as a gzip-compressed marshaled protobuf. -func (p *Profile) Write(w io.Writer) error { +func serialize(p *Profile) []byte { + p.encodeMu.Lock() p.preEncode() b := marshal(p) + p.encodeMu.Unlock() + return b +} + +// Write writes the profile as a gzip-compressed marshaled protobuf. 
+func (p *Profile) Write(w io.Writer) error { zw := gzip.NewWriter(w) defer zw.Close() - _, err := zw.Write(b) + _, err := zw.Write(serialize(p)) return err } // WriteUncompressed writes the profile as a marshaled protobuf. func (p *Profile) WriteUncompressed(w io.Writer) error { - p.preEncode() - b := marshal(p) - _, err := w.Write(b) + _, err := w.Write(serialize(p)) return err } @@ -325,8 +337,11 @@ func (p *Profile) CheckValid() error { return fmt.Errorf("missing sample type information") } for _, s := range p.Sample { + if s == nil { + return fmt.Errorf("profile has nil sample") + } if len(s.Value) != sampleLen { - return fmt.Errorf("mismatch: sample has: %d values vs. %d types", len(s.Value), len(p.SampleType)) + return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType)) } for _, l := range s.Location { if l == nil { @@ -339,6 +354,9 @@ func (p *Profile) CheckValid() error { // Check that there are no duplicate ids mappings := make(map[uint64]*Mapping, len(p.Mapping)) for _, m := range p.Mapping { + if m == nil { + return fmt.Errorf("profile has nil mapping") + } if m.ID == 0 { return fmt.Errorf("found mapping with reserved ID=0") } @@ -349,6 +367,9 @@ func (p *Profile) CheckValid() error { } functions := make(map[uint64]*Function, len(p.Function)) for _, f := range p.Function { + if f == nil { + return fmt.Errorf("profile has nil function") + } if f.ID == 0 { return fmt.Errorf("found function with reserved ID=0") } @@ -359,6 +380,9 @@ func (p *Profile) CheckValid() error { } locations := make(map[uint64]*Location, len(p.Location)) for _, l := range p.Location { + if l == nil { + return fmt.Errorf("profile has nil location") + } if l.ID == 0 { return fmt.Errorf("found location with reserved id=0") } @@ -426,6 +450,70 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address return p.CheckValid() } +// NumLabelUnits returns a map of numeric label keys to the units +// associated with those keys 
and a map of those keys to any units +// that were encountered but not used. +// Unit for a given key is the first encountered unit for that key. If multiple +// units are encountered for values paired with a particular key, then the first +// unit encountered is used and all other units are returned in sorted order +// in map of ignored units. +// If no units are encountered for a particular key, the unit is then inferred +// based on the key. +func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) { + numLabelUnits := map[string]string{} + ignoredUnits := map[string]map[string]bool{} + encounteredKeys := map[string]bool{} + + // Determine units based on numeric tags for each sample. + for _, s := range p.Sample { + for k := range s.NumLabel { + encounteredKeys[k] = true + for _, unit := range s.NumUnit[k] { + if unit == "" { + continue + } + if wantUnit, ok := numLabelUnits[k]; !ok { + numLabelUnits[k] = unit + } else if wantUnit != unit { + if v, ok := ignoredUnits[k]; ok { + v[unit] = true + } else { + ignoredUnits[k] = map[string]bool{unit: true} + } + } + } + } + } + // Infer units for keys without any units associated with + // numeric tag values. + for key := range encounteredKeys { + unit := numLabelUnits[key] + if unit == "" { + switch key { + case "alignment", "request": + numLabelUnits[key] = "bytes" + default: + numLabelUnits[key] = key + } + } + } + + // Copy ignored units into more readable format + unitsIgnored := make(map[string][]string, len(ignoredUnits)) + for key, values := range ignoredUnits { + units := make([]string, len(values)) + i := 0 + for unit := range values { + units[i] = unit + i++ + } + sort.Strings(units) + unitsIgnored[key] = units + } + + return numLabelUnits, unitsIgnored +} + // String dumps a text representation of a profile. Intended mainly // for debugging purposes. 
func (p *Profile) String() string { @@ -455,87 +543,132 @@ func (p *Profile) String() string { } ss = append(ss, strings.TrimSpace(sh1)) for _, s := range p.Sample { - var sv string - for _, v := range s.Value { - sv = fmt.Sprintf("%s %10d", sv, v) - } - sv = sv + ": " - for _, l := range s.Location { - sv = sv + fmt.Sprintf("%d ", l.ID) - } - ss = append(ss, sv) - const labelHeader = " " - if len(s.Label) > 0 { - ls := []string{} - for k, v := range s.Label { - ls = append(ls, fmt.Sprintf("%s:%v", k, v)) - } - sort.Strings(ls) - ss = append(ss, labelHeader+strings.Join(ls, " ")) - } - if len(s.NumLabel) > 0 { - ls := []string{} - for k, v := range s.NumLabel { - ls = append(ls, fmt.Sprintf("%s:%v", k, v)) - } - sort.Strings(ls) - ss = append(ss, labelHeader+strings.Join(ls, " ")) - } + ss = append(ss, s.string()) } ss = append(ss, "Locations") for _, l := range p.Location { - locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address) - if m := l.Mapping; m != nil { - locStr = locStr + fmt.Sprintf("M=%d ", m.ID) - } - if len(l.Line) == 0 { - ss = append(ss, locStr) - } - for li := range l.Line { - lnStr := "??" 
- if fn := l.Line[li].Function; fn != nil { - lnStr = fmt.Sprintf("%s %s:%d s=%d", - fn.Name, - fn.Filename, - l.Line[li].Line, - fn.StartLine) - if fn.Name != fn.SystemName { - lnStr = lnStr + "(" + fn.SystemName + ")" - } - } - ss = append(ss, locStr+lnStr) - // Do not print location details past the first line - locStr = " " - } + ss = append(ss, l.string()) } ss = append(ss, "Mappings") for _, m := range p.Mapping { - bits := "" - if m.HasFunctions { - bits = bits + "[FN]" - } - if m.HasFilenames { - bits = bits + "[FL]" - } - if m.HasLineNumbers { - bits = bits + "[LN]" - } - if m.HasInlineFrames { - bits = bits + "[IN]" - } - ss = append(ss, fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s", - m.ID, - m.Start, m.Limit, m.Offset, - m.File, - m.BuildID, - bits)) + ss = append(ss, m.string()) } return strings.Join(ss, "\n") + "\n" } +// string dumps a text representation of a mapping. Intended mainly +// for debugging purposes. +func (m *Mapping) string() string { + bits := "" + if m.HasFunctions { + bits = bits + "[FN]" + } + if m.HasFilenames { + bits = bits + "[FL]" + } + if m.HasLineNumbers { + bits = bits + "[LN]" + } + if m.HasInlineFrames { + bits = bits + "[IN]" + } + return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s", + m.ID, + m.Start, m.Limit, m.Offset, + m.File, + m.BuildID, + bits) +} + +// string dumps a text representation of a location. Intended mainly +// for debugging purposes. +func (l *Location) string() string { + ss := []string{} + locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address) + if m := l.Mapping; m != nil { + locStr = locStr + fmt.Sprintf("M=%d ", m.ID) + } + if len(l.Line) == 0 { + ss = append(ss, locStr) + } + for li := range l.Line { + lnStr := "??" 
+ if fn := l.Line[li].Function; fn != nil { + lnStr = fmt.Sprintf("%s %s:%d s=%d", + fn.Name, + fn.Filename, + l.Line[li].Line, + fn.StartLine) + if fn.Name != fn.SystemName { + lnStr = lnStr + "(" + fn.SystemName + ")" + } + } + ss = append(ss, locStr+lnStr) + // Do not print location details past the first line + locStr = " " + } + return strings.Join(ss, "\n") +} + +// string dumps a text representation of a sample. Intended mainly +// for debugging purposes. +func (s *Sample) string() string { + ss := []string{} + var sv string + for _, v := range s.Value { + sv = fmt.Sprintf("%s %10d", sv, v) + } + sv = sv + ": " + for _, l := range s.Location { + sv = sv + fmt.Sprintf("%d ", l.ID) + } + ss = append(ss, sv) + const labelHeader = " " + if len(s.Label) > 0 { + ss = append(ss, labelHeader+labelsToString(s.Label)) + } + if len(s.NumLabel) > 0 { + ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit)) + } + return strings.Join(ss, "\n") +} + +// labelsToString returns a string representation of a +// map representing labels. +func labelsToString(labels map[string][]string) string { + ls := []string{} + for k, v := range labels { + ls = append(ls, fmt.Sprintf("%s:%v", k, v)) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// numLablesToString returns a string representation of a map +// representing numeric labels. +func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string { + ls := []string{} + for k, v := range numLabels { + units := numUnits[k] + var labelString string + if len(units) == len(v) { + values := make([]string, len(v)) + for i, vv := range v { + values[i] = fmt.Sprintf("%d %s", vv, units[i]) + } + labelString = fmt.Sprintf("%s:%v", k, values) + } else { + labelString = fmt.Sprintf("%s:%v", k, v) + } + ls = append(ls, labelString) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + // Scale multiplies all sample values in a profile by a constant. 
func (p *Profile) Scale(ratio float64) { if ratio == 1 { @@ -596,19 +729,17 @@ func (p *Profile) HasFileLines() bool { } // Unsymbolizable returns true if a mapping points to a binary for which -// locations can't be symbolized in principle, at least now. +// locations can't be symbolized in principle, at least now. Examples are +// "[vdso]", [vsyscall]" and some others, see the code. func (m *Mapping) Unsymbolizable() bool { name := filepath.Base(m.File) - return name == "[vdso]" || strings.HasPrefix(name, "linux-vdso") || name == "[heap]" || strings.HasPrefix(m.File, "/dev/dri/") + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") } // Copy makes a fully independent copy of a profile. func (p *Profile) Copy() *Profile { - p.preEncode() - b := marshal(p) - pp := &Profile{} - if err := unmarshal(b, pp); err != nil { + if err := unmarshal(serialize(p), pp); err != nil { panic(err) } if err := pp.postDecode(); err != nil { diff --git a/src/cmd/vendor/github.com/google/pprof/profile/profile_test.go b/src/cmd/vendor/github.com/google/pprof/profile/profile_test.go index c2319a65002..bc2ab8bdd10 100644 --- a/src/cmd/vendor/github.com/google/pprof/profile/profile_test.go +++ b/src/cmd/vendor/github.com/google/pprof/profile/profile_test.go @@ -19,8 +19,10 @@ import ( "fmt" "io/ioutil" "path/filepath" + "reflect" "regexp" "strings" + "sync" "testing" "github.com/google/pprof/internal/proftest" @@ -91,7 +93,6 @@ func TestParse(t *testing.T) { } func TestParseError(t *testing.T) { - testcases := []string{ "", "garbage text", @@ -107,6 +108,63 @@ func TestParseError(t *testing.T) { } } +func TestCheckValid(t *testing.T) { + const path = "testdata/java.cpu" + + inbytes, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("failed to read profile file %q: %v", path, err) + } + p, err := Parse(bytes.NewBuffer(inbytes)) + if err != nil { + t.Fatalf("failed to parse profile %q: %s", path, err) + } + + for _, 
tc := range []struct { + mutateFn func(*Profile) + wantErr string + }{ + { + mutateFn: func(p *Profile) { p.SampleType = nil }, + wantErr: "missing sample type information", + }, + { + mutateFn: func(p *Profile) { p.Sample[0] = nil }, + wantErr: "profile has nil sample", + }, + { + mutateFn: func(p *Profile) { p.Sample[0].Value = append(p.Sample[0].Value, 0) }, + wantErr: "sample has 3 values vs. 2 types", + }, + { + mutateFn: func(p *Profile) { p.Sample[0].Location[0] = nil }, + wantErr: "sample has nil location", + }, + { + mutateFn: func(p *Profile) { p.Location[0] = nil }, + wantErr: "profile has nil location", + }, + { + mutateFn: func(p *Profile) { p.Mapping = append(p.Mapping, nil) }, + wantErr: "profile has nil mapping", + }, + { + mutateFn: func(p *Profile) { p.Function[0] = nil }, + wantErr: "profile has nil function", + }, + } { + t.Run(tc.wantErr, func(t *testing.T) { + p := p.Copy() + tc.mutateFn(p) + if err := p.CheckValid(); err == nil { + t.Errorf("CheckValid(): got no error, want error %q", tc.wantErr) + } else if !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("CheckValid(): got error %v, want error %q", err, tc.wantErr) + } + }) + } +} + // leaveTempfile leaves |b| in a temporary file on disk and returns the // temp filename. This is useful to recover a profile when the test // fails. 
@@ -217,7 +275,7 @@ var cpuL = []*Location{ }, } -var testProfile = &Profile{ +var testProfile1 = &Profile{ PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, Period: 1, DurationNanos: 10e9, @@ -230,40 +288,181 @@ var testProfile = &Profile{ Location: []*Location{cpuL[0]}, Value: []int64{1000, 1000}, Label: map[string][]string{ - "key1": []string{"tag1"}, - "key2": []string{"tag1"}, + "key1": {"tag1"}, + "key2": {"tag1"}, }, }, { Location: []*Location{cpuL[1], cpuL[0]}, Value: []int64{100, 100}, Label: map[string][]string{ - "key1": []string{"tag2"}, - "key3": []string{"tag2"}, + "key1": {"tag2"}, + "key3": {"tag2"}, }, }, { Location: []*Location{cpuL[2], cpuL[0]}, Value: []int64{10, 10}, Label: map[string][]string{ - "key1": []string{"tag3"}, - "key2": []string{"tag2"}, + "key1": {"tag3"}, + "key2": {"tag2"}, }, }, { Location: []*Location{cpuL[3], cpuL[0]}, Value: []int64{10000, 10000}, Label: map[string][]string{ - "key1": []string{"tag4"}, - "key2": []string{"tag1"}, + "key1": {"tag4"}, + "key2": {"tag1"}, }, }, { Location: []*Location{cpuL[4], cpuL[0]}, Value: []int64{1, 1}, Label: map[string][]string{ - "key1": []string{"tag4"}, - "key2": []string{"tag1"}, + "key1": {"tag4"}, + "key2": {"tag1"}, + }, + }, + }, + Location: cpuL, + Function: cpuF, + Mapping: cpuM, +} + +var testProfile2 = &Profile{ + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "milliseconds"}, + }, + Sample: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{70, 1000}, + Label: map[string][]string{ + "key1": {"tag1"}, + "key2": {"tag1"}, + }, + }, + { + Location: []*Location{cpuL[1], cpuL[0]}, + Value: []int64{60, 100}, + Label: map[string][]string{ + "key1": {"tag2"}, + "key3": {"tag2"}, + }, + }, + { + Location: []*Location{cpuL[2], cpuL[0]}, + Value: []int64{50, 10}, + Label: map[string][]string{ + "key1": {"tag3"}, + "key2": {"tag2"}, 
+ }, + }, + { + Location: []*Location{cpuL[3], cpuL[0]}, + Value: []int64{40, 10000}, + Label: map[string][]string{ + "key1": {"tag4"}, + "key2": {"tag1"}, + }, + }, + { + Location: []*Location{cpuL[4], cpuL[0]}, + Value: []int64{1, 1}, + Label: map[string][]string{ + "key1": {"tag4"}, + "key2": {"tag1"}, + }, + }, + }, + Location: cpuL, + Function: cpuF, + Mapping: cpuM, +} + +var testProfile3 = &Profile{ + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + }, + Sample: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + Label: map[string][]string{ + "key1": {"tag1"}, + "key2": {"tag1"}, + }, + }, + }, + Location: cpuL, + Function: cpuF, + Mapping: cpuM, +} + +var testProfile4 = &Profile{ + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + }, + Sample: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + NumLabel: map[string][]int64{ + "key1": {10}, + "key2": {30}, + }, + NumUnit: map[string][]string{ + "key1": {"bytes"}, + "key2": {"bytes"}, + }, + }, + }, + Location: cpuL, + Function: cpuF, + Mapping: cpuM, +} + +var testProfile5 = &Profile{ + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + }, + Sample: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + NumLabel: map[string][]int64{ + "key1": {10}, + "key2": {30}, + }, + NumUnit: map[string][]string{ + "key1": {"bytes"}, + "key2": {"bytes"}, + }, + }, + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + NumLabel: map[string][]int64{ + "key1": {10}, + "key2": {30}, + }, + NumUnit: map[string][]string{ + "key1": {"kilobytes"}, + "key2": {"kilobytes"}, }, }, }, @@ -273,10 +472,10 @@ var testProfile = 
&Profile{ } var aggTests = map[string]aggTest{ - "precise": aggTest{true, true, true, true, 5}, - "fileline": aggTest{false, true, true, true, 4}, - "inline_function": aggTest{false, true, false, true, 3}, - "function": aggTest{false, true, false, false, 2}, + "precise": {true, true, true, true, 5}, + "fileline": {false, true, true, true, 4}, + "inline_function": {false, true, false, true, 3}, + "function": {false, true, false, false, 2}, } type aggTest struct { @@ -287,7 +486,7 @@ type aggTest struct { const totalSamples = int64(11111) func TestAggregation(t *testing.T) { - prof := testProfile.Copy() + prof := testProfile1.Copy() for _, resolution := range []string{"precise", "fileline", "inline_function", "function"} { a := aggTests[resolution] if !a.precise { @@ -362,7 +561,7 @@ func checkAggregation(prof *Profile, a *aggTest) error { // Test merge leaves the main binary in place. func TestMergeMain(t *testing.T) { - prof := testProfile.Copy() + prof := testProfile1.Copy() p1, err := Merge([]*Profile{prof}) if err != nil { t.Fatalf("merge error: %v", err) @@ -377,7 +576,7 @@ func TestMerge(t *testing.T) { // -2. Should end up with an empty profile (all samples for a // location should add up to 0). - prof := testProfile.Copy() + prof := testProfile1.Copy() p1, err := Merge([]*Profile{prof, prof}) if err != nil { t.Errorf("merge error: %v", err) @@ -409,7 +608,7 @@ func TestMergeAll(t *testing.T) { // Aggregate 10 copies of the profile. 
profs := make([]*Profile, 10) for i := 0; i < 10; i++ { - profs[i] = testProfile.Copy() + profs[i] = testProfile1.Copy() } prof, err := Merge(profs) if err != nil { @@ -420,7 +619,7 @@ func TestMergeAll(t *testing.T) { tb := locationHash(s) samples[tb] = samples[tb] + s.Value[0] } - for _, s := range testProfile.Sample { + for _, s := range testProfile1.Sample { tb := locationHash(s) if samples[tb] != s.Value[0]*10 { t.Errorf("merge got wrong value at %s : %d instead of %d", tb, samples[tb], s.Value[0]*10) @@ -428,6 +627,140 @@ func TestMergeAll(t *testing.T) { } } +func TestNumLabelMerge(t *testing.T) { + for _, tc := range []struct { + name string + profs []*Profile + wantNumLabels []map[string][]int64 + wantNumUnits []map[string][]string + }{ + { + name: "different tag units not merged", + profs: []*Profile{testProfile4.Copy(), testProfile5.Copy()}, + wantNumLabels: []map[string][]int64{ + { + "key1": {10}, + "key2": {30}, + }, + { + "key1": {10}, + "key2": {30}, + }, + }, + wantNumUnits: []map[string][]string{ + { + "key1": {"bytes"}, + "key2": {"bytes"}, + }, + { + "key1": {"kilobytes"}, + "key2": {"kilobytes"}, + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + prof, err := Merge(tc.profs) + if err != nil { + t.Errorf("merge error: %v", err) + } + + if want, got := len(tc.wantNumLabels), len(prof.Sample); want != got { + t.Fatalf("got %d samples, want %d samples", got, want) + } + for i, wantLabels := range tc.wantNumLabels { + numLabels := prof.Sample[i].NumLabel + if !reflect.DeepEqual(wantLabels, numLabels) { + t.Errorf("got numeric labels %v, want %v", numLabels, wantLabels) + } + + wantUnits := tc.wantNumUnits[i] + numUnits := prof.Sample[i].NumUnit + if !reflect.DeepEqual(wantUnits, numUnits) { + t.Errorf("got numeric labels %v, want %v", numUnits, wantUnits) + } + } + }) + } +} + +func TestNormalizeBySameProfile(t *testing.T) { + pb := testProfile1.Copy() + p := testProfile1.Copy() + + if err := p.Normalize(pb); err != nil { + t.Fatal(err) + 
} + + for i, s := range p.Sample { + for j, v := range s.Value { + expectedSampleValue := testProfile1.Sample[i].Value[j] + if v != expectedSampleValue { + t.Errorf("For sample %d, value %d want %d got %d", i, j, expectedSampleValue, v) + } + } + } +} + +func TestNormalizeByDifferentProfile(t *testing.T) { + p := testProfile1.Copy() + pb := testProfile2.Copy() + + if err := p.Normalize(pb); err != nil { + t.Fatal(err) + } + + expectedSampleValues := [][]int64{ + {19, 1000}, + {1, 100}, + {0, 10}, + {198, 10000}, + {0, 1}, + } + + for i, s := range p.Sample { + for j, v := range s.Value { + if v != expectedSampleValues[i][j] { + t.Errorf("For sample %d, value %d want %d got %d", i, j, expectedSampleValues[i][j], v) + } + } + } +} + +func TestNormalizeByMultipleOfSameProfile(t *testing.T) { + pb := testProfile1.Copy() + for i, s := range pb.Sample { + for j, v := range s.Value { + pb.Sample[i].Value[j] = 10 * v + } + } + + p := testProfile1.Copy() + + err := p.Normalize(pb) + if err != nil { + t.Fatal(err) + } + + for i, s := range p.Sample { + for j, v := range s.Value { + expectedSampleValue := 10 * testProfile1.Sample[i].Value[j] + if v != expectedSampleValue { + t.Errorf("For sample %d, value %d, want %d got %d", i, j, expectedSampleValue, v) + } + } + } +} + +func TestNormalizeIncompatibleProfiles(t *testing.T) { + p := testProfile1.Copy() + pb := testProfile3.Copy() + + if err := p.Normalize(pb); err == nil { + t.Errorf("Expected an error") + } +} + func TestFilter(t *testing.T) { // Perform several forms of filtering on the test profile. 
@@ -437,12 +770,33 @@ func TestFilter(t *testing.T) { } for tx, tc := range []filterTestcase{ - {nil, nil, nil, nil, true, false, false, false}, - {regexp.MustCompile("notfound"), nil, nil, nil, false, false, false, false}, - {nil, regexp.MustCompile("foo.c"), nil, nil, true, true, false, false}, - {nil, nil, regexp.MustCompile("lib.so"), nil, true, false, true, false}, + { + fm: true, // nil focus matches every sample + }, + { + focus: regexp.MustCompile("notfound"), + }, + { + ignore: regexp.MustCompile("foo.c"), + fm: true, + im: true, + }, + { + hide: regexp.MustCompile("lib.so"), + fm: true, + hm: true, + }, + { + show: regexp.MustCompile("foo.c"), + fm: true, + hnm: true, + }, + { + show: regexp.MustCompile("notfound"), + fm: true, + }, } { - prof := *testProfile.Copy() + prof := *testProfile1.Copy() gf, gi, gh, gnh := prof.FilterSamplesByName(tc.focus, tc.ignore, tc.hide, tc.show) if gf != tc.fm { t.Errorf("Filter #%d, got fm=%v, want %v", tx, gf, tc.fm) @@ -488,7 +842,7 @@ func TestTagFilter(t *testing.T) { {regexp.MustCompile("key1"), nil, true, false, 1}, {nil, regexp.MustCompile("key[12]"), true, true, 1}, } { - prof := testProfile.Copy() + prof := testProfile1.Copy() gim, gem := prof.FilterTagsByName(tc.include, tc.exclude) if gim != tc.im { t.Errorf("Filter #%d, got include match=%v, want %v", tx, gim, tc.im) @@ -513,9 +867,152 @@ func locationHash(s *Sample) string { return tb } -func TestSetMain(t *testing.T) { - testProfile.massageMappings() - if testProfile.Mapping[0].File != mainBinary { - t.Errorf("got %s for main", testProfile.Mapping[0].File) +func TestNumLabelUnits(t *testing.T) { + var tagFilterTests = []struct { + desc string + tagVals []map[string][]int64 + tagUnits []map[string][]string + wantUnits map[string]string + wantIgnoredUnits map[string][]string + }{ + { + "One sample, multiple keys, different specified units", + []map[string][]int64{{"key1": {131072}, "key2": {128}}}, + []map[string][]string{{"key1": {"bytes"}, "key2": 
{"kilobytes"}}}, + map[string]string{"key1": "bytes", "key2": "kilobytes"}, + map[string][]string{}, + }, + { + "One sample, one key with one value, unit specified", + []map[string][]int64{{"key1": {8}}}, + []map[string][]string{{"key1": {"bytes"}}}, + map[string]string{"key1": "bytes"}, + map[string][]string{}, + }, + { + "One sample, one key with one value, empty unit specified", + []map[string][]int64{{"key1": {8}}}, + []map[string][]string{{"key1": {""}}}, + map[string]string{"key1": "key1"}, + map[string][]string{}, + }, + { + "Key bytes, unit not specified", + []map[string][]int64{{"bytes": {8}}}, + []map[string][]string{nil}, + map[string]string{"bytes": "bytes"}, + map[string][]string{}, + }, + { + "One sample, one key with one value, unit not specified", + []map[string][]int64{{"kilobytes": {8}}}, + []map[string][]string{nil}, + map[string]string{"kilobytes": "kilobytes"}, + map[string][]string{}, + }, + { + "Key request, unit not specified", + []map[string][]int64{{"request": {8}}}, + []map[string][]string{nil}, + map[string]string{"request": "bytes"}, + map[string][]string{}, + }, + { + "Key alignment, unit not specified", + []map[string][]int64{{"alignment": {8}}}, + []map[string][]string{nil}, + map[string]string{"alignment": "bytes"}, + map[string][]string{}, + }, + { + "One sample, one key with multiple values and two different units", + []map[string][]int64{{"key1": {8, 8}}}, + []map[string][]string{{"key1": {"bytes", "kilobytes"}}}, + map[string]string{"key1": "bytes"}, + map[string][]string{"key1": {"kilobytes"}}, + }, + { + "One sample, one key with multiple values and three different units", + []map[string][]int64{{"key1": {8, 8}}}, + []map[string][]string{{"key1": {"bytes", "megabytes", "kilobytes"}}}, + map[string]string{"key1": "bytes"}, + map[string][]string{"key1": {"kilobytes", "megabytes"}}, + }, + { + "Two samples, one key, different units specified", + []map[string][]int64{{"key1": {8}}, {"key1": {8}}}, + []map[string][]string{{"key1": 
{"bytes"}}, {"key1": {"kilobytes"}}}, + map[string]string{"key1": "bytes"}, + map[string][]string{"key1": {"kilobytes"}}, + }, + { + "Keys alignment, request, and bytes have units specified", + []map[string][]int64{{ + "alignment": {8}, + "request": {8}, + "bytes": {8}, + }}, + []map[string][]string{{ + "alignment": {"seconds"}, + "request": {"minutes"}, + "bytes": {"hours"}, + }}, + map[string]string{ + "alignment": "seconds", + "request": "minutes", + "bytes": "hours", + }, + map[string][]string{}, + }, + } + for _, test := range tagFilterTests { + p := &Profile{Sample: make([]*Sample, len(test.tagVals))} + for i, numLabel := range test.tagVals { + s := Sample{ + NumLabel: numLabel, + NumUnit: test.tagUnits[i], + } + p.Sample[i] = &s + } + units, ignoredUnits := p.NumLabelUnits() + if !reflect.DeepEqual(test.wantUnits, units) { + t.Errorf("%s: got %v units, want %v", test.desc, units, test.wantUnits) + } + if !reflect.DeepEqual(test.wantIgnoredUnits, ignoredUnits) { + t.Errorf("%s: got %v ignored units, want %v", test.desc, ignoredUnits, test.wantIgnoredUnits) + } } } + +func TestSetMain(t *testing.T) { + testProfile1.massageMappings() + if testProfile1.Mapping[0].File != mainBinary { + t.Errorf("got %s for main", testProfile1.Mapping[0].File) + } +} + +// parallel runs n copies of fn in parallel. 
+func parallel(n int, fn func()) { + var wg sync.WaitGroup + wg.Add(n) + for i := 0; i < n; i++ { + go func() { + fn() + wg.Done() + }() + } + wg.Wait() +} + +func TestThreadSafety(t *testing.T) { + src := testProfile1.Copy() + parallel(4, func() { src.Copy() }) + parallel(4, func() { + var b bytes.Buffer + src.WriteUncompressed(&b) + }) + parallel(4, func() { + var b bytes.Buffer + src.Write(&b) + }) +} diff --git a/src/cmd/vendor/github.com/google/pprof/profile/proto.go b/src/cmd/vendor/github.com/google/pprof/profile/proto.go index 01b7f7ae181..e7df33ac2b8 100644 --- a/src/cmd/vendor/github.com/google/pprof/profile/proto.go +++ b/src/cmd/vendor/github.com/google/pprof/profile/proto.go @@ -71,7 +71,7 @@ func encodeLength(b *buffer, tag int, len int) { func encodeUint64(b *buffer, tag int, x uint64) { // append varint to b.data - encodeVarint(b, uint64(tag)<<3|0) + encodeVarint(b, uint64(tag)<<3) encodeVarint(b, x) } @@ -145,13 +145,6 @@ func encodeStrings(b *buffer, tag int, x []string) { } } -func encodeStringOpt(b *buffer, tag int, x string) { - if x == "" { - return - } - encodeString(b, tag, x) -} - func encodeBool(b *buffer, tag int, x bool) { if x { encodeUint64(b, tag, 1) @@ -161,10 +154,9 @@ func encodeBool(b *buffer, tag int, x bool) { } func encodeBoolOpt(b *buffer, tag int, x bool) { - if x == false { - return + if x { + encodeBool(b, tag, x) } - encodeBool(b, tag, x) } func encodeMessage(b *buffer, tag int, m message) { diff --git a/src/cmd/vendor/github.com/google/pprof/profile/proto_test.go b/src/cmd/vendor/github.com/google/pprof/profile/proto_test.go index d2a351373e1..e0832294ace 100644 --- a/src/cmd/vendor/github.com/google/pprof/profile/proto_test.go +++ b/src/cmd/vendor/github.com/google/pprof/profile/proto_test.go @@ -100,8 +100,8 @@ var all = &Profile{ { Location: []*Location{testL[0], testL[1], testL[2], testL[1], testL[1]}, Label: map[string][]string{ - "key1": []string{"value1"}, - "key2": []string{"value2"}, + "key1": {"value1"}, + 
"key2": {"value2"}, }, Value: []int64{10, 20}, }, @@ -109,12 +109,19 @@ var all = &Profile{ Location: []*Location{testL[1], testL[2], testL[0], testL[1]}, Value: []int64{30, 40}, Label: map[string][]string{ - "key1": []string{"value1"}, - "key2": []string{"value2"}, + "key1": {"value1"}, + "key2": {"value2"}, }, NumLabel: map[string][]int64{ - "key1": []int64{1, 2}, - "key2": []int64{3, 4}, + "key1": {1, 2}, + "key2": {3, 4}, + "bytes": {3, 4}, + "requests": {1, 1, 3, 4, 5}, + "alignment": {3, 4}, + }, + NumUnit: map[string][]string{ + "requests": {"", "", "seconds", "", "s"}, + "alignment": {"kilobytes", "kilobytes"}, }, }, }, diff --git a/src/cmd/vendor/github.com/google/pprof/profile/prune.go b/src/cmd/vendor/github.com/google/pprof/profile/prune.go index cf9cbb3894a..02d21a81846 100644 --- a/src/cmd/vendor/github.com/google/pprof/profile/prune.go +++ b/src/cmd/vendor/github.com/google/pprof/profile/prune.go @@ -22,6 +22,39 @@ import ( "strings" ) +var ( + reservedNames = []string{"(anonymous namespace)", "operator()"} + bracketRx = func() *regexp.Regexp { + var quotedNames []string + for _, name := range append(reservedNames, "(") { + quotedNames = append(quotedNames, regexp.QuoteMeta(name)) + } + return regexp.MustCompile(strings.Join(quotedNames, "|")) + }() +) + +// simplifyFunc does some primitive simplification of function names. +func simplifyFunc(f string) string { + // Account for leading '.' on the PPC ELF v1 ABI. + funcName := strings.TrimPrefix(f, ".") + // Account for unsimplified names -- try to remove the argument list by trimming + // starting from the first '(', but skipping reserved names that have '('. 
+ for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) { + foundReserved := false + for _, res := range reservedNames { + if funcName[ind[0]:ind[1]] == res { + foundReserved = true + break + } + } + if !foundReserved { + funcName = funcName[:ind[0]] + break + } + } + return funcName +} + // Prune removes all nodes beneath a node matching dropRx, and not // matching keepRx. If the root node of a Sample matches, the sample // will have an empty stack. @@ -33,12 +66,7 @@ func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { var i int for i = len(loc.Line) - 1; i >= 0; i-- { if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { - // Account for leading '.' on the PPC ELF v1 ABI. - funcName := strings.TrimPrefix(fn.Name, ".") - // Account for unsimplified names -- trim starting from the first '('. - if index := strings.Index(funcName, "("); index > 0 { - funcName = funcName[:index] - } + funcName := simplifyFunc(fn.Name) if dropRx.MatchString(funcName) { if keepRx == nil || !keepRx.MatchString(funcName) { break @@ -126,12 +154,7 @@ func (p *Profile) PruneFrom(dropRx *regexp.Regexp) { for _, loc := range p.Location { for i := 0; i < len(loc.Line); i++ { if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { - // Account for leading '.' on the PPC ELF v1 ABI. - funcName := strings.TrimPrefix(fn.Name, ".") - // Account for unsimplified names -- trim starting from the first '('. - if index := strings.Index(funcName, "("); index > 0 { - funcName = funcName[:index] - } + funcName := simplifyFunc(fn.Name) if dropRx.MatchString(funcName) { // Found matching entry to prune. 
pruneBeneath[loc.ID] = true diff --git a/src/cmd/vendor/github.com/google/pprof/profile/prune_test.go b/src/cmd/vendor/github.com/google/pprof/profile/prune_test.go index 58fa25ee2d0..75d7c6d4f7f 100644 --- a/src/cmd/vendor/github.com/google/pprof/profile/prune_test.go +++ b/src/cmd/vendor/github.com/google/pprof/profile/prune_test.go @@ -25,6 +25,7 @@ func TestPrune(t *testing.T) { want string }{ {in1, out1}, + {in2, out2}, } { in := test.in.Copy() in.RemoveUninteresting() @@ -50,6 +51,10 @@ var funs = []*Function{ {ID: 4, Name: "fun3", SystemName: "fun3", Filename: "fun.c"}, {ID: 5, Name: "fun4", SystemName: "fun4", Filename: "fun.c"}, {ID: 6, Name: "fun5", SystemName: "fun5", Filename: "fun.c"}, + {ID: 7, Name: "unsimplified_fun(int)", SystemName: "unsimplified_fun(int)", Filename: "fun.c"}, + {ID: 8, Name: "Foo::(anonymous namespace)::Test::Bar", SystemName: "Foo::(anonymous namespace)::Test::Bar", Filename: "fun.c"}, + {ID: 9, Name: "Hello::(anonymous namespace)::World(const Foo::(anonymous namespace)::Test::Bar)", SystemName: "Hello::(anonymous namespace)::World(const Foo::(anonymous namespace)::Test::Bar)", Filename: "fun.c"}, + {ID: 10, Name: "Foo::operator()(::Bar)", SystemName: "Foo::operator()(::Bar)", Filename: "fun.c"}, } var locs1 = []*Location{ @@ -137,3 +142,89 @@ Locations 4: 0x0 fun5 fun.c:2 s=0 Mappings ` + +var locs2 = []*Location{ + { + ID: 1, + Line: []Line{ + {Function: funs[0], Line: 1}, + }, + }, + { + ID: 2, + Line: []Line{ + {Function: funs[6], Line: 1}, + }, + }, + { + ID: 3, + Line: []Line{ + {Function: funs[7], Line: 1}, + }, + }, + { + ID: 4, + Line: []Line{ + {Function: funs[8], Line: 1}, + }, + }, + { + ID: 5, + Line: []Line{ + {Function: funs[9], Line: 1}, + }, + }, +} + +var in2 = &Profile{ + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "milliseconds"}, + }, + Sample: []*Sample{ + // Unsimplified 
name with parameters shouldn't match. + { + Location: []*Location{locs2[1], locs2[0]}, + Value: []int64{1, 1}, + }, + // .*Foo::.*::Bar.* should (and will be dropped) regardless of the anonymous namespace. + { + Location: []*Location{locs2[2], locs2[0]}, + Value: []int64{1, 1}, + }, + // .*Foo::.*::Bar.* shouldn't match inside the parameter list. + { + Location: []*Location{locs2[3], locs2[0]}, + Value: []int64{1, 1}, + }, + // .*operator\(\) should match, regardless of parameters. + { + Location: []*Location{locs2[4], locs2[0]}, + Value: []int64{1, 1}, + }, + }, + Location: locs2, + Function: funs, + DropFrames: `unsimplified_fun\(int\)|.*Foo::.*::Bar.*|.*operator\(\)`, +} + +const out2 = `PeriodType: cpu milliseconds +Period: 1 +Duration: 10s +Samples: +samples/count cpu/milliseconds + 1 1: 2 1 + 1 1: 1 + 1 1: 4 1 + 1 1: 1 +Locations + 1: 0x0 main main.c:1 s=0 + 2: 0x0 unsimplified_fun(int) fun.c:1 s=0 + 3: 0x0 Foo::(anonymous namespace)::Test::Bar fun.c:1 s=0 + 4: 0x0 Hello::(anonymous namespace)::World(const Foo::(anonymous namespace)::Test::Bar) fun.c:1 s=0 + 5: 0x0 Foo::operator()(::Bar) fun.c:1 s=0 +Mappings +` diff --git a/src/cmd/vendor/github.com/google/pprof/proto/profile.proto b/src/cmd/vendor/github.com/google/pprof/proto/profile.proto index aa790e032ad..78caf4138b2 100644 --- a/src/cmd/vendor/github.com/google/pprof/proto/profile.proto +++ b/src/cmd/vendor/github.com/google/pprof/proto/profile.proto @@ -109,6 +109,15 @@ message Label { // At most one of the following must be present int64 str = 2; // Index into string table int64 num = 3; + + // Should only be present when num is present. + // Specifies the units of num. + // Use arbitrary string (for example, "requests") as a custom count unit. + // If no unit is specified, consumer may apply heuristic to deduce the unit. 
+ // Consumers may also interpret units like "bytes" and "kilobytes" as memory + // units and units like "seconds" and "nanoseconds" as time units, + // and apply appropriate unit conversions to these. + int64 num_unit = 4; // Index into string table } message Mapping { diff --git a/src/cmd/vendor/github.com/google/pprof/test.sh b/src/cmd/vendor/github.com/google/pprof/test.sh new file mode 100755 index 00000000000..81b90891312 --- /dev/null +++ b/src/cmd/vendor/github.com/google/pprof/test.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -e +MODE=atomic +echo "mode: $MODE" > coverage.txt + +PKG=$(go list ./... | grep -v /vendor/) + +staticcheck $PKG +unused $PKG +go test -v $PKG + +for d in $PKG; do + go test -race -coverprofile=profile.out -covermode=$MODE $d + if [ -f profile.out ]; then + cat profile.out | grep -v "^mode: " >> coverage.txt + rm profile.out + fi +done diff --git a/src/cmd/vendor/golang.org/x/arch/arm/armasm/decode.go b/src/cmd/vendor/golang.org/x/arch/arm/armasm/decode.go index cc81dc3f50b..6b4d73841be 100644 --- a/src/cmd/vendor/golang.org/x/arch/arm/armasm/decode.go +++ b/src/cmd/vendor/golang.org/x/arch/arm/armasm/decode.go @@ -233,9 +233,9 @@ func decodeArg(aop instArg, x uint32) Arg { typ, count := decodeShift(x) // ROR #0 here means ROR #0, but decodeShift rewrites to RRX #1. 
if typ == RotateRightExt { - return Rm + return Reg(Rm) } - return RegShift{Rm, typ, count} + return RegShift{Rm, typ, uint8(count)} case arg_R_shift_R: Rm := Reg(x & (1<<4 - 1)) @@ -247,9 +247,9 @@ func decodeArg(aop instArg, x uint32) Arg { Rm := Reg(x & (1<<4 - 1)) typ, count := decodeShift(x) if typ == ShiftLeft && count == 0 { - return Rm + return Reg(Rm) } - return RegShift{Rm, typ, count} + return RegShift{Rm, typ, uint8(count)} case arg_R1_0: return Reg((x & (1<<4 - 1))) diff --git a/src/cmd/vendor/golang.org/x/arch/arm/armasm/ext_test.go b/src/cmd/vendor/golang.org/x/arch/arm/armasm/ext_test.go index 98192b324ed..3556ae77bbe 100644 --- a/src/cmd/vendor/golang.org/x/arch/arm/armasm/ext_test.go +++ b/src/cmd/vendor/golang.org/x/arch/arm/armasm/ext_test.go @@ -34,7 +34,7 @@ var ( debug = false ) -// A ExtInst represents a single decoded instruction parsed +// An ExtInst represents a single decoded instruction parsed // from an external disassembler's output. type ExtInst struct { addr uint32 diff --git a/src/cmd/vendor/golang.org/x/arch/arm/armasm/inst.go b/src/cmd/vendor/golang.org/x/arch/arm/armasm/inst.go index 60d633bdb6c..0e056442b23 100644 --- a/src/cmd/vendor/golang.org/x/arch/arm/armasm/inst.go +++ b/src/cmd/vendor/golang.org/x/arch/arm/armasm/inst.go @@ -105,7 +105,7 @@ func (i Imm) String() string { return fmt.Sprintf("#%#x", uint32(i)) } -// A ImmAlt is an alternate encoding of an integer constant. +// An ImmAlt is an alternate encoding of an integer constant. 
type ImmAlt struct { Val uint8 Rot uint8 diff --git a/src/cmd/vendor/golang.org/x/arch/arm/armasm/plan9x.go b/src/cmd/vendor/golang.org/x/arch/arm/armasm/plan9x.go index fae0ca62a57..321b081505b 100644 --- a/src/cmd/vendor/golang.org/x/arch/arm/armasm/plan9x.go +++ b/src/cmd/vendor/golang.org/x/arch/arm/armasm/plan9x.go @@ -9,6 +9,7 @@ import ( "encoding/binary" "fmt" "io" + "math" "strings" ) @@ -37,7 +38,7 @@ func GoSyntax(inst Inst, pc uint64, symname func(uint64) (string, uint64), text op := inst.Op.String() switch inst.Op &^ 15 { - case LDR_EQ, LDRB_EQ, LDRH_EQ: + case LDR_EQ, LDRB_EQ, LDRH_EQ, LDRSB_EQ, LDRSH_EQ, VLDR_EQ: // Check for RET reg, _ := inst.Args[0].(Reg) mem, _ := inst.Args[1].(Mem) @@ -48,22 +49,22 @@ func GoSyntax(inst Inst, pc uint64, symname func(uint64) (string, uint64), text // Check for PC-relative load. if mem.Base == PC && mem.Sign == 0 && mem.Mode == AddrOffset && text != nil { addr := uint32(pc) + 8 + uint32(mem.Offset) - buf := make([]byte, 4) + buf := make([]byte, 8) switch inst.Op &^ 15 { - case LDRB_EQ: + case LDRB_EQ, LDRSB_EQ: if _, err := text.ReadAt(buf[:1], int64(addr)); err != nil { break } args[1] = fmt.Sprintf("$%#x", buf[0]) - case LDRH_EQ: + case LDRH_EQ, LDRSH_EQ: if _, err := text.ReadAt(buf[:2], int64(addr)); err != nil { break } args[1] = fmt.Sprintf("$%#x", binary.LittleEndian.Uint16(buf)) case LDR_EQ: - if _, err := text.ReadAt(buf, int64(addr)); err != nil { + if _, err := text.ReadAt(buf[:4], int64(addr)); err != nil { break } x := binary.LittleEndian.Uint32(buf) @@ -72,6 +73,22 @@ func GoSyntax(inst Inst, pc uint64, symname func(uint64) (string, uint64), text } else { args[1] = fmt.Sprintf("$%#x", x) } + + case VLDR_EQ: + switch { + case strings.HasPrefix(args[0], "D"): // VLDR.F64 + if _, err := text.ReadAt(buf, int64(addr)); err != nil { + break + } + args[1] = fmt.Sprintf("$%f", math.Float64frombits(binary.LittleEndian.Uint64(buf))) + case strings.HasPrefix(args[0], "S"): // VLDR.F32 + if _, err := 
text.ReadAt(buf[:4], int64(addr)); err != nil { + break + } + args[1] = fmt.Sprintf("$%f", math.Float32frombits(binary.LittleEndian.Uint32(buf))) + default: + panic(fmt.Sprintf("wrong FP register: %v", inst)) + } } } } @@ -79,7 +96,7 @@ func GoSyntax(inst Inst, pc uint64, symname func(uint64) (string, uint64), text // Move addressing mode into opcode suffix. suffix := "" switch inst.Op &^ 15 { - case LDR_EQ, LDRB_EQ, LDRH_EQ, STR_EQ, STRB_EQ, STRH_EQ: + case LDR_EQ, LDRB_EQ, LDRSB_EQ, LDRH_EQ, LDRSH_EQ, STR_EQ, STRB_EQ, STRH_EQ, VLDR_EQ, VSTR_EQ: mem, _ := inst.Args[1].(Mem) switch mem.Mode { case AddrOffset, AddrLDM: @@ -98,7 +115,7 @@ func GoSyntax(inst Inst, pc uint64, symname func(uint64) (string, uint64), text if mem.Sign != 0 { sign := "" if mem.Sign < 0 { - sign = "" + suffix += ".U" } shift := "" if mem.Count != 0 { @@ -113,6 +130,11 @@ func GoSyntax(inst Inst, pc uint64, symname func(uint64) (string, uint64), text for i, j := 0, len(args)-1; i < j; i, j = i+1, j-1 { args[i], args[j] = args[j], args[i] } + // For MLA-like instructions, the addend is the third operand. 
+ switch inst.Op &^ 15 { + case SMLAWT_EQ, SMLAWB_EQ, MLA_EQ, MLA_S_EQ, MLS_EQ, SMMLA_EQ, SMMLS_EQ, SMLABB_EQ, SMLATB_EQ, SMLABT_EQ, SMLATT_EQ, SMLAD_EQ, SMLAD_X_EQ, SMLSD_EQ, SMLSD_X_EQ: + args = []string{args[1], args[2], args[0], args[3]} + } switch inst.Op &^ 15 { case MOV_EQ: @@ -121,9 +143,26 @@ func GoSyntax(inst Inst, pc uint64, symname func(uint64) (string, uint64), text case LDR_EQ: op = "MOVW" + op[3:] + suffix case LDRB_EQ: - op = "MOVB" + op[4:] + suffix + op = "MOVBU" + op[4:] + suffix + case LDRSB_EQ: + op = "MOVBS" + op[5:] + suffix case LDRH_EQ: - op = "MOVH" + op[4:] + suffix + op = "MOVHU" + op[4:] + suffix + case LDRSH_EQ: + op = "MOVHS" + op[5:] + suffix + case VLDR_EQ: + switch { + case strings.HasPrefix(args[1], "D"): // VLDR.F64 + op = "MOVD" + op[4:] + suffix + args[1] = "F" + args[1][1:] // Dx -> Fx + case strings.HasPrefix(args[1], "S"): // VLDR.F32 + op = "MOVF" + op[4:] + suffix + if inst.Args[0].(Reg)&1 == 0 { // Sx -> Fy, y = x/2, if x is even + args[1] = fmt.Sprintf("F%d", (inst.Args[0].(Reg)-S0)/2) + } + default: + panic(fmt.Sprintf("wrong FP register: %v", inst)) + } case STR_EQ: op = "MOVW" + op[3:] + suffix @@ -134,6 +173,20 @@ func GoSyntax(inst Inst, pc uint64, symname func(uint64) (string, uint64), text case STRH_EQ: op = "MOVH" + op[4:] + suffix args[0], args[1] = args[1], args[0] + case VSTR_EQ: + switch { + case strings.HasPrefix(args[1], "D"): // VSTR.F64 + op = "MOVD" + op[4:] + suffix + args[1] = "F" + args[1][1:] // Dx -> Fx + case strings.HasPrefix(args[1], "S"): // VSTR.F32 + op = "MOVF" + op[4:] + suffix + if inst.Args[0].(Reg)&1 == 0 { // Sx -> Fy, y = x/2, if x is even + args[1] = fmt.Sprintf("F%d", (inst.Args[0].(Reg)-S0)/2) + } + default: + panic(fmt.Sprintf("wrong FP register: %v", inst)) + } + args[0], args[1] = args[1], args[0] } if args != nil { @@ -154,7 +207,7 @@ func plan9Arg(inst *Inst, pc uint64, symname func(uint64) (string, uint64), arg case Endian: case Imm: - return fmt.Sprintf("$%d", int(a)) + 
return fmt.Sprintf("$%d", uint32(a)) case Mem: @@ -185,6 +238,8 @@ func plan9Arg(inst *Inst, pc uint64, symname func(uint64) (string, uint64), arg } else { fmt.Fprintf(&buf, "R%d-R%d", start, end) } + start = -2 + end = -2 } } for i := 0; i < 16; i++ { @@ -195,6 +250,8 @@ func plan9Arg(inst *Inst, pc uint64, symname func(uint64) (string, uint64), arg } start = i end = i + } else { + flush() } } flush() diff --git a/src/cmd/vendor/golang.org/x/arch/arm/armasm/tables.go b/src/cmd/vendor/golang.org/x/arch/arm/armasm/tables.go index 58f51fe1f50..3ad489e0197 100644 --- a/src/cmd/vendor/golang.org/x/arch/arm/armasm/tables.go +++ b/src/cmd/vendor/golang.org/x/arch/arm/armasm/tables.go @@ -945,6 +945,22 @@ const ( MRS_LE MRS MRS_ZZ + MSR_EQ + MSR_NE + MSR_CS + MSR_CC + MSR_MI + MSR_PL + MSR_VS + MSR_VC + MSR_HI + MSR_LS + MSR_GE + MSR_LT + MSR_GT + MSR_LE + MSR + MSR_ZZ MUL_EQ MUL_NE MUL_CS @@ -1585,6 +1601,22 @@ const ( SBFX_LE SBFX SBFX_ZZ + SDIV_EQ + SDIV_NE + SDIV_CS + SDIV_CC + SDIV_MI + SDIV_PL + SDIV_VS + SDIV_VC + SDIV_HI + SDIV_LS + SDIV_GE + SDIV_LT + SDIV_GT + SDIV_LE + SDIV + SDIV_ZZ SEL_EQ SEL_NE SEL_CS @@ -2929,6 +2961,22 @@ const ( UBFX_LE UBFX UBFX_ZZ + UDIV_EQ + UDIV_NE + UDIV_CS + UDIV_CC + UDIV_MI + UDIV_PL + UDIV_VS + UDIV_VC + UDIV_HI + UDIV_LS + UDIV_GE + UDIV_LT + UDIV_GT + UDIV_LE + UDIV + UDIV_ZZ UHADD16_EQ UHADD16_NE UHADD16_CS @@ -5480,6 +5528,22 @@ var opstr = [...]string{ MRS_LE: "MRS.LE", MRS: "MRS", MRS_ZZ: "MRS.ZZ", + MSR_EQ: "MSR.EQ", + MSR_NE: "MSR.NE", + MSR_CS: "MSR.CS", + MSR_CC: "MSR.CC", + MSR_MI: "MSR.MI", + MSR_PL: "MSR.PL", + MSR_VS: "MSR.VS", + MSR_VC: "MSR.VC", + MSR_HI: "MSR.HI", + MSR_LS: "MSR.LS", + MSR_GE: "MSR.GE", + MSR_LT: "MSR.LT", + MSR_GT: "MSR.GT", + MSR_LE: "MSR.LE", + MSR: "MSR", + MSR_ZZ: "MSR.ZZ", MUL_EQ: "MUL.EQ", MUL_NE: "MUL.NE", MUL_CS: "MUL.CS", @@ -6107,6 +6171,22 @@ var opstr = [...]string{ SBFX_LE: "SBFX.LE", SBFX: "SBFX", SBFX_ZZ: "SBFX.ZZ", + SDIV_EQ: "SDIV.EQ", + SDIV_NE: "SDIV.NE", + SDIV_CS: "SDIV.CS", 
+ SDIV_CC: "SDIV.CC", + SDIV_MI: "SDIV.MI", + SDIV_PL: "SDIV.PL", + SDIV_VS: "SDIV.VS", + SDIV_VC: "SDIV.VC", + SDIV_HI: "SDIV.HI", + SDIV_LS: "SDIV.LS", + SDIV_GE: "SDIV.GE", + SDIV_LT: "SDIV.LT", + SDIV_GT: "SDIV.GT", + SDIV_LE: "SDIV.LE", + SDIV: "SDIV", + SDIV_ZZ: "SDIV.ZZ", SEL_EQ: "SEL.EQ", SEL_NE: "SEL.NE", SEL_CS: "SEL.CS", @@ -7436,6 +7516,22 @@ var opstr = [...]string{ UBFX_LE: "UBFX.LE", UBFX: "UBFX", UBFX_ZZ: "UBFX.ZZ", + UDIV_EQ: "UDIV.EQ", + UDIV_NE: "UDIV.NE", + UDIV_CS: "UDIV.CS", + UDIV_CC: "UDIV.CC", + UDIV_MI: "UDIV.MI", + UDIV_PL: "UDIV.PL", + UDIV_VS: "UDIV.VS", + UDIV_VC: "UDIV.VC", + UDIV_HI: "UDIV.HI", + UDIV_LS: "UDIV.LS", + UDIV_GE: "UDIV.GE", + UDIV_LT: "UDIV.LT", + UDIV_GT: "UDIV.GT", + UDIV_LE: "UDIV.LE", + UDIV: "UDIV", + UDIV_ZZ: "UDIV.ZZ", UHADD16_EQ: "UHADD16.EQ", UHADD16_NE: "UHADD16.NE", UHADD16_CS: "UHADD16.CS", @@ -9194,6 +9290,10 @@ var instFormats = [...]instFormat{ {0x0fef0ff0, 0x01a00000, 2, MOV_EQ, 0x14011c04, instArgs{arg_R_12, arg_R_0}}, // MOV{S} , cond:4|0|0|0|1|1|0|1|S|0|0|0|0|Rd:4|0|0|0|0|0|0|0|0|Rm:4 {0x0fff0fff, 0x010f0000, 4, MRS_EQ, 0x1c04, instArgs{arg_R_12, arg_APSR}}, // MRS ,APSR cond:4|0|0|0|1|0|0|0|0|(1)|(1)|(1)|(1)|Rd:4|(0)|(0)|(0)|(0)|0|0|0|0|(0)|(0)|(0)|(0) {0x0ff000f0, 0x010f0000, 3, MRS_EQ, 0x1c04, instArgs{arg_R_12, arg_APSR}}, // MRS ,APSR cond:4|0|0|0|1|0|0|0|0|(1)|(1)|(1)|(1)|Rd:4|(0)|(0)|(0)|(0)|0|0|0|0|(0)|(0)|(0)|(0) + {0x0ffffff0, 0x012cf000, 4, MSR_EQ, 0x1c04, instArgs{arg_APSR, arg_R_0}}, // MSR APSR, cond:4|0|0|0|1|0|0|1|0|1|1|0|0|(1)|(1)|(1)|(1)|(0)|(0)|(0)|(0)|0|0|0|0|Rn:4 + {0x0fff00f0, 0x012cf000, 3, MSR_EQ, 0x1c04, instArgs{arg_APSR, arg_R_0}}, // MSR APSR, cond:4|0|0|0|1|0|0|1|0|1|1|0|0|(1)|(1)|(1)|(1)|(0)|(0)|(0)|(0)|0|0|0|0|Rn:4 + {0x0ffff000, 0x032cf000, 4, MSR_EQ, 0x1c04, instArgs{arg_APSR, arg_const}}, // MSR APSR,# cond:4|0|0|1|1|0|0|1|0|1|1|0|0|(1)|(1)|(1)|(1)|imm12:12 + {0x0fff0000, 0x032cf000, 3, MSR_EQ, 0x1c04, instArgs{arg_APSR, arg_const}}, // MSR APSR,# 
cond:4|0|0|1|1|0|0|1|0|1|1|0|0|(1)|(1)|(1)|(1)|imm12:12 {0x0fe0f0f0, 0x00000090, 4, MUL_EQ, 0x14011c04, instArgs{arg_R_16, arg_R_0, arg_R_8}}, // MUL{S} ,, cond:4|0|0|0|0|0|0|0|S|Rd:4|(0)|(0)|(0)|(0)|Rm:4|1|0|0|1|Rn:4 {0x0fe000f0, 0x00000090, 3, MUL_EQ, 0x14011c04, instArgs{arg_R_16, arg_R_0, arg_R_8}}, // MUL{S} ,, cond:4|0|0|0|0|0|0|0|S|Rd:4|(0)|(0)|(0)|(0)|Rm:4|1|0|0|1|Rn:4 {0x0fef0000, 0x03e00000, 2, MVN_EQ, 0x14011c04, instArgs{arg_R_12, arg_const}}, // MVN{S} ,# cond:4|0|0|1|1|1|1|1|S|(0)|(0)|(0)|(0)|Rd:4|imm12:12 @@ -9267,6 +9367,8 @@ var instFormats = [...]instFormat{ {0x0fe00090, 0x00c00010, 4, SBC_EQ, 0x14011c04, instArgs{arg_R_12, arg_R_16, arg_R_shift_R}}, // SBC{S} ,,, cond:4|0|0|0|0|1|1|0|S|Rn:4|Rd:4|Rs:4|0|type:2|1|Rm:4 {0x0fe00010, 0x00c00000, 2, SBC_EQ, 0x14011c04, instArgs{arg_R_12, arg_R_16, arg_R_shift_imm}}, // SBC{S} ,,{,} cond:4|0|0|0|0|1|1|0|S|Rn:4|Rd:4|imm5:5|type:2|0|Rm:4 {0x0fe00070, 0x07a00050, 4, SBFX_EQ, 0x1c04, instArgs{arg_R_12, arg_R_0, arg_imm5, arg_widthm1}}, // SBFX ,,#,# cond:4|0|1|1|1|1|0|1|widthm1:5|Rd:4|lsb:5|1|0|1|Rn:4 + {0x0ff0f0f0, 0x0710f010, 4, SDIV_EQ, 0x1c04, instArgs{arg_R_16, arg_R_0, arg_R_8}}, // SDIV ,, cond:4|0|1|1|1|0|0|0|1|Rd:4|(1)|(1)|(1)|(1)|Rm:4|0|0|0|1|Rn:4 + {0x0ff000f0, 0x0710f010, 3, SDIV_EQ, 0x1c04, instArgs{arg_R_16, arg_R_0, arg_R_8}}, // SDIV ,, cond:4|0|1|1|1|0|0|0|1|Rd:4|(1)|(1)|(1)|(1)|Rm:4|0|0|0|1|Rn:4 {0x0ff00ff0, 0x06800fb0, 4, SEL_EQ, 0x1c04, instArgs{arg_R_12, arg_R_16, arg_R_0}}, // SEL ,, cond:4|0|1|1|0|1|0|0|0|Rn:4|Rd:4|(1)|(1)|(1)|(1)|1|0|1|1|Rm:4 {0x0ff000f0, 0x06800fb0, 3, SEL_EQ, 0x1c04, instArgs{arg_R_12, arg_R_16, arg_R_0}}, // SEL ,, cond:4|0|1|1|0|1|0|0|0|Rn:4|Rd:4|(1)|(1)|(1)|(1)|1|0|1|1|Rm:4 {0xfffffdff, 0xf1010000, 4, SETEND, 0x0, instArgs{arg_endian}}, // SETEND 1|1|1|1|0|0|0|1|0|0|0|0|0|0|0|1|0|0|0|0|0|0|E|(0)|(0)|(0)|(0)|(0)|(0)|(0)|(0)|(0) @@ -9365,6 +9467,8 @@ var instFormats = [...]instFormat{ {0x0ff00ff0, 0x06500f30, 4, UASX_EQ, 0x1c04, instArgs{arg_R_12, arg_R_16, 
arg_R_0}}, // UASX ,, cond:4|0|1|1|0|0|1|0|1|Rn:4|Rd:4|(1)|(1)|(1)|(1)|0|0|1|1|Rm:4 {0x0ff000f0, 0x06500f30, 3, UASX_EQ, 0x1c04, instArgs{arg_R_12, arg_R_16, arg_R_0}}, // UASX ,, cond:4|0|1|1|0|0|1|0|1|Rn:4|Rd:4|(1)|(1)|(1)|(1)|0|0|1|1|Rm:4 {0x0fe00070, 0x07e00050, 4, UBFX_EQ, 0x1c04, instArgs{arg_R_12, arg_R_0, arg_imm5, arg_widthm1}}, // UBFX ,,#,# cond:4|0|1|1|1|1|1|1|widthm1:5|Rd:4|lsb:5|1|0|1|Rn:4 + {0x0ff0f0f0, 0x0730f010, 4, UDIV_EQ, 0x1c04, instArgs{arg_R_16, arg_R_0, arg_R_8}}, // UDIV ,, cond:4|0|1|1|1|0|0|1|1|Rd:4|(1)|(1)|(1)|(1)|Rm:4|0|0|0|1|Rn:4 + {0x0ff000f0, 0x0730f010, 3, UDIV_EQ, 0x1c04, instArgs{arg_R_16, arg_R_0, arg_R_8}}, // UDIV ,, cond:4|0|1|1|1|0|0|1|1|Rd:4|(1)|(1)|(1)|(1)|Rm:4|0|0|0|1|Rn:4 {0x0ff00ff0, 0x06700f10, 4, UHADD16_EQ, 0x1c04, instArgs{arg_R_12, arg_R_16, arg_R_0}}, // UHADD16 ,, cond:4|0|1|1|0|0|1|1|1|Rn:4|Rd:4|(1)|(1)|(1)|(1)|0|0|0|1|Rm:4 {0x0ff000f0, 0x06700f10, 3, UHADD16_EQ, 0x1c04, instArgs{arg_R_12, arg_R_16, arg_R_0}}, // UHADD16 ,, cond:4|0|1|1|0|0|1|1|1|Rn:4|Rd:4|(1)|(1)|(1)|(1)|0|0|0|1|Rm:4 {0x0ff00ff0, 0x06700f90, 4, UHADD8_EQ, 0x1c04, instArgs{arg_R_12, arg_R_16, arg_R_0}}, // UHADD8 ,, cond:4|0|1|1|0|0|1|1|1|Rn:4|Rd:4|(1)|(1)|(1)|(1)|1|0|0|1|Rm:4 diff --git a/src/cmd/vendor/golang.org/x/arch/arm/armasm/testdata/decode.txt b/src/cmd/vendor/golang.org/x/arch/arm/armasm/testdata/decode.txt index cc1ea0abe54..f38c61f7be0 100644 --- a/src/cmd/vendor/golang.org/x/arch/arm/armasm/testdata/decode.txt +++ b/src/cmd/vendor/golang.org/x/arch/arm/armasm/testdata/decode.txt @@ -304,3 +304,1253 @@ ff818c71| 1 gnu strdvc r8, [ip, pc] |6b5721d3 1 gnu error: unknown instruction |76452001 1 gnu error: unknown instruction |97acd647 1 gnu error: unknown instruction +11f71507| 1 plan9 SDIV.EQ R7, R1, R5 +15f715e7| 1 plan9 SDIV R7, R5, R5 +11f93517| 1 plan9 UDIV.NE R9, R1, R5 +12fb33e7| 1 plan9 UDIV R11, R2, R3 +ed003be9| 1 plan9 LDMDB [R0,R2-R3,R5-R7], R11! 
+923124e0| 1 plan9 MLA R1, R2, R3, R4 +923134e0| 1 plan9 MLA.S R1, R2, R3, R4 +923164e0| 1 plan9 MLS R1, R2, R3, R4 +ff1000e2| 1 plan9 AND $255, R0, R1 +ff1400e2| 1 plan9 AND $4278190080, R0, R1 +ff1010e2| 1 plan9 AND.S $255, R0, R1 +ff1410e2| 1 plan9 AND.S $4278190080, R0, R1 +ff0000e2| 1 plan9 AND $255, R0, R0 +ff0400e2| 1 plan9 AND $4278190080, R0, R0 +ff0010e2| 1 plan9 AND.S $255, R0, R0 +ff0410e2| 1 plan9 AND.S $4278190080, R0, R0 +002001e0| 1 plan9 AND R0, R1, R2 +002011e0| 1 plan9 AND.S R0, R1, R2 +001001e0| 1 plan9 AND R0, R1, R1 +001011e0| 1 plan9 AND.S R0, R1, R1 +202e01e0| 1 plan9 AND R0>>$28, R1, R2 +002e01e0| 1 plan9 AND R0<<$28, R1, R2 +402e01e0| 1 plan9 AND R0->$28, R1, R2 +602e01e0| 1 plan9 AND R0@>$28, R1, R2 +202e11e0| 1 plan9 AND.S R0>>$28, R1, R2 +002e11e0| 1 plan9 AND.S R0<<$28, R1, R2 +402e11e0| 1 plan9 AND.S R0->$28, R1, R2 +602e11e0| 1 plan9 AND.S R0@>$28, R1, R2 +001e01e0| 1 plan9 AND R0<<$28, R1, R1 +201e01e0| 1 plan9 AND R0>>$28, R1, R1 +401e01e0| 1 plan9 AND R0->$28, R1, R1 +601e01e0| 1 plan9 AND R0@>$28, R1, R1 +001e11e0| 1 plan9 AND.S R0<<$28, R1, R1 +201e11e0| 1 plan9 AND.S R0>>$28, R1, R1 +401e11e0| 1 plan9 AND.S R0->$28, R1, R1 +601e11e0| 1 plan9 AND.S R0@>$28, R1, R1 +103102e0| 1 plan9 AND R0<>R1, R2, R3 +503102e0| 1 plan9 AND R0->R1, R2, R3 +703102e0| 1 plan9 AND R0@>R1, R2, R3 +103112e0| 1 plan9 AND.S R0<>R1, R2, R3 +503112e0| 1 plan9 AND.S R0->R1, R2, R3 +703112e0| 1 plan9 AND.S R0@>R1, R2, R3 +102102e0| 1 plan9 AND R0<>R1, R2, R2 +502102e0| 1 plan9 AND R0->R1, R2, R2 +702102e0| 1 plan9 AND R0@>R1, R2, R2 +102112e0| 1 plan9 AND.S R0<>R1, R2, R2 +502112e0| 1 plan9 AND.S R0->R1, R2, R2 +702112e0| 1 plan9 AND.S R0@>R1, R2, R2 +ff1020e2| 1 plan9 EOR $255, R0, R1 +ff1420e2| 1 plan9 EOR $4278190080, R0, R1 +ff1030e2| 1 plan9 EOR.S $255, R0, R1 +ff1430e2| 1 plan9 EOR.S $4278190080, R0, R1 +ff0020e2| 1 plan9 EOR $255, R0, R0 +ff0420e2| 1 plan9 EOR $4278190080, R0, R0 +ff0030e2| 1 plan9 EOR.S $255, R0, R0 +ff0430e2| 1 plan9 EOR.S 
$4278190080, R0, R0 +002021e0| 1 plan9 EOR R0, R1, R2 +002031e0| 1 plan9 EOR.S R0, R1, R2 +001021e0| 1 plan9 EOR R0, R1, R1 +001031e0| 1 plan9 EOR.S R0, R1, R1 +202e21e0| 1 plan9 EOR R0>>$28, R1, R2 +002e21e0| 1 plan9 EOR R0<<$28, R1, R2 +402e21e0| 1 plan9 EOR R0->$28, R1, R2 +602e21e0| 1 plan9 EOR R0@>$28, R1, R2 +202e31e0| 1 plan9 EOR.S R0>>$28, R1, R2 +002e31e0| 1 plan9 EOR.S R0<<$28, R1, R2 +402e31e0| 1 plan9 EOR.S R0->$28, R1, R2 +602e31e0| 1 plan9 EOR.S R0@>$28, R1, R2 +001e21e0| 1 plan9 EOR R0<<$28, R1, R1 +201e21e0| 1 plan9 EOR R0>>$28, R1, R1 +401e21e0| 1 plan9 EOR R0->$28, R1, R1 +601e21e0| 1 plan9 EOR R0@>$28, R1, R1 +001e31e0| 1 plan9 EOR.S R0<<$28, R1, R1 +201e31e0| 1 plan9 EOR.S R0>>$28, R1, R1 +401e31e0| 1 plan9 EOR.S R0->$28, R1, R1 +601e31e0| 1 plan9 EOR.S R0@>$28, R1, R1 +103122e0| 1 plan9 EOR R0<>R1, R2, R3 +503122e0| 1 plan9 EOR R0->R1, R2, R3 +703122e0| 1 plan9 EOR R0@>R1, R2, R3 +103132e0| 1 plan9 EOR.S R0<>R1, R2, R3 +503132e0| 1 plan9 EOR.S R0->R1, R2, R3 +703132e0| 1 plan9 EOR.S R0@>R1, R2, R3 +102122e0| 1 plan9 EOR R0<>R1, R2, R2 +502122e0| 1 plan9 EOR R0->R1, R2, R2 +702122e0| 1 plan9 EOR R0@>R1, R2, R2 +102132e0| 1 plan9 EOR.S R0<>R1, R2, R2 +502132e0| 1 plan9 EOR.S R0->R1, R2, R2 +702132e0| 1 plan9 EOR.S R0@>R1, R2, R2 +ff1080e3| 1 plan9 ORR $255, R0, R1 +ff1480e3| 1 plan9 ORR $4278190080, R0, R1 +ff1090e3| 1 plan9 ORR.S $255, R0, R1 +ff1490e3| 1 plan9 ORR.S $4278190080, R0, R1 +ff0080e3| 1 plan9 ORR $255, R0, R0 +ff0480e3| 1 plan9 ORR $4278190080, R0, R0 +ff0090e3| 1 plan9 ORR.S $255, R0, R0 +ff0490e3| 1 plan9 ORR.S $4278190080, R0, R0 +002081e1| 1 plan9 ORR R0, R1, R2 +002091e1| 1 plan9 ORR.S R0, R1, R2 +001081e1| 1 plan9 ORR R0, R1, R1 +001091e1| 1 plan9 ORR.S R0, R1, R1 +202e81e1| 1 plan9 ORR R0>>$28, R1, R2 +002e81e1| 1 plan9 ORR R0<<$28, R1, R2 +402e81e1| 1 plan9 ORR R0->$28, R1, R2 +602e81e1| 1 plan9 ORR R0@>$28, R1, R2 +202e91e1| 1 plan9 ORR.S R0>>$28, R1, R2 +002e91e1| 1 plan9 ORR.S R0<<$28, R1, R2 +402e91e1| 1 plan9 ORR.S 
R0->$28, R1, R2 +602e91e1| 1 plan9 ORR.S R0@>$28, R1, R2 +001e81e1| 1 plan9 ORR R0<<$28, R1, R1 +201e81e1| 1 plan9 ORR R0>>$28, R1, R1 +401e81e1| 1 plan9 ORR R0->$28, R1, R1 +601e81e1| 1 plan9 ORR R0@>$28, R1, R1 +001e91e1| 1 plan9 ORR.S R0<<$28, R1, R1 +201e91e1| 1 plan9 ORR.S R0>>$28, R1, R1 +401e91e1| 1 plan9 ORR.S R0->$28, R1, R1 +601e91e1| 1 plan9 ORR.S R0@>$28, R1, R1 +103182e1| 1 plan9 ORR R0<>R1, R2, R3 +503182e1| 1 plan9 ORR R0->R1, R2, R3 +703182e1| 1 plan9 ORR R0@>R1, R2, R3 +103192e1| 1 plan9 ORR.S R0<>R1, R2, R3 +503192e1| 1 plan9 ORR.S R0->R1, R2, R3 +703192e1| 1 plan9 ORR.S R0@>R1, R2, R3 +102182e1| 1 plan9 ORR R0<>R1, R2, R2 +502182e1| 1 plan9 ORR R0->R1, R2, R2 +702182e1| 1 plan9 ORR R0@>R1, R2, R2 +102192e1| 1 plan9 ORR.S R0<>R1, R2, R2 +502192e1| 1 plan9 ORR.S R0->R1, R2, R2 +702192e1| 1 plan9 ORR.S R0@>R1, R2, R2 +ff1040e2| 1 plan9 SUB $255, R0, R1 +ff1440e2| 1 plan9 SUB $4278190080, R0, R1 +ff1050e2| 1 plan9 SUB.S $255, R0, R1 +ff1450e2| 1 plan9 SUB.S $4278190080, R0, R1 +ff0040e2| 1 plan9 SUB $255, R0, R0 +ff0440e2| 1 plan9 SUB $4278190080, R0, R0 +ff0050e2| 1 plan9 SUB.S $255, R0, R0 +ff0450e2| 1 plan9 SUB.S $4278190080, R0, R0 +002041e0| 1 plan9 SUB R0, R1, R2 +002051e0| 1 plan9 SUB.S R0, R1, R2 +001041e0| 1 plan9 SUB R0, R1, R1 +001051e0| 1 plan9 SUB.S R0, R1, R1 +202e41e0| 1 plan9 SUB R0>>$28, R1, R2 +002e41e0| 1 plan9 SUB R0<<$28, R1, R2 +402e41e0| 1 plan9 SUB R0->$28, R1, R2 +602e41e0| 1 plan9 SUB R0@>$28, R1, R2 +202e51e0| 1 plan9 SUB.S R0>>$28, R1, R2 +002e51e0| 1 plan9 SUB.S R0<<$28, R1, R2 +402e51e0| 1 plan9 SUB.S R0->$28, R1, R2 +602e51e0| 1 plan9 SUB.S R0@>$28, R1, R2 +001e41e0| 1 plan9 SUB R0<<$28, R1, R1 +201e41e0| 1 plan9 SUB R0>>$28, R1, R1 +401e41e0| 1 plan9 SUB R0->$28, R1, R1 +601e41e0| 1 plan9 SUB R0@>$28, R1, R1 +001e51e0| 1 plan9 SUB.S R0<<$28, R1, R1 +201e51e0| 1 plan9 SUB.S R0>>$28, R1, R1 +401e51e0| 1 plan9 SUB.S R0->$28, R1, R1 +601e51e0| 1 plan9 SUB.S R0@>$28, R1, R1 +103142e0| 1 plan9 SUB R0<>R1, R2, R3 +503142e0| 1 
plan9 SUB R0->R1, R2, R3 +703142e0| 1 plan9 SUB R0@>R1, R2, R3 +103152e0| 1 plan9 SUB.S R0<>R1, R2, R3 +503152e0| 1 plan9 SUB.S R0->R1, R2, R3 +703152e0| 1 plan9 SUB.S R0@>R1, R2, R3 +102142e0| 1 plan9 SUB R0<>R1, R2, R2 +502142e0| 1 plan9 SUB R0->R1, R2, R2 +702142e0| 1 plan9 SUB R0@>R1, R2, R2 +102152e0| 1 plan9 SUB.S R0<>R1, R2, R2 +502152e0| 1 plan9 SUB.S R0->R1, R2, R2 +702152e0| 1 plan9 SUB.S R0@>R1, R2, R2 +ff10c0e2| 1 plan9 SBC $255, R0, R1 +ff14c0e2| 1 plan9 SBC $4278190080, R0, R1 +ff10d0e2| 1 plan9 SBC.S $255, R0, R1 +ff14d0e2| 1 plan9 SBC.S $4278190080, R0, R1 +ff00c0e2| 1 plan9 SBC $255, R0, R0 +ff04c0e2| 1 plan9 SBC $4278190080, R0, R0 +ff00d0e2| 1 plan9 SBC.S $255, R0, R0 +ff04d0e2| 1 plan9 SBC.S $4278190080, R0, R0 +0020c1e0| 1 plan9 SBC R0, R1, R2 +0020d1e0| 1 plan9 SBC.S R0, R1, R2 +0010c1e0| 1 plan9 SBC R0, R1, R1 +0010d1e0| 1 plan9 SBC.S R0, R1, R1 +202ec1e0| 1 plan9 SBC R0>>$28, R1, R2 +002ec1e0| 1 plan9 SBC R0<<$28, R1, R2 +402ec1e0| 1 plan9 SBC R0->$28, R1, R2 +602ec1e0| 1 plan9 SBC R0@>$28, R1, R2 +202ed1e0| 1 plan9 SBC.S R0>>$28, R1, R2 +002ed1e0| 1 plan9 SBC.S R0<<$28, R1, R2 +402ed1e0| 1 plan9 SBC.S R0->$28, R1, R2 +602ed1e0| 1 plan9 SBC.S R0@>$28, R1, R2 +001ec1e0| 1 plan9 SBC R0<<$28, R1, R1 +201ec1e0| 1 plan9 SBC R0>>$28, R1, R1 +401ec1e0| 1 plan9 SBC R0->$28, R1, R1 +601ec1e0| 1 plan9 SBC R0@>$28, R1, R1 +001ed1e0| 1 plan9 SBC.S R0<<$28, R1, R1 +201ed1e0| 1 plan9 SBC.S R0>>$28, R1, R1 +401ed1e0| 1 plan9 SBC.S R0->$28, R1, R1 +601ed1e0| 1 plan9 SBC.S R0@>$28, R1, R1 +1031c2e0| 1 plan9 SBC R0<>R1, R2, R3 +5031c2e0| 1 plan9 SBC R0->R1, R2, R3 +7031c2e0| 1 plan9 SBC R0@>R1, R2, R3 +1031d2e0| 1 plan9 SBC.S R0<>R1, R2, R3 +5031d2e0| 1 plan9 SBC.S R0->R1, R2, R3 +7031d2e0| 1 plan9 SBC.S R0@>R1, R2, R3 +1021c2e0| 1 plan9 SBC R0<>R1, R2, R2 +5021c2e0| 1 plan9 SBC R0->R1, R2, R2 +7021c2e0| 1 plan9 SBC R0@>R1, R2, R2 +1021d2e0| 1 plan9 SBC.S R0<>R1, R2, R2 +5021d2e0| 1 plan9 SBC.S R0->R1, R2, R2 +7021d2e0| 1 plan9 SBC.S R0@>R1, R2, R2 +ff1060e2| 
1 plan9 RSB $255, R0, R1 +ff1460e2| 1 plan9 RSB $4278190080, R0, R1 +ff1070e2| 1 plan9 RSB.S $255, R0, R1 +ff1470e2| 1 plan9 RSB.S $4278190080, R0, R1 +ff0060e2| 1 plan9 RSB $255, R0, R0 +ff0460e2| 1 plan9 RSB $4278190080, R0, R0 +ff0070e2| 1 plan9 RSB.S $255, R0, R0 +ff0470e2| 1 plan9 RSB.S $4278190080, R0, R0 +002061e0| 1 plan9 RSB R0, R1, R2 +002071e0| 1 plan9 RSB.S R0, R1, R2 +001061e0| 1 plan9 RSB R0, R1, R1 +001071e0| 1 plan9 RSB.S R0, R1, R1 +202e61e0| 1 plan9 RSB R0>>$28, R1, R2 +002e61e0| 1 plan9 RSB R0<<$28, R1, R2 +402e61e0| 1 plan9 RSB R0->$28, R1, R2 +602e61e0| 1 plan9 RSB R0@>$28, R1, R2 +202e71e0| 1 plan9 RSB.S R0>>$28, R1, R2 +002e71e0| 1 plan9 RSB.S R0<<$28, R1, R2 +402e71e0| 1 plan9 RSB.S R0->$28, R1, R2 +602e71e0| 1 plan9 RSB.S R0@>$28, R1, R2 +001e61e0| 1 plan9 RSB R0<<$28, R1, R1 +201e61e0| 1 plan9 RSB R0>>$28, R1, R1 +401e61e0| 1 plan9 RSB R0->$28, R1, R1 +601e61e0| 1 plan9 RSB R0@>$28, R1, R1 +001e71e0| 1 plan9 RSB.S R0<<$28, R1, R1 +201e71e0| 1 plan9 RSB.S R0>>$28, R1, R1 +401e71e0| 1 plan9 RSB.S R0->$28, R1, R1 +601e71e0| 1 plan9 RSB.S R0@>$28, R1, R1 +103162e0| 1 plan9 RSB R0<>R1, R2, R3 +503162e0| 1 plan9 RSB R0->R1, R2, R3 +703162e0| 1 plan9 RSB R0@>R1, R2, R3 +103172e0| 1 plan9 RSB.S R0<>R1, R2, R3 +503172e0| 1 plan9 RSB.S R0->R1, R2, R3 +703172e0| 1 plan9 RSB.S R0@>R1, R2, R3 +102162e0| 1 plan9 RSB R0<>R1, R2, R2 +502162e0| 1 plan9 RSB R0->R1, R2, R2 +702162e0| 1 plan9 RSB R0@>R1, R2, R2 +102172e0| 1 plan9 RSB.S R0<>R1, R2, R2 +502172e0| 1 plan9 RSB.S R0->R1, R2, R2 +702172e0| 1 plan9 RSB.S R0@>R1, R2, R2 +ff10e0e2| 1 plan9 RSC $255, R0, R1 +ff14e0e2| 1 plan9 RSC $4278190080, R0, R1 +ff10f0e2| 1 plan9 RSC.S $255, R0, R1 +ff14f0e2| 1 plan9 RSC.S $4278190080, R0, R1 +ff00e0e2| 1 plan9 RSC $255, R0, R0 +ff04e0e2| 1 plan9 RSC $4278190080, R0, R0 +ff00f0e2| 1 plan9 RSC.S $255, R0, R0 +ff04f0e2| 1 plan9 RSC.S $4278190080, R0, R0 +0020e1e0| 1 plan9 RSC R0, R1, R2 +0020f1e0| 1 plan9 RSC.S R0, R1, R2 +0010e1e0| 1 plan9 RSC R0, R1, R1 +0010f1e0| 
1 plan9 RSC.S R0, R1, R1 +202ee1e0| 1 plan9 RSC R0>>$28, R1, R2 +002ee1e0| 1 plan9 RSC R0<<$28, R1, R2 +402ee1e0| 1 plan9 RSC R0->$28, R1, R2 +602ee1e0| 1 plan9 RSC R0@>$28, R1, R2 +202ef1e0| 1 plan9 RSC.S R0>>$28, R1, R2 +002ef1e0| 1 plan9 RSC.S R0<<$28, R1, R2 +402ef1e0| 1 plan9 RSC.S R0->$28, R1, R2 +602ef1e0| 1 plan9 RSC.S R0@>$28, R1, R2 +001ee1e0| 1 plan9 RSC R0<<$28, R1, R1 +201ee1e0| 1 plan9 RSC R0>>$28, R1, R1 +401ee1e0| 1 plan9 RSC R0->$28, R1, R1 +601ee1e0| 1 plan9 RSC R0@>$28, R1, R1 +001ef1e0| 1 plan9 RSC.S R0<<$28, R1, R1 +201ef1e0| 1 plan9 RSC.S R0>>$28, R1, R1 +401ef1e0| 1 plan9 RSC.S R0->$28, R1, R1 +601ef1e0| 1 plan9 RSC.S R0@>$28, R1, R1 +1031e2e0| 1 plan9 RSC R0<>R1, R2, R3 +5031e2e0| 1 plan9 RSC R0->R1, R2, R3 +7031e2e0| 1 plan9 RSC R0@>R1, R2, R3 +1031f2e0| 1 plan9 RSC.S R0<>R1, R2, R3 +5031f2e0| 1 plan9 RSC.S R0->R1, R2, R3 +7031f2e0| 1 plan9 RSC.S R0@>R1, R2, R3 +1021e2e0| 1 plan9 RSC R0<>R1, R2, R2 +5021e2e0| 1 plan9 RSC R0->R1, R2, R2 +7021e2e0| 1 plan9 RSC R0@>R1, R2, R2 +1021f2e0| 1 plan9 RSC.S R0<>R1, R2, R2 +5021f2e0| 1 plan9 RSC.S R0->R1, R2, R2 +7021f2e0| 1 plan9 RSC.S R0@>R1, R2, R2 +ff1080e2| 1 plan9 ADD $255, R0, R1 +ff1480e2| 1 plan9 ADD $4278190080, R0, R1 +ff1090e2| 1 plan9 ADD.S $255, R0, R1 +ff1490e2| 1 plan9 ADD.S $4278190080, R0, R1 +ff0080e2| 1 plan9 ADD $255, R0, R0 +ff0480e2| 1 plan9 ADD $4278190080, R0, R0 +ff0090e2| 1 plan9 ADD.S $255, R0, R0 +ff0490e2| 1 plan9 ADD.S $4278190080, R0, R0 +002081e0| 1 plan9 ADD R0, R1, R2 +002091e0| 1 plan9 ADD.S R0, R1, R2 +001081e0| 1 plan9 ADD R0, R1, R1 +001091e0| 1 plan9 ADD.S R0, R1, R1 +202e81e0| 1 plan9 ADD R0>>$28, R1, R2 +002e81e0| 1 plan9 ADD R0<<$28, R1, R2 +402e81e0| 1 plan9 ADD R0->$28, R1, R2 +602e81e0| 1 plan9 ADD R0@>$28, R1, R2 +202e91e0| 1 plan9 ADD.S R0>>$28, R1, R2 +002e91e0| 1 plan9 ADD.S R0<<$28, R1, R2 +402e91e0| 1 plan9 ADD.S R0->$28, R1, R2 +602e91e0| 1 plan9 ADD.S R0@>$28, R1, R2 +001e81e0| 1 plan9 ADD R0<<$28, R1, R1 +201e81e0| 1 plan9 ADD R0>>$28, R1, R1 
+401e81e0| 1 plan9 ADD R0->$28, R1, R1 +601e81e0| 1 plan9 ADD R0@>$28, R1, R1 +001e91e0| 1 plan9 ADD.S R0<<$28, R1, R1 +201e91e0| 1 plan9 ADD.S R0>>$28, R1, R1 +401e91e0| 1 plan9 ADD.S R0->$28, R1, R1 +601e91e0| 1 plan9 ADD.S R0@>$28, R1, R1 +103182e0| 1 plan9 ADD R0<>R1, R2, R3 +503182e0| 1 plan9 ADD R0->R1, R2, R3 +703182e0| 1 plan9 ADD R0@>R1, R2, R3 +103192e0| 1 plan9 ADD.S R0<>R1, R2, R3 +503192e0| 1 plan9 ADD.S R0->R1, R2, R3 +703192e0| 1 plan9 ADD.S R0@>R1, R2, R3 +102182e0| 1 plan9 ADD R0<>R1, R2, R2 +502182e0| 1 plan9 ADD R0->R1, R2, R2 +702182e0| 1 plan9 ADD R0@>R1, R2, R2 +102192e0| 1 plan9 ADD.S R0<>R1, R2, R2 +502192e0| 1 plan9 ADD.S R0->R1, R2, R2 +702192e0| 1 plan9 ADD.S R0@>R1, R2, R2 +ff10a0e2| 1 plan9 ADC $255, R0, R1 +ff14a0e2| 1 plan9 ADC $4278190080, R0, R1 +ff10b0e2| 1 plan9 ADC.S $255, R0, R1 +ff14b0e2| 1 plan9 ADC.S $4278190080, R0, R1 +ff00a0e2| 1 plan9 ADC $255, R0, R0 +ff04a0e2| 1 plan9 ADC $4278190080, R0, R0 +ff00b0e2| 1 plan9 ADC.S $255, R0, R0 +ff04b0e2| 1 plan9 ADC.S $4278190080, R0, R0 +0020a1e0| 1 plan9 ADC R0, R1, R2 +0020b1e0| 1 plan9 ADC.S R0, R1, R2 +0010a1e0| 1 plan9 ADC R0, R1, R1 +0010b1e0| 1 plan9 ADC.S R0, R1, R1 +202ea1e0| 1 plan9 ADC R0>>$28, R1, R2 +002ea1e0| 1 plan9 ADC R0<<$28, R1, R2 +402ea1e0| 1 plan9 ADC R0->$28, R1, R2 +602ea1e0| 1 plan9 ADC R0@>$28, R1, R2 +202eb1e0| 1 plan9 ADC.S R0>>$28, R1, R2 +002eb1e0| 1 plan9 ADC.S R0<<$28, R1, R2 +402eb1e0| 1 plan9 ADC.S R0->$28, R1, R2 +602eb1e0| 1 plan9 ADC.S R0@>$28, R1, R2 +001ea1e0| 1 plan9 ADC R0<<$28, R1, R1 +201ea1e0| 1 plan9 ADC R0>>$28, R1, R1 +401ea1e0| 1 plan9 ADC R0->$28, R1, R1 +601ea1e0| 1 plan9 ADC R0@>$28, R1, R1 +001eb1e0| 1 plan9 ADC.S R0<<$28, R1, R1 +201eb1e0| 1 plan9 ADC.S R0>>$28, R1, R1 +401eb1e0| 1 plan9 ADC.S R0->$28, R1, R1 +601eb1e0| 1 plan9 ADC.S R0@>$28, R1, R1 +1031a2e0| 1 plan9 ADC R0<>R1, R2, R3 +5031a2e0| 1 plan9 ADC R0->R1, R2, R3 +7031a2e0| 1 plan9 ADC R0@>R1, R2, R3 +1031b2e0| 1 plan9 ADC.S R0<>R1, R2, R3 +5031b2e0| 1 plan9 ADC.S 
R0->R1, R2, R3 +7031b2e0| 1 plan9 ADC.S R0@>R1, R2, R3 +1021a2e0| 1 plan9 ADC R0<>R1, R2, R2 +5021a2e0| 1 plan9 ADC R0->R1, R2, R2 +7021a2e0| 1 plan9 ADC R0@>R1, R2, R2 +1021b2e0| 1 plan9 ADC.S R0<>R1, R2, R2 +5021b2e0| 1 plan9 ADC.S R0->R1, R2, R2 +7021b2e0| 1 plan9 ADC.S R0@>R1, R2, R2 +ff0037e3| 1 plan9 TEQ $255, R7 +ff0439e3| 1 plan9 TEQ $4278190080, R9 +090f37e1| 1 plan9 TEQ R9<<$30, R7 +290f37e1| 1 plan9 TEQ R9>>$30, R7 +490f37e1| 1 plan9 TEQ R9->$30, R7 +690f37e1| 1 plan9 TEQ R9@>$30, R7 +190837e1| 1 plan9 TEQ R9<>R8, R7 +590837e1| 1 plan9 TEQ R9->R8, R7 +790837e1| 1 plan9 TEQ R9@>R8, R7 +ff0017e3| 1 plan9 TST $255, R7 +ff0419e3| 1 plan9 TST $4278190080, R9 +090f17e1| 1 plan9 TST R9<<$30, R7 +290f17e1| 1 plan9 TST R9>>$30, R7 +490f17e1| 1 plan9 TST R9->$30, R7 +690f17e1| 1 plan9 TST R9@>$30, R7 +190817e1| 1 plan9 TST R9<>R8, R7 +590817e1| 1 plan9 TST R9->R8, R7 +790817e1| 1 plan9 TST R9@>R8, R7 +ff0057e3| 1 plan9 CMP $255, R7 +ff0459e3| 1 plan9 CMP $4278190080, R9 +090f57e1| 1 plan9 CMP R9<<$30, R7 +290f57e1| 1 plan9 CMP R9>>$30, R7 +490f57e1| 1 plan9 CMP R9->$30, R7 +690f57e1| 1 plan9 CMP R9@>$30, R7 +190857e1| 1 plan9 CMP R9<>R8, R7 +590857e1| 1 plan9 CMP R9->R8, R7 +790857e1| 1 plan9 CMP R9@>R8, R7 +ff0077e3| 1 plan9 CMN $255, R7 +ff0479e3| 1 plan9 CMN $4278190080, R9 +090f77e1| 1 plan9 CMN R9<<$30, R7 +290f77e1| 1 plan9 CMN R9>>$30, R7 +490f77e1| 1 plan9 CMN R9->$30, R7 +690f77e1| 1 plan9 CMN R9@>$30, R7 +190877e1| 1 plan9 CMN R9<>R8, R7 +590877e1| 1 plan9 CMN R9->R8, R7 +790877e1| 1 plan9 CMN R9@>R8, R7 +0c00000a| 1 plan9 B.EQ 0x38 +0b00001a| 1 plan9 B.NE 0x34 +0a00002a| 1 plan9 B.CS 0x30 +0900003a| 1 plan9 B.CC 0x2c +0800004a| 1 plan9 B.MI 0x28 +0700005a| 1 plan9 B.PL 0x24 +0600006a| 1 plan9 B.VS 0x20 +0500007a| 1 plan9 B.VC 0x1c +0400008a| 1 plan9 B.HI 0x18 +0300009a| 1 plan9 B.LS 0x14 +020000aa| 1 plan9 B.GE 0x10 +010000ba| 1 plan9 B.LT 0xc +000000ca| 1 plan9 B.GT 0x8 +ffffffda| 1 plan9 B.LE 0x4 +fdffffea| 1 plan9 B 0xfffffffc +fcffffea| 1 plan9 B 
0xfffffff8 +fbffffea| 1 plan9 B 0xfffffff4 +faffffea| 1 plan9 B 0xfffffff0 +f9ffffea| 1 plan9 B 0xffffffec +feffffea| 1 plan9 B 0x0 +0c00000b| 1 plan9 BL.EQ 0x38 +0b00001b| 1 plan9 BL.NE 0x34 +0a00002b| 1 plan9 BL.CS 0x30 +0900003b| 1 plan9 BL.CC 0x2c +0800004b| 1 plan9 BL.MI 0x28 +0700005b| 1 plan9 BL.PL 0x24 +0600006b| 1 plan9 BL.VS 0x20 +0500007b| 1 plan9 BL.VC 0x1c +0400008b| 1 plan9 BL.HI 0x18 +0300009b| 1 plan9 BL.LS 0x14 +020000ab| 1 plan9 BL.GE 0x10 +010000bb| 1 plan9 BL.LT 0xc +000000cb| 1 plan9 BL.GT 0x8 +ffffffdb| 1 plan9 BL.LE 0x4 +fdffffeb| 1 plan9 BL 0xfffffffc +fcffffeb| 1 plan9 BL 0xfffffff8 +fbffffeb| 1 plan9 BL 0xfffffff4 +faffffeb| 1 plan9 BL 0xfffffff0 +f9ffffeb| 1 plan9 BL 0xffffffec +feffffeb| 1 plan9 BL 0x0 +ff10c0e3| 1 plan9 BIC $255, R0, R1 +ff14c0e3| 1 plan9 BIC $4278190080, R0, R1 +ff10d0e3| 1 plan9 BIC.S $255, R0, R1 +ff14d0e3| 1 plan9 BIC.S $4278190080, R0, R1 +ff00c0e3| 1 plan9 BIC $255, R0, R0 +ff04c0e3| 1 plan9 BIC $4278190080, R0, R0 +ff00d0e3| 1 plan9 BIC.S $255, R0, R0 +ff04d0e3| 1 plan9 BIC.S $4278190080, R0, R0 +0020c1e1| 1 plan9 BIC R0, R1, R2 +0020d1e1| 1 plan9 BIC.S R0, R1, R2 +0010c1e1| 1 plan9 BIC R0, R1, R1 +0010d1e1| 1 plan9 BIC.S R0, R1, R1 +202ec1e1| 1 plan9 BIC R0>>$28, R1, R2 +002ec1e1| 1 plan9 BIC R0<<$28, R1, R2 +402ec1e1| 1 plan9 BIC R0->$28, R1, R2 +602ec1e1| 1 plan9 BIC R0@>$28, R1, R2 +202ed1e1| 1 plan9 BIC.S R0>>$28, R1, R2 +002ed1e1| 1 plan9 BIC.S R0<<$28, R1, R2 +402ed1e1| 1 plan9 BIC.S R0->$28, R1, R2 +602ed1e1| 1 plan9 BIC.S R0@>$28, R1, R2 +001ec1e1| 1 plan9 BIC R0<<$28, R1, R1 +201ec1e1| 1 plan9 BIC R0>>$28, R1, R1 +401ec1e1| 1 plan9 BIC R0->$28, R1, R1 +601ec1e1| 1 plan9 BIC R0@>$28, R1, R1 +001ed1e1| 1 plan9 BIC.S R0<<$28, R1, R1 +201ed1e1| 1 plan9 BIC.S R0>>$28, R1, R1 +401ed1e1| 1 plan9 BIC.S R0->$28, R1, R1 +601ed1e1| 1 plan9 BIC.S R0@>$28, R1, R1 +1031c2e1| 1 plan9 BIC R0<>R1, R2, R3 +5031c2e1| 1 plan9 BIC R0->R1, R2, R3 +7031c2e1| 1 plan9 BIC R0@>R1, R2, R3 +1031d2e1| 1 plan9 BIC.S R0<>R1, R2, R3 
+5031d2e1| 1 plan9 BIC.S R0->R1, R2, R3 +7031d2e1| 1 plan9 BIC.S R0@>R1, R2, R3 +1021c2e1| 1 plan9 BIC R0<>R1, R2, R2 +5021c2e1| 1 plan9 BIC R0->R1, R2, R2 +7021c2e1| 1 plan9 BIC R0@>R1, R2, R2 +1021d2e1| 1 plan9 BIC.S R0<>R1, R2, R2 +5021d2e1| 1 plan9 BIC.S R0->R1, R2, R2 +7021d2e1| 1 plan9 BIC.S R0@>R1, R2, R2 +2567a0e1| 1 plan9 LSR $14, R5, R6 +a567a0e1| 1 plan9 LSR $15, R5, R6 +256fa0e1| 1 plan9 LSR $30, R5, R6 +a56fa0e1| 1 plan9 LSR $31, R5, R6 +2567b0e1| 1 plan9 LSR.S $14, R5, R6 +a567b0e1| 1 plan9 LSR.S $15, R5, R6 +256fb0e1| 1 plan9 LSR.S $30, R5, R6 +a56fb0e1| 1 plan9 LSR.S $31, R5, R6 +2557a0e1| 1 plan9 LSR $14, R5, R5 +a557a0e1| 1 plan9 LSR $15, R5, R5 +255fa0e1| 1 plan9 LSR $30, R5, R5 +a55fa0e1| 1 plan9 LSR $31, R5, R5 +2557b0e1| 1 plan9 LSR.S $14, R5, R5 +a557b0e1| 1 plan9 LSR.S $15, R5, R5 +255fb0e1| 1 plan9 LSR.S $30, R5, R5 +a55fb0e1| 1 plan9 LSR.S $31, R5, R5 +3675a0e1| 1 plan9 LSR R5, R6, R7 +3675b0e1| 1 plan9 LSR.S R5, R6, R7 +3775a0e1| 1 plan9 LSR R5, R7, R7 +3775b0e1| 1 plan9 LSR.S R5, R7, R7 +4567a0e1| 1 plan9 ASR $14, R5, R6 +c567a0e1| 1 plan9 ASR $15, R5, R6 +456fa0e1| 1 plan9 ASR $30, R5, R6 +c56fa0e1| 1 plan9 ASR $31, R5, R6 +4567b0e1| 1 plan9 ASR.S $14, R5, R6 +c567b0e1| 1 plan9 ASR.S $15, R5, R6 +456fb0e1| 1 plan9 ASR.S $30, R5, R6 +c56fb0e1| 1 plan9 ASR.S $31, R5, R6 +4557a0e1| 1 plan9 ASR $14, R5, R5 +c557a0e1| 1 plan9 ASR $15, R5, R5 +455fa0e1| 1 plan9 ASR $30, R5, R5 +c55fa0e1| 1 plan9 ASR $31, R5, R5 +4557b0e1| 1 plan9 ASR.S $14, R5, R5 +c557b0e1| 1 plan9 ASR.S $15, R5, R5 +455fb0e1| 1 plan9 ASR.S $30, R5, R5 +c55fb0e1| 1 plan9 ASR.S $31, R5, R5 +5675a0e1| 1 plan9 ASR R5, R6, R7 +5675b0e1| 1 plan9 ASR.S R5, R6, R7 +5775a0e1| 1 plan9 ASR R5, R7, R7 +5775b0e1| 1 plan9 ASR.S R5, R7, R7 +0567a0e1| 1 plan9 LSL $14, R5, R6 +8567a0e1| 1 plan9 LSL $15, R5, R6 +056fa0e1| 1 plan9 LSL $30, R5, R6 +856fa0e1| 1 plan9 LSL $31, R5, R6 +0567b0e1| 1 plan9 LSL.S $14, R5, R6 +8567b0e1| 1 plan9 LSL.S $15, R5, R6 +056fb0e1| 1 plan9 LSL.S $30, R5, R6 
+856fb0e1| 1 plan9 LSL.S $31, R5, R6 +0557a0e1| 1 plan9 LSL $14, R5, R5 +8557a0e1| 1 plan9 LSL $15, R5, R5 +055fa0e1| 1 plan9 LSL $30, R5, R5 +855fa0e1| 1 plan9 LSL $31, R5, R5 +0557b0e1| 1 plan9 LSL.S $14, R5, R5 +8557b0e1| 1 plan9 LSL.S $15, R5, R5 +055fb0e1| 1 plan9 LSL.S $30, R5, R5 +855fb0e1| 1 plan9 LSL.S $31, R5, R5 +1675a0e1| 1 plan9 LSL R5, R6, R7 +1675b0e1| 1 plan9 LSL.S R5, R6, R7 +1775a0e1| 1 plan9 LSL R5, R7, R7 +1775b0e1| 1 plan9 LSL.S R5, R7, R7 +c23124e1| 1 plan9 SMLAWT R1, R2, R3, R4 +823124e1| 1 plan9 SMLAWB R1, R2, R3, R4 +923164e0| 1 plan9 MLS R1, R2, R3, R4 +923124e0| 1 plan9 MLA R1, R2, R3, R4 +923134e0| 1 plan9 MLA.S R1, R2, R3, R4 +123154e7| 1 plan9 SMMLA R1, R2, R3, R4 +d23154e7| 1 plan9 SMMLS R1, R2, R3, R4 +823104e1| 1 plan9 SMLABB R1, R2, R3, R4 +a23104e1| 1 plan9 SMLATB R1, R2, R3, R4 +c23104e1| 1 plan9 SMLABT R1, R2, R3, R4 +e23104e1| 1 plan9 SMLATT R1, R2, R3, R4 +123104e7| 1 plan9 SMLAD R1, R2, R3, R4 +323104e7| 1 plan9 SMLAD.X R1, R2, R3, R4 +523104e7| 1 plan9 SMLSD R1, R2, R3, R4 +723104e7| 1 plan9 SMLSD.X R1, R2, R3, R4 +9231e4e0| 1 plan9 SMLAL R1, R2, R4, R3 +9231f4e0| 1 plan9 SMLAL.S R1, R2, R4, R3 +123144e7| 1 plan9 SMLALD R1, R2, R4, R3 +323144e7| 1 plan9 SMLALD.X R1, R2, R4, R3 +523144e7| 1 plan9 SMLSLD R1, R2, R4, R3 +723144e7| 1 plan9 SMLSLD.X R1, R2, R4, R3 +9231a4e0| 1 plan9 UMLAL R1, R2, R4, R3 +923144e0| 1 plan9 UMAAL R1, R2, R4, R3 +9231b4e0| 1 plan9 UMLAL.S R1, R2, R4, R3 +930204e0| 1 plan9 MUL R2, R3, R4 +920404e0| 1 plan9 MUL R4, R2, R4 +930214e0| 1 plan9 MUL.S R2, R3, R4 +920414e0| 1 plan9 MUL.S R4, R2, R4 +960507e0| 1 plan9 MUL R5, R6, R7 +950707e0| 1 plan9 MUL R7, R5, R7 +960517e0| 1 plan9 MUL.S R5, R6, R7 +950717e0| 1 plan9 MUL.S R7, R5, R7 +923184e0| 1 plan9 UMULL R1, R2, R4, R3 +923194e0| 1 plan9 UMULL.S R1, R2, R4, R3 +9231c4e0| 1 plan9 SMULL R1, R2, R4, R3 +9231d4e0| 1 plan9 SMULL.S R1, R2, R4, R3 +12f153e7| 1 plan9 SMMUL R1, R2, R3 +820163e1| 1 plan9 SMULBB R1, R2, R3 +a20163e1| 1 plan9 SMULTB R1, R2, R3 
+c20163e1| 1 plan9 SMULBT R1, R2, R3 +e20163e1| 1 plan9 SMULTT R1, R2, R3 +a20123e1| 1 plan9 SMULWB R1, R2, R3 +e20123e1| 1 plan9 SMULWT R1, R2, R3 +12f103e7| 1 plan9 SMUAD R1, R2, R3 +32f103e7| 1 plan9 SMUAD.X R1, R2, R3 +52f103e7| 1 plan9 SMUSD R1, R2, R3 +72f103e7| 1 plan9 SMUSD.X R1, R2, R3 +312fbfe6| 1 plan9 REV R1, R2 +b12fbfe6| 1 plan9 REV16 R1, R2 +b12fffe6| 1 plan9 REVSH R1, R2 +312fffe6| 1 plan9 RBIT R1, R2 +112f6fe1| 1 plan9 CLZ R1, R2 +f0ffd6f5| 1 gnu pld [r6, #4080] +f0ff59f5| 1 gnu pld [r9, #-4080] +f0ff96f5| 1 gnu pldw [r6, #4080] +f0ff19f5| 1 gnu pldw [r9, #-4080] +f0ffdff5| 1 gnu pld [pc, #4080] +f0ff5ff5| 1 gnu pld [pc, #-4080] +00f0d2f7| 1 gnu pld [r2, r0] +00f052f7| 1 gnu pld [r2, -r0] +00f092f7| 1 gnu pldw [r2, r0] +00f012f7| 1 gnu pldw [r2, -r0] +80f0d2f7| 1 gnu pld [r2, r0, lsl #1] +80f052f7| 1 gnu pld [r2, -r0, lsl #1] +a0f0d2f7| 1 gnu pld [r2, r0, lsr #1] +a0f052f7| 1 gnu pld [r2, -r0, lsr #1] +c0f0d2f7| 1 gnu pld [r2, r0, asr #1] +c0f052f7| 1 gnu pld [r2, -r0, asr #1] +e0f0d2f7| 1 gnu pld [r2, r0, ror #1] +e0f052f7| 1 gnu pld [r2, -r0, ror #1] +80f092f7| 1 gnu pldw [r2, r0, lsl #1] +80f012f7| 1 gnu pldw [r2, -r0, lsl #1] +a0f092f7| 1 gnu pldw [r2, r0, lsr #1] +a0f012f7| 1 gnu pldw [r2, -r0, lsr #1] +c0f092f7| 1 gnu pldw [r2, r0, asr #1] +c0f012f7| 1 gnu pldw [r2, -r0, asr #1] +e0f092f7| 1 gnu pldw [r2, r0, ror #1] +e0f012f7| 1 gnu pldw [r2, -r0, ror #1] +f0ffd2f4| 1 gnu pli [r2, #4080] +f0ff52f4| 1 gnu pli [r2, #-4080] +82f0d3f6| 1 gnu pli [r3, r2, lsl #1] +82f053f6| 1 gnu pli [r3, -r2, lsl #1] +a2f0d3f6| 1 gnu pli [r3, r2, lsr #1] +a2f053f6| 1 gnu pli [r3, -r2, lsr #1] +c2f0d3f6| 1 gnu pli [r3, r2, asr #1] +c2f053f6| 1 gnu pli [r3, -r2, asr #1] +e2f0d3f6| 1 gnu pli [r3, r2, ror #1] +e2f053f6| 1 gnu pli [r3, -r2, ror #1] +939007e1| 1 gnu swp r9, r3, [r7] +948042e1| 1 gnu swpb r8, r4, [r2] +000000ef| 1 plan9 SVC $0 +ffff00ef| 1 plan9 SVC $65535 +ff10e0e3| 1 plan9 MVN $255, R1 +ff14e0e3| 1 plan9 MVN $4278190080, R1 +ff10f0e3| 1 plan9 MVN.S 
$255, R1 +ff14f0e3| 1 plan9 MVN.S $4278190080, R1 +097fe0e1| 1 plan9 MVN R9<<$30, R7 +297fe0e1| 1 plan9 MVN R9>>$30, R7 +497fe0e1| 1 plan9 MVN R9->$30, R7 +697fe0e1| 1 plan9 MVN R9@>$30, R7 +097ff0e1| 1 plan9 MVN.S R9<<$30, R7 +297ff0e1| 1 plan9 MVN.S R9>>$30, R7 +497ff0e1| 1 plan9 MVN.S R9->$30, R7 +697ff0e1| 1 plan9 MVN.S R9@>$30, R7 +1978e0e1| 1 plan9 MVN R9<>R8, R7 +5978e0e1| 1 plan9 MVN R9->R8, R7 +7978e0e1| 1 plan9 MVN R9@>R8, R7 +1978f0e1| 1 plan9 MVN.S R9<>R8, R7 +5978f0e1| 1 plan9 MVN.S R9->R8, R7 +7978f0e1| 1 plan9 MVN.S R9@>R8, R7 +550081e8| 1 plan9 STM [R0,R2,R4,R6], R1 +5f0f81e8| 1 plan9 STM [R0-R4,R6,R8-R11], R1 +5500a1e8| 1 plan9 STM [R0,R2,R4,R6], R1! +5f0fa1e8| 1 plan9 STM [R0-R4,R6,R8-R11], R1! +550091e8| 1 plan9 LDM [R0,R2,R4,R6], R1 +5f0f91e8| 1 plan9 LDM [R0-R4,R6,R8-R11], R1 +5500b1e8| 1 plan9 LDM [R0,R2,R4,R6], R1! +5f0fb1e8| 1 plan9 LDM [R0-R4,R6,R8-R11], R1! +550001e8| 1 plan9 STMDA [R0,R2,R4,R6], R1 +5f0f01e8| 1 plan9 STMDA [R0-R4,R6,R8-R11], R1 +550021e8| 1 plan9 STMDA [R0,R2,R4,R6], R1! +5f0f21e8| 1 plan9 STMDA [R0-R4,R6,R8-R11], R1! +550011e8| 1 plan9 LDMDA [R0,R2,R4,R6], R1 +5f0f11e8| 1 plan9 LDMDA [R0-R4,R6,R8-R11], R1 +550031e8| 1 plan9 LDMDA [R0,R2,R4,R6], R1! +5f0f31e8| 1 plan9 LDMDA [R0-R4,R6,R8-R11], R1! +550001e9| 1 plan9 STMDB [R0,R2,R4,R6], R1 +5f0f01e9| 1 plan9 STMDB [R0-R4,R6,R8-R11], R1 +550021e9| 1 plan9 STMDB [R0,R2,R4,R6], R1! +5f0f21e9| 1 plan9 STMDB [R0-R4,R6,R8-R11], R1! +550011e9| 1 plan9 LDMDB [R0,R2,R4,R6], R1 +5f0f11e9| 1 plan9 LDMDB [R0-R4,R6,R8-R11], R1 +550031e9| 1 plan9 LDMDB [R0,R2,R4,R6], R1! +5f0f31e9| 1 plan9 LDMDB [R0-R4,R6,R8-R11], R1! +55008ae9| 1 plan9 STMIB [R0,R2,R4,R6], R10 +5f0f8ae9| 1 plan9 STMIB [R0-R4,R6,R8-R11], R10 +5500aae9| 1 plan9 STMIB [R0,R2,R4,R6], R10! +5f0faae9| 1 plan9 STMIB [R0-R4,R6,R8-R11], R10! +55009ae9| 1 plan9 LDMIB [R0,R2,R4,R6], R10 +5f0f9ae9| 1 plan9 LDMIB [R0-R4,R6,R8-R11], R10 +5500bae9| 1 plan9 LDMIB [R0,R2,R4,R6], R10! +5f0fbae9| 1 plan9 LDMIB [R0-R4,R6,R8-R11], R10! 
+0340a0e1| 1 plan9 MOVW R3, R4 +0920a0e1| 1 plan9 MOVW R9, R2 +ff90a0e3| 1 plan9 MOVW $255, R9 +ff94a0e3| 1 plan9 MOVW $4278190080, R9 +aaaa0a13| 1 plan9 MOVW.NE $43690, R10 +aaaa4a03| 1 plan9 MOVT.EQ $43690, R10 +5110e0e3| 1 plan9 MVN $81, R1 +001082e5| 1 plan9 MOVW R1, (R2) +001082e4| 1 plan9 MOVW.P R1, (R2) +0010a2e5| 1 plan9 MOVW.W R1, (R2) +201082e5| 1 plan9 MOVW R1, 0x20(R2) +201082e4| 1 plan9 MOVW.P R1, 0x20(R2) +2010a2e5| 1 plan9 MOVW.W R1, 0x20(R2) +201002e5| 1 plan9 MOVW R1, -0x20(R2) +201002e4| 1 plan9 MOVW.P R1, -0x20(R2) +201022e5| 1 plan9 MOVW.W R1, -0x20(R2) +001092e5| 1 plan9 MOVW (R2), R1 +001092e4| 1 plan9 MOVW.P (R2), R1 +0010b2e5| 1 plan9 MOVW.W (R2), R1 +201092e5| 1 plan9 MOVW 0x20(R2), R1 +201092e4| 1 plan9 MOVW.P 0x20(R2), R1 +2010b2e5| 1 plan9 MOVW.W 0x20(R2), R1 +201012e5| 1 plan9 MOVW -0x20(R2), R1 +201012e4| 1 plan9 MOVW.P -0x20(R2), R1 +201032e5| 1 plan9 MOVW.W -0x20(R2), R1 +00100fe1| 1 plan9 MRS APSR, R1 +fef02ce3| 1 plan9 MSR $254, APSR +fff42ce3| 1 plan9 MSR $4278190080, APSR +05f02c01| 1 plan9 MSR.EQ R5, APSR +09f02c11| 1 plan9 MSR.NE R9, APSR +109af1ee| 1 plan9 VMRS FPSCR, R9 +10aaf1ee| 1 plan9 VMRS FPSCR, R10 +109ae1ee| 1 plan9 VMSR R9, FPSCR +10aae1ee| 1 plan9 VMSR R10, FPSCR +202e91e7| 1 plan9 MOVW (R1)(R0>>28), R2 +002e91e7| 1 plan9 MOVW (R1)(R0<<28), R2 +402e91e7| 1 plan9 MOVW (R1)(R0->28), R2 +602e91e7| 1 plan9 MOVW (R1)(R0@>28), R2 +202e11e7| 1 plan9 MOVW.U (R1)(R0>>28), R2 +002e11e7| 1 plan9 MOVW.U (R1)(R0<<28), R2 +402e11e7| 1 plan9 MOVW.U (R1)(R0->28), R2 +602e11e7| 1 plan9 MOVW.U (R1)(R0@>28), R2 +202eb1e7| 1 plan9 MOVW.W (R1)(R0>>28), R2 +002eb1e7| 1 plan9 MOVW.W (R1)(R0<<28), R2 +402eb1e7| 1 plan9 MOVW.W (R1)(R0->28), R2 +602eb1e7| 1 plan9 MOVW.W (R1)(R0@>28), R2 +202e9ae6| 1 plan9 MOVW.P (R10)(R0>>28), R2 +002e9ae6| 1 plan9 MOVW.P (R10)(R0<<28), R2 +402e9ae6| 1 plan9 MOVW.P (R10)(R0->28), R2 +602e9ae6| 1 plan9 MOVW.P (R10)(R0@>28), R2 +202e81e7| 1 plan9 MOVW R2, (R1)(R0>>28) +002e81e7| 1 plan9 MOVW R2, (R1)(R0<<28) 
+402e81e7| 1 plan9 MOVW R2, (R1)(R0->28) +602e81e7| 1 plan9 MOVW R2, (R1)(R0@>28) +202e01e7| 1 plan9 MOVW.U R2, (R1)(R0>>28) +002e01e7| 1 plan9 MOVW.U R2, (R1)(R0<<28) +402e01e7| 1 plan9 MOVW.U R2, (R1)(R0->28) +602e01e7| 1 plan9 MOVW.U R2, (R1)(R0@>28) +202ea1e7| 1 plan9 MOVW.W R2, (R1)(R0>>28) +002ea1e7| 1 plan9 MOVW.W R2, (R1)(R0<<28) +402ea1e7| 1 plan9 MOVW.W R2, (R1)(R0->28) +602ea1e7| 1 plan9 MOVW.W R2, (R1)(R0@>28) +202e85e6| 1 plan9 MOVW.P R2, (R5)(R0>>28) +002e85e6| 1 plan9 MOVW.P R2, (R5)(R0<<28) +402e85e6| 1 plan9 MOVW.P R2, (R5)(R0->28) +602e85e6| 1 plan9 MOVW.P R2, (R5)(R0@>28) +0010c2e5| 1 plan9 MOVB R1, (R2) +0010c2e4| 1 plan9 MOVB.P R1, (R2) +0010e2e5| 1 plan9 MOVB.W R1, (R2) +2010c2e5| 1 plan9 MOVB R1, 0x20(R2) +2010c2e4| 1 plan9 MOVB.P R1, 0x20(R2) +2010e2e5| 1 plan9 MOVB.W R1, 0x20(R2) +201042e5| 1 plan9 MOVB R1, -0x20(R2) +201042e4| 1 plan9 MOVB.P R1, -0x20(R2) +201062e5| 1 plan9 MOVB.W R1, -0x20(R2) +d010d2e1| 1 plan9 MOVBS (R2), R1 +d010d2e0| 1 plan9 MOVBS.P (R2), R1 +d010f2e1| 1 plan9 MOVBS.W (R2), R1 +d012d2e1| 1 plan9 MOVBS 0x20(R2), R1 +d012d2e0| 1 plan9 MOVBS.P 0x20(R2), R1 +d012f2e1| 1 plan9 MOVBS.W 0x20(R2), R1 +d01252e1| 1 plan9 MOVBS -0x20(R2), R1 +d01252e0| 1 plan9 MOVBS.P -0x20(R2), R1 +d01272e1| 1 plan9 MOVBS.W -0x20(R2), R1 +0010d2e5| 1 plan9 MOVBU (R2), R1 +0010dfe5| 1 plan9 MOVBU (R15), R1 +0020dfe5| 1 plan9 MOVBU (R15), R2 +0010d2e4| 1 plan9 MOVBU.P (R2), R1 +0010f2e5| 1 plan9 MOVBU.W (R2), R1 +2010d2e5| 1 plan9 MOVBU 0x20(R2), R1 +2010d2e4| 1 plan9 MOVBU.P 0x20(R2), R1 +2010f2e5| 1 plan9 MOVBU.W 0x20(R2), R1 +201052e5| 1 plan9 MOVBU -0x20(R2), R1 +201052e4| 1 plan9 MOVBU.P -0x20(R2), R1 +201072e5| 1 plan9 MOVBU.W -0x20(R2), R1 +202ec1e7| 1 plan9 MOVB R2, (R1)(R0>>28) +002ec1e7| 1 plan9 MOVB R2, (R1)(R0<<28) +402ec1e7| 1 plan9 MOVB R2, (R1)(R0->28) +602ec1e7| 1 plan9 MOVB R2, (R1)(R0@>28) +202e41e7| 1 plan9 MOVB.U R2, (R1)(R0>>28) +002e41e7| 1 plan9 MOVB.U R2, (R1)(R0<<28) +402e41e7| 1 plan9 MOVB.U R2, (R1)(R0->28) +602e41e7| 1 
plan9 MOVB.U R2, (R1)(R0@>28) +202ee1e7| 1 plan9 MOVB.W R2, (R1)(R0>>28) +002ee1e7| 1 plan9 MOVB.W R2, (R1)(R0<<28) +402ee1e7| 1 plan9 MOVB.W R2, (R1)(R0->28) +602ee1e7| 1 plan9 MOVB.W R2, (R1)(R0@>28) +202e61e7| 1 plan9 MOVB.W.U R2, (R1)(R0>>28) +002e61e7| 1 plan9 MOVB.W.U R2, (R1)(R0<<28) +402e61e7| 1 plan9 MOVB.W.U R2, (R1)(R0->28) +602e61e7| 1 plan9 MOVB.W.U R2, (R1)(R0@>28) +202ec5e6| 1 plan9 MOVB.P R2, (R5)(R0>>28) +002ec5e6| 1 plan9 MOVB.P R2, (R5)(R0<<28) +402ec5e6| 1 plan9 MOVB.P R2, (R5)(R0->28) +602ec5e6| 1 plan9 MOVB.P R2, (R5)(R0@>28) +202ed1e7| 1 plan9 MOVBU (R1)(R0>>28), R2 +002ed1e7| 1 plan9 MOVBU (R1)(R0<<28), R2 +402ed1e7| 1 plan9 MOVBU (R1)(R0->28), R2 +602ed1e7| 1 plan9 MOVBU (R1)(R0@>28), R2 +202e51e7| 1 plan9 MOVBU.U (R1)(R0>>28), R2 +002e51e7| 1 plan9 MOVBU.U (R1)(R0<<28), R2 +402e51e7| 1 plan9 MOVBU.U (R1)(R0->28), R2 +602e51e7| 1 plan9 MOVBU.U (R1)(R0@>28), R2 +202ef1e7| 1 plan9 MOVBU.W (R1)(R0>>28), R2 +002ef1e7| 1 plan9 MOVBU.W (R1)(R0<<28), R2 +402ef1e7| 1 plan9 MOVBU.W (R1)(R0->28), R2 +602ef1e7| 1 plan9 MOVBU.W (R1)(R0@>28), R2 +202e71e7| 1 plan9 MOVBU.W.U (R1)(R0>>28), R2 +002e71e7| 1 plan9 MOVBU.W.U (R1)(R0<<28), R2 +402e71e7| 1 plan9 MOVBU.W.U (R1)(R0->28), R2 +602e71e7| 1 plan9 MOVBU.W.U (R1)(R0@>28), R2 +202edae6| 1 plan9 MOVBU.P (R10)(R0>>28), R2 +002edae6| 1 plan9 MOVBU.P (R10)(R0<<28), R2 +402edae6| 1 plan9 MOVBU.P (R10)(R0->28), R2 +602edae6| 1 plan9 MOVBU.P (R10)(R0@>28), R2 +d02091e1| 1 plan9 MOVBS (R1)(R0), R2 +d02011e1| 1 plan9 MOVBS.U (R1)(R0), R2 +d020b1e1| 1 plan9 MOVBS.W (R1)(R0), R2 +d02091e0| 1 plan9 MOVBS.P (R1)(R0), R2 +b040c3e1| 1 plan9 MOVH R4, (R3) +b032c4e1| 1 plan9 MOVH R3, 0x20(R4) +b032e4e1| 1 plan9 MOVH.W R3, 0x20(R4) +b032c4e0| 1 plan9 MOVH.P R3, 0x20(R4) +b03244e1| 1 plan9 MOVH R3, -0x20(R4) +b03264e1| 1 plan9 MOVH.W R3, -0x20(R4) +b03244e0| 1 plan9 MOVH.P R3, -0x20(R4) +b080d9e1| 1 plan9 MOVHU (R9), R8 +b080f9e1| 1 plan9 MOVHU.W (R9), R8 +b080d9e0| 1 plan9 MOVHU.P (R9), R8 +f080d9e1| 1 plan9 MOVHS (R9), 
R8 +f080f9e1| 1 plan9 MOVHS.W (R9), R8 +f080d9e0| 1 plan9 MOVHS.P (R9), R8 +b282d9e1| 1 plan9 MOVHU 0x22(R9), R8 +b282f9e1| 1 plan9 MOVHU.W 0x22(R9), R8 +b282d9e0| 1 plan9 MOVHU.P 0x22(R9), R8 +f282d9e1| 1 plan9 MOVHS 0x22(R9), R8 +f282f9e1| 1 plan9 MOVHS.W 0x22(R9), R8 +f282d9e0| 1 plan9 MOVHS.P 0x22(R9), R8 +b48259e1| 1 plan9 MOVHU -0x24(R9), R8 +b48279e1| 1 plan9 MOVHU.W -0x24(R9), R8 +b48259e0| 1 plan9 MOVHU.P -0x24(R9), R8 +f48259e1| 1 plan9 MOVHS -0x24(R9), R8 +f48279e1| 1 plan9 MOVHS.W -0x24(R9), R8 +f48259e0| 1 plan9 MOVHS.P -0x24(R9), R8 +002a31ee| 1 plan9 VADD.F32 S0, S2, S4 +202a31ee| 1 plan9 VADD.F32 S1, S2, S4 +802a31ee| 1 plan9 VADD.F32 S0, S3, S4 +002a71ee| 1 plan9 VADD.F32 S0, S2, S5 +035b340e| 1 plan9 VADD.EQ.F64 D3, D4, D5 +002a321e| 1 plan9 VADD.NE.F32 S0, S4, S4 +035b35ee| 1 plan9 VADD.F64 D3, D5, D5 +402a31ee| 1 plan9 VSUB.F32 S0, S2, S4 +602a31ee| 1 plan9 VSUB.F32 S1, S2, S4 +c02a31ee| 1 plan9 VSUB.F32 S0, S3, S4 +402a71ee| 1 plan9 VSUB.F32 S0, S2, S5 +435b340e| 1 plan9 VSUB.EQ.F64 D3, D4, D5 +402a321e| 1 plan9 VSUB.NE.F32 S0, S4, S4 +435b35ee| 1 plan9 VSUB.F64 D3, D5, D5 +002a21ee| 1 plan9 VMUL.F32 S0, S2, S4 +202a21ee| 1 plan9 VMUL.F32 S1, S2, S4 +802a21ee| 1 plan9 VMUL.F32 S0, S3, S4 +002a61ee| 1 plan9 VMUL.F32 S0, S2, S5 +035b240e| 1 plan9 VMUL.EQ.F64 D3, D4, D5 +002a221e| 1 plan9 VMUL.NE.F32 S0, S4, S4 +035b25ee| 1 plan9 VMUL.F64 D3, D5, D5 +002a81ee| 1 plan9 VDIV.F32 S0, S2, S4 +202a81ee| 1 plan9 VDIV.F32 S1, S2, S4 +802a81ee| 1 plan9 VDIV.F32 S0, S3, S4 +002ac1ee| 1 plan9 VDIV.F32 S0, S2, S5 +035b840e| 1 plan9 VDIV.EQ.F64 D3, D4, D5 +002a821e| 1 plan9 VDIV.NE.F32 S0, S4, S4 +035b85ee| 1 plan9 VDIV.F64 D3, D5, D5 +401ab1ee| 1 plan9 VNEG.F32 S0, S2 +601ab1ee| 1 plan9 VNEG.F32 S1, S2 +401af1ee| 1 plan9 VNEG.F32 S0, S3 +445bb1ee| 1 plan9 VNEG.F64 D4, D5 +c01ab0ee| 1 plan9 VABS.F32 S0, S2 +e01ab0ee| 1 plan9 VABS.F32 S1, S2 +c01af0ee| 1 plan9 VABS.F32 S0, S3 +c45bb0ee| 1 plan9 VABS.F64 D4, D5 +c01ab1ee| 1 plan9 VSQRT.F32 S0, S2 +e01ab1ee| 1 
plan9 VSQRT.F32 S1, S2 +c01af1ee| 1 plan9 VSQRT.F32 S0, S3 +c45bb1ee| 1 plan9 VSQRT.F64 D4, D5 +c01ab7ee| 1 gnu vcvt.f64.f32 d1, s0 +c45bb7ee| 1 gnu vcvt.f32.f64 s10, d4 +9f9f98e1| 1 gnu ldrex r9, [r8] +9f9fd8e1| 1 gnu ldrexb r9, [r8] +9f9ff8e1| 1 gnu ldrexh r9, [r8] +9fcfbbe1| 1 gnu ldrexd ip, [fp] +935f84e1| 1 gnu strex r5, r3, [r4] +935fc4e1| 1 gnu strexb r5, r3, [r4] +935fe4e1| 1 gnu strexh r5, r3, [r4] +98afa9e1| 1 gnu strexd sl, r8, [r9] +104b08ee| 1 gnu vmov.32 d8[0], r4 +108b14ee| 1 gnu vmov.32 r8, d4[0] +445ab0ee| 1 gnu vmov.f32 s10, s8 +467bb0ee| 1 gnu vmov.f64 d7, d6 +c68abdee| 1 gnu vcvt.s32.f32 s16, s12 +c68abcee| 1 gnu vcvt.u32.f32 s16, s12 +c68bbdee| 1 gnu vcvt.s32.f64 s16, d6 +c68bbcee| 1 gnu vcvt.u32.f64 s16, d6 +c68ab8ee| 1 gnu vcvt.f32.s32 s16, s12 +468ab8ee| 1 gnu vcvt.f32.u32 s16, s12 +c68bb8ee| 1 gnu vcvt.f64.s32 d8, s12 +468bb8ee| 1 gnu vcvt.f64.u32 d8, s12 +000000ea| 1 plan9 B 0x8 +feffffea| 1 plan9 B 0x0 +fcffffea| 1 plan9 B 0xfffffff8 +1f90cfe7| 1 plan9 BFC $16, $0, R9 +9fb4dee7| 1 plan9 BFC $22, $9, R11 +1790cfe7| 1 plan9 BFI $16, $0, R7, R9 +98b4dee7| 1 plan9 BFI $22, $9, R8, R11 +742321e1| 1 plan9 BKPT $4660 +000000eb| 1 plan9 BL 0x8 +feffffeb| 1 plan9 BL 0x0 +fcffffeb| 1 plan9 BL 0xfffffff8 +000000fa| 1 plan9 BLX 0x8 +fefffffa| 1 plan9 BLX 0x0 +fcfffffa| 1 plan9 BLX 0xfffffff8 +33ff2fe1| 1 plan9 BLX R3 +13ff2fe1| 1 plan9 BX R3 +23ff2fe1| 1 plan9 BXJ R3 +1ff07ff5| 1 plan9 CLREX +f7f020e3| 1 gnu dbg #7 +58f07ff5| 1 gnu dmb #8 +49f07ff5| 1 gnu dsb #9 +62f07ff5| 1 gnu isb #2 +009a94ed| 1 plan9 MOVF (R4), F9 +009ad4ed| 1 plan9 MOVF (R4), S19 +009b940d| 1 plan9 MOVD.EQ (R4), F9 +003a9a1d| 1 plan9 MOVF.NE (R10), F3 +003ada1d| 1 plan9 MOVF.NE (R10), S7 +003b9aed| 1 plan9 MOVD (R10), F3 +089a93ed| 1 plan9 MOVF 0x20(R3), F9 +089ad3ed| 1 plan9 MOVF 0x20(R3), S19 +089b940d| 1 plan9 MOVD.EQ 0x20(R4), F9 +083a1a1d| 1 plan9 MOVF.NE -0x20(R10), F3 +083a5a1d| 1 plan9 MOVF.NE -0x20(R10), S7 +083b1aed| 1 plan9 MOVD -0x20(R10), F3 +009a84ed| 1 plan9 MOVF 
F9, (R4) +009ac4ed| 1 plan9 MOVF S19, (R4) +009b840d| 1 plan9 MOVD.EQ F9, (R4) +003a8a1d| 1 plan9 MOVF.NE F3, (R10) +003aca1d| 1 plan9 MOVF.NE S7, (R10) +003b8aed| 1 plan9 MOVD F3, (R10) +089a83ed| 1 plan9 MOVF F9, 0x20(R3) +089ac3ed| 1 plan9 MOVF S19, 0x20(R3) +089b840d| 1 plan9 MOVD.EQ F9, 0x20(R4) +083a0a1d| 1 plan9 MOVF.NE F3, -0x20(R10) +083a4a1d| 1 plan9 MOVF.NE S7, -0x20(R10) +083b0aed| 1 plan9 MOVD F3, -0x20(R10) +d060c8e1| 1 gnu ldrd r6, [r8] +d06048e1| 1 gnu ldrd r6, [r8] +d060e8e1| 1 gnu ldrd r6, [r8, #0]! +d06068e1| 1 gnu ldrd r6, [r8, #0]! +d060c8e0| 1 gnu ldrd r6, [r8], #0 +d06048e0| 1 gnu ldrd r6, [r8], #0 +d062c8e1| 1 gnu ldrd r6, [r8, #32] +d06248e1| 1 gnu ldrd r6, [r8, #-32] +d062e8e1| 1 gnu ldrd r6, [r8, #32]! +d06268e1| 1 gnu ldrd r6, [r8, #-32]! +d062c8e0| 1 gnu ldrd r6, [r8], #32 +d06248e0| 1 gnu ldrd r6, [r8], #-32 +d24089e1| 1 gnu ldrd r4, [r9, r2] +d240a9e1| 1 gnu ldrd r4, [r9, r2]! +d24009e1| 1 gnu ldrd r4, [r9, -r2] +d24029e1| 1 gnu ldrd r4, [r9, -r2]! +f060c8e1| 1 gnu strd r6, [r8] +f06048e1| 1 gnu strd r6, [r8] +f060e8e1| 1 gnu strd r6, [r8, #0]! +f06068e1| 1 gnu strd r6, [r8, #0]! +f060c8e0| 1 gnu strd r6, [r8], #0 +f06048e0| 1 gnu strd r6, [r8], #0 +f062c8e1| 1 gnu strd r6, [r8, #32] +f06248e1| 1 gnu strd r6, [r8, #-32] +f062e8e1| 1 gnu strd r6, [r8, #32]! +f06268e1| 1 gnu strd r6, [r8, #-32]! +f062c8e0| 1 gnu strd r6, [r8], #32 +f06248e0| 1 gnu strd r6, [r8], #-32 +f24089e1| 1 gnu strd r4, [r9, r2] +f240a9e1| 1 gnu strd r4, [r9, r2]! +f24009e1| 1 gnu strd r4, [r9, -r2] +f24029e1| 1 gnu strd r4, [r9, -r2]! 
+0010b2e4| 1 gnu ldrt r1, [r2], #0 +2010b2e4| 1 gnu ldrt r1, [r2], #32 +201032e4| 1 gnu ldrt r1, [r2], #-32 +0040bde4| 1 gnu ldrt r4, [sp], #0 +2040bde4| 1 gnu ldrt r4, [sp], #32 +20403de4| 1 gnu ldrt r4, [sp], #-32 +2314b2e6| 1 gnu ldrt r1, [r2], r3, lsr #8 +0314b2e6| 1 gnu ldrt r1, [r2], r3, lsl #8 +4314b2e6| 1 gnu ldrt r1, [r2], r3, asr #8 +6314b2e6| 1 gnu ldrt r1, [r2], r3, ror #8 +231432e6| 1 gnu ldrt r1, [r2], -r3, lsr #8 +031432e6| 1 gnu ldrt r1, [r2], -r3, lsl #8 +431432e6| 1 gnu ldrt r1, [r2], -r3, asr #8 +631432e6| 1 gnu ldrt r1, [r2], -r3, ror #8 +0010a2e4| 1 gnu strt r1, [r2], #0 +2010a2e4| 1 gnu strt r1, [r2], #32 +201022e4| 1 gnu strt r1, [r2], #-32 +0040ade4| 1 gnu strt r4, [sp], #0 +2040ade4| 1 gnu strt r4, [sp], #32 +20402de4| 1 gnu strt r4, [sp], #-32 +2314a2e6| 1 gnu strt r1, [r2], r3, lsr #8 +0314a2e6| 1 gnu strt r1, [r2], r3, lsl #8 +4314a2e6| 1 gnu strt r1, [r2], r3, asr #8 +6314a2e6| 1 gnu strt r1, [r2], r3, ror #8 +231422e6| 1 gnu strt r1, [r2], -r3, lsr #8 +031422e6| 1 gnu strt r1, [r2], -r3, lsl #8 +431422e6| 1 gnu strt r1, [r2], -r3, asr #8 +631422e6| 1 gnu strt r1, [r2], -r3, ror #8 +0010f2e4| 1 gnu ldrbt r1, [r2], #0 +2010f2e4| 1 gnu ldrbt r1, [r2], #32 +201072e4| 1 gnu ldrbt r1, [r2], #-32 +0040fde4| 1 gnu ldrbt r4, [sp], #0 +2040fde4| 1 gnu ldrbt r4, [sp], #32 +20407de4| 1 gnu ldrbt r4, [sp], #-32 +2314f2e6| 1 gnu ldrbt r1, [r2], r3, lsr #8 +0314f2e6| 1 gnu ldrbt r1, [r2], r3, lsl #8 +4314f2e6| 1 gnu ldrbt r1, [r2], r3, asr #8 +6314f2e6| 1 gnu ldrbt r1, [r2], r3, ror #8 +231472e6| 1 gnu ldrbt r1, [r2], -r3, lsr #8 +031472e6| 1 gnu ldrbt r1, [r2], -r3, lsl #8 +431472e6| 1 gnu ldrbt r1, [r2], -r3, asr #8 +631472e6| 1 gnu ldrbt r1, [r2], -r3, ror #8 +0010e2e4| 1 gnu strbt r1, [r2], #0 +2010e2e4| 1 gnu strbt r1, [r2], #32 +201062e4| 1 gnu strbt r1, [r2], #-32 +0040ede4| 1 gnu strbt r4, [sp], #0 +2040ede4| 1 gnu strbt r4, [sp], #32 +20406de4| 1 gnu strbt r4, [sp], #-32 +2314e2e6| 1 gnu strbt r1, [r2], r3, lsr #8 +0314e2e6| 1 gnu strbt r1, 
[r2], r3, lsl #8 +4314e2e6| 1 gnu strbt r1, [r2], r3, asr #8 +6314e2e6| 1 gnu strbt r1, [r2], r3, ror #8 +231462e6| 1 gnu strbt r1, [r2], -r3, lsr #8 +031462e6| 1 gnu strbt r1, [r2], -r3, lsl #8 +431462e6| 1 gnu strbt r1, [r2], -r3, asr #8 +631462e6| 1 gnu strbt r1, [r2], -r3, ror #8 +d010f2e0| 1 gnu ldrsbt r1, [r2], #0 +d012f2e0| 1 gnu ldrsbt r1, [r2], #32 +d01272e0| 1 gnu ldrsbt r1, [r2], #-32 +d040fde0| 1 gnu ldrsbt r4, [sp], #0 +d042fde0| 1 gnu ldrsbt r4, [sp], #32 +d0427de0| 1 gnu ldrsbt r4, [sp], #-32 +d310b2e0| 1 gnu ldrsbt r1, [r2], r3 +d640bde0| 1 gnu ldrsbt r4, [sp], r6 +d31032e0| 1 gnu ldrsbt r1, [r2], -r3 +d6403de0| 1 gnu ldrsbt r4, [sp], -r6 +b010f2e0| 1 gnu ldrht r1, [r2], #0 +b012f2e0| 1 gnu ldrht r1, [r2], #32 +b01272e0| 1 gnu ldrht r1, [r2], #-32 +b040fde0| 1 gnu ldrht r4, [sp], #0 +b042fde0| 1 gnu ldrht r4, [sp], #32 +b0427de0| 1 gnu ldrht r4, [sp], #-32 +b310b2e0| 1 gnu ldrht r1, [r2], r3 +b640bde0| 1 gnu ldrht r4, [sp], r6 +b31032e0| 1 gnu ldrht r1, [r2], -r3 +b6403de0| 1 gnu ldrht r4, [sp], -r6 +f010f2e0| 1 gnu ldrsht r1, [r2], #0 +f012f2e0| 1 gnu ldrsht r1, [r2], #32 +f01272e0| 1 gnu ldrsht r1, [r2], #-32 +f040fde0| 1 gnu ldrsht r4, [sp], #0 +f042fde0| 1 gnu ldrsht r4, [sp], #32 +f0427de0| 1 gnu ldrsht r4, [sp], #-32 +f310b2e0| 1 gnu ldrsht r1, [r2], r3 +f640bde0| 1 gnu ldrsht r4, [sp], r6 +f31032e0| 1 gnu ldrsht r1, [r2], -r3 +f6403de0| 1 gnu ldrsht r4, [sp], -r6 +b010f2e0| 1 gnu ldrht r1, [r2], #0 +b012f2e0| 1 gnu ldrht r1, [r2], #32 +b01272e0| 1 gnu ldrht r1, [r2], #-32 +b040fde0| 1 gnu ldrht r4, [sp], #0 +b042fde0| 1 gnu ldrht r4, [sp], #32 +b0427de0| 1 gnu ldrht r4, [sp], #-32 +b310b2e0| 1 gnu ldrht r1, [r2], r3 +b640bde0| 1 gnu ldrht r4, [sp], r6 +b31032e0| 1 gnu ldrht r1, [r2], -r3 +b6403de0| 1 gnu ldrht r4, [sp], -r6 +b010e2e0| 1 gnu strht r1, [r2], #0 +b012e2e0| 1 gnu strht r1, [r2], #32 +b01262e0| 1 gnu strht r1, [r2], #-32 +b040ede0| 1 gnu strht r4, [sp], #0 +b042ede0| 1 gnu strht r4, [sp], #32 +b0426de0| 1 gnu strht r4, [sp], #-32 
+b310a2e0| 1 gnu strht r1, [r2], r3 +b640ade0| 1 gnu strht r4, [sp], r6 +b31022e0| 1 gnu strht r1, [r2], -r3 +b6402de0| 1 gnu strht r4, [sp], -r6 +00f020e3| 1 gnu nop +445ab0ee| 1 gnu vmov.f32 s10, s8 +645af0ee| 1 gnu vmov.f32 s11, s9 +467bb0ee| 1 gnu vmov.f64 d7, d6 +104b08ee| 1 gnu vmov.32 d8[0], r4 +104b28ee| 1 gnu vmov.32 d8[1], r4 +108b14ee| 1 gnu vmov.32 r8, d4[0] +108b34ee| 1 gnu vmov.32 r8, d4[1] +c68abdee| 1 gnu vcvt.s32.f32 s16, s12 +e68afdee| 1 gnu vcvt.s32.f32 s17, s13 +c68abcee| 1 gnu vcvt.u32.f32 s16, s12 +e68afcee| 1 gnu vcvt.u32.f32 s17, s13 +c68bbdee| 1 gnu vcvt.s32.f64 s16, d6 +c68bfdee| 1 gnu vcvt.s32.f64 s17, d6 +c68bbcee| 1 gnu vcvt.u32.f64 s16, d6 +c68bfcee| 1 gnu vcvt.u32.f64 s17, d6 +c68ab8ee| 1 gnu vcvt.f32.s32 s16, s12 +e68af8ee| 1 gnu vcvt.f32.s32 s17, s13 +468ab8ee| 1 gnu vcvt.f32.u32 s16, s12 +668af8ee| 1 gnu vcvt.f32.u32 s17, s13 +c68bb8ee| 1 gnu vcvt.f64.s32 d8, s12 +e68bb8ee| 1 gnu vcvt.f64.s32 d8, s13 +468bb8ee| 1 gnu vcvt.f64.u32 d8, s12 +668bb8ee| 1 gnu vcvt.f64.u32 d8, s13 +c01ab7ee| 1 gnu vcvt.f64.f32 d1, s0 +e01ab7ee| 1 gnu vcvt.f64.f32 d1, s1 +c45bb7ee| 1 gnu vcvt.f32.f64 s10, d4 +c65bf7ee| 1 gnu vcvt.f32.f64 s11, d6 +102083e6| 1 gnu pkhbt r2, r3, r0 +102283e6| 1 gnu pkhbt r2, r3, r0, lsl #4 +502083e6| 1 gnu pkhtb r2, r3, r0, asr #32 +d02083e6| 1 gnu pkhtb r2, r3, r0, asr #1 +502283e6| 1 gnu pkhtb r2, r3, r0, asr #4 +faaf2de9| 1 gnu push {r1, r3, r4, r5, r6, r7, r8, r9, sl, fp, sp, pc} +04202de5| 1 gnu push {r2} +faafbde8| 1 gnu pop {r1, r3, r4, r5, r6, r7, r8, r9, sl, fp, sp, pc} +04209de4| 1 gnu pop {r2} +556003e1| 1 gnu qadd r6, r5, r3 +156f28e6| 1 gnu qadd16 r6, r8, r5 +956f28e6| 1 gnu qadd8 r6, r8, r5 +550044e1| 1 gnu qdadd r0, r5, r4 +550066e1| 1 gnu qdsub r0, r5, r6 +156f68e6| 1 gnu uqadd16 r6, r8, r5 +956f68e6| 1 gnu uqadd8 r6, r8, r5 +356f28e6| 1 gnu qasx r6, r8, r5 +556f28e6| 1 gnu qsax r6, r8, r5 +356f64e6| 1 gnu uqasx r6, r4, r5 +553f64e6| 1 gnu uqsax r3, r4, r5 +556022e1| 1 gnu qsub r6, r5, r2 +774f21e6| 1 gnu 
qsub16 r4, r1, r7 +f74f21e6| 1 gnu qsub8 r4, r1, r7 +774f61e6| 1 gnu uqsub16 r4, r1, r7 +f74f61e6| 1 gnu uqsub8 r4, r1, r7 +6670a0e1| 1 gnu rrx r7, r6 +6670b0e1| 1 gnu rrxs r7, r6 +112f13e6| 1 gnu sadd16 r2, r3, r1 +992f13e6| 1 gnu sadd8 r2, r3, r9 +112f33e6| 1 gnu shadd16 r2, r3, r1 +992f33e6| 1 gnu shadd8 r2, r3, r9 +712f13e6| 1 gnu ssub16 r2, r3, r1 +f92f13e6| 1 gnu ssub8 r2, r3, r9 +712f33e6| 1 gnu shsub16 r2, r3, r1 +f92f33e6| 1 gnu shsub8 r2, r3, r9 +112f53e6| 1 gnu uadd16 r2, r3, r1 +992f53e6| 1 gnu uadd8 r2, r3, r9 +112f73e6| 1 gnu uhadd16 r2, r3, r1 +992f73e6| 1 gnu uhadd8 r2, r3, r9 +712f53e6| 1 gnu usub16 r2, r3, r1 +f92f53e6| 1 gnu usub8 r2, r3, r9 +712f73e6| 1 gnu uhsub16 r2, r3, r1 +f92f73e6| 1 gnu uhsub8 r2, r3, r9 +332f14e6| 1 gnu sasx r2, r4, r3 +532f14e6| 1 gnu ssax r2, r4, r3 +332f54e6| 1 gnu uasx r2, r4, r3 +532f54e6| 1 gnu usax r2, r4, r3 +332f34e6| 1 gnu shasx r2, r4, r3 +532f34e6| 1 gnu shsax r2, r4, r3 +332f74e6| 1 gnu uhasx r2, r4, r3 +532f74e6| 1 gnu uhsax r2, r4, r3 +dc51afe7| 1 gnu sbfx r5, ip, #3, #16 +dc51efe7| 1 gnu ubfx r5, ip, #3, #16 +b12f88e6| 1 gnu sel r2, r8, r1 +000201f1| 1 gnu setend be +000001f1| 1 gnu setend le +04f020e3| 1 gnu sev +1155aae6| 1 gnu ssat r5, #11, r1, lsl #10 +5155aae6| 1 gnu ssat r5, #11, r1, asr #10 +335faae6| 1 gnu ssat16 r5, #11, r3 +1155eae6| 1 gnu usat r5, #10, r1, lsl #10 +5155eae6| 1 gnu usat r5, #10, r1, asr #10 +335feae6| 1 gnu usat16 r5, #10, r3 +7788a9e6| 1 gnu sxtab r8, r9, r7, ror #16 +778889e6| 1 gnu sxtab16 r8, r9, r7, ror #16 +7788b9e6| 1 gnu sxtah r8, r9, r7, ror #16 +7784afe6| 1 gnu sxtb r8, r7, ror #8 +778c8fe6| 1 gnu sxtb16 r8, r7, ror #24 +7780bfe6| 1 gnu sxth r8, r7 +7788e9e6| 1 gnu uxtab r8, r9, r7, ror #16 +7788c9e6| 1 gnu uxtab16 r8, r9, r7, ror #16 +7788f9e6| 1 gnu uxtah r8, r9, r7, ror #16 +7784efe6| 1 gnu uxtb r8, r7, ror #8 +778ccfe6| 1 gnu uxtb16 r8, r7, ror #24 +7780ffe6| 1 gnu uxth r8, r7 +11f288e7| 1 gnu usad8 r8, r1, r2 +112388e7| 1 gnu usada8 r8, r1, r3, r2 +02f020e3| 1 gnu 
wfe +03f020e3| 1 gnu wfi +01f020e3| 1 gnu yield diff --git a/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/arg.go b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/arg.go new file mode 100644 index 00000000000..96df14dfaad --- /dev/null +++ b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/arg.go @@ -0,0 +1,494 @@ +// Generated by ARM internal tool +// DO NOT EDIT + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package arm64asm + +// Naming for Go decoder arguments: +// +// - arg_Wd: a W register encoded in the Rd[4:0] field (31 is wzr) +// +// - arg_Xd: a X register encoded in the Rd[4:0] field (31 is xzr) +// +// - arg_Wds: a W register encoded in the Rd[4:0] field (31 is wsp) +// +// - arg_Xds: a X register encoded in the Rd[4:0] field (31 is sp) +// +// - arg_Wn: encoded in Rn[9:5] +// +// - arg_Wm: encoded in Rm[20:16] +// +// - arg_Wm_extend__UXTB_0__UXTH_1__LSL_UXTW_2__UXTX_3__SXTB_4__SXTH_5__SXTW_6__SXTX_7__0_4: +// a W register encoded in Rm with an extend encoded in option[15:13] and an amount +// encoded in imm3[12:10] in the range [0,4]. +// +// - arg_Rm_extend__UXTB_0__UXTH_1__UXTW_2__LSL_UXTX_3__SXTB_4__SXTH_5__SXTW_6__SXTX_7__0_4: +// a W or X register encoded in Rm with an extend encoded in option[15:13] and an +// amount encoded in imm3[12:10] in the range [0,4]. If the extend is UXTX or SXTX, +// it's an X register else, it's a W register. +// +// - arg_Wm_shift__LSL_0__LSR_1__ASR_2__0_31: +// a W register encoded in Rm with a shift encoded in shift[23:22] and an amount +// encoded in imm6[15:10] in the range [0,31]. +// +// - arg_IAddSub: +// An immediate for a add/sub instruction encoded in imm12[21:10] with an optional +// left shift of 12 encoded in shift[23:22]. +// +// - arg_Rt_31_1__W_0__X_1: +// a W or X register encoded in Rt[4:0]. 
The width specifier is encoded in the field +// [31:31] (offset 31, bit count 1) and the register is W for 0 and X for 1. +// +// - arg_[s|u]label_FIELDS_POWER: +// a program label encoded as "FIELDS" times 2^POWER in the range [MIN, MAX] (determined +// by signd/unsigned, FIELDS and POWER), e.g. +// arg_slabel_imm14_2 +// arg_slabel_imm19_2 +// arg_slabel_imm26_2 +// arg_slabel_immhi_immlo_0 +// arg_slabel_immhi_immlo_12 +// +// - arg_Xns_mem_post_imm7_8_signed: +// addressing mode of post-index with a base register: Xns and a signed offset encoded +// in the "imm7" field times 8 +// +// - arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__3_1: +// addressing mode of extended register with a base register: Xns, an offset register +// (|) with an extend encoded in option[15:13] and a shift amount encoded in +// S[12:12] in the range [0,3] (S=0:0, S=1:3). +// +// - arg_Xns_mem_optional_imm12_4_unsigned: +// addressing mode of unsigned offset with a base register: Xns and an optional unsigned +// offset encoded in the "imm12" field times 4 +// +// - arg_Xns_mem_wb_imm7_4_signed: +// addressing mode of pre-index with a base register: Xns and the signed offset encoded +// in the "imm7" field times 4 +// +// - arg_Xns_mem_post_size_1_8_unsigned__4_0__8_1__16_2__32_3: +// a post-index immediate offset, encoded in the "size" field. 
It can have the following values: +// #4 when size = 00 +// #8 when size = 01 +// #16 when size = 10 +// #32 when size = 11 +// +// - arg_immediate_0_127_CRm_op2: +// an immediate encoded in "CRm:op2" in the range 0 to 127 +// +// - arg_immediate_bitmask_64_N_imms_immr: +// a bitmask immediate for 64-bit variant and encoded in "N:imms:immr" +// +// - arg_immediate_SBFX_SBFM_64M_bitfield_width_64_imms: +// an immediate for the bitfield of SBFX 64-bit variant +// +// - arg_immediate_shift_32_implicit_inverse_imm16_hw: +// a 32-bit immediate of the bitwise inverse of which can be encoded in "imm16:hw" +// +// - arg_cond_[Not]AllowALNV_[Invert|Normal]: +// a standard condition, encoded in the "cond" field, excluding (NotAllow) AL and NV with +// its least significant bit [Yes|No] inverted, e.g. +// arg_cond_AllowALNV_Normal +// arg_cond_NotAllowALNV_Invert +// +// - arg_immediate_OptLSL_amount_16_0_48: +// An immediate for MOV[KNZ] instruction encoded in imm16[20:5] with an optional +// left shift of 16 in the range [0, 48] encoded in hw[22, 21] +// +// - arg_immediate_0_width_m1_immh_immb__UIntimmhimmb8_1__UIntimmhimmb16_2__UIntimmhimmb32_4__UIntimmhimmb64_8: +// the left shift amount, in the range 0 to the operand width in bits minus 1, +// encoded in the "immh:immb" field. It can have the following values: +// (UInt(immh:immb)-8) when immh = 0001 +// (UInt(immh:immb)-16) when immh = 001x +// (UInt(immh:immb)-32) when immh = 01xx +// (UInt(immh:immb)-64) when immh = 1xxx +// +// - arg_immediate_1_width_immh_immb__16UIntimmhimmb_1__32UIntimmhimmb_2__64UIntimmhimmb_4: +// the right shift amount, in the range 1 to the destination operand width in +// bits, encoded in the "immh:immb" field. 
It can have the following values: +// (16-UInt(immh:immb)) when immh = 0001 +// (32-UInt(immh:immb)) when immh = 001x +// (64-UInt(immh:immb)) when immh = 01xx +// +// - arg_immediate_8x8_a_b_c_d_e_f_g_h: +// a 64-bit immediate 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh', +// encoded in "a:b:c:d:e:f:g:h". +// +// - arg_immediate_fbits_min_1_max_32_sub_64_scale: +// the number of bits after the binary point in the fixed-point destination, +// in the range 1 to 32, encoded as 64 minus "scale". +// +// - arg_immediate_floatzero: #0.0 +// +// - arg_immediate_exp_3_pre_4_a_b_c_d_e_f_g_h: +// a signed floating-point constant with 3-bit exponent and normalized 4 bits of precision, +// encoded in "a:b:c:d:e:f:g:h" +// +// - arg_immediate_fbits_min_1_max_0_sub_0_immh_immb__64UIntimmhimmb_4__128UIntimmhimmb_8: +// the number of fractional bits, in the range 1 to the operand width, encoded +// in the "immh:immb" field. It can have the following values: +// (64-UInt(immh:immb)) when immh = 01xx +// (128-UInt(immh:immb)) when immh = 1xxx +// +// - arg_immediate_index_Q_imm4__imm4lt20gt_00__imm4_10: +// the lowest numbered byte element to be extracted, encoded in the "Q:imm4" field. 
+// It can have the following values: +// imm4<2:0> when Q = 0, imm4<3> = 0 +// imm4 when Q = 1, imm4<3> = x +// +// - arg_sysop_AT_SYS_CR_system: +// system operation for system instruction: AT encoded in the "op1:CRm<0>:op2" field +// +// - arg_prfop_Rt: +// prefectch operation encoded in the "Rt" +// +// - arg_sysreg_o0_op1_CRn_CRm_op2: +// system register name encoded in the "o0:op1:CRn:CRm:op2" +// +// - arg_pstatefield_op1_op2__SPSel_05__DAIFSet_36__DAIFClr_37: +// PSTATE field name encoded in the "op1:op2" field +// +// - arg_Vd_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31: +// one register with arrangement specifier encoded in the "size:Q" field which can have the following values: +// 8B when size = 00, Q = 0 +// 16B when size = 00, Q = 1 +// 4H when size = 01, Q = 0 +// 8H when size = 01, Q = 1 +// 2S when size = 10, Q = 0 +// 4S when size = 10, Q = 1 +// 2D when size = 11, Q = 1 +// The encoding size = 11, Q = 0 is reserved. +// +// - arg_Vt_3_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31: +// three registers with arrangement specifier encoded in the "size:Q" field which can have the following values: +// 8B when size = 00, Q = 0 +// 16B when size = 00, Q = 1 +// 4H when size = 01, Q = 0 +// 8H when size = 01, Q = 1 +// 2S when size = 10, Q = 0 +// 4S when size = 10, Q = 1 +// 2D when size = 11, Q = 1 +// The encoding size = 11, Q = 0 is reserved. +// +// - arg_Vt_1_arrangement_H_index__Q_S_size_1: +// one register with arrangement:H and element index encoded in "Q:S:size<1>". 
+ +type instArg uint16 + +const ( + _ instArg = iota + arg_Bt + arg_Cm + arg_Cn + arg_cond_AllowALNV_Normal + arg_conditional + arg_cond_NotAllowALNV_Invert + arg_Da + arg_Dd + arg_Dm + arg_Dn + arg_Dt + arg_Dt2 + arg_Hd + arg_Hn + arg_Ht + arg_IAddSub + arg_immediate_0_127_CRm_op2 + arg_immediate_0_15_CRm + arg_immediate_0_15_nzcv + arg_immediate_0_31_imm5 + arg_immediate_0_31_immr + arg_immediate_0_31_imms + arg_immediate_0_63_b5_b40 + arg_immediate_0_63_immh_immb__UIntimmhimmb64_8 + arg_immediate_0_63_immr + arg_immediate_0_63_imms + arg_immediate_0_65535_imm16 + arg_immediate_0_7_op1 + arg_immediate_0_7_op2 + arg_immediate_0_width_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__UIntimmhimmb8_1__UIntimmhimmb16_2__UIntimmhimmb32_4 + arg_immediate_0_width_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__UIntimmhimmb8_1__UIntimmhimmb16_2__UIntimmhimmb32_4__UIntimmhimmb64_8 + arg_immediate_0_width_m1_immh_immb__UIntimmhimmb8_1__UIntimmhimmb16_2__UIntimmhimmb32_4__UIntimmhimmb64_8 + arg_immediate_0_width_size__8_0__16_1__32_2 + arg_immediate_1_64_immh_immb__128UIntimmhimmb_8 + arg_immediate_1_width_immh_immb__16UIntimmhimmb_1__32UIntimmhimmb_2__64UIntimmhimmb_4 + arg_immediate_1_width_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__16UIntimmhimmb_1__32UIntimmhimmb_2__64UIntimmhimmb_4 + arg_immediate_1_width_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__16UIntimmhimmb_1__32UIntimmhimmb_2__64UIntimmhimmb_4__128UIntimmhimmb_8 + arg_immediate_8x8_a_b_c_d_e_f_g_h + arg_immediate_ASR_SBFM_32M_bitfield_0_31_immr + arg_immediate_ASR_SBFM_64M_bitfield_0_63_immr + arg_immediate_BFI_BFM_32M_bitfield_lsb_32_immr + arg_immediate_BFI_BFM_32M_bitfield_width_32_imms + arg_immediate_BFI_BFM_64M_bitfield_lsb_64_immr + arg_immediate_BFI_BFM_64M_bitfield_width_64_imms + arg_immediate_BFXIL_BFM_32M_bitfield_lsb_32_immr + arg_immediate_BFXIL_BFM_32M_bitfield_width_32_imms + arg_immediate_BFXIL_BFM_64M_bitfield_lsb_64_immr + arg_immediate_BFXIL_BFM_64M_bitfield_width_64_imms + 
arg_immediate_bitmask_32_imms_immr + arg_immediate_bitmask_64_N_imms_immr + arg_immediate_exp_3_pre_4_a_b_c_d_e_f_g_h + arg_immediate_exp_3_pre_4_imm8 + arg_immediate_fbits_min_1_max_0_sub_0_immh_immb__64UIntimmhimmb_4__128UIntimmhimmb_8 + arg_immediate_fbits_min_1_max_0_sub_0_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__64UIntimmhimmb_4__128UIntimmhimmb_8 + arg_immediate_fbits_min_1_max_32_sub_64_scale + arg_immediate_fbits_min_1_max_64_sub_64_scale + arg_immediate_floatzero + arg_immediate_index_Q_imm4__imm4lt20gt_00__imm4_10 + arg_immediate_LSL_UBFM_32M_bitfield_0_31_immr + arg_immediate_LSL_UBFM_64M_bitfield_0_63_immr + arg_immediate_LSR_UBFM_32M_bitfield_0_31_immr + arg_immediate_LSR_UBFM_64M_bitfield_0_63_immr + arg_immediate_MSL__a_b_c_d_e_f_g_h_cmode__8_0__16_1 + arg_immediate_optional_0_15_CRm + arg_immediate_optional_0_65535_imm16 + arg_immediate_OptLSL__a_b_c_d_e_f_g_h_cmode__0_0__8_1 + arg_immediate_OptLSL__a_b_c_d_e_f_g_h_cmode__0_0__8_1__16_2__24_3 + arg_immediate_OptLSL_amount_16_0_16 + arg_immediate_OptLSL_amount_16_0_48 + arg_immediate_OptLSLZero__a_b_c_d_e_f_g_h + arg_immediate_SBFIZ_SBFM_32M_bitfield_lsb_32_immr + arg_immediate_SBFIZ_SBFM_32M_bitfield_width_32_imms + arg_immediate_SBFIZ_SBFM_64M_bitfield_lsb_64_immr + arg_immediate_SBFIZ_SBFM_64M_bitfield_width_64_imms + arg_immediate_SBFX_SBFM_32M_bitfield_lsb_32_immr + arg_immediate_SBFX_SBFM_32M_bitfield_width_32_imms + arg_immediate_SBFX_SBFM_64M_bitfield_lsb_64_immr + arg_immediate_SBFX_SBFM_64M_bitfield_width_64_imms + arg_immediate_shift_32_implicit_imm16_hw + arg_immediate_shift_32_implicit_inverse_imm16_hw + arg_immediate_shift_64_implicit_imm16_hw + arg_immediate_shift_64_implicit_inverse_imm16_hw + arg_immediate_UBFIZ_UBFM_32M_bitfield_lsb_32_immr + arg_immediate_UBFIZ_UBFM_32M_bitfield_width_32_imms + arg_immediate_UBFIZ_UBFM_64M_bitfield_lsb_64_immr + arg_immediate_UBFIZ_UBFM_64M_bitfield_width_64_imms + arg_immediate_UBFX_UBFM_32M_bitfield_lsb_32_immr + 
arg_immediate_UBFX_UBFM_32M_bitfield_width_32_imms + arg_immediate_UBFX_UBFM_64M_bitfield_lsb_64_immr + arg_immediate_UBFX_UBFM_64M_bitfield_width_64_imms + arg_immediate_zero + arg_option_DMB_BO_system_CRm + arg_option_DSB_BO_system_CRm + arg_option_ISB_BI_system_CRm + arg_prfop_Rt + arg_pstatefield_op1_op2__SPSel_05__DAIFSet_36__DAIFClr_37 + arg_Qd + arg_Qn + arg_Qt + arg_Qt2 + arg_Rm_extend__UXTB_0__UXTH_1__UXTW_2__LSL_UXTX_3__SXTB_4__SXTH_5__SXTW_6__SXTX_7__0_4 + arg_Rn_16_5__W_1__W_2__W_4__X_8 + arg_Rt_31_1__W_0__X_1 + arg_Sa + arg_Sd + arg_slabel_imm14_2 + arg_slabel_imm19_2 + arg_slabel_imm26_2 + arg_slabel_immhi_immlo_0 + arg_slabel_immhi_immlo_12 + arg_Sm + arg_Sn + arg_St + arg_St2 + arg_sysop_AT_SYS_CR_system + arg_sysop_DC_SYS_CR_system + arg_sysop_IC_SYS_CR_system + arg_sysop_SYS_CR_system + arg_sysop_TLBI_SYS_CR_system + arg_sysreg_o0_op1_CRn_CRm_op2 + arg_Vd_16_5__B_1__H_2__S_4__D_8 + arg_Vd_19_4__B_1__H_2__S_4 + arg_Vd_19_4__B_1__H_2__S_4__D_8 + arg_Vd_19_4__D_8 + arg_Vd_19_4__S_4__D_8 + arg_Vd_22_1__S_0 + arg_Vd_22_1__S_0__D_1 + arg_Vd_22_1__S_1 + arg_Vd_22_2__B_0__H_1__S_2 + arg_Vd_22_2__B_0__H_1__S_2__D_3 + arg_Vd_22_2__D_3 + arg_Vd_22_2__H_0__S_1__D_2 + arg_Vd_22_2__H_1__S_2 + arg_Vd_22_2__S_1__D_2 + arg_Vd_arrangement_16B + arg_Vd_arrangement_2D + arg_Vd_arrangement_4S + arg_Vd_arrangement_D_index__1 + arg_Vd_arrangement_imm5___B_1__H_2__S_4__D_8_index__imm5__imm5lt41gt_1__imm5lt42gt_2__imm5lt43gt_4__imm5lt4gt_8_1 + arg_Vd_arrangement_imm5_Q___8B_10__16B_11__4H_20__8H_21__2S_40__4S_41__2D_81 + arg_Vd_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__2S_40__4S_41__2D_81 + arg_Vd_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__8B_10__16B_11__4H_20__8H_21__2S_40__4S_41 + arg_Vd_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__8B_10__16B_11__4H_20__8H_21__2S_40__4S_41__2D_81 + arg_Vd_arrangement_immh___SEEAdvancedSIMDmodifiedimmediate_0__8H_1__4S_2__2D_4 + arg_Vd_arrangement_Q___2S_0__4S_1 + 
arg_Vd_arrangement_Q___4H_0__8H_1 + arg_Vd_arrangement_Q___8B_0__16B_1 + arg_Vd_arrangement_Q_sz___2S_00__4S_10__2D_11 + arg_Vd_arrangement_size___4S_1__2D_2 + arg_Vd_arrangement_size___8H_0__1Q_3 + arg_Vd_arrangement_size___8H_0__4S_1__2D_2 + arg_Vd_arrangement_size_Q___4H_00__8H_01__2S_10__4S_11__1D_20__2D_21 + arg_Vd_arrangement_size_Q___4H_10__8H_11__2S_20__4S_21 + arg_Vd_arrangement_size_Q___8B_00__16B_01 + arg_Vd_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11 + arg_Vd_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21 + arg_Vd_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31 + arg_Vd_arrangement_sz___4S_0__2D_1 + arg_Vd_arrangement_sz_Q___2S_00__4S_01 + arg_Vd_arrangement_sz_Q___2S_00__4S_01__2D_11 + arg_Vd_arrangement_sz_Q___2S_10__4S_11 + arg_Vd_arrangement_sz_Q___4H_00__8H_01__2S_10__4S_11 + arg_Vm_22_1__S_0__D_1 + arg_Vm_22_2__B_0__H_1__S_2__D_3 + arg_Vm_22_2__D_3 + arg_Vm_22_2__H_1__S_2 + arg_Vm_arrangement_4S + arg_Vm_arrangement_Q___8B_0__16B_1 + arg_Vm_arrangement_size___8H_0__4S_1__2D_2 + arg_Vm_arrangement_size___H_1__S_2_index__size_L_H_M__HLM_1__HL_2_1 + arg_Vm_arrangement_size_Q___4H_10__8H_11__2S_20__4S_21 + arg_Vm_arrangement_size_Q___8B_00__16B_01 + arg_Vm_arrangement_size_Q___8B_00__16B_01__1D_30__2D_31 + arg_Vm_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21 + arg_Vm_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31 + arg_Vm_arrangement_sz_Q___2S_00__4S_01__2D_11 + arg_Vm_arrangement_sz___S_0__D_1_index__sz_L_H__HL_00__H_10_1 + arg_Vn_19_4__B_1__H_2__S_4__D_8 + arg_Vn_19_4__D_8 + arg_Vn_19_4__H_1__S_2__D_4 + arg_Vn_19_4__S_4__D_8 + arg_Vn_1_arrangement_16B + arg_Vn_22_1__D_1 + arg_Vn_22_1__S_0__D_1 + arg_Vn_22_2__B_0__H_1__S_2__D_3 + arg_Vn_22_2__D_3 + arg_Vn_22_2__H_0__S_1__D_2 + arg_Vn_22_2__H_1__S_2 + arg_Vn_2_arrangement_16B + arg_Vn_3_arrangement_16B + arg_Vn_4_arrangement_16B + arg_Vn_arrangement_16B + arg_Vn_arrangement_4S + arg_Vn_arrangement_D_index__1 + 
arg_Vn_arrangement_D_index__imm5_1 + arg_Vn_arrangement_imm5___B_1__H_2_index__imm5__imm5lt41gt_1__imm5lt42gt_2_1 + arg_Vn_arrangement_imm5___B_1__H_2__S_4__D_8_index__imm5_imm4__imm4lt30gt_1__imm4lt31gt_2__imm4lt32gt_4__imm4lt3gt_8_1 + arg_Vn_arrangement_imm5___B_1__H_2__S_4__D_8_index__imm5__imm5lt41gt_1__imm5lt42gt_2__imm5lt43gt_4__imm5lt4gt_8_1 + arg_Vn_arrangement_imm5___B_1__H_2__S_4_index__imm5__imm5lt41gt_1__imm5lt42gt_2__imm5lt43gt_4_1 + arg_Vn_arrangement_imm5___D_8_index__imm5_1 + arg_Vn_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__2S_40__4S_41__2D_81 + arg_Vn_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__8B_10__16B_11__4H_20__8H_21__2S_40__4S_41 + arg_Vn_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__8B_10__16B_11__4H_20__8H_21__2S_40__4S_41__2D_81 + arg_Vn_arrangement_immh___SEEAdvancedSIMDmodifiedimmediate_0__8H_1__4S_2__2D_4 + arg_Vn_arrangement_Q___8B_0__16B_1 + arg_Vn_arrangement_Q_sz___2S_00__4S_10__2D_11 + arg_Vn_arrangement_Q_sz___4S_10 + arg_Vn_arrangement_S_index__imm5__imm5lt41gt_1__imm5lt42gt_2__imm5lt43gt_4_1 + arg_Vn_arrangement_size___2D_3 + arg_Vn_arrangement_size___8H_0__4S_1__2D_2 + arg_Vn_arrangement_size_Q___4H_10__8H_11__2S_20__4S_21 + arg_Vn_arrangement_size_Q___8B_00__16B_01 + arg_Vn_arrangement_size_Q___8B_00__16B_01__1D_30__2D_31 + arg_Vn_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11 + arg_Vn_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21 + arg_Vn_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31 + arg_Vn_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__4S_21 + arg_Vn_arrangement_sz___2D_1 + arg_Vn_arrangement_sz___2S_0__2D_1 + arg_Vn_arrangement_sz___4S_0__2D_1 + arg_Vn_arrangement_sz_Q___2S_00__4S_01 + arg_Vn_arrangement_sz_Q___2S_00__4S_01__2D_11 + arg_Vn_arrangement_sz_Q___4H_00__8H_01__2S_10__4S_11 + arg_Vt_1_arrangement_B_index__Q_S_size_1 + arg_Vt_1_arrangement_D_index__Q_1 + arg_Vt_1_arrangement_H_index__Q_S_size_1 + 
arg_Vt_1_arrangement_S_index__Q_S_1 + arg_Vt_1_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31 + arg_Vt_2_arrangement_B_index__Q_S_size_1 + arg_Vt_2_arrangement_D_index__Q_1 + arg_Vt_2_arrangement_H_index__Q_S_size_1 + arg_Vt_2_arrangement_S_index__Q_S_1 + arg_Vt_2_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31 + arg_Vt_2_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31 + arg_Vt_3_arrangement_B_index__Q_S_size_1 + arg_Vt_3_arrangement_D_index__Q_1 + arg_Vt_3_arrangement_H_index__Q_S_size_1 + arg_Vt_3_arrangement_S_index__Q_S_1 + arg_Vt_3_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31 + arg_Vt_3_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31 + arg_Vt_4_arrangement_B_index__Q_S_size_1 + arg_Vt_4_arrangement_D_index__Q_1 + arg_Vt_4_arrangement_H_index__Q_S_size_1 + arg_Vt_4_arrangement_S_index__Q_S_1 + arg_Vt_4_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31 + arg_Vt_4_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31 + arg_Wa + arg_Wd + arg_Wds + arg_Wm + arg_Wm_extend__UXTB_0__UXTH_1__LSL_UXTW_2__UXTX_3__SXTB_4__SXTH_5__SXTW_6__SXTX_7__0_4 + arg_Wm_shift__LSL_0__LSR_1__ASR_2__0_31 + arg_Wm_shift__LSL_0__LSR_1__ASR_2__ROR_3__0_31 + arg_Wn + arg_Wns + arg_Ws + arg_Wt + arg_Wt2 + arg_Xa + arg_Xd + arg_Xds + arg_Xm + arg_Xm_shift__LSL_0__LSR_1__ASR_2__0_63 + arg_Xm_shift__LSL_0__LSR_1__ASR_2__ROR_3__0_63 + arg_Xn + arg_Xns + arg_Xns_mem + arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__1_1 + arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__2_1 + arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__3_1 + arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__4_1 + arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__absent_0__0_1 + arg_Xns_mem_offset + arg_Xns_mem_optional_imm12_16_unsigned + arg_Xns_mem_optional_imm12_1_unsigned + 
arg_Xns_mem_optional_imm12_2_unsigned + arg_Xns_mem_optional_imm12_4_unsigned + arg_Xns_mem_optional_imm12_8_unsigned + arg_Xns_mem_optional_imm7_16_signed + arg_Xns_mem_optional_imm7_4_signed + arg_Xns_mem_optional_imm7_8_signed + arg_Xns_mem_optional_imm9_1_signed + arg_Xns_mem_post_fixedimm_1 + arg_Xns_mem_post_fixedimm_12 + arg_Xns_mem_post_fixedimm_16 + arg_Xns_mem_post_fixedimm_2 + arg_Xns_mem_post_fixedimm_24 + arg_Xns_mem_post_fixedimm_3 + arg_Xns_mem_post_fixedimm_32 + arg_Xns_mem_post_fixedimm_4 + arg_Xns_mem_post_fixedimm_6 + arg_Xns_mem_post_fixedimm_8 + arg_Xns_mem_post_imm7_16_signed + arg_Xns_mem_post_imm7_4_signed + arg_Xns_mem_post_imm7_8_signed + arg_Xns_mem_post_imm9_1_signed + arg_Xns_mem_post_Q__16_0__32_1 + arg_Xns_mem_post_Q__24_0__48_1 + arg_Xns_mem_post_Q__32_0__64_1 + arg_Xns_mem_post_Q__8_0__16_1 + arg_Xns_mem_post_size__1_0__2_1__4_2__8_3 + arg_Xns_mem_post_size__2_0__4_1__8_2__16_3 + arg_Xns_mem_post_size__3_0__6_1__12_2__24_3 + arg_Xns_mem_post_size__4_0__8_1__16_2__32_3 + arg_Xns_mem_post_Xm + arg_Xns_mem_wb_imm7_16_signed + arg_Xns_mem_wb_imm7_4_signed + arg_Xns_mem_wb_imm7_8_signed + arg_Xns_mem_wb_imm9_1_signed + arg_Xs + arg_Xt + arg_Xt2 +) diff --git a/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/condition.go b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/condition.go new file mode 100755 index 00000000000..d673857212e --- /dev/null +++ b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/condition.go @@ -0,0 +1,329 @@ +// Generated by ARM internal tool +// DO NOT EDIT + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package arm64asm + +// Following functions are used as the predicator: canDecode of according instruction +// Refer to instFormat inside decode.go for more details + +func at_sys_cr_system_cond(instr uint32) bool { + return sys_op_4((instr>>16)&0x7, 0x7, 0x8, (instr>>5)&0x7) == Sys_AT +} + +func bfi_bfm_32m_bitfield_cond(instr uint32) bool { + return (instr>>5)&0x1f != 0x1f && uint8((instr>>10)&0x3f) < uint8((instr>>16)&0x3f) +} + +func bfi_bfm_64m_bitfield_cond(instr uint32) bool { + return (instr>>5)&0x1f != 0x1f && uint8((instr>>10)&0x3f) < uint8((instr>>16)&0x3f) +} + +func bfxil_bfm_32m_bitfield_cond(instr uint32) bool { + return uint8((instr>>10)&0x3f) >= uint8((instr>>16)&0x3f) +} + +func bfxil_bfm_64m_bitfield_cond(instr uint32) bool { + return uint8((instr>>10)&0x3f) >= uint8((instr>>16)&0x3f) +} + +func cinc_csinc_32_condsel_cond(instr uint32) bool { + return instr&0x1f0000 != 0x1f0000 && instr&0xe000 != 0xe000 && instr&0x3e0 != 0x3e0 && (instr>>5)&0x1f == (instr>>16)&0x1f +} + +func cinc_csinc_64_condsel_cond(instr uint32) bool { + return instr&0x1f0000 != 0x1f0000 && instr&0xe000 != 0xe000 && instr&0x3e0 != 0x3e0 && (instr>>5)&0x1f == (instr>>16)&0x1f +} + +func cinv_csinv_32_condsel_cond(instr uint32) bool { + return instr&0x1f0000 != 0x1f0000 && instr&0xe000 != 0xe000 && instr&0x3e0 != 0x3e0 && (instr>>5)&0x1f == (instr>>16)&0x1f +} + +func cinv_csinv_64_condsel_cond(instr uint32) bool { + return instr&0x1f0000 != 0x1f0000 && instr&0xe000 != 0xe000 && instr&0x3e0 != 0x3e0 && (instr>>5)&0x1f == (instr>>16)&0x1f +} + +func cneg_csneg_32_condsel_cond(instr uint32) bool { + return instr&0xe000 != 0xe000 && (instr>>5)&0x1f == (instr>>16)&0x1f +} + +func cneg_csneg_64_condsel_cond(instr uint32) bool { + return instr&0xe000 != 0xe000 && (instr>>5)&0x1f == (instr>>16)&0x1f +} + +func csinc_general_cond(instr uint32) bool { + return instr&0xe000 != 0xe000 +} +func csinv_general_cond(instr uint32) bool { + return instr&0xe000 != 0xe000 +} +func 
dc_sys_cr_system_cond(instr uint32) bool { + return sys_op_4((instr>>16)&0x7, 0x7, (instr>>8)&0xf, (instr>>5)&0x7) == Sys_DC +} + +func ic_sys_cr_system_cond(instr uint32) bool { + return sys_op_4((instr>>16)&0x7, 0x7, (instr>>8)&0xf, (instr>>5)&0x7) == Sys_IC +} + +func lsl_ubfm_32m_bitfield_cond(instr uint32) bool { + return instr&0xfc00 != 0x7c00 && (instr>>10)&0x3f+1 == (instr>>16)&0x3f +} + +func lsl_ubfm_64m_bitfield_cond(instr uint32) bool { + return instr&0xfc00 != 0xfc00 && (instr>>10)&0x3f+1 == (instr>>16)&0x3f +} + +func mov_orr_32_log_imm_cond(instr uint32) bool { + return !move_wide_preferred_4((instr>>31)&0x1, (instr>>22)&0x1, (instr>>10)&0x3f, (instr>>16)&0x3f) +} + +func mov_orr_64_log_imm_cond(instr uint32) bool { + return !move_wide_preferred_4((instr>>31)&0x1, (instr>>22)&0x1, (instr>>10)&0x3f, (instr>>16)&0x3f) +} + +func mov_movn_32_movewide_cond(instr uint32) bool { + return !(is_zero((instr>>5)&0xffff) && (instr>>21)&0x3 != 0x0) && !is_ones_n16((instr>>5)&0xffff) +} + +func mov_movn_64_movewide_cond(instr uint32) bool { + return !(is_zero((instr>>5)&0xffff) && (instr>>21)&0x3 != 0x0) +} + +func mov_add_32_addsub_imm_cond(instr uint32) bool { + return instr&0x1f == 0x1f || (instr>>5)&0x1f == 0x1f +} + +func mov_add_64_addsub_imm_cond(instr uint32) bool { + return instr&0x1f == 0x1f || (instr>>5)&0x1f == 0x1f +} + +func mov_movz_32_movewide_cond(instr uint32) bool { + return !(is_zero((instr>>5)&0xffff) && (instr>>21)&0x3 != 0x0) +} + +func mov_movz_64_movewide_cond(instr uint32) bool { + return !(is_zero((instr>>5)&0xffff) && (instr>>21)&0x3 != 0x0) +} + +func ror_extr_32_extract_cond(instr uint32) bool { + return (instr>>5)&0x1f == (instr>>16)&0x1f +} + +func ror_extr_64_extract_cond(instr uint32) bool { + return (instr>>5)&0x1f == (instr>>16)&0x1f +} + +func sbfiz_sbfm_32m_bitfield_cond(instr uint32) bool { + return uint8((instr>>10)&0x3f) < uint8((instr>>16)&0x3f) +} + +func sbfiz_sbfm_64m_bitfield_cond(instr uint32) bool { + return 
uint8((instr>>10)&0x3f) < uint8((instr>>16)&0x3f) +} + +func sbfx_sbfm_32m_bitfield_cond(instr uint32) bool { + return bfxpreferred_4((instr>>31)&0x1, extract_bit((instr>>29)&0x3, 1), (instr>>10)&0x3f, (instr>>16)&0x3f) +} + +func sbfx_sbfm_64m_bitfield_cond(instr uint32) bool { + return bfxpreferred_4((instr>>31)&0x1, extract_bit((instr>>29)&0x3, 1), (instr>>10)&0x3f, (instr>>16)&0x3f) +} + +func tlbi_sys_cr_system_cond(instr uint32) bool { + return sys_op_4((instr>>16)&0x7, 0x8, (instr>>8)&0xf, (instr>>5)&0x7) == Sys_TLBI +} + +func ubfiz_ubfm_32m_bitfield_cond(instr uint32) bool { + return uint8((instr>>10)&0x3f) < uint8((instr>>16)&0x3f) +} + +func ubfiz_ubfm_64m_bitfield_cond(instr uint32) bool { + return uint8((instr>>10)&0x3f) < uint8((instr>>16)&0x3f) +} + +func ubfx_ubfm_32m_bitfield_cond(instr uint32) bool { + return bfxpreferred_4((instr>>31)&0x1, extract_bit((instr>>29)&0x3, 1), (instr>>10)&0x3f, (instr>>16)&0x3f) +} + +func ubfx_ubfm_64m_bitfield_cond(instr uint32) bool { + return bfxpreferred_4((instr>>31)&0x1, extract_bit((instr>>29)&0x3, 1), (instr>>10)&0x3f, (instr>>16)&0x3f) +} + +func fcvtzs_asisdshf_c_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func fcvtzs_asimdshf_c_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func fcvtzu_asisdshf_c_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func fcvtzu_asimdshf_c_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func mov_umov_asimdins_w_w_cond(instr uint32) bool { + return ((instr>>16)&0x1f)&0x7 == 0x4 +} + +func mov_umov_asimdins_x_x_cond(instr uint32) bool { + return ((instr>>16)&0x1f)&0xf == 0x8 +} + +func mov_orr_asimdsame_only_cond(instr uint32) bool { + return (instr>>16)&0x1f == (instr>>5)&0x1f +} + +func rshrn_asimdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func scvtf_asisdshf_c_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func scvtf_asimdshf_c_cond(instr uint32) bool { + return instr&0x780000 != 0x0 
+} +func shl_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func shl_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func shrn_asimdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sli_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sli_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqrshrn_asisdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqrshrn_asimdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqrshrun_asisdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqrshrun_asimdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqshl_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqshl_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqshlu_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqshlu_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqshrn_asisdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqshrn_asimdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqshrun_asisdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sqshrun_asimdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sri_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sri_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func srshr_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func srshr_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func srsra_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func srsra_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sshll_asimdshf_l_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} 
+func sshr_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sshr_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func ssra_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func ssra_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func sxtl_sshll_asimdshf_l_cond(instr uint32) bool { + return instr&0x780000 != 0x0 && bit_count((instr>>19)&0xf) == 1 +} + +func ucvtf_asisdshf_c_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func ucvtf_asimdshf_c_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func uqrshrn_asisdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func uqrshrn_asimdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func uqshl_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func uqshl_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func uqshrn_asisdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func uqshrn_asimdshf_n_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func urshr_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func urshr_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func ursra_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func ursra_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func ushll_asimdshf_l_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func ushr_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func ushr_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func usra_asisdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func usra_asimdshf_r_cond(instr uint32) bool { + return instr&0x780000 != 0x0 +} +func uxtl_ushll_asimdshf_l_cond(instr uint32) bool { + return instr&0x780000 != 0x0 && bit_count((instr>>19)&0xf) == 1 +} diff --git 
a/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/condition_util.go b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/condition_util.go new file mode 100755 index 00000000000..62c0c3b0ac9 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/condition_util.go @@ -0,0 +1,81 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package arm64asm + +func extract_bit(value, bit uint32) uint32 { + return (value >> bit) & 1 +} + +func bfxpreferred_4(sf, opc1, imms, immr uint32) bool { + if imms < immr { + return false + } + if (imms>>5 == sf) && (imms&0x1f == 0x1f) { + return false + } + if immr == 0 { + if sf == 0 && (imms == 7 || imms == 15) { + return false + } + if sf == 1 && opc1 == 0 && (imms == 7 || + imms == 15 || imms == 31) { + return false + } + } + return true +} + +func move_wide_preferred_4(sf, N, imms, immr uint32) bool { + if sf == 1 && N != 1 { + return false + } + if sf == 0 && !(N == 0 && ((imms>>5)&1) == 0) { + return false + } + if imms < 16 { + return (-immr)%16 <= (15 - imms) + } + width := uint32(32) + if sf == 1 { + width = uint32(64) + } + if imms >= (width - 15) { + return (immr % 16) <= (imms - (width - 15)) + } + return false +} + +type Sys uint8 + +const ( + Sys_AT Sys = iota + Sys_DC + Sys_IC + Sys_TLBI + Sys_SYS +) + +func sys_op_4(op1, crn, crm, op2 uint32) Sys { + // TODO: system instruction + return Sys_SYS +} + +func is_zero(x uint32) bool { + return x == 0 +} + +func is_ones_n16(x uint32) bool { + return x == 0xffff +} + +func bit_count(x uint32) uint8 { + var count uint8 + for count = 0; x > 0; x >>= 1 { + if (x & 1) == 1 { + count++ + } + } + return count +} diff --git a/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/decode.go b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/decode.go new file mode 100644 index 00000000000..5e29c47696e --- /dev/null +++ 
b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/decode.go @@ -0,0 +1,2768 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package arm64asm + +import ( + "encoding/binary" + "fmt" +) + +type instArgs [5]instArg + +// An instFormat describes the format of an instruction encoding. +// An instruction with 32-bit value x matches the format if x&mask == value +// and the predicator: canDecode(x) return true. +type instFormat struct { + mask uint32 + value uint32 + op Op + // args describe how to decode the instruction arguments. + // args is stored as a fixed-size array. + // if there are fewer than len(args) arguments, args[i] == 0 marks + // the end of the argument list. + args instArgs + canDecode func(instr uint32) bool +} + +var ( + errShort = fmt.Errorf("truncated instruction") + errUnknown = fmt.Errorf("unknown instruction") +) + +var decoderCover []bool + +func init() { + decoderCover = make([]bool, len(instFormats)) +} + +// Decode decodes the 4 bytes in src as a single instruction. +func Decode(src []byte) (inst Inst, err error) { + if len(src) < 4 { + return Inst{}, errShort + } + + x := binary.LittleEndian.Uint32(src) + +Search: + for i := range instFormats { + f := &instFormats[i] + if x&f.mask != f.value { + continue + } + if f.canDecode != nil && !f.canDecode(x) { + continue + } + // Decode args. + var args Args + for j, aop := range f.args { + if aop == 0 { + break + } + arg := decodeArg(aop, x) + if arg == nil { // Cannot decode argument + continue Search + } + args[j] = arg + } + decoderCover[i] = true + inst = Inst{ + Op: f.op, + Args: args, + Enc: x, + } + return inst, nil + } + return Inst{}, errUnknown +} + +// decodeArg decodes the arg described by aop from the instruction bits x. +// It returns nil if x cannot be decoded according to aop. 
+func decodeArg(aop instArg, x uint32) Arg { + switch aop { + default: + return nil + + case arg_Da: + return D0 + Reg((x>>10)&(1<<5-1)) + + case arg_Dd: + return D0 + Reg(x&(1<<5-1)) + + case arg_Dm: + return D0 + Reg((x>>16)&(1<<5-1)) + + case arg_Dn: + return D0 + Reg((x>>5)&(1<<5-1)) + + case arg_Hd: + return H0 + Reg(x&(1<<5-1)) + + case arg_Hn: + return H0 + Reg((x>>5)&(1<<5-1)) + + case arg_IAddSub: + imm12 := (x >> 10) & (1<<12 - 1) + shift := (x >> 22) & (1<<2 - 1) + if shift > 1 { + return nil + } + shift = shift * 12 + return ImmShift{uint16(imm12), uint8(shift)} + + case arg_Sa: + return S0 + Reg((x>>10)&(1<<5-1)) + + case arg_Sd: + return S0 + Reg(x&(1<<5-1)) + + case arg_Sm: + return S0 + Reg((x>>16)&(1<<5-1)) + + case arg_Sn: + return S0 + Reg((x>>5)&(1<<5-1)) + + case arg_Wa: + return W0 + Reg((x>>10)&(1<<5-1)) + + case arg_Wd: + return W0 + Reg(x&(1<<5-1)) + + case arg_Wds: + return RegSP(W0) + RegSP(x&(1<<5-1)) + + case arg_Wm: + return W0 + Reg((x>>16)&(1<<5-1)) + + case arg_Rm_extend__UXTB_0__UXTH_1__UXTW_2__LSL_UXTX_3__SXTB_4__SXTH_5__SXTW_6__SXTX_7__0_4: + return handle_ExtendedRegister(x, true) + + case arg_Wm_extend__UXTB_0__UXTH_1__LSL_UXTW_2__UXTX_3__SXTB_4__SXTH_5__SXTW_6__SXTX_7__0_4: + return handle_ExtendedRegister(x, false) + + case arg_Wn: + return W0 + Reg((x>>5)&(1<<5-1)) + + case arg_Wns: + return RegSP(W0) + RegSP((x>>5)&(1<<5-1)) + + case arg_Xa: + return X0 + Reg((x>>10)&(1<<5-1)) + + case arg_Xd: + return X0 + Reg(x&(1<<5-1)) + + case arg_Xds: + return RegSP(X0) + RegSP(x&(1<<5-1)) + + case arg_Xm: + return X0 + Reg((x>>16)&(1<<5-1)) + + case arg_Wm_shift__LSL_0__LSR_1__ASR_2__0_31: + return handle_ImmediateShiftedRegister(x, 31, true, false) + + case arg_Wm_shift__LSL_0__LSR_1__ASR_2__ROR_3__0_31: + return handle_ImmediateShiftedRegister(x, 31, true, true) + + case arg_Xm_shift__LSL_0__LSR_1__ASR_2__0_63: + return handle_ImmediateShiftedRegister(x, 63, false, false) + + case arg_Xm_shift__LSL_0__LSR_1__ASR_2__ROR_3__0_63: + 
return handle_ImmediateShiftedRegister(x, 63, false, true) + + case arg_Xn: + return X0 + Reg((x>>5)&(1<<5-1)) + + case arg_Xns: + return RegSP(X0) + RegSP((x>>5)&(1<<5-1)) + + case arg_slabel_imm14_2: + imm14 := ((x >> 5) & (1<<14 - 1)) + return PCRel(((int64(imm14) << 2) << 48) >> 48) + + case arg_slabel_imm19_2: + imm19 := ((x >> 5) & (1<<19 - 1)) + return PCRel(((int64(imm19) << 2) << 43) >> 43) + + case arg_slabel_imm26_2: + imm26 := (x & (1<<26 - 1)) + return PCRel(((int64(imm26) << 2) << 36) >> 36) + + case arg_slabel_immhi_immlo_0: + immhi := ((x >> 5) & (1<<19 - 1)) + immlo := ((x >> 29) & (1<<2 - 1)) + immhilo := (immhi)<<2 | immlo + return PCRel((int64(immhilo) << 43) >> 43) + + case arg_slabel_immhi_immlo_12: + immhi := ((x >> 5) & (1<<19 - 1)) + immlo := ((x >> 29) & (1<<2 - 1)) + immhilo := (immhi)<<2 | immlo + return PCRel(((int64(immhilo) << 12) << 31) >> 31) + + case arg_Xns_mem: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrOffset, 0} + + case arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__1_1: + return handle_MemExtend(x, 1, false) + + case arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__2_1: + return handle_MemExtend(x, 2, false) + + case arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__3_1: + return handle_MemExtend(x, 3, false) + + case arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__absent_0__0_1: + return handle_MemExtend(x, 1, true) + + case arg_Xns_mem_optional_imm12_1_unsigned: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm12 := (x >> 10) & (1<<12 - 1) + return MemImmediate{Rn, AddrOffset, int32(imm12)} + + case arg_Xns_mem_optional_imm12_2_unsigned: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm12 := (x >> 10) & (1<<12 - 1) + return MemImmediate{Rn, AddrOffset, int32(imm12 << 1)} + + case arg_Xns_mem_optional_imm12_4_unsigned: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm12 := (x >> 10) & (1<<12 - 1) + return MemImmediate{Rn, AddrOffset, int32(imm12 << 2)} + + case 
arg_Xns_mem_optional_imm12_8_unsigned: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm12 := (x >> 10) & (1<<12 - 1) + return MemImmediate{Rn, AddrOffset, int32(imm12 << 3)} + + case arg_Xns_mem_optional_imm7_4_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, AddrOffset, ((int32(imm7 << 2)) << 23) >> 23} + + case arg_Xns_mem_optional_imm7_8_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, AddrOffset, ((int32(imm7 << 3)) << 22) >> 22} + + case arg_Xns_mem_optional_imm9_1_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm9 := (x >> 12) & (1<<9 - 1) + return MemImmediate{Rn, AddrOffset, (int32(imm9) << 23) >> 23} + + case arg_Xns_mem_post_imm7_4_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, AddrPostIndex, ((int32(imm7 << 2)) << 23) >> 23} + + case arg_Xns_mem_post_imm7_8_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, AddrPostIndex, ((int32(imm7 << 3)) << 22) >> 22} + + case arg_Xns_mem_post_imm9_1_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm9 := (x >> 12) & (1<<9 - 1) + return MemImmediate{Rn, AddrPostIndex, ((int32(imm9)) << 23) >> 23} + + case arg_Xns_mem_wb_imm7_4_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, AddrPreIndex, ((int32(imm7 << 2)) << 23) >> 23} + + case arg_Xns_mem_wb_imm7_8_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, AddrPreIndex, ((int32(imm7 << 3)) << 22) >> 22} + + case arg_Xns_mem_wb_imm9_1_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm9 := (x >> 12) & (1<<9 - 1) + return MemImmediate{Rn, AddrPreIndex, ((int32(imm9)) << 23) >> 23} + + case arg_Ws: + return W0 + Reg((x>>16)&(1<<5-1)) + + case arg_Wt: + return W0 + Reg(x&(1<<5-1)) + + case arg_Wt2: + return W0 + 
Reg((x>>10)&(1<<5-1)) + + case arg_Xs: + return X0 + Reg((x>>16)&(1<<5-1)) + + case arg_Xt: + return X0 + Reg(x&(1<<5-1)) + + case arg_Xt2: + return X0 + Reg((x>>10)&(1<<5-1)) + + case arg_immediate_0_127_CRm_op2: + crm_op2 := (x >> 5) & (1<<7 - 1) + return Imm_hint(crm_op2) + + case arg_immediate_0_15_CRm: + crm := (x >> 8) & (1<<4 - 1) + return Imm{crm, false} + + case arg_immediate_0_15_nzcv: + nzcv := x & (1<<4 - 1) + return Imm{nzcv, false} + + case arg_immediate_0_31_imm5: + imm5 := (x >> 16) & (1<<5 - 1) + return Imm{imm5, false} + + case arg_immediate_0_31_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{immr, false} + + case arg_immediate_0_31_imms: + imms := (x >> 10) & (1<<6 - 1) + if imms > 31 { + return nil + } + return Imm{imms, true} + + case arg_immediate_0_63_b5_b40: + b5 := (x >> 31) & 1 + b40 := (x >> 19) & (1<<5 - 1) + return Imm{(b5 << 5) | b40, true} + + case arg_immediate_0_63_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{immr, false} + + case arg_immediate_0_63_imms: + imms := (x >> 10) & (1<<6 - 1) + return Imm{imms, true} + + case arg_immediate_0_65535_imm16: + imm16 := (x >> 5) & (1<<16 - 1) + return Imm{imm16, false} + + case arg_immediate_0_7_op1: + op1 := (x >> 16) & (1<<3 - 1) + return Imm{op1, true} + + case arg_immediate_0_7_op2: + op2 := (x >> 5) & (1<<3 - 1) + return Imm{op2, true} + + case arg_immediate_ASR_SBFM_32M_bitfield_0_31_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{immr, true} + + case arg_immediate_ASR_SBFM_64M_bitfield_0_63_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{immr, true} + + case arg_immediate_BFI_BFM_32M_bitfield_lsb_32_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{32 - immr, true} + + case arg_immediate_BFI_BFM_32M_bitfield_width_32_imms: + imms := (x >> 10) & (1<<6 - 1) + if imms > 31 { + return nil + } + return Imm{imms + 1, true} + + case 
arg_immediate_BFI_BFM_64M_bitfield_lsb_64_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{64 - immr, true} + + case arg_immediate_BFI_BFM_64M_bitfield_width_64_imms: + imms := (x >> 10) & (1<<6 - 1) + return Imm{imms + 1, true} + + case arg_immediate_BFXIL_BFM_32M_bitfield_lsb_32_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{immr, true} + + case arg_immediate_BFXIL_BFM_32M_bitfield_width_32_imms: + immr := (x >> 16) & (1<<6 - 1) + imms := (x >> 10) & (1<<6 - 1) + width := imms - immr + 1 + if width < 1 || width > 32-immr { + return nil + } + return Imm{width, true} + + case arg_immediate_BFXIL_BFM_64M_bitfield_lsb_64_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{immr, true} + + case arg_immediate_BFXIL_BFM_64M_bitfield_width_64_imms: + immr := (x >> 16) & (1<<6 - 1) + imms := (x >> 10) & (1<<6 - 1) + width := imms - immr + 1 + if width < 1 || width > 64-immr { + return nil + } + return Imm{width, true} + + case arg_immediate_bitmask_32_imms_immr: + return handle_bitmasks(x, 32) + + case arg_immediate_bitmask_64_N_imms_immr: + return handle_bitmasks(x, 64) + + case arg_immediate_LSL_UBFM_32M_bitfield_0_31_immr: + imms := (x >> 10) & (1<<6 - 1) + shift := 31 - imms + if shift > 31 { + return nil + } + return Imm{shift, true} + + case arg_immediate_LSL_UBFM_64M_bitfield_0_63_immr: + imms := (x >> 10) & (1<<6 - 1) + shift := 63 - imms + if shift > 63 { + return nil + } + return Imm{shift, true} + + case arg_immediate_LSR_UBFM_32M_bitfield_0_31_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{immr, true} + + case arg_immediate_LSR_UBFM_64M_bitfield_0_63_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{immr, true} + + case arg_immediate_optional_0_15_CRm: + crm := (x >> 8) & (1<<4 - 1) + return Imm_clrex(crm) + + case arg_immediate_optional_0_65535_imm16: + imm16 := (x >> 5) & (1<<16 - 1) + return Imm_dcps(imm16) + + case arg_immediate_OptLSL_amount_16_0_16: + imm16 := (x >> 5) & 
(1<<16 - 1) + hw := (x >> 21) & (1<<2 - 1) + shift := hw * 16 + if shift > 16 { + return nil + } + return ImmShift{uint16(imm16), uint8(shift)} + + case arg_immediate_OptLSL_amount_16_0_48: + imm16 := (x >> 5) & (1<<16 - 1) + hw := (x >> 21) & (1<<2 - 1) + shift := hw * 16 + return ImmShift{uint16(imm16), uint8(shift)} + + case arg_immediate_SBFIZ_SBFM_32M_bitfield_lsb_32_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{32 - immr, true} + + case arg_immediate_SBFIZ_SBFM_32M_bitfield_width_32_imms: + imms := (x >> 10) & (1<<6 - 1) + if imms > 31 { + return nil + } + return Imm{imms + 1, true} + + case arg_immediate_SBFIZ_SBFM_64M_bitfield_lsb_64_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{64 - immr, true} + + case arg_immediate_SBFIZ_SBFM_64M_bitfield_width_64_imms: + imms := (x >> 10) & (1<<6 - 1) + return Imm{imms + 1, true} + + case arg_immediate_SBFX_SBFM_32M_bitfield_lsb_32_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{immr, true} + + case arg_immediate_SBFX_SBFM_32M_bitfield_width_32_imms: + immr := (x >> 16) & (1<<6 - 1) + imms := (x >> 10) & (1<<6 - 1) + width := imms - immr + 1 + if width < 1 || width > 32-immr { + return nil + } + return Imm{width, true} + + case arg_immediate_SBFX_SBFM_64M_bitfield_lsb_64_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{immr, true} + + case arg_immediate_SBFX_SBFM_64M_bitfield_width_64_imms: + immr := (x >> 16) & (1<<6 - 1) + imms := (x >> 10) & (1<<6 - 1) + width := imms - immr + 1 + if width < 1 || width > 64-immr { + return nil + } + return Imm{width, true} + + case arg_immediate_shift_32_implicit_imm16_hw: + imm16 := (x >> 5) & (1<<16 - 1) + hw := (x >> 21) & (1<<2 - 1) + shift := hw * 16 + if shift > 16 { + return nil + } + result := uint32(imm16) << shift + return Imm{result, false} + + case arg_immediate_shift_32_implicit_inverse_imm16_hw: + imm16 := (x >> 5) & (1<<16 - 1) + hw := (x >> 21) & (1<<2 - 1) + shift := hw * 16 + if 
shift > 16 { + return nil + } + result := uint32(imm16) << shift + return Imm{^result, false} + + case arg_immediate_shift_64_implicit_imm16_hw: + imm16 := (x >> 5) & (1<<16 - 1) + hw := (x >> 21) & (1<<2 - 1) + shift := hw * 16 + result := uint64(imm16) << shift + return Imm64{result, false} + + case arg_immediate_shift_64_implicit_inverse_imm16_hw: + imm16 := (x >> 5) & (1<<16 - 1) + hw := (x >> 21) & (1<<2 - 1) + shift := hw * 16 + result := uint64(imm16) << shift + return Imm64{^result, false} + + case arg_immediate_UBFIZ_UBFM_32M_bitfield_lsb_32_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{32 - immr, true} + + case arg_immediate_UBFIZ_UBFM_32M_bitfield_width_32_imms: + imms := (x >> 10) & (1<<6 - 1) + if imms > 31 { + return nil + } + return Imm{imms + 1, true} + + case arg_immediate_UBFIZ_UBFM_64M_bitfield_lsb_64_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{64 - immr, true} + + case arg_immediate_UBFIZ_UBFM_64M_bitfield_width_64_imms: + imms := (x >> 10) & (1<<6 - 1) + return Imm{imms + 1, true} + + case arg_immediate_UBFX_UBFM_32M_bitfield_lsb_32_immr: + immr := (x >> 16) & (1<<6 - 1) + if immr > 31 { + return nil + } + return Imm{immr, true} + + case arg_immediate_UBFX_UBFM_32M_bitfield_width_32_imms: + immr := (x >> 16) & (1<<6 - 1) + imms := (x >> 10) & (1<<6 - 1) + width := imms - immr + 1 + if width < 1 || width > 32-immr { + return nil + } + return Imm{width, true} + + case arg_immediate_UBFX_UBFM_64M_bitfield_lsb_64_immr: + immr := (x >> 16) & (1<<6 - 1) + return Imm{immr, true} + + case arg_immediate_UBFX_UBFM_64M_bitfield_width_64_imms: + immr := (x >> 16) & (1<<6 - 1) + imms := (x >> 10) & (1<<6 - 1) + width := imms - immr + 1 + if width < 1 || width > 64-immr { + return nil + } + return Imm{width, true} + + case arg_Rt_31_1__W_0__X_1: + b5 := (x >> 31) & 1 + Rt := x & (1<<5 - 1) + if b5 == 0 { + return W0 + Reg(Rt) + } else { + return X0 + Reg(Rt) + } + + case arg_cond_AllowALNV_Normal: + cond := (x 
>> 12) & (1<<4 - 1) + return Cond{uint8(cond), false} + + case arg_conditional: + cond := x & (1<<4 - 1) + return Cond{uint8(cond), false} + + case arg_cond_NotAllowALNV_Invert: + cond := (x >> 12) & (1<<4 - 1) + if (cond >> 1) == 7 { + return nil + } + return Cond{uint8(cond), true} + + case arg_Cm: + CRm := (x >> 8) & (1<<4 - 1) + return Imm_c(CRm) + + case arg_Cn: + CRn := (x >> 12) & (1<<4 - 1) + return Imm_c(CRn) + + case arg_option_DMB_BO_system_CRm: + CRm := (x >> 8) & (1<<4 - 1) + return Imm_option(CRm) + + case arg_option_DSB_BO_system_CRm: + CRm := (x >> 8) & (1<<4 - 1) + return Imm_option(CRm) + + case arg_option_ISB_BI_system_CRm: + CRm := (x >> 8) & (1<<4 - 1) + if CRm == 15 { + return Imm_option(CRm) + } + return Imm{CRm, false} + + case arg_prfop_Rt: + Rt := x & (1<<5 - 1) + return Imm_prfop(Rt) + + case arg_pstatefield_op1_op2__SPSel_05__DAIFSet_36__DAIFClr_37: + op1 := (x >> 16) & (1<<3 - 1) + op2 := (x >> 5) & (1<<3 - 1) + if (op1 == 0) && (op2 == 5) { + return SPSel + } else if (op1 == 3) && (op2 == 6) { + return DAIFSet + } else if (op1 == 3) && (op2 == 7) { + return DAIFClr + } + return nil + + case arg_sysreg_o0_op1_CRn_CRm_op2: + op0 := (x >> 19) & (1<<2 - 1) + op1 := (x >> 16) & (1<<3 - 1) + CRn := (x >> 12) & (1<<4 - 1) + CRm := (x >> 8) & (1<<4 - 1) + op2 := (x >> 5) & (1<<3 - 1) + return Systemreg{uint8(op0), uint8(op1), uint8(CRn), uint8(CRm), uint8(op2)} + + case arg_sysop_AT_SYS_CR_system: + //TODO: system instruction + return nil + + case arg_sysop_DC_SYS_CR_system: + //TODO: system instruction + return nil + + case arg_sysop_SYS_CR_system: + //TODO: system instruction + return nil + + case arg_sysop_TLBI_SYS_CR_system: + //TODO: system instruction + return nil + + case arg_Bt: + return B0 + Reg(x&(1<<5-1)) + + case arg_Dt: + return D0 + Reg(x&(1<<5-1)) + + case arg_Dt2: + return D0 + Reg((x>>10)&(1<<5-1)) + + case arg_Ht: + return H0 + Reg(x&(1<<5-1)) + + case arg_immediate_0_63_immh_immb__UIntimmhimmb64_8: + immh := (x >> 19) & 
(1<<4 - 1) + if (immh & 8) == 0 { + return nil + } + immb := (x >> 16) & (1<<3 - 1) + return Imm{(immh << 3) + immb - 64, true} + + case arg_immediate_0_width_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__UIntimmhimmb8_1__UIntimmhimmb16_2__UIntimmhimmb32_4: + immh := (x >> 19) & (1<<4 - 1) + immb := (x >> 16) & (1<<3 - 1) + if immh == 1 { + return Imm{(immh << 3) + immb - 8, true} + } else if (immh >> 1) == 1 { + return Imm{(immh << 3) + immb - 16, true} + } else if (immh >> 2) == 1 { + return Imm{(immh << 3) + immb - 32, true} + } else { + return nil + } + + case arg_immediate_0_width_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__UIntimmhimmb8_1__UIntimmhimmb16_2__UIntimmhimmb32_4__UIntimmhimmb64_8: + fallthrough + + case arg_immediate_0_width_m1_immh_immb__UIntimmhimmb8_1__UIntimmhimmb16_2__UIntimmhimmb32_4__UIntimmhimmb64_8: + immh := (x >> 19) & (1<<4 - 1) + immb := (x >> 16) & (1<<3 - 1) + if immh == 1 { + return Imm{(immh << 3) + immb - 8, true} + } else if (immh >> 1) == 1 { + return Imm{(immh << 3) + immb - 16, true} + } else if (immh >> 2) == 1 { + return Imm{(immh << 3) + immb - 32, true} + } else if (immh >> 3) == 1 { + return Imm{(immh << 3) + immb - 64, true} + } else { + return nil + } + + case arg_immediate_0_width_size__8_0__16_1__32_2: + size := (x >> 22) & (1<<2 - 1) + switch size { + case 0: + return Imm{8, true} + case 1: + return Imm{16, true} + case 2: + return Imm{32, true} + default: + return nil + } + + case arg_immediate_1_64_immh_immb__128UIntimmhimmb_8: + immh := (x >> 19) & (1<<4 - 1) + if (immh & 8) == 0 { + return nil + } + immb := (x >> 16) & (1<<3 - 1) + return Imm{128 - ((immh << 3) + immb), true} + + case arg_immediate_1_width_immh_immb__16UIntimmhimmb_1__32UIntimmhimmb_2__64UIntimmhimmb_4: + fallthrough + + case arg_immediate_1_width_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__16UIntimmhimmb_1__32UIntimmhimmb_2__64UIntimmhimmb_4: + immh := (x >> 19) & (1<<4 - 1) + immb := (x >> 16) & (1<<3 - 1) + if immh == 1 { + return 
Imm{16 - ((immh << 3) + immb), true} + } else if (immh >> 1) == 1 { + return Imm{32 - ((immh << 3) + immb), true} + } else if (immh >> 2) == 1 { + return Imm{64 - ((immh << 3) + immb), true} + } else { + return nil + } + + case arg_immediate_1_width_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__16UIntimmhimmb_1__32UIntimmhimmb_2__64UIntimmhimmb_4__128UIntimmhimmb_8: + immh := (x >> 19) & (1<<4 - 1) + immb := (x >> 16) & (1<<3 - 1) + if immh == 1 { + return Imm{16 - ((immh << 3) + immb), true} + } else if (immh >> 1) == 1 { + return Imm{32 - ((immh << 3) + immb), true} + } else if (immh >> 2) == 1 { + return Imm{64 - ((immh << 3) + immb), true} + } else if (immh >> 3) == 1 { + return Imm{128 - ((immh << 3) + immb), true} + } else { + return nil + } + + case arg_immediate_8x8_a_b_c_d_e_f_g_h: + var imm uint64 + if x&(1<<5) != 0 { + imm = (1 << 8) - 1 + } else { + imm = 0 + } + if x&(1<<6) != 0 { + imm += ((1 << 8) - 1) << 8 + } + if x&(1<<7) != 0 { + imm += ((1 << 8) - 1) << 16 + } + if x&(1<<8) != 0 { + imm += ((1 << 8) - 1) << 24 + } + if x&(1<<9) != 0 { + imm += ((1 << 8) - 1) << 32 + } + if x&(1<<16) != 0 { + imm += ((1 << 8) - 1) << 40 + } + if x&(1<<17) != 0 { + imm += ((1 << 8) - 1) << 48 + } + if x&(1<<18) != 0 { + imm += ((1 << 8) - 1) << 56 + } + return Imm64{imm, false} + + case arg_immediate_exp_3_pre_4_a_b_c_d_e_f_g_h: + pre := (x >> 5) & (1<<4 - 1) + exp := 1 - ((x >> 17) & 1) + exp = (exp << 2) + (((x >> 16) & 1) << 1) + ((x >> 9) & 1) + s := ((x >> 18) & 1) + return Imm_fp{uint8(s), int8(exp) - 3, uint8(pre)} + + case arg_immediate_exp_3_pre_4_imm8: + pre := (x >> 13) & (1<<4 - 1) + exp := 1 - ((x >> 19) & 1) + exp = (exp << 2) + ((x >> 17) & (1<<2 - 1)) + s := ((x >> 20) & 1) + return Imm_fp{uint8(s), int8(exp) - 3, uint8(pre)} + + case arg_immediate_fbits_min_1_max_0_sub_0_immh_immb__64UIntimmhimmb_4__128UIntimmhimmb_8: + fallthrough + + case 
arg_immediate_fbits_min_1_max_0_sub_0_immh_immb__SEEAdvancedSIMDmodifiedimmediate_0__64UIntimmhimmb_4__128UIntimmhimmb_8: + immh := (x >> 19) & (1<<4 - 1) + immb := (x >> 16) & (1<<3 - 1) + if (immh >> 2) == 1 { + return Imm{64 - ((immh << 3) + immb), true} + } else if (immh >> 3) == 1 { + return Imm{128 - ((immh << 3) + immb), true} + } else { + return nil + } + + case arg_immediate_fbits_min_1_max_32_sub_64_scale: + scale := (x >> 10) & (1<<6 - 1) + fbits := 64 - scale + if fbits > 32 { + return nil + } + return Imm{fbits, true} + + case arg_immediate_fbits_min_1_max_64_sub_64_scale: + scale := (x >> 10) & (1<<6 - 1) + fbits := 64 - scale + return Imm{fbits, true} + + case arg_immediate_floatzero: + return Imm{0, true} + + case arg_immediate_index_Q_imm4__imm4lt20gt_00__imm4_10: + Q := (x >> 30) & 1 + imm4 := (x >> 11) & (1<<4 - 1) + if Q == 1 || (imm4>>3) == 0 { + return Imm{imm4, true} + } else { + return nil + } + + case arg_immediate_MSL__a_b_c_d_e_f_g_h_cmode__8_0__16_1: + var shift uint8 + imm8 := (x >> 16) & (1<<3 - 1) + imm8 = (imm8 << 5) | ((x >> 5) & (1<<5 - 1)) + if (x>>12)&1 == 0 { + shift = 8 + 128 + } else { + shift = 16 + 128 + } + return ImmShift{uint16(imm8), shift} + + case arg_immediate_OptLSL__a_b_c_d_e_f_g_h_cmode__0_0__8_1: + imm8 := (x >> 16) & (1<<3 - 1) + imm8 = (imm8 << 5) | ((x >> 5) & (1<<5 - 1)) + cmode1 := (x >> 13) & 1 + shift := 8 * cmode1 + return ImmShift{uint16(imm8), uint8(shift)} + + case arg_immediate_OptLSL__a_b_c_d_e_f_g_h_cmode__0_0__8_1__16_2__24_3: + imm8 := (x >> 16) & (1<<3 - 1) + imm8 = (imm8 << 5) | ((x >> 5) & (1<<5 - 1)) + cmode1 := (x >> 13) & (1<<2 - 1) + shift := 8 * cmode1 + return ImmShift{uint16(imm8), uint8(shift)} + + case arg_immediate_OptLSLZero__a_b_c_d_e_f_g_h: + imm8 := (x >> 16) & (1<<3 - 1) + imm8 = (imm8 << 5) | ((x >> 5) & (1<<5 - 1)) + return ImmShift{uint16(imm8), 0} + + case arg_immediate_zero: + return Imm{0, true} + + case arg_Qd: + return Q0 + Reg(x&(1<<5-1)) + + case arg_Qn: + return Q0 + 
Reg((x>>5)&(1<<5-1)) + + case arg_Qt: + return Q0 + Reg(x&(1<<5-1)) + + case arg_Qt2: + return Q0 + Reg((x>>10)&(1<<5-1)) + + case arg_Rn_16_5__W_1__W_2__W_4__X_8: + imm5 := (x >> 16) & (1<<5 - 1) + if ((imm5 & 1) == 1) || ((imm5 & 2) == 2) || ((imm5 & 4) == 4) { + return W0 + Reg((x>>5)&(1<<5-1)) + } else if (imm5 & 8) == 8 { + return X0 + Reg((x>>5)&(1<<5-1)) + } else { + return nil + } + + case arg_St: + return S0 + Reg(x&(1<<5-1)) + + case arg_St2: + return S0 + Reg((x>>10)&(1<<5-1)) + + case arg_Vd_16_5__B_1__H_2__S_4__D_8: + imm5 := (x >> 16) & (1<<5 - 1) + Rd := x & (1<<5 - 1) + if imm5&1 == 1 { + return B0 + Reg(Rd) + } else if imm5&2 == 2 { + return H0 + Reg(Rd) + } else if imm5&4 == 4 { + return S0 + Reg(Rd) + } else if imm5&8 == 8 { + return D0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_19_4__B_1__H_2__S_4: + immh := (x >> 19) & (1<<4 - 1) + Rd := x & (1<<5 - 1) + if immh == 1 { + return B0 + Reg(Rd) + } else if immh>>1 == 1 { + return H0 + Reg(Rd) + } else if immh>>2 == 1 { + return S0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_19_4__B_1__H_2__S_4__D_8: + immh := (x >> 19) & (1<<4 - 1) + Rd := x & (1<<5 - 1) + if immh == 1 { + return B0 + Reg(Rd) + } else if immh>>1 == 1 { + return H0 + Reg(Rd) + } else if immh>>2 == 1 { + return S0 + Reg(Rd) + } else if immh>>3 == 1 { + return D0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_19_4__D_8: + immh := (x >> 19) & (1<<4 - 1) + Rd := x & (1<<5 - 1) + if immh>>3 == 1 { + return D0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_19_4__S_4__D_8: + immh := (x >> 19) & (1<<4 - 1) + Rd := x & (1<<5 - 1) + if immh>>2 == 1 { + return S0 + Reg(Rd) + } else if immh>>3 == 1 { + return D0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_22_1__S_0: + sz := (x >> 22) & 1 + Rd := x & (1<<5 - 1) + if sz == 0 { + return S0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_22_1__S_0__D_1: + sz := (x >> 22) & 1 + Rd := x & (1<<5 - 1) + if sz == 0 { + return S0 + Reg(Rd) + } else { + 
return D0 + Reg(Rd) + } + + case arg_Vd_22_1__S_1: + sz := (x >> 22) & 1 + Rd := x & (1<<5 - 1) + if sz == 1 { + return S0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_22_2__B_0__H_1__S_2: + size := (x >> 22) & (1<<2 - 1) + Rd := x & (1<<5 - 1) + if size == 0 { + return B0 + Reg(Rd) + } else if size == 1 { + return H0 + Reg(Rd) + } else if size == 2 { + return S0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_22_2__B_0__H_1__S_2__D_3: + size := (x >> 22) & (1<<2 - 1) + Rd := x & (1<<5 - 1) + if size == 0 { + return B0 + Reg(Rd) + } else if size == 1 { + return H0 + Reg(Rd) + } else if size == 2 { + return S0 + Reg(Rd) + } else { + return D0 + Reg(Rd) + } + + case arg_Vd_22_2__D_3: + size := (x >> 22) & (1<<2 - 1) + Rd := x & (1<<5 - 1) + if size == 3 { + return D0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_22_2__H_0__S_1__D_2: + size := (x >> 22) & (1<<2 - 1) + Rd := x & (1<<5 - 1) + if size == 0 { + return H0 + Reg(Rd) + } else if size == 1 { + return S0 + Reg(Rd) + } else if size == 2 { + return D0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_22_2__H_1__S_2: + size := (x >> 22) & (1<<2 - 1) + Rd := x & (1<<5 - 1) + if size == 1 { + return H0 + Reg(Rd) + } else if size == 2 { + return S0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_22_2__S_1__D_2: + size := (x >> 22) & (1<<2 - 1) + Rd := x & (1<<5 - 1) + if size == 1 { + return S0 + Reg(Rd) + } else if size == 2 { + return D0 + Reg(Rd) + } else { + return nil + } + + case arg_Vd_arrangement_16B: + Rd := x & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + + case arg_Vd_arrangement_2D: + Rd := x & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + + case arg_Vd_arrangement_4S: + Rd := x & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + + case arg_Vd_arrangement_D_index__1: + Rd := x & (1<<5 - 1) + return RegisterWithArrangementAndIndex{V0 + Reg(Rd), ArrangementD, 1, 0} + + case 
arg_Vd_arrangement_imm5___B_1__H_2__S_4__D_8_index__imm5__imm5lt41gt_1__imm5lt42gt_2__imm5lt43gt_4__imm5lt4gt_8_1: + var a Arrangement + var index uint32 + Rd := x & (1<<5 - 1) + imm5 := (x >> 16) & (1<<5 - 1) + if imm5&1 == 1 { + a = ArrangementB + index = imm5 >> 1 + } else if imm5&2 == 2 { + a = ArrangementH + index = imm5 >> 2 + } else if imm5&4 == 4 { + a = ArrangementS + index = imm5 >> 3 + } else if imm5&8 == 8 { + a = ArrangementD + index = imm5 >> 4 + } else { + return nil + } + return RegisterWithArrangementAndIndex{V0 + Reg(Rd), a, uint8(index), 0} + + case arg_Vd_arrangement_imm5_Q___8B_10__16B_11__4H_20__8H_21__2S_40__4S_41__2D_81: + Rd := x & (1<<5 - 1) + imm5 := (x >> 16) & (1<<5 - 1) + Q := (x >> 30) & 1 + if imm5&1 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8B, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + } + } else if imm5&2 == 2 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } + } else if imm5&4 == 4 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + } else if (imm5&8 == 8) && (Q == 1) { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } else { + return nil + } + + case arg_Vd_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__2S_40__4S_41__2D_81: + Rd := x & (1<<5 - 1) + immh := (x >> 19) & (1<<4 - 1) + Q := (x >> 30) & 1 + if immh>>2 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + } else if immh>>3 == 1 { + if Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + } + return nil + + case 
arg_Vd_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__8B_10__16B_11__4H_20__8H_21__2S_40__4S_41: + Rd := x & (1<<5 - 1) + immh := (x >> 19) & (1<<4 - 1) + Q := (x >> 30) & 1 + if immh == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8B, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + } + } else if immh>>1 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } + } else if immh>>2 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + } + return nil + + case arg_Vd_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__8B_10__16B_11__4H_20__8H_21__2S_40__4S_41__2D_81: + Rd := x & (1<<5 - 1) + immh := (x >> 19) & (1<<4 - 1) + Q := (x >> 30) & 1 + if immh == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8B, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + } + } else if immh>>1 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } + } else if immh>>2 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + } else if immh>>3 == 1 { + if Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + } + return nil + + case arg_Vd_arrangement_immh___SEEAdvancedSIMDmodifiedimmediate_0__8H_1__4S_2__2D_4: + Rd := x & (1<<5 - 1) + immh := (x >> 19) & (1<<4 - 1) + if immh == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } else if immh>>1 == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } else if immh>>2 == 1 { + return 
RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + return nil + + case arg_Vd_arrangement_Q___2S_0__4S_1: + Rd := x & (1<<5 - 1) + Q := (x >> 30) & 1 + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + + case arg_Vd_arrangement_Q___4H_0__8H_1: + Rd := x & (1<<5 - 1) + Q := (x >> 30) & 1 + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } + + case arg_Vd_arrangement_Q___8B_0__16B_1: + Rd := x & (1<<5 - 1) + Q := (x >> 30) & 1 + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8B, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + } + + case arg_Vd_arrangement_Q_sz___2S_00__4S_10__2D_11: + Rd := x & (1<<5 - 1) + Q := (x >> 30) & 1 + sz := (x >> 22) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } else if sz == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + return nil + + case arg_Vd_arrangement_size___4S_1__2D_2: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + if size == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } else if size == 2 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + return nil + + case arg_Vd_arrangement_size___8H_0__1Q_3: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + if size == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } else if size == 3 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement1Q, 0} + } + return nil + + case arg_Vd_arrangement_size___8H_0__4S_1__2D_2: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + if size == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } else if 
size == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } else if size == 2 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + return nil + + case arg_Vd_arrangement_size_Q___4H_00__8H_01__2S_10__4S_11__1D_20__2D_21: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement1D, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + return nil + + case arg_Vd_arrangement_size_Q___4H_10__8H_11__2S_20__4S_21: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + return nil + + case arg_Vd_arrangement_size_Q___8B_00__16B_01: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + } + return nil + + case arg_Vd_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 
+ Reg(Rd), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } + return nil + + case arg_Vd_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + return nil + + case arg_Vd_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31: + Rd := x & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } else if size == 3 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + return nil + + case 
arg_Vd_arrangement_sz___4S_0__2D_1: + Rd := x & (1<<5 - 1) + sz := (x >> 22) & 1 + if sz == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + + case arg_Vd_arrangement_sz_Q___2S_00__4S_01: + Rd := x & (1<<5 - 1) + sz := (x >> 22) & 1 + Q := (x >> 30) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + return nil + + case arg_Vd_arrangement_sz_Q___2S_00__4S_01__2D_11: + Rd := x & (1<<5 - 1) + sz := (x >> 22) & 1 + Q := (x >> 30) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } else if sz == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2D, 0} + } + return nil + + case arg_Vd_arrangement_sz_Q___2S_10__4S_11: + Rd := x & (1<<5 - 1) + sz := (x >> 22) & 1 + Q := (x >> 30) & 1 + if sz == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else if sz == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + return nil + + case arg_Vd_arrangement_sz_Q___4H_00__8H_01__2S_10__4S_11: + Rd := x & (1<<5 - 1) + sz := (x >> 22) & 1 + Q := (x >> 30) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4H, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement8H, 0} + } else if sz == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement2S, 0} + } else /* sz == 1 && Q == 1 */ { + return RegisterWithArrangement{V0 + Reg(Rd), Arrangement4S, 0} + } + + case arg_Vm_22_1__S_0__D_1: + sz := (x >> 22) & 1 + Rm := (x >> 16) & (1<<5 - 1) + if sz == 0 { + return S0 + Reg(Rm) + } else { + return D0 + Reg(Rm) + } + + case 
arg_Vm_22_2__B_0__H_1__S_2__D_3: + size := (x >> 22) & (1<<2 - 1) + Rm := (x >> 16) & (1<<5 - 1) + if size == 0 { + return B0 + Reg(Rm) + } else if size == 1 { + return H0 + Reg(Rm) + } else if size == 2 { + return S0 + Reg(Rm) + } else { + return D0 + Reg(Rm) + } + + case arg_Vm_22_2__D_3: + size := (x >> 22) & (1<<2 - 1) + Rm := (x >> 16) & (1<<5 - 1) + if size == 3 { + return D0 + Reg(Rm) + } else { + return nil + } + + case arg_Vm_22_2__H_1__S_2: + size := (x >> 22) & (1<<2 - 1) + Rm := (x >> 16) & (1<<5 - 1) + if size == 1 { + return H0 + Reg(Rm) + } else if size == 2 { + return S0 + Reg(Rm) + } else { + return nil + } + + case arg_Vm_arrangement_4S: + Rm := (x >> 16) & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4S, 0} + + case arg_Vm_arrangement_Q___8B_0__16B_1: + Rm := (x >> 16) & (1<<5 - 1) + Q := (x >> 30) & 1 + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8B, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement16B, 0} + } + + case arg_Vm_arrangement_size___8H_0__4S_1__2D_2: + Rm := (x >> 16) & (1<<5 - 1) + size := (x >> 22) & 3 + if size == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8H, 0} + } else if size == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4S, 0} + } else if size == 2 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement2D, 0} + } + return nil + + case arg_Vm_arrangement_size___H_1__S_2_index__size_L_H_M__HLM_1__HL_2_1: + var a Arrangement + var index uint32 + var vm uint32 + Rm := (x >> 16) & (1<<4 - 1) + size := (x >> 22) & 3 + H := (x >> 11) & 1 + L := (x >> 21) & 1 + M := (x >> 20) & 1 + if size == 1 { + a = ArrangementH + index = (H << 2) | (L << 1) | M + vm = Rm + } else if size == 2 { + a = ArrangementS + index = (H << 1) | L + vm = (M << 4) | Rm + } else { + return nil + } + return RegisterWithArrangementAndIndex{V0 + Reg(vm), a, uint8(index), 0} + + case arg_Vm_arrangement_size_Q___4H_10__8H_11__2S_20__4S_21: + Rm := (x 
>> 16) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement2S, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4S, 0} + } + return nil + + case arg_Vm_arrangement_size_Q___8B_00__16B_01: + Rm := (x >> 16) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement16B, 0} + } + return nil + + case arg_Vm_arrangement_size_Q___8B_00__16B_01__1D_30__2D_31: + Rm := (x >> 16) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement16B, 0} + } else if size == 3 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement1D, 0} + } else if size == 3 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement2D, 0} + } + return nil + + case arg_Vm_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21: + Rm := (x >> 16) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement2S, 
0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4S, 0} + } + return nil + + case arg_Vm_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31: + Rm := (x >> 16) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement2S, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4S, 0} + } else if size == 3 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement2D, 0} + } + return nil + + case arg_Vm_arrangement_sz_Q___2S_00__4S_01__2D_11: + Rm := (x >> 16) & (1<<5 - 1) + sz := (x >> 22) & 1 + Q := (x >> 30) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement2S, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement4S, 0} + } else if sz == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rm), Arrangement2D, 0} + } + return nil + + case arg_Vm_arrangement_sz___S_0__D_1_index__sz_L_H__HL_00__H_10_1: + var a Arrangement + var index uint32 + Rm := (x >> 16) & (1<<5 - 1) + sz := (x >> 22) & 1 + H := (x >> 11) & 1 + L := (x >> 21) & 1 + if sz == 0 { + a = ArrangementS + index = (H << 1) | L + } else if sz == 1 && L == 0 { + a = ArrangementD + index = H + } else { + return nil + } + return RegisterWithArrangementAndIndex{V0 + Reg(Rm), a, uint8(index), 0} + + case arg_Vn_19_4__B_1__H_2__S_4__D_8: + immh := (x >> 19) & (1<<4 - 1) + Rn := (x >> 5) & (1<<5 - 1) + if immh == 1 { + return B0 + Reg(Rn) + } else 
if immh>>1 == 1 { + return H0 + Reg(Rn) + } else if immh>>2 == 1 { + return S0 + Reg(Rn) + } else if immh>>3 == 1 { + return D0 + Reg(Rn) + } else { + return nil + } + + case arg_Vn_19_4__D_8: + immh := (x >> 19) & (1<<4 - 1) + Rn := (x >> 5) & (1<<5 - 1) + if immh>>3 == 1 { + return D0 + Reg(Rn) + } else { + return nil + } + + case arg_Vn_19_4__H_1__S_2__D_4: + immh := (x >> 19) & (1<<4 - 1) + Rn := (x >> 5) & (1<<5 - 1) + if immh == 1 { + return H0 + Reg(Rn) + } else if immh>>1 == 1 { + return S0 + Reg(Rn) + } else if immh>>2 == 1 { + return D0 + Reg(Rn) + } else { + return nil + } + + case arg_Vn_19_4__S_4__D_8: + immh := (x >> 19) & (1<<4 - 1) + Rn := (x >> 5) & (1<<5 - 1) + if immh>>2 == 1 { + return S0 + Reg(Rn) + } else if immh>>3 == 1 { + return D0 + Reg(Rn) + } else { + return nil + } + + case arg_Vn_1_arrangement_16B: + Rn := (x >> 5) & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 1} + + case arg_Vn_22_1__D_1: + sz := (x >> 22) & 1 + Rn := (x >> 5) & (1<<5 - 1) + if sz == 1 { + return D0 + Reg(Rn) + } + return nil + + case arg_Vn_22_1__S_0__D_1: + sz := (x >> 22) & 1 + Rn := (x >> 5) & (1<<5 - 1) + if sz == 0 { + return S0 + Reg(Rn) + } else { + return D0 + Reg(Rn) + } + + case arg_Vn_22_2__B_0__H_1__S_2__D_3: + size := (x >> 22) & (1<<2 - 1) + Rn := (x >> 5) & (1<<5 - 1) + if size == 0 { + return B0 + Reg(Rn) + } else if size == 1 { + return H0 + Reg(Rn) + } else if size == 2 { + return S0 + Reg(Rn) + } else { + return D0 + Reg(Rn) + } + + case arg_Vn_22_2__D_3: + size := (x >> 22) & (1<<2 - 1) + Rn := (x >> 5) & (1<<5 - 1) + if size == 3 { + return D0 + Reg(Rn) + } else { + return nil + } + + case arg_Vn_22_2__H_0__S_1__D_2: + size := (x >> 22) & (1<<2 - 1) + Rn := (x >> 5) & (1<<5 - 1) + if size == 0 { + return H0 + Reg(Rn) + } else if size == 1 { + return S0 + Reg(Rn) + } else if size == 2 { + return D0 + Reg(Rn) + } else { + return nil + } + + case arg_Vn_22_2__H_1__S_2: + size := (x >> 22) & (1<<2 - 1) + Rn := (x >> 5) & 
(1<<5 - 1) + if size == 1 { + return H0 + Reg(Rn) + } else if size == 2 { + return S0 + Reg(Rn) + } else { + return nil + } + + case arg_Vn_2_arrangement_16B: + Rn := (x >> 5) & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 2} + + case arg_Vn_3_arrangement_16B: + Rn := (x >> 5) & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 3} + + case arg_Vn_4_arrangement_16B: + Rn := (x >> 5) & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 4} + + case arg_Vn_arrangement_16B: + Rn := (x >> 5) & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + + case arg_Vn_arrangement_4S: + Rn := (x >> 5) & (1<<5 - 1) + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + + case arg_Vn_arrangement_D_index__1: + Rn := (x >> 5) & (1<<5 - 1) + return RegisterWithArrangementAndIndex{V0 + Reg(Rn), ArrangementD, 1, 0} + + case arg_Vn_arrangement_D_index__imm5_1: + Rn := (x >> 5) & (1<<5 - 1) + index := (x >> 20) & 1 + return RegisterWithArrangementAndIndex{V0 + Reg(Rn), ArrangementD, uint8(index), 0} + + case arg_Vn_arrangement_imm5___B_1__H_2_index__imm5__imm5lt41gt_1__imm5lt42gt_2_1: + var a Arrangement + var index uint32 + Rn := (x >> 5) & (1<<5 - 1) + imm5 := (x >> 16) & (1<<5 - 1) + if imm5&1 == 1 { + a = ArrangementB + index = imm5 >> 1 + } else if imm5&2 == 2 { + a = ArrangementH + index = imm5 >> 2 + } else { + return nil + } + return RegisterWithArrangementAndIndex{V0 + Reg(Rn), a, uint8(index), 0} + + case arg_Vn_arrangement_imm5___B_1__H_2__S_4__D_8_index__imm5_imm4__imm4lt30gt_1__imm4lt31gt_2__imm4lt32gt_4__imm4lt3gt_8_1: + var a Arrangement + var index uint32 + Rn := (x >> 5) & (1<<5 - 1) + imm5 := (x >> 16) & (1<<5 - 1) + imm4 := (x >> 11) & (1<<4 - 1) + if imm5&1 == 1 { + a = ArrangementB + index = imm4 + } else if imm5&2 == 2 { + a = ArrangementH + index = imm4 >> 1 + } else if imm5&4 == 4 { + a = ArrangementS + index = imm4 >> 2 + } else if imm5&8 == 8 { + a = 
ArrangementD + index = imm4 >> 3 + } else { + return nil + } + return RegisterWithArrangementAndIndex{V0 + Reg(Rn), a, uint8(index), 0} + + case arg_Vn_arrangement_imm5___B_1__H_2__S_4__D_8_index__imm5__imm5lt41gt_1__imm5lt42gt_2__imm5lt43gt_4__imm5lt4gt_8_1: + var a Arrangement + var index uint32 + Rn := (x >> 5) & (1<<5 - 1) + imm5 := (x >> 16) & (1<<5 - 1) + if imm5&1 == 1 { + a = ArrangementB + index = imm5 >> 1 + } else if imm5&2 == 2 { + a = ArrangementH + index = imm5 >> 2 + } else if imm5&4 == 4 { + a = ArrangementS + index = imm5 >> 3 + } else if imm5&8 == 8 { + a = ArrangementD + index = imm5 >> 4 + } else { + return nil + } + return RegisterWithArrangementAndIndex{V0 + Reg(Rn), a, uint8(index), 0} + + case arg_Vn_arrangement_imm5___B_1__H_2__S_4_index__imm5__imm5lt41gt_1__imm5lt42gt_2__imm5lt43gt_4_1: + var a Arrangement + var index uint32 + Rn := (x >> 5) & (1<<5 - 1) + imm5 := (x >> 16) & (1<<5 - 1) + if imm5&1 == 1 { + a = ArrangementB + index = imm5 >> 1 + } else if imm5&2 == 2 { + a = ArrangementH + index = imm5 >> 2 + } else if imm5&4 == 4 { + a = ArrangementS + index = imm5 >> 3 + } else { + return nil + } + return RegisterWithArrangementAndIndex{V0 + Reg(Rn), a, uint8(index), 0} + + case arg_Vn_arrangement_imm5___D_8_index__imm5_1: + var a Arrangement + var index uint32 + Rn := (x >> 5) & (1<<5 - 1) + imm5 := (x >> 16) & (1<<5 - 1) + if imm5&15 == 8 { + a = ArrangementD + index = imm5 >> 4 + } else { + return nil + } + return RegisterWithArrangementAndIndex{V0 + Reg(Rn), a, uint8(index), 0} + + case arg_Vn_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__2S_40__4S_41__2D_81: + Rn := (x >> 5) & (1<<5 - 1) + immh := (x >> 19) & (1<<4 - 1) + Q := (x >> 30) & 1 + if immh>>2 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + } else if immh>>3 == 1 { + if Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} 
+ } + } + return nil + + case arg_Vn_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__8B_10__16B_11__4H_20__8H_21__2S_40__4S_41: + Rn := (x >> 5) & (1<<5 - 1) + immh := (x >> 19) & (1<<4 - 1) + Q := (x >> 30) & 1 + if immh == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } + } else if immh>>1 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4H, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } + } else if immh>>2 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + } + return nil + + case arg_Vn_arrangement_immh_Q___SEEAdvancedSIMDmodifiedimmediate_00__8B_10__16B_11__4H_20__8H_21__2S_40__4S_41__2D_81: + Rn := (x >> 5) & (1<<5 - 1) + immh := (x >> 19) & (1<<4 - 1) + Q := (x >> 30) & 1 + if immh == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } + } else if immh>>1 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4H, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } + } else if immh>>2 == 1 { + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + } else if immh>>3 == 1 { + if Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + } + return nil + + case arg_Vn_arrangement_immh___SEEAdvancedSIMDmodifiedimmediate_0__8H_1__4S_2__2D_4: + Rn := (x >> 5) & (1<<5 - 1) + immh := (x >> 19) & (1<<4 - 1) + if immh == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } else if immh>>1 == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + 
} else if immh>>2 == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + return nil + + case arg_Vn_arrangement_Q___8B_0__16B_1: + Rn := (x >> 5) & (1<<5 - 1) + Q := (x >> 30) & 1 + if Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } + + case arg_Vn_arrangement_Q_sz___2S_00__4S_10__2D_11: + Rn := (x >> 5) & (1<<5 - 1) + Q := (x >> 30) & 1 + sz := (x >> 22) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } else if sz == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + return nil + + case arg_Vn_arrangement_Q_sz___4S_10: + Rn := (x >> 5) & (1<<5 - 1) + Q := (x >> 30) & 1 + sz := (x >> 22) & 1 + if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + return nil + + case arg_Vn_arrangement_S_index__imm5__imm5lt41gt_1__imm5lt42gt_2__imm5lt43gt_4_1: + var index uint32 + Rn := (x >> 5) & (1<<5 - 1) + imm5 := (x >> 16) & (1<<5 - 1) + index = imm5 >> 3 + return RegisterWithArrangementAndIndex{V0 + Reg(Rn), ArrangementS, uint8(index), 0} + + case arg_Vn_arrangement_size___2D_3: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + if size == 3 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + return nil + + case arg_Vn_arrangement_size___8H_0__4S_1__2D_2: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + if size == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } else if size == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } else if size == 2 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + return nil + + case arg_Vn_arrangement_size_Q___4H_10__8H_11__2S_20__4S_21: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 
30) & 1 + if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + return nil + + case arg_Vn_arrangement_size_Q___8B_00__16B_01: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } + return nil + + case arg_Vn_arrangement_size_Q___8B_00__16B_01__1D_30__2D_31: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } else if size == 3 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement1D, 0} + } else if size == 3 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + return nil + + case arg_Vn_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } + return nil + + case arg_Vn_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 
1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + return nil + + case arg_Vn_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } else if size == 3 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + return nil + + case arg_Vn_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__4S_21: + Rn := (x >> 5) & (1<<5 - 1) + size := (x >> 22) & 3 + Q := (x >> 30) & 1 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8B, 0} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement16B, 0} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4H, 0} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), 
Arrangement8H, 0} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + return nil + + case arg_Vn_arrangement_sz___2D_1: + Rn := (x >> 5) & (1<<5 - 1) + sz := (x >> 22) & 1 + if sz == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + return nil + + case arg_Vn_arrangement_sz___2S_0__2D_1: + Rn := (x >> 5) & (1<<5 - 1) + sz := (x >> 22) & 1 + if sz == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + + case arg_Vn_arrangement_sz___4S_0__2D_1: + Rn := (x >> 5) & (1<<5 - 1) + sz := (x >> 22) & 1 + if sz == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } else { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + + case arg_Vn_arrangement_sz_Q___2S_00__4S_01: + Rn := (x >> 5) & (1<<5 - 1) + sz := (x >> 22) & 1 + Q := (x >> 30) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + return nil + + case arg_Vn_arrangement_sz_Q___2S_00__4S_01__2D_11: + Rn := (x >> 5) & (1<<5 - 1) + sz := (x >> 22) & 1 + Q := (x >> 30) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2S, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } else if sz == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement2D, 0} + } + return nil + + case arg_Vn_arrangement_sz_Q___4H_00__8H_01__2S_10__4S_11: + Rn := (x >> 5) & (1<<5 - 1) + sz := (x >> 22) & 1 + Q := (x >> 30) & 1 + if sz == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4H, 0} + } else if sz == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement8H, 0} + } else if sz == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rn), 
Arrangement2S, 0} + } else /* sz == 1 && Q == 1 */ { + return RegisterWithArrangement{V0 + Reg(Rn), Arrangement4S, 0} + } + + case arg_Vt_1_arrangement_B_index__Q_S_size_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + size := (x >> 10) & 3 + index := (Q << 3) | (S << 2) | (size) + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementB, uint8(index), 1} + + case arg_Vt_1_arrangement_D_index__Q_1: + Rt := x & (1<<5 - 1) + index := (x >> 30) & 1 + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementD, uint8(index), 1} + + case arg_Vt_1_arrangement_H_index__Q_S_size_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + size := (x >> 11) & 1 + index := (Q << 2) | (S << 1) | (size) + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementH, uint8(index), 1} + + case arg_Vt_1_arrangement_S_index__Q_S_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + index := (Q << 1) | S + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementS, uint8(index), 1} + + case arg_Vt_1_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + size := (x >> 10) & 3 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8B, 1} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement16B, 1} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4H, 1} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8H, 1} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2S, 1} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4S, 1} + } else if size == 3 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement1D, 1} + } else /* size == 3 && Q == 1 */ { + return RegisterWithArrangement{V0 + Reg(Rt), 
Arrangement2D, 1} + } + + case arg_Vt_2_arrangement_B_index__Q_S_size_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + size := (x >> 10) & 3 + index := (Q << 3) | (S << 2) | (size) + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementB, uint8(index), 2} + + case arg_Vt_2_arrangement_D_index__Q_1: + Rt := x & (1<<5 - 1) + index := (x >> 30) & 1 + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementD, uint8(index), 2} + + case arg_Vt_2_arrangement_H_index__Q_S_size_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + size := (x >> 11) & 1 + index := (Q << 2) | (S << 1) | (size) + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementH, uint8(index), 2} + + case arg_Vt_2_arrangement_S_index__Q_S_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + index := (Q << 1) | S + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementS, uint8(index), 2} + + case arg_Vt_2_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + size := (x >> 10) & 3 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8B, 2} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement16B, 2} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4H, 2} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8H, 2} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2S, 2} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4S, 2} + } else if size == 3 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement1D, 2} + } else /* size == 3 && Q == 1 */ { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2D, 2} + } + + case arg_Vt_2_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31: 
+ Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + size := (x >> 10) & 3 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8B, 2} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement16B, 2} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4H, 2} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8H, 2} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2S, 2} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4S, 2} + } else if size == 3 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2D, 2} + } + return nil + + case arg_Vt_3_arrangement_B_index__Q_S_size_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + size := (x >> 10) & 3 + index := (Q << 3) | (S << 2) | (size) + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementB, uint8(index), 3} + + case arg_Vt_3_arrangement_D_index__Q_1: + Rt := x & (1<<5 - 1) + index := (x >> 30) & 1 + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementD, uint8(index), 3} + + case arg_Vt_3_arrangement_H_index__Q_S_size_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + size := (x >> 11) & 1 + index := (Q << 2) | (S << 1) | (size) + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementH, uint8(index), 3} + + case arg_Vt_3_arrangement_S_index__Q_S_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + index := (Q << 1) | S + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementS, uint8(index), 3} + + case arg_Vt_3_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + size := (x >> 10) & 3 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8B, 3} + } else if size == 0 && Q == 1 { + 
return RegisterWithArrangement{V0 + Reg(Rt), Arrangement16B, 3} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4H, 3} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8H, 3} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2S, 3} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4S, 3} + } else if size == 3 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement1D, 3} + } else /* size == 3 && Q == 1 */ { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2D, 3} + } + + case arg_Vt_3_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + size := (x >> 10) & 3 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8B, 3} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement16B, 3} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4H, 3} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8H, 3} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2S, 3} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4S, 3} + } else if size == 3 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2D, 3} + } + return nil + + case arg_Vt_4_arrangement_B_index__Q_S_size_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + size := (x >> 10) & 3 + index := (Q << 3) | (S << 2) | (size) + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementB, uint8(index), 4} + + case arg_Vt_4_arrangement_D_index__Q_1: + Rt := x & (1<<5 - 1) + index := (x >> 30) & 1 + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementD, uint8(index), 4} + + case 
arg_Vt_4_arrangement_H_index__Q_S_size_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + size := (x >> 11) & 1 + index := (Q << 2) | (S << 1) | (size) + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementH, uint8(index), 4} + + case arg_Vt_4_arrangement_S_index__Q_S_1: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + S := (x >> 12) & 1 + index := (Q << 1) | S + return RegisterWithArrangementAndIndex{V0 + Reg(Rt), ArrangementS, uint8(index), 4} + + case arg_Vt_4_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__1D_30__2D_31: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + size := (x >> 10) & 3 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8B, 4} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement16B, 4} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4H, 4} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8H, 4} + } else if size == 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2S, 4} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4S, 4} + } else if size == 3 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement1D, 4} + } else /* size == 3 && Q == 1 */ { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2D, 4} + } + + case arg_Vt_4_arrangement_size_Q___8B_00__16B_01__4H_10__8H_11__2S_20__4S_21__2D_31: + Rt := x & (1<<5 - 1) + Q := (x >> 30) & 1 + size := (x >> 10) & 3 + if size == 0 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8B, 4} + } else if size == 0 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement16B, 4} + } else if size == 1 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4H, 4} + } else if size == 1 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement8H, 4} + } else if size 
== 2 && Q == 0 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2S, 4} + } else if size == 2 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement4S, 4} + } else if size == 3 && Q == 1 { + return RegisterWithArrangement{V0 + Reg(Rt), Arrangement2D, 4} + } + return nil + + case arg_Xns_mem_extend_m__UXTW_2__LSL_3__SXTW_6__SXTX_7__0_0__4_1: + return handle_MemExtend(x, 4, false) + + case arg_Xns_mem_offset: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrOffset, 0} + + case arg_Xns_mem_optional_imm12_16_unsigned: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm12 := (x >> 10) & (1<<12 - 1) + return MemImmediate{Rn, AddrOffset, int32(imm12 << 4)} + + case arg_Xns_mem_optional_imm7_16_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, AddrOffset, ((int32(imm7 << 4)) << 21) >> 21} + + case arg_Xns_mem_post_fixedimm_1: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 1} + + case arg_Xns_mem_post_fixedimm_12: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 12} + + case arg_Xns_mem_post_fixedimm_16: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 16} + + case arg_Xns_mem_post_fixedimm_2: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 2} + + case arg_Xns_mem_post_fixedimm_24: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 24} + + case arg_Xns_mem_post_fixedimm_3: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 3} + + case arg_Xns_mem_post_fixedimm_32: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 32} + + case arg_Xns_mem_post_fixedimm_4: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 4} + + case arg_Xns_mem_post_fixedimm_6: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, 
AddrPostIndex, 6} + + case arg_Xns_mem_post_fixedimm_8: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + return MemImmediate{Rn, AddrPostIndex, 8} + + case arg_Xns_mem_post_imm7_16_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, AddrPostIndex, ((int32(imm7 << 4)) << 21) >> 21} + + case arg_Xns_mem_post_Q__16_0__32_1: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + Q := (x >> 30) & 1 + return MemImmediate{Rn, AddrPostIndex, int32((Q + 1) * 16)} + + case arg_Xns_mem_post_Q__24_0__48_1: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + Q := (x >> 30) & 1 + return MemImmediate{Rn, AddrPostIndex, int32((Q + 1) * 24)} + + case arg_Xns_mem_post_Q__32_0__64_1: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + Q := (x >> 30) & 1 + return MemImmediate{Rn, AddrPostIndex, int32((Q + 1) * 32)} + + case arg_Xns_mem_post_Q__8_0__16_1: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + Q := (x >> 30) & 1 + return MemImmediate{Rn, AddrPostIndex, int32((Q + 1) * 8)} + + case arg_Xns_mem_post_size__1_0__2_1__4_2__8_3: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + size := (x >> 10) & 3 + return MemImmediate{Rn, AddrPostIndex, int32(1 << size)} + + case arg_Xns_mem_post_size__2_0__4_1__8_2__16_3: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + size := (x >> 10) & 3 + return MemImmediate{Rn, AddrPostIndex, int32(2 << size)} + + case arg_Xns_mem_post_size__3_0__6_1__12_2__24_3: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + size := (x >> 10) & 3 + return MemImmediate{Rn, AddrPostIndex, int32(3 << size)} + + case arg_Xns_mem_post_size__4_0__8_1__16_2__32_3: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + size := (x >> 10) & 3 + return MemImmediate{Rn, AddrPostIndex, int32(4 << size)} + + case arg_Xns_mem_post_Xm: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + Rm := (x >> 16) & (1<<5 - 1) + return MemImmediate{Rn, AddrPostReg, int32(Rm)} + + case arg_Xns_mem_wb_imm7_16_signed: + Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1)) + imm7 := (x >> 15) & (1<<7 - 1) + return MemImmediate{Rn, 
AddrPreIndex, ((int32(imm7 << 4)) << 21) >> 21}
	}
}

// handle_ExtendedRegister decodes an extended-register operand:
// a register plus an extension (UXTB..SXTX, or LSL) and a 3-bit
// shift amount, taken from instruction word x. has_width reports
// whether the instruction selects between W and X forms via the
// option field; otherwise the register is always a W register.
func handle_ExtendedRegister(x uint32, has_width bool) Arg {
	s := (x >> 29) & 1
	rm := (x >> 16) & (1<<5 - 1)
	option := (x >> 13) & (1<<3 - 1)
	imm3 := (x >> 10) & (1<<3 - 1)
	rn := (x >> 5) & (1<<5 - 1)
	rd := x & (1<<5 - 1)
	is_32bit := !has_width
	var rea RegExtshiftAmount
	if has_width {
		if option&0x3 != 0x3 {
			rea.reg = W0 + Reg(rm)
		} else {
			rea.reg = X0 + Reg(rm)
		}
	} else {
		rea.reg = W0 + Reg(rm)
	}
	switch option {
	case 0:
		rea.extShift = uxtb
	case 1:
		rea.extShift = uxth
	case 2:
		// In the 32-bit form, UXTW is printed as LSL (or omitted when
		// the amount is 0) when Rn or Rd is register 31 (SP/WSP).
		if is_32bit && (rn == 31 || (s == 0 && rd == 31)) {
			if imm3 != 0 {
				rea.extShift = lsl
			} else {
				rea.extShift = ExtShift(0)
			}
		} else {
			rea.extShift = uxtw
		}
	case 3:
		// Likewise UXTX degenerates to LSL in the 64-bit form.
		if !is_32bit && (rn == 31 || (s == 0 && rd == 31)) {
			if imm3 != 0 {
				rea.extShift = lsl
			} else {
				rea.extShift = ExtShift(0)
			}
		} else {
			rea.extShift = uxtx
		}
	case 4:
		rea.extShift = sxtb
	case 5:
		rea.extShift = sxth
	case 6:
		rea.extShift = sxtw
	case 7:
		rea.extShift = sxtx
	}
	rea.show_zero = false
	rea.amount = uint8(imm3)
	return rea
}

// handle_ImmediateShiftedRegister decodes a shifted-register operand:
// a register plus LSL/LSR/ASR (and ROR when has_ror is set) with a
// 6-bit shift amount. max is the largest legal amount; is_w selects
// W registers. It returns nil for an invalid shift type or amount.
func handle_ImmediateShiftedRegister(x uint32, max uint8, is_w, has_ror bool) Arg {
	var rsa RegExtshiftAmount
	if is_w {
		rsa.reg = W0 + Reg((x>>16)&(1<<5-1))
	} else {
		rsa.reg = X0 + Reg((x>>16)&(1<<5-1))
	}
	switch (x >> 22) & 0x3 {
	case 0:
		rsa.extShift = lsl
	case 1:
		rsa.extShift = lsr
	case 2:
		rsa.extShift = asr
	case 3:
		if has_ror {
			rsa.extShift = ror
		} else {
			return nil
		}
	}
	rsa.show_zero = true
	rsa.amount = uint8((x >> 10) & (1<<6 - 1))
	if rsa.amount == 0 && rsa.extShift == lsl {
		// "LSL #0" is the identity; suppress the shift entirely.
		rsa.extShift = ExtShift(0)
	} else if rsa.amount > max {
		return nil
	}
	return rsa
}

// handle_MemExtend decodes a register-offset memory operand
// [Rn, Rm{, extend {amount}}]. mult is the access size used to scale
// the single S bit into the shift amount; absent indicates the
// amount field is implicit in the assembly syntax.
func handle_MemExtend(x uint32, mult uint8, absent bool) Arg {
	var extend ExtShift
	var Rm Reg
	option := (x >> 13) & (1<<3 - 1)
	Rn := RegSP(X0) + RegSP(x>>5&(1<<5-1))
	if (option & 1) != 0 {
		// Odd option values select the X form of the index register.
		Rm = Reg(X0) + Reg(x>>16&(1<<5-1))
	} else {
		Rm = Reg(W0) + Reg(x>>16&(1<<5-1))
	}
	switch option {
	default:
		return nil
	case 2:
		extend = uxtw
	case 3:
		extend = lsl
	case 6:
		extend = sxtw
	case 7:
		extend = sxtx
	}
	amount := (uint8((x >> 12) & 1)) * mult
	return MemExtend{Rn, Rm, extend, amount, absent}
}

// handle_bitmasks decodes an N:immr:imms logical-immediate field into
// the 64-bit bitmask it denotes, following the DecodeBitMasks()
// pseudocode in the Arm Architecture Reference Manual: an element of
// esize bits containing s+1 ones rotated right by r, replicated to
// datasize bits. It returns nil for a reserved encoding.
//
// NOTE(review): the middle of this function arrived garbled in the
// patch text (the span between "1<" and " datasize {" was stripped);
// the levels/s/r/esize computation below is restored from the
// upstream golang.org/x/arch source, which the surviving uses of
// s and r in welem/ror corroborate.
func handle_bitmasks(x uint32, datasize uint8) Arg {
	var length, levels, esize, i uint8
	var welem, wmask uint64
	n := (x >> 22) & 1
	imms := uint8((x >> 10) & (1<<6 - 1))
	immr := uint8((x >> 16) & (1<<6 - 1))
	// length = index of the highest set bit of n:NOT(imms).
	if n != 0 {
		length = 6
	} else if (imms & 32) == 0 {
		length = 5
	} else if (imms & 16) == 0 {
		length = 4
	} else if (imms & 8) == 0 {
		length = 3
	} else if (imms & 4) == 0 {
		length = 2
	} else if (imms & 2) == 0 {
		length = 1
	} else {
		return nil // reserved: imms all-ones
	}
	levels = 1<<length - 1
	s := imms & levels
	r := immr & levels
	esize = 1 << length
	if esize > datasize {
		return nil
	}
	welem = 1<<(s+1) - 1
	// Rotate the run of ones right by r within the element.
	ror := (welem >> r) | (welem << (esize - r))
	ror &= ((1 << esize) - 1)
	wmask = 0
	// Replicate the element across the full datasize.
	for i = 0; i < datasize; i += esize {
		wmask = (wmask << esize) | ror
	}
	return Imm64{wmask, false}
}
diff --git a/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/decode_test.go b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/decode_test.go
new file mode 100644
index 00000000000..a79ee1cae6b
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/decode_test.go
@@ -0,0 +1,78 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package arm64asm

import (
	"encoding/hex"
	"io/ioutil"
	"strings"
	"testing"
)

// TestDecode decodes every case in testdata/cases.txt and compares the
// result against the recorded gnu or plan9 disassembly on that line.
func TestDecode(t *testing.T) {
	data, err := ioutil.ReadFile("testdata/cases.txt")
	if err != nil {
		t.Fatal(err)
	}
	all := string(data)
	// Collapse runs of tabs so each line splits into exactly three
	// tab-separated fields: hex encoding, syntax name, expected text.
	for strings.Contains(all, "\t\t") {
		all = strings.Replace(all, "\t\t", "\t", -1)
	}
	for _, line := range strings.Split(all, "\n") {
		line = strings.TrimSpace(line)
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		f := strings.SplitN(line, "\t", 3)
		// The "|" marks the end of the instruction bytes within the hex
		// field; it must fall on a whole-byte (two hex digit) boundary.
		i := strings.Index(f[0], "|")
		if i < 0 {
			t.Errorf("parsing %q: missing | separator", f[0])
			continue
		}
		if i%2 != 0 {
			t.Errorf("parsing %q: misaligned | separator", f[0])
		}
		code, err := hex.DecodeString(f[0][:i] + f[0][i+1:])
		if err != nil {
			t.Errorf("parsing %q: %v", f[0], err)
			continue
		}
		syntax, asm := f[1], f[2]
		inst, decodeErr := Decode(code)
		if decodeErr != nil && decodeErr != errUnknown {
			// Some rarely used system instructions are not supported;
			// the errUnknown comparison above filters those out, so any
			// other decode error is a real failure.
			t.Errorf("parsing %x: %s", code, decodeErr)
			continue
		}
		var out string
		switch syntax {
		case "gnu":
			out = GNUSyntax(inst)
		case "plan9":
			out = GoSyntax(inst, 0, nil, nil)
		default:
			t.Errorf("unknown syntax %q", syntax)
			continue
		}
		// TODO: system instruction.
		var Todo = strings.Fields(`
			sys
			dc
			at
			tlbi
			ic
			hvc
			smc
		`)
		if strings.Replace(out, " ", "", -1) != strings.Replace(asm, " ", "", -1) && !hasPrefix(asm, Todo...) {
			// Exclude MSR since the GNU objdump result is incorrect,
			// e.g. 0xd504431f msr s0_4_c4_c3_0, xzr
			if !strings.HasSuffix(asm, " nv") && !strings.HasPrefix(asm, "msr") {
				t.Errorf("Decode(%s) [%s] = %s, want %s", f[0], syntax, out, asm)
			}
		}
	}
}
diff --git a/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/ext_test.go b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/ext_test.go
new file mode 100644
index 00000000000..bf0ee13d41f
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/ext_test.go
@@ -0,0 +1,601 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Support for testing against external disassembler program.
// Copied and simplified from ../../arm/armasm/ext_test.go.

package arm64asm

import (
	"bufio"
	"bytes"
	"encoding/hex"
	"encoding/json"
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"math/rand"
	"os"
	"os/exec"
	"regexp"
	"strconv"
	"strings"
	"testing"
	"time"
)

var (
	dumpTest = flag.Bool("dump", false, "dump all encodings")
	mismatch = flag.Bool("mismatch", false, "log allowed mismatches")
	longTest = flag.Bool("long", false, "long test")
	keep     = flag.Bool("keep", false, "keep object files around")
	debug    = false
)

// An ExtInst represents a single decoded instruction parsed
// from an external disassembler's output.
type ExtInst struct {
	addr uint64  // instruction address in the disassembled file
	enc  [4]byte // raw encoding bytes
	nenc int     // number of valid bytes in enc
	text string  // disassembly text
}

func (r ExtInst) String() string {
	return fmt.Sprintf("%#x: % x: %s", r.addr, r.enc, r.text)
}

// An ExtDis is a connection between an external disassembler and a test.
type ExtDis struct {
	Arch     Mode
	Dec      chan ExtInst
	File     *os.File
	Size     int
	KeepFile bool
	Cmd      *exec.Cmd
}

// InstJson describes an instruction's field values as extracted from
// the ARMv8-A Reference Manual.
type InstJson struct {
	Name   string
	Bits   string
	Arch   string
	Syntax string
	Code   string
	Alias  string
	Enc    uint32
}

// A Mode is an instruction execution mode.
type Mode int

const (
	_ Mode = iota
	ModeARM64
)

// Run runs the given command - the external disassembler - and returns
// a buffered reader of its standard output.
func (ext *ExtDis) Run(cmd ...string) (*bufio.Reader, error) {
	if *keep {
		log.Printf("%s\n", strings.Join(cmd, " "))
	}
	ext.Cmd = exec.Command(cmd[0], cmd[1:]...)
	out, err := ext.Cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("stdoutpipe: %v", err)
	}
	if err := ext.Cmd.Start(); err != nil {
		return nil, fmt.Errorf("exec: %v", err)
	}

	b := bufio.NewReaderSize(out, 1<<20)
	return b, nil
}

// Wait waits for the command started with Run to exit.
func (ext *ExtDis) Wait() error {
	return ext.Cmd.Wait()
}

// testExtDis tests a set of byte sequences against an external disassembler.
// The disassembler is expected to produce the given syntax and run
// in the given architecture mode (16, 32, or 64-bit).
// The extdis function must start the external disassembler
// and then parse its output, sending the parsed instructions on ext.Dec.
// The generate function calls its argument f once for each byte sequence
// to be tested. The generate function itself will be called twice, and it must
// make the same sequence of calls to f each time.
// When a disassembly does not match the internal decoding,
// allowedMismatch determines whether this mismatch should be
// allowed, or else considered an error.
func testExtDis(
	t *testing.T,
	syntax string,
	arch Mode,
	extdis func(ext *ExtDis) error,
	generate func(f func([]byte)),
	allowedMismatch func(text string, inst *Inst, dec ExtInst) bool,
) {
	start := time.Now() // NOTE: shadows the package-level const start
	ext := &ExtDis{
		Dec:  make(chan ExtInst),
		Arch: arch,
	}
	errc := make(chan error)

	// First pass: write instructions to input file for external disassembler.
	file, f, size, err := writeInst(generate)
	if err != nil {
		t.Fatal(err)
	}
	ext.Size = size
	ext.File = f
	defer func() {
		f.Close()
		if !*keep {
			os.Remove(file)
		}
	}()

	// Second pass: compare disassembly against our decodings.
	var (
		totalTests  = 0
		totalSkips  = 0
		totalErrors = 0

		errors = make([]string, 0, 100) // Sampled errors, at most cap
	)
	// The external disassembler runs concurrently, feeding parsed
	// instructions to ext.Dec in the same order generate emits them.
	go func() {
		errc <- extdis(ext)
	}()

	generate(func(enc []byte) {
		dec, ok := <-ext.Dec
		if !ok {
			t.Errorf("decoding stream ended early")
			return
		}
		inst, text := disasm(syntax, pad(enc))

		totalTests++
		if *dumpTest {
			fmt.Printf("%x -> %s [%d]\n", enc[:len(enc)], dec.text, dec.nenc)
		}
		if text != dec.text && !strings.Contains(dec.text, "unknown") && syntax == "gnu" {
			suffix := ""
			if allowedMismatch(text, &inst, dec) {
				totalSkips++
				if !*mismatch {
					return
				}
				suffix += " (allowed mismatch)"
			}
			totalErrors++
			cmp := fmt.Sprintf("decode(%x) = %q, %d, want %q, %d%s\n", enc, text, len(enc), dec.text, dec.nenc, suffix)

			// Keep only a bounded random sample of the failures so the
			// log stays readable when there are many mismatches.
			if len(errors) >= cap(errors) {
				j := rand.Intn(totalErrors)
				if j >= cap(errors) {
					return
				}
				errors = append(errors[:j], errors[j+1:]...)
			}
			errors = append(errors, cmp)
		}
	})

	if *mismatch {
		totalErrors -= totalSkips
	}

	for _, b := range errors {
		t.Log(b)
	}

	if totalErrors > 0 {
		t.Fail()
	}
	t.Logf("%d test cases, %d expected mismatches, %d failures; %.0f cases/second", totalTests, totalSkips, totalErrors, float64(totalTests)/time.Since(start).Seconds())
	t.Logf("decoder coverage: %.1f%%;\n", decodeCoverage())
	if err := <-errc; err != nil {
		t.Fatalf("external disassembler: %v", err)
	}

}

// Start address of text.
const start = 0x8000

// writeInst writes the generated byte sequences to a new file
// starting at offset start. That file is intended to be the input to
// the external disassembler.
func writeInst(generate func(func([]byte))) (file string, f *os.File, size int, err error) {
	f, err = ioutil.TempFile("", "arm64asm")
	if err != nil {
		return
	}

	file = f.Name()

	f.Seek(start, 0)
	w := bufio.NewWriter(f)
	defer w.Flush()
	size = 0
	generate(func(x []byte) {
		if debug {
			fmt.Printf("%#x: %x%x\n", start+size, x, zeros[len(x):])
		}
		// Each case is padded with zero bytes to a fixed 4-byte slot.
		w.Write(x)
		w.Write(zeros[len(x):])
		size += len(zeros)
	})
	return file, f, size, nil
}

var zeros = []byte{0, 0, 0, 0}

// pad pads the code sequence with pops.
func pad(enc []byte) []byte {
	if len(enc) < 4 {
		enc = append(enc[:len(enc):len(enc)], zeros[:4-len(enc)]...)
	}
	return enc
}

// disasm returns the decoded instruction and text
// for the given source bytes, using the given syntax and mode.
func disasm(syntax string, src []byte) (inst Inst, text string) {
	var err error
	inst, err = Decode(src)
	if err != nil {
		text = "error: " + err.Error()
		return
	}
	text = inst.String()
	switch syntax {
	case "gnu":
		text = GNUSyntax(inst)
	case "plan9": // [sic]
		text = GoSyntax(inst, 0, nil, nil)
	default:
		text = "error: unknown syntax " + syntax
	}
	return
}

// decodeCoverage returns a floating point number denoting the
// decoder coverage (percentage of decoder table entries exercised).
func decodeCoverage() float64 {
	n := 0
	for _, t := range decoderCover {
		if t {
			n++
		}
	}
	return 100 * float64(1+n) / float64(1+len(decoderCover))
}

// Helpers for writing disassembler output parsers.

// hasPrefix reports whether any of the space-separated words in the text s
// begins with any of the given prefixes.
func hasPrefix(s string, prefixes ...string) bool {
	for _, prefix := range prefixes {
		for cur_s := s; cur_s != ""; {
			if strings.HasPrefix(cur_s, prefix) {
				return true
			}
			i := strings.Index(cur_s, " ")
			if i < 0 {
				break
			}
			cur_s = cur_s[i+1:]
		}
	}
	return false
}

// isHex reports whether b is a hexadecimal character (0-9a-fA-F).
func isHex(b byte) bool {
	return ('0' <= b && b <= '9') || ('a' <= b && b <= 'f') || ('A' <= b && b <= 'F')
}

// parseHex parses the hexadecimal byte dump in hex,
// appending the parsed bytes to raw and returning the updated slice.
// The returned bool reports whether any invalid hex was found.
// Spaces and tabs between bytes are okay but any other non-hex is not.
func parseHex(hex []byte, raw []byte) ([]byte, bool) {
	hex = bytes.TrimSpace(hex)
	for j := 0; j < len(hex); {
		for hex[j] == ' ' || hex[j] == '\t' {
			j++
		}
		if j >= len(hex) {
			break
		}
		if j+2 > len(hex) || !isHex(hex[j]) || !isHex(hex[j+1]) {
			return nil, false
		}
		raw = append(raw, unhex(hex[j])<<4|unhex(hex[j+1]))
		j += 2
	}
	return raw, true
}

// unhex returns the value of a single hex digit, or 0 for non-hex input.
func unhex(b byte) byte {
	if '0' <= b && b <= '9' {
		return b - '0'
	} else if 'A' <= b && b <= 'F' {
		return b - 'A' + 10
	} else if 'a' <= b && b <= 'f' {
		return b - 'a' + 10
	}
	return 0
}

// index is like bytes.Index(s, []byte(t)) but avoids the allocation.
func index(s []byte, t string) int {
	i := 0
	for {
		j := bytes.IndexByte(s[i:], t[0])
		if j < 0 {
			return -1
		}
		i = i + j
		if i+len(t) > len(s) {
			return -1
		}
		for k := 1; k < len(t); k++ {
			if s[i+k] != t[k] {
				goto nomatch
			}
		}
		return i
	nomatch:
		i++
	}
}

// fixSpace rewrites runs of spaces, tabs, and newline characters into single spaces in s.
// If s must be rewritten, it is rewritten in place.
func fixSpace(s []byte) []byte {
	s = bytes.TrimSpace(s)
	for i := 0; i < len(s); i++ {
		if s[i] == '\t' || s[i] == '\n' || i > 0 && s[i] == ' ' && s[i-1] == ' ' {
			goto Fix
		}
	}
	return s

Fix:
	b := s
	w := 0
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c == '\t' || c == '\n' {
			c = ' '
		}
		if c == ' ' && w > 0 && b[w-1] == ' ' {
			continue
		}
		b[w] = c
		w++
	}
	if w > 0 && b[w-1] == ' ' {
		w--
	}
	return b[:w]
}

// The following regular expressions match instructions that use relative addressing modes.
// pcrel matches B instructions and BL instructions.
// pcrelr matches instructions consisting of register arguments and label arguments.
// pcrelim matches instructions consisting of register arguments, immediate
// arguments and label arguments.
// pcrelrzr and pcrelimzr match the same instructions when the register argument
// is the zero register.
// pcrelprfm matches PRFM instructions whose arguments consist of a prfop and a label.
// pcrelprfmim matches PRFM instructions whose arguments consist of an immediate and a label.
var (
	pcrel       = regexp.MustCompile(`^((?:.* )?(?:b|bl)x?(?:\.)?(?:eq|ne|cs|cc|mi|pl|vs|vc|hi|ls|ge|lt|gt|le|al|nv)?) 0x([0-9a-f]+)$`)
	pcrelr      = regexp.MustCompile(`^((?:.*)?(?:ldr|adrp|adr|cbnz|cbz|ldrsw) (?:x|w|s|d|q)(?:[0-9]+,)) 0x([0-9a-f]+)$`)
	pcrelrzr    = regexp.MustCompile(`^((?:.*)?(?:ldr|adrp|adr|cbnz|cbz|ldrsw) (?:x|w)zr,) 0x([0-9a-f]+)$`)
	pcrelim     = regexp.MustCompile(`^((?:.*)?(?:tbnz|tbz) (?:x|w)(?:[0-9]+,) (?:#[0-9a-f]+,)) 0x([0-9a-f]+)$`)
	pcrelimzr   = regexp.MustCompile(`^((?:.*)?(?:tbnz|tbz) (?:x|w)zr, (?:#[0-9a-f]+,)) 0x([0-9a-f]+)$`)
	pcrelprfm   = regexp.MustCompile(`^((?:.*)?(?:prfm) (?:[0-9a-z]+,)) 0x([0-9a-f]+)$`)
	pcrelprfmim = regexp.MustCompile(`^((?:.*)?(?:prfm) (?:#0x[0-9a-f]+,)) 0x([0-9a-f]+)$`)
)

// Round is the multiple of the number of instructions that read from the Json file.
// Round, used as the seed value for the pseudo-random number generator, provides
// the same sequence in the same round run for the external disassembler and decoder.
var Round int

// condmark is set when conditional instructions need to be generated and tested.
var condmark bool = false

// doFuzzy generates an instruction encoding according to the Json file,
// encoding each variable field of the instruction with a random value.
// Ninst is the instruction's index; together with Round it seeds the
// generator so both test passes produce identical encodings.
func doFuzzy(inst *InstJson, Ninst int) {
	var testdata uint32
	var NonDigRE = regexp.MustCompile(`[\D]`)
	rand.Seed(int64(Round + Ninst))
	off := 0
	DigBit := ""
	if condmark == true && !strings.Contains(inst.Bits, "cond") {
		// Conditional-only runs skip instructions without a cond field;
		// the sentinel is filtered out by the caller.
		inst.Enc = 0xffffffff
	} else {
		// Walk the "|"-separated fields of the bit pattern, filling
		// testdata from bit 31 downward.
		for _, f := range strings.Split(inst.Bits, "|") {
			if i := strings.Index(f, ":"); i >= 0 {
				// consider f contains "01:2" and "Rm:5"
				DigBit = f[:i]
				m := NonDigRE.FindStringSubmatch(DigBit)
				if m == nil {
					// Fixed binary digits, e.g. "01:2".
					DigBit = strings.TrimSpace(DigBit)
					s := strings.Split(DigBit, "")
					for i := 0; i < len(s); i++ {
						switch s[i] {
						case "1", "(1)":
							testdata |= 1 << uint(31-off)
						}
						off++
					}
				} else {
					// DigBit is "Rn" or "imm3"
					n, _ := strconv.Atoi(f[i+1:])
					if DigBit == "cond" && condmark == true {
						// Encode the round number itself as the condition,
						// so all 16 conditions are covered across rounds.
						r := uint8(Round)
						for i := n - 1; i >= 0; i-- {
							switch (r >> uint(i)) & 1 {
							case 1:
								testdata |= 1 << uint(31-off)
							}
							off++
						}
					} else {
						// Variable field: fill with random bits.
						for i := 0; i < n; i++ {
							r := rand.Intn(2)
							switch r {
							case 1:
								testdata |= 1 << uint(31-off)
							}
							off++
						}
					}
				}
				continue
			}
			// Field without ":": individual space-separated bits.
			for _, bit := range strings.Fields(f) {
				switch bit {
				case "0", "(0)":
					off++
					continue
				case "1", "(1)":
					testdata |= 1 << uint(31-off)
				default:
					r := rand.Intn(2)
					switch r {
					case 1:
						testdata |= 1 << uint(31-off)
					}
				}
				off++
			}
		}
		if off != 32 {
			log.Printf("incorrect bit count for %s %s: have %d", inst.Name, inst.Bits, off)
		}
		inst.Enc = testdata
	}
}

// Generators.
//
// The test cases are described as functions that invoke a callback repeatedly,
// with a new input sequence each time. These helpers make writing those
// a little easier.

// JSONCases generates ARM64 instructions according to inst.json.
func JSONCases(t *testing.T) func(func([]byte)) {
	return func(try func([]byte)) {
		data, err := ioutil.ReadFile("inst.json")
		if err != nil {
			t.Fatal(err)
		}
		var insts []InstJson
		var instsN []InstJson
		// Change N value to get more cases only when condmark=false.
		N := 100
		if condmark == true {
			N = 16
		}
		if err := json.Unmarshal(data, &insts); err != nil {
			t.Fatal(err)
		}
		// Append instructions to get more test cases.
		for i := 0; i < N; {
			for _, inst := range insts {
				instsN = append(instsN, inst)
			}
			i++
		}
		Round = 0
		for i := range instsN {
			if i%len(insts) == 0 {
				Round++
			}
			doFuzzy(&instsN[i], i)
		}
		for _, inst := range instsN {
			if condmark == true && inst.Enc == 0xffffffff {
				continue
			}
			enc := inst.Enc
			// Emit little-endian instruction bytes.
			try([]byte{byte(enc), byte(enc >> 8), byte(enc >> 16), byte(enc >> 24)})
		}
	}
}

// condCases generates conditional instructions.
func condCases(t *testing.T) func(func([]byte)) {
	return func(try func([]byte)) {
		condmark = true
		JSONCases(t)(func(enc []byte) {
			try(enc)
		})
	}
}

// hexCases generates the cases written in hexadecimal in the encoded string.
// Spaces in 'encoded' separate entire test cases, not individual bytes.
func hexCases(t *testing.T, encoded string) func(func([]byte)) {
	return func(try func([]byte)) {
		for _, x := range strings.Fields(encoded) {
			src, err := hex.DecodeString(x)
			if err != nil {
				t.Errorf("parsing %q: %v", x, err)
			}
			try(src)
		}
	}
}

// testdataCases generates the test cases recorded in testdata/cases.txt.
// It only uses the inputs; it ignores the answers recorded in that file.
func testdataCases(t *testing.T) func(func([]byte)) {
	var codes [][]byte
	data, err := ioutil.ReadFile("testdata/cases.txt")
	if err != nil {
		t.Fatal(err)
	}
	for _, line := range strings.Split(string(data), "\n") {
		line = strings.TrimSpace(line)
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		f := strings.Fields(line)[0]
		// "|" marks the end of the instruction bytes in the hex field.
		i := strings.Index(f, "|")
		if i < 0 {
			t.Errorf("parsing %q: missing | separator", f)
			continue
		}
		if i%2 != 0 {
			t.Errorf("parsing %q: misaligned | separator", f)
		}
		code, err := hex.DecodeString(f[:i] + f[i+1:])
		if err != nil {
			t.Errorf("parsing %q: %v", f, err)
			continue
		}
		codes = append(codes, code)
	}

	return func(try func([]byte)) {
		for _, code := range codes {
			try(code)
		}
	}
}
diff --git a/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/gnu.go b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/gnu.go
new file mode 100644
index 00000000000..d1be0461738
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/gnu.go
@@ -0,0 +1,35 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package arm64asm

import (
	"strings"
)

// GNUSyntax returns the GNU assembler syntax for the instruction, as defined by GNU binutils.
// This form typically matches the syntax defined in the ARM Reference Manual.
func GNUSyntax(inst Inst) string {
	switch inst.Op {
	case RET:
		// "ret x30" is printed as plain "ret".
		if r, ok := inst.Args[0].(Reg); ok && r == X30 {
			return "ret"
		}
	case B:
		if _, ok := inst.Args[0].(Cond); ok {
			return strings.ToLower("b." + inst.Args[0].String() + " " + inst.Args[1].String())
		}
	case SYSL:
		result := strings.ToLower(inst.String())
		return strings.Replace(result, "c", "C", -1)
	case DCPS1, DCPS2, DCPS3, CLREX:
		return strings.ToLower(strings.TrimSpace(inst.String()))
	case ISB:
		// The default "SY" option is omitted in GNU syntax.
		if strings.Contains(inst.String(), "SY") {
			result := strings.TrimSuffix(inst.String(), " SY")
			return strings.ToLower(result)
		}
	}
	return strings.ToLower(inst.String())
}
diff --git a/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/inst.go b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/inst.go
new file mode 100644
index 00000000000..3ff31be222d
--- /dev/null
+++ b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/inst.go
@@ -0,0 +1,963 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package arm64asm

import (
	"fmt"
	"strings"
)

// An Op is an ARM64 opcode.
type Op uint16

// NOTE: The actual Op values are defined in tables.go.
// They are chosen to simplify instruction decoding and
// are not a dense packing from 0 to N, although the
// density is high, probably at least 90%.

func (op Op) String() string {
	if op >= Op(len(opstr)) || opstr[op] == "" {
		return fmt.Sprintf("Op(%d)", int(op))
	}
	return opstr[op]
}

// An Inst is a single instruction.
type Inst struct {
	Op   Op     // Opcode mnemonic
	Enc  uint32 // Raw encoding bits.
	Args Args   // Instruction arguments, in ARM manual order.
}

func (i Inst) String() string {
	var args []string
	for _, arg := range i.Args {
		if arg == nil {
			break
		}
		args = append(args, arg.String())
	}
	return i.Op.String() + " " + strings.Join(args, ", ")
}

// An Args holds the instruction arguments.
// If an instruction has fewer than 5 arguments,
// the final elements in the array are nil.
+type Args [5]Arg + +// An Arg is a single instruction argument, one of these types: +// Reg, RegSP, ImmShift, RegExtshiftAmount, PCRel, MemImmediate, +// MemExtend, Imm, Imm64, Imm_hint, Imm_clrex, Imm_dcps, Cond, +// Imm_c, Imm_option, Imm_prfop, Pstatefield, Systemreg, Imm_fp +// RegisterWithArrangement, RegisterWithArrangementAndIndex. +type Arg interface { + isArg() + String() string +} + +// A Reg is a single register. +// The zero value denotes W0, not the absence of a register. +type Reg uint16 + +const ( + W0 Reg = iota + W1 + W2 + W3 + W4 + W5 + W6 + W7 + W8 + W9 + W10 + W11 + W12 + W13 + W14 + W15 + W16 + W17 + W18 + W19 + W20 + W21 + W22 + W23 + W24 + W25 + W26 + W27 + W28 + W29 + W30 + WZR + + X0 + X1 + X2 + X3 + X4 + X5 + X6 + X7 + X8 + X9 + X10 + X11 + X12 + X13 + X14 + X15 + X16 + X17 + X18 + X19 + X20 + X21 + X22 + X23 + X24 + X25 + X26 + X27 + X28 + X29 + X30 + XZR + + B0 + B1 + B2 + B3 + B4 + B5 + B6 + B7 + B8 + B9 + B10 + B11 + B12 + B13 + B14 + B15 + B16 + B17 + B18 + B19 + B20 + B21 + B22 + B23 + B24 + B25 + B26 + B27 + B28 + B29 + B30 + B31 + + H0 + H1 + H2 + H3 + H4 + H5 + H6 + H7 + H8 + H9 + H10 + H11 + H12 + H13 + H14 + H15 + H16 + H17 + H18 + H19 + H20 + H21 + H22 + H23 + H24 + H25 + H26 + H27 + H28 + H29 + H30 + H31 + + S0 + S1 + S2 + S3 + S4 + S5 + S6 + S7 + S8 + S9 + S10 + S11 + S12 + S13 + S14 + S15 + S16 + S17 + S18 + S19 + S20 + S21 + S22 + S23 + S24 + S25 + S26 + S27 + S28 + S29 + S30 + S31 + + D0 + D1 + D2 + D3 + D4 + D5 + D6 + D7 + D8 + D9 + D10 + D11 + D12 + D13 + D14 + D15 + D16 + D17 + D18 + D19 + D20 + D21 + D22 + D23 + D24 + D25 + D26 + D27 + D28 + D29 + D30 + D31 + + Q0 + Q1 + Q2 + Q3 + Q4 + Q5 + Q6 + Q7 + Q8 + Q9 + Q10 + Q11 + Q12 + Q13 + Q14 + Q15 + Q16 + Q17 + Q18 + Q19 + Q20 + Q21 + Q22 + Q23 + Q24 + Q25 + Q26 + Q27 + Q28 + Q29 + Q30 + Q31 + + V0 + V1 + V2 + V3 + V4 + V5 + V6 + V7 + V8 + V9 + V10 + V11 + V12 + V13 + V14 + V15 + V16 + V17 + V18 + V19 + V20 + V21 + V22 + V23 + V24 + V25 + V26 + V27 + V28 + V29 + V30 + V31 
+ + WSP = WZR // These are different registers with the same encoding. + SP = XZR // These are different registers with the same encoding. +) + +func (Reg) isArg() {} + +func (r Reg) String() string { + switch { + case r == WZR: + return "WZR" + case r == XZR: + return "XZR" + case W0 <= r && r <= W30: + return fmt.Sprintf("W%d", int(r-W0)) + case X0 <= r && r <= X30: + return fmt.Sprintf("X%d", int(r-X0)) + + case B0 <= r && r <= B31: + return fmt.Sprintf("B%d", int(r-B0)) + case H0 <= r && r <= H31: + return fmt.Sprintf("H%d", int(r-H0)) + case S0 <= r && r <= S31: + return fmt.Sprintf("S%d", int(r-S0)) + case D0 <= r && r <= D31: + return fmt.Sprintf("D%d", int(r-D0)) + case Q0 <= r && r <= Q31: + return fmt.Sprintf("Q%d", int(r-Q0)) + + case V0 <= r && r <= V31: + return fmt.Sprintf("V%d", int(r-V0)) + default: + return fmt.Sprintf("Reg(%d)", int(r)) + } +} + +// A RegSP represent a register and X31/W31 is regarded as SP/WSP. +type RegSP Reg + +func (RegSP) isArg() {} + +func (r RegSP) String() string { + switch Reg(r) { + case WSP: + return "WSP" + case SP: + return "SP" + default: + return Reg(r).String() + } +} + +type ImmShift struct { + imm uint16 + shift uint8 +} + +func (ImmShift) isArg() {} + +func (is ImmShift) String() string { + if is.shift == 0 { + return fmt.Sprintf("#%#x", is.imm) + } + if is.shift < 128 { + return fmt.Sprintf("#%#x, LSL #%d", is.imm, is.shift) + } + return fmt.Sprintf("#%#x, MSL #%d", is.imm, is.shift-128) +} + +type ExtShift uint8 + +const ( + _ ExtShift = iota + uxtb + uxth + uxtw + uxtx + sxtb + sxth + sxtw + sxtx + lsl + lsr + asr + ror +) + +func (extShift ExtShift) String() string { + switch extShift { + case uxtb: + return "UXTB" + + case uxth: + return "UXTH" + + case uxtw: + return "UXTW" + + case uxtx: + return "UXTX" + + case sxtb: + return "SXTB" + + case sxth: + return "SXTH" + + case sxtw: + return "SXTW" + + case sxtx: + return "SXTX" + + case lsl: + return "LSL" + + case lsr: + return "LSR" + + case asr: + return 
"ASR" + + case ror: + return "ROR" + } + return "" +} + +type RegExtshiftAmount struct { + reg Reg + extShift ExtShift + amount uint8 + show_zero bool +} + +func (RegExtshiftAmount) isArg() {} + +func (rea RegExtshiftAmount) String() string { + buf := rea.reg.String() + if rea.extShift != ExtShift(0) { + buf += ", " + rea.extShift.String() + if rea.amount != 0 { + buf += fmt.Sprintf(" #%d", rea.amount) + } else { + if rea.show_zero == true { + buf += fmt.Sprintf(" #%d", rea.amount) + } + } + } + return buf +} + +// A PCRel describes a memory address (usually a code label) +// as a distance relative to the program counter. +type PCRel int64 + +func (PCRel) isArg() {} + +func (r PCRel) String() string { + return fmt.Sprintf(".%+#x", uint64(r)) +} + +// An AddrMode is an ARM addressing mode. +type AddrMode uint8 + +const ( + _ AddrMode = iota + AddrPostIndex // [R], X - use address R, set R = R + X + AddrPreIndex // [R, X]! - use address R + X, set R = R + X + AddrOffset // [R, X] - use address R + X + AddrPostReg // [Rn], Rm - - use address Rn, set Rn = Rn + Rm +) + +// A MemImmediate is a memory reference made up of a base R and immediate X. +// The effective memory address is R or R+X depending on AddrMode. +type MemImmediate struct { + Base RegSP + Mode AddrMode + imm int32 +} + +func (MemImmediate) isArg() {} + +func (m MemImmediate) String() string { + R := m.Base.String() + X := fmt.Sprintf("#%d", m.imm) + + switch m.Mode { + case AddrOffset: + if X == "#0" { + return fmt.Sprintf("[%s]", R) + } + return fmt.Sprintf("[%s,%s]", R, X) + case AddrPreIndex: + return fmt.Sprintf("[%s,%s]!", R, X) + case AddrPostIndex: + return fmt.Sprintf("[%s],%s", R, X) + case AddrPostReg: + post := Reg(X0) + Reg(m.imm) + postR := post.String() + return fmt.Sprintf("[%s], %s", R, postR) + } + return fmt.Sprintf("unimplemented!") +} + +// A MemExtend is a memory reference made up of a base R and index expression X. 
+// The effective memory address is R or R+X depending on Index, Extend and Amount. +type MemExtend struct { + Base RegSP + Index Reg + Extend ExtShift + Amount uint8 + Absent bool +} + +func (MemExtend) isArg() {} + +func (m MemExtend) String() string { + Rbase := m.Base.String() + RIndex := m.Index.String() + if m.Absent { + if m.Amount != 0 { + return fmt.Sprintf("[%s,%s,%s #0]", Rbase, RIndex, m.Extend.String()) + } else { + if m.Extend != lsl { + return fmt.Sprintf("[%s,%s,%s]", Rbase, RIndex, m.Extend.String()) + } else { + return fmt.Sprintf("[%s,%s]", Rbase, RIndex) + } + } + } else { + if m.Amount != 0 { + return fmt.Sprintf("[%s,%s,%s #%d]", Rbase, RIndex, m.Extend.String(), m.Amount) + } else { + if m.Extend != lsl { + return fmt.Sprintf("[%s,%s,%s]", Rbase, RIndex, m.Extend.String()) + } else { + return fmt.Sprintf("[%s,%s]", Rbase, RIndex) + } + } + } +} + +// An Imm is an integer constant. +type Imm struct { + Imm uint32 + Decimal bool +} + +func (Imm) isArg() {} + +func (i Imm) String() string { + if !i.Decimal { + return fmt.Sprintf("#%#x", i.Imm) + } else { + return fmt.Sprintf("#%d", i.Imm) + } +} + +type Imm64 struct { + Imm uint64 + Decimal bool +} + +func (Imm64) isArg() {} + +func (i Imm64) String() string { + if !i.Decimal { + return fmt.Sprintf("#%#x", i.Imm) + } else { + return fmt.Sprintf("#%d", i.Imm) + } +} + +// An Imm_hint is an integer constant for HINT instruction. +type Imm_hint uint8 + +func (Imm_hint) isArg() {} + +func (i Imm_hint) String() string { + return fmt.Sprintf("#%#x", uint32(i)) +} + +// An Imm_clrex is an integer constant for CLREX instruction. +type Imm_clrex uint8 + +func (Imm_clrex) isArg() {} + +func (i Imm_clrex) String() string { + if i == 15 { + return "" + } + return fmt.Sprintf("#%#x", uint32(i)) +} + +// An Imm_dcps is an integer constant for DCPS[123] instruction. 
+type Imm_dcps uint16 + +func (Imm_dcps) isArg() {} + +func (i Imm_dcps) String() string { + if i == 0 { + return "" + } + return fmt.Sprintf("#%#x", uint32(i)) +} + +// Standard conditions. +type Cond struct { + Value uint8 + Invert bool +} + +func (Cond) isArg() {} + +func (c Cond) String() string { + cond31 := c.Value >> 1 + invert := bool((c.Value & 1) == 1) + invert = (invert != c.Invert) + switch cond31 { + case 0: + if invert { + return "NE" + } else { + return "EQ" + } + case 1: + if invert { + return "CC" + } else { + return "CS" + } + case 2: + if invert { + return "PL" + } else { + return "MI" + } + case 3: + if invert { + return "VC" + } else { + return "VS" + } + case 4: + if invert { + return "LS" + } else { + return "HI" + } + case 5: + if invert { + return "LT" + } else { + return "GE" + } + case 6: + if invert { + return "LE" + } else { + return "GT" + } + case 7: + return "AL" + } + return "" +} + +// An Imm_c is an integer constant for SYS/SYSL/TLBI instruction. +type Imm_c uint8 + +func (Imm_c) isArg() {} + +func (i Imm_c) String() string { + return fmt.Sprintf("C%d", uint8(i)) +} + +// An Imm_option is an integer constant for DMB/DSB/ISB instruction. +type Imm_option uint8 + +func (Imm_option) isArg() {} + +func (i Imm_option) String() string { + switch uint8(i) { + case 15: + return "SY" + case 14: + return "ST" + case 13: + return "LD" + case 11: + return "ISH" + case 10: + return "ISHST" + case 9: + return "ISHLD" + case 7: + return "NSH" + case 6: + return "NSHST" + case 5: + return "NSHLD" + case 3: + return "OSH" + case 2: + return "OSHST" + case 1: + return "OSHLD" + } + return fmt.Sprintf("#%#02x", uint8(i)) +} + +// An Imm_prfop is an integer constant for PRFM instruction. 
type Imm_prfop uint8

func (Imm_prfop) isArg() {}

// String decodes the prfop into its type/target/policy mnemonic
// (e.g. "PLDL1KEEP"); encodings with an unallocated type or target
// field print as a raw immediate.
func (i Imm_prfop) String() string {
	ptype := (i >> 3) & 3
	target := (i >> 1) & 3
	if ptype == 3 || target == 3 {
		return fmt.Sprintf("#%#02x", uint8(i))
	}
	types := [3]string{"PLD", "PLI", "PST"}
	targets := [3]string{"L1", "L2", "L3"}
	policy := "KEEP"
	if i&1 != 0 {
		policy = "STRM"
	}
	return types[ptype] + targets[target] + policy
}

// A Pstatefield denotes a PSTATE field (SPSel, DAIFSet or DAIFClr).
type Pstatefield uint8

const (
	SPSel Pstatefield = iota
	DAIFSet
	DAIFClr
)

func (Pstatefield) isArg() {}

func (p Pstatefield) String() string {
	names := map[Pstatefield]string{
		SPSel:   "SPSel",
		DAIFSet: "DAIFSet",
		DAIFClr: "DAIFClr",
	}
	if s, ok := names[p]; ok {
		return s
	}
	return "unimplemented"
}

// A Systemreg is a system register addressed by (op0, op1, Cn, Cm, op2).
type Systemreg struct {
	op0 uint8
	op1 uint8
	cn  uint8
	cm  uint8
	op2 uint8
}

func (Systemreg) isArg() {}

// String uses the generic S<op0>_<op1>_C<n>_C<m>_<op2> spelling.
func (s Systemreg) String() string {
	return fmt.Sprintf("S%d_%d_C%d_C%d_%d",
		s.op0, s.op1, s.cn, s.cm, s.op2)
}

// An Imm_fp is a signed floating-point constant.
+type Imm_fp struct { + s uint8 + exp int8 + pre uint8 +} + +func (Imm_fp) isArg() {} + +func (i Imm_fp) String() string { + var s, pre, numerator, denominator int16 + var result float64 + if i.s == 0 { + s = 1 + } else { + s = -1 + } + pre = s * int16(16+i.pre) + if i.exp > 0 { + numerator = (pre << uint8(i.exp)) + denominator = 16 + } else { + numerator = pre + denominator = (16 << uint8(-1*i.exp)) + } + result = float64(numerator) / float64(denominator) + return fmt.Sprintf("#%.18e", result) +} + +type Arrangement uint8 + +const ( + _ Arrangement = iota + ArrangementB + Arrangement8B + Arrangement16B + ArrangementH + Arrangement4H + Arrangement8H + ArrangementS + Arrangement2S + Arrangement4S + ArrangementD + Arrangement1D + Arrangement2D + Arrangement1Q +) + +func (a Arrangement) String() (result string) { + switch a { + case ArrangementB: + result = ".B" + case Arrangement8B: + result = ".8B" + case Arrangement16B: + result = ".16B" + case ArrangementH: + result = ".H" + case Arrangement4H: + result = ".4H" + case Arrangement8H: + result = ".8H" + case ArrangementS: + result = ".S" + case Arrangement2S: + result = ".2S" + case Arrangement4S: + result = ".4S" + case ArrangementD: + result = ".D" + case Arrangement1D: + result = ".1D" + case Arrangement2D: + result = ".2D" + case Arrangement1Q: + result = ".1Q" + } + return +} + +// Register with arrangement: ., { .8B, .8B}, +type RegisterWithArrangement struct { + r Reg + a Arrangement + cnt uint8 +} + +func (RegisterWithArrangement) isArg() {} + +func (r RegisterWithArrangement) String() string { + result := r.r.String() + result += r.a.String() + if r.cnt > 0 { + result = "{" + result + if r.cnt == 2 { + r1 := V0 + Reg((uint16(r.r)-uint16(V0)+1)&31) + result += ", " + r1.String() + r.a.String() + } else if r.cnt > 2 { + if (uint16(r.cnt) + ((uint16(r.r) - uint16(V0)) & 31)) > 32 { + for i := 1; i < int(r.cnt); i++ { + cur := V0 + Reg((uint16(r.r)-uint16(V0)+uint16(i))&31) + result += ", " + cur.String() + 
r.a.String() + } + } else { + r1 := V0 + Reg((uint16(r.r)-uint16(V0)+uint16(r.cnt)-1)&31) + result += "-" + r1.String() + r.a.String() + } + } + result += "}" + } + return result +} + +// Register with arrangement and index: .[], +// { .B, .B }[]. +type RegisterWithArrangementAndIndex struct { + r Reg + a Arrangement + index uint8 + cnt uint8 +} + +func (RegisterWithArrangementAndIndex) isArg() {} + +func (r RegisterWithArrangementAndIndex) String() string { + result := r.r.String() + result += r.a.String() + if r.cnt > 0 { + result = "{" + result + if r.cnt == 2 { + r1 := V0 + Reg((uint16(r.r)-uint16(V0)+1)&31) + result += ", " + r1.String() + r.a.String() + } else if r.cnt > 2 { + if (uint16(r.cnt) + ((uint16(r.r) - uint16(V0)) & 31)) > 32 { + for i := 1; i < int(r.cnt); i++ { + cur := V0 + Reg((uint16(r.r)-uint16(V0)+uint16(i))&31) + result += ", " + cur.String() + r.a.String() + } + } else { + r1 := V0 + Reg((uint16(r.r)-uint16(V0)+uint16(r.cnt)-1)&31) + result += "-" + r1.String() + r.a.String() + } + } + result += "}" + } + return fmt.Sprintf("%s[%d]", result, r.index) +} diff --git a/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/inst.json b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/inst.json new file mode 100644 index 00000000000..2d25c944a21 --- /dev/null +++ b/src/cmd/vendor/golang.org/x/arch/arm64/arm64asm/inst.json @@ -0,0 +1,1219 @@ +[{"Name":"ADC","Bits":"0|0|0|1|1|0|1|0|0|0|0|Rm:5|0|0|0|0|0|0|Rn:5|Rd:5","Arch":"32-bit variant","Syntax":"ADC , , ","Code":"","Alias":""}, +{"Name":"ADC","Bits":"1|0|0|1|1|0|1|0|0|0|0|Rm:5|0|0|0|0|0|0|Rn:5|Rd:5","Arch":"64-bit variant","Syntax":"ADC , , ","Code":"","Alias":""}, +{"Name":"ADCS","Bits":"0|0|1|1|1|0|1|0|0|0|0|Rm:5|0|0|0|0|0|0|Rn:5|Rd:5","Arch":"32-bit variant","Syntax":"ADCS , , ","Code":"","Alias":""}, +{"Name":"ADCS","Bits":"1|0|1|1|1|0|1|0|0|0|0|Rm:5|0|0|0|0|0|0|Rn:5|Rd:5","Arch":"64-bit variant","Syntax":"ADCS , , ","Code":"","Alias":""}, +{"Name":"ADD (extended 
register)","Bits":"0|0|0|0|1|0|1|1|0|0|1|Rm:5|option:3|imm3:3|Rn:5|Rd:5","Arch":"32-bit variant","Syntax":"ADD , , {, {#}}","Code":"","Alias":""}, +{"Name":"ADD (extended register)","Bits":"1|0|0|0|1|0|1|1|0|0|1|Rm:5|option:3|imm3:3|Rn:5|Rd:5","Arch":"64-bit variant","Syntax":"ADD , , {, {#}}","Code":"","Alias":""}, +{"Name":"ADD (immediate)","Bits":"0|0|0|1|0|0|0|1|shift:2|imm12:12|Rn:5|Rd:5","Arch":"32-bit variant","Syntax":"ADD , , #{, }","Code":"","Alias":"This instruction is used by the alias MOV (to/from SP)."}, +{"Name":"ADD (immediate)","Bits":"1|0|0|1|0|0|0|1|shift:2|imm12:12|Rn:5|Rd:5","Arch":"64-bit variant","Syntax":"ADD , , #{, }","Code":"","Alias":"This instruction is used by the alias MOV (to/from SP)."}, +{"Name":"ADD (shifted register)","Bits":"0|0|0|0|1|0|1|1|shift:2|0|Rm:5|imm6:6|Rn:5|Rd:5","Arch":"32-bit variant","Syntax":"ADD , , {, #}","Code":"","Alias":""}, +{"Name":"ADD (shifted register)","Bits":"1|0|0|0|1|0|1|1|shift:2|0|Rm:5|imm6:6|Rn:5|Rd:5","Arch":"64-bit variant","Syntax":"ADD , , {, #}","Code":"","Alias":""}, +{"Name":"ADDS (extended register)","Bits":"0|0|1|0|1|0|1|1|0|0|1|Rm:5|option:3|imm3:3|Rn:5|Rd:5","Arch":"32-bit variant","Syntax":"ADDS , , {, {#}}","Code":"","Alias":"This instruction is used by the alias CMN (extended register)."}, +{"Name":"ADDS (extended register)","Bits":"1|0|1|0|1|0|1|1|0|0|1|Rm:5|option:3|imm3:3|Rn:5|Rd:5","Arch":"64-bit variant","Syntax":"ADDS , , {, {#}}","Code":"","Alias":"This instruction is used by the alias CMN (extended register)."}, +{"Name":"ADDS (immediate)","Bits":"0|0|1|1|0|0|0|1|shift:2|imm12:12|Rn:5|Rd:5","Arch":"32-bit variant","Syntax":"ADDS , , #{, }","Code":"","Alias":"This instruction is used by the alias CMN (immediate)."}, +{"Name":"ADDS (immediate)","Bits":"1|0|1|1|0|0|0|1|shift:2|imm12:12|Rn:5|Rd:5","Arch":"64-bit variant","Syntax":"ADDS , , #{, }","Code":"","Alias":"This instruction is used by the alias CMN (immediate)."}, +{"Name":"ADDS (shifted 
register)","Bits":"0|0|1|0|1|0|1|1|shift:2|0|Rm:5|imm6:6|Rn:5|Rd:5","Arch":"32-bit variant","Syntax":"ADDS , , {, #}","Code":"","Alias":"This instruction is used by the alias CMN (shifted register)."}, +{"Name":"ADDS (shifted register)","Bits":"1|0|1|0|1|0|1|1|shift:2|0|Rm:5|imm6:6|Rn:5|Rd:5","Arch":"64-bit variant","Syntax":"ADDS , , {, #}","Code":"","Alias":"This instruction is used by the alias CMN (shifted register)."}, +{"Name":"ADR","Bits":"0|immlo:2|1|0|0|0|0|immhi:19|Rd:5","Arch":"Literal variant","Syntax":"ADR ,