// Inferno utils/5l/asm.c
// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/asm.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package arm64

import (
	"cmd/internal/objabi"
	"cmd/internal/sys"
	"cmd/link/internal/ld"
	"cmd/link/internal/loader"
	"cmd/link/internal/sym"
	"debug/elf"
	"fmt"
	"log"
	"sync"
)

func gentext2(ctxt *ld.Link, ldr *loader.Loader) {
	initfunc, addmoduledata := ld.PrepareAddmoduledata(ctxt)
	if initfunc == nil {
		return
	}

	o := func(op uint32) {
		initfunc.AddUint32(ctxt.Arch, op)
	}

	// 0000000000000000 <local.dso_init>:
	// 0:	90000000	adrp	x0, 0
	//		0: R_AARCH64_ADR_PREL_PG_HI21	local.moduledata
	// 4:	91000000	add	x0, x0, #0x0
	//		4: R_AARCH64_ADD_ABS_LO12_NC	local.moduledata
	o(0x90000000)
	o(0x91000000)
	rel := loader.Reloc{
		Off:  0,
		Size: 8,
		Type: objabi.R_ADDRARM64,
		Sym:  ctxt.Moduledata2,
	}
	initfunc.AddReloc(rel)

	// 8:	14000000	b	0
	//		8: R_AARCH64_CALL26	runtime.addmoduledata
	o(0x14000000)
	rel2 := loader.Reloc{
		Off:  8,
		Size: 4,
		Type: objabi.R_CALLARM64, // Really should be R_AARCH64_JUMP26 but doesn't seem to make any difference
		Sym:  addmoduledata,
	}
	initfunc.AddReloc(rel2)
}

func adddynrel2(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc2, rIdx int) bool {
	targ := r.Sym()
	var targType sym.SymKind
	if targ != 0 {
		targType = ldr.SymType(targ)
	}

	switch r.Type() {
	default:
		if r.Type() >= objabi.ElfRelocOffset {
			ldr.Errorf(s, "unexpected relocation type %d (%s)", r.Type(), sym.RelocName(target.Arch, r.Type()))
			return false
		}

	// Handle relocations found in ELF object files.
	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_PREL32):
		if targType == sym.SDYNIMPORT {
			ldr.Errorf(s, "unexpected R_AARCH64_PREL32 relocation for dynamic symbol %s", ldr.SymName(targ))
		}
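		// Note on the addend adjustment below: the ELF R_AARCH64_PREL32/PREL64
		// relocations compute S + A - P, i.e. they are relative to the start of
		// the relocated field, while the linker's R_PCREL is resolved relative
		// to the end of the field. Converting therefore adds the field size
		// (4 bytes here, 8 bytes for PREL64 below).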
		// TODO(mwhudson): the test of VisibilityHidden here probably doesn't make
		// sense and should be removed when someone has thought about it properly.
		if (targType == 0 || targType == sym.SXREF) && !ldr.AttrVisibilityHidden(targ) {
			ldr.Errorf(s, "unknown symbol %s in pcrel", ldr.SymName(targ))
		}
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_PCREL)
		su.SetRelocAdd(rIdx, r.Add()+4)
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_PREL64):
		if targType == sym.SDYNIMPORT {
			ldr.Errorf(s, "unexpected R_AARCH64_PREL64 relocation for dynamic symbol %s", ldr.SymName(targ))
		}
		if targType == 0 || targType == sym.SXREF {
			ldr.Errorf(s, "unknown symbol %s in pcrel", ldr.SymName(targ))
		}
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_PCREL)
		su.SetRelocAdd(rIdx, r.Add()+8)
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_CALL26),
		objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_JUMP26):
		if targType == sym.SDYNIMPORT {
			addpltsym2(target, ldr, syms, targ)
			su := ldr.MakeSymbolUpdater(s)
			su.SetRelocSym(rIdx, syms.PLT2)
			su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymPlt(targ)))
		}
		if (targType == 0 || targType == sym.SXREF) && !ldr.AttrVisibilityHidden(targ) {
			ldr.Errorf(s, "unknown symbol %s in callarm64", ldr.SymName(targ))
		}
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_CALLARM64)
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_ADR_GOT_PAGE),
		objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_LD64_GOT_LO12_NC):
		if targType != sym.SDYNIMPORT {
			// have symbol
			// TODO: turn LDR of GOT entry into ADR of symbol itself
		}

		// fall back to using GOT
		// TODO: just needs relocation, no need to put in .dynsym
		addgotsym2(target, ldr, syms, targ)

		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_ARM64_GOT)
		su.SetRelocSym(rIdx, syms.GOT2)
		su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymGot(targ)))
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_ADR_PREL_PG_HI21),
		objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_ADD_ABS_LO12_NC):
		if targType == sym.SDYNIMPORT {
			ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ))
		}
		if targType == 0 || targType == sym.SXREF {
			ldr.Errorf(s, "unknown symbol %s", ldr.SymName(targ))
		}
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_ARM64_PCREL)
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_ABS64):
		if targType == sym.SDYNIMPORT {
			ldr.Errorf(s, "unexpected R_AARCH64_ABS64 relocation for dynamic symbol %s", ldr.SymName(targ))
		}
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_ADDR)
		if target.IsPIE() && target.IsInternal() {
			// For internal linking PIE, this R_ADDR relocation cannot
			// be resolved statically. We need to generate a dynamic
			// relocation. Let the code below handle it.
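			// Note: this break leaves only the switch statement, not a loop;
			// control continues at the second switch below, where the dynamic
			// relocation for this R_ADDR is generated.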
			break
		}
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_LDST8_ABS_LO12_NC):
		if targType == sym.SDYNIMPORT {
			ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ))
		}
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_ARM64_LDST8)
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_LDST32_ABS_LO12_NC):
		if targType == sym.SDYNIMPORT {
			ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ))
		}
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_ARM64_LDST32)
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_LDST64_ABS_LO12_NC):
		if targType == sym.SDYNIMPORT {
			ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ))
		}
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_ARM64_LDST64)
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_LDST128_ABS_LO12_NC):
		if targType == sym.SDYNIMPORT {
			ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ))
		}
		su := ldr.MakeSymbolUpdater(s)
		su.SetRelocType(rIdx, objabi.R_ARM64_LDST128)
		return true
	}

	// Reread the reloc to incorporate any changes in type above.
	relocs := ldr.Relocs(s)
	r = relocs.At2(rIdx)

	switch r.Type() {
	case objabi.R_CALL, objabi.R_PCREL, objabi.R_CALLARM64:
		if targType != sym.SDYNIMPORT {
			// nothing to do, the relocation will be laid out in reloc
			return true
		}
		if target.IsExternal() {
			// External linker will do this relocation.
			return true
		}

	case objabi.R_ADDR:
		if ldr.SymType(s) == sym.STEXT && target.IsElf() {
			// The code is asking for the address of an external
			// function. We provide it with the address of the
			// corresponding GOT symbol.
			addgotsym2(target, ldr, syms, targ)

			su := ldr.MakeSymbolUpdater(s)
			su.SetRelocSym(rIdx, syms.GOT2)
			su.SetRelocAdd(rIdx, r.Add()+int64(ldr.SymGot(targ)))
			return true
		}

		// Process dynamic relocations for the data sections.
		if target.IsPIE() && target.IsInternal() {
			// When internally linking, generate dynamic relocations
			// for all typical R_ADDR relocations. The exceptions are
			// those R_ADDR relocations that are created as part of
			// generating the dynamic relocations and must be resolved
			// statically.
			//
			// There are three phases relevant to understanding this:
			//
			//	dodata()  // we are here
			//	address() // symbol address assignment
			//	reloc()   // resolution of static R_ADDR relocs
			//
			// At this point symbol addresses have not been
			// assigned yet (as the final size of the .rela section
			// will affect the addresses), and so we cannot write
			// the Elf64_Rela.r_offset now. Instead we delay it
			// until after the 'address' phase of the linker is
			// complete. We do this via Addaddrplus, which creates
			// a new R_ADDR relocation which will be resolved in
			// the 'reloc' phase.
			//
			// These synthetic static R_ADDR relocs must be skipped
			// now, or else we will be caught in an infinite loop
			// of generating synthetic relocs for our synthetic
			// relocs.
			//
			// Furthermore, the rela sections contain dynamic
			// relocations with R_ADDR relocations on
			// Elf64_Rela.r_offset. This field should contain the
			// symbol offset as determined by reloc(), not the
			// final dynamically linked address as a dynamic
			// relocation would provide.
			switch ldr.SymName(s) {
			case ".dynsym", ".rela", ".rela.plt", ".got.plt", ".dynamic":
				return false
			}
		} else {
			// Either internally linking a static executable,
			// in which case we can resolve these relocations
			// statically in the 'reloc' phase, or externally
			// linking, in which case the relocation will be
			// prepared in the 'reloc' phase and passed to the
			// external linker in the 'asmb' phase.
			if ldr.SymType(s) != sym.SDATA && ldr.SymType(s) != sym.SRODATA {
				break
			}
		}

		if target.IsElf() {
			// Generate R_AARCH64_RELATIVE relocations for best
			// efficiency in the dynamic linker.
			//
			// As noted above, symbol addresses have not been
			// assigned yet, so we can't generate the final reloc
			// entry yet. We ultimately want:
			//
			//	r_offset = s + r.Off
			//	r_info = R_AARCH64_RELATIVE
			//	r_addend = targ + r.Add
			//
			// The dynamic linker will set *offset = base address +
			// addend.
			//
			// AddAddrPlus is used for r_offset and r_addend to
			// generate new R_ADDR relocations that will update
			// these fields in the 'reloc' phase.
			rela := ldr.MakeSymbolUpdater(syms.Rela2)
			rela.AddAddrPlus(target.Arch, s, int64(r.Off()))
			if r.Siz() == 8 {
				rela.AddUint64(target.Arch, ld.ELF64_R_INFO(0, uint32(elf.R_AARCH64_RELATIVE)))
			} else {
				ldr.Errorf(s, "unexpected relocation for dynamic symbol %s", ldr.SymName(targ))
			}
			rela.AddAddrPlus(target.Arch, targ, int64(r.Add()))
			// We do not mark r as done here, so it is still applied
			// statically. That way the file content also carries the
			// right offset to the relocation target, and the binary
			// can be examined statically (e.g. by go version).
			return true
		}
	}
	return false
}

func elfreloc1(ctxt *ld.Link, r *sym.Reloc, sectoff int64) bool {
	ctxt.Out.Write64(uint64(sectoff))

	elfsym := ld.ElfSymForReloc(ctxt, r.Xsym)
	switch r.Type {
	default:
		return false
	case objabi.R_ADDR, objabi.R_DWARFSECREF:
		switch r.Siz {
		case 4:
			ctxt.Out.Write64(uint64(elf.R_AARCH64_ABS32) | uint64(elfsym)<<32)
		case 8:
			ctxt.Out.Write64(uint64(elf.R_AARCH64_ABS64) | uint64(elfsym)<<32)
		default:
			return false
		}
	case objabi.R_ADDRARM64:
		// two relocations: R_AARCH64_ADR_PREL_PG_HI21 and R_AARCH64_ADD_ABS_LO12_NC
		ctxt.Out.Write64(uint64(elf.R_AARCH64_ADR_PREL_PG_HI21) | uint64(elfsym)<<32)
		ctxt.Out.Write64(uint64(r.Xadd))
		ctxt.Out.Write64(uint64(sectoff + 4))
		ctxt.Out.Write64(uint64(elf.R_AARCH64_ADD_ABS_LO12_NC) | uint64(elfsym)<<32)
	case objabi.R_ARM64_TLS_LE:
		ctxt.Out.Write64(uint64(elf.R_AARCH64_TLSLE_MOVW_TPREL_G0) | uint64(elfsym)<<32)
	case objabi.R_ARM64_TLS_IE:
		ctxt.Out.Write64(uint64(elf.R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) | uint64(elfsym)<<32)
		ctxt.Out.Write64(uint64(r.Xadd))
		ctxt.Out.Write64(uint64(sectoff + 4))
		ctxt.Out.Write64(uint64(elf.R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) | uint64(elfsym)<<32)
	case objabi.R_ARM64_GOTPCREL:
		ctxt.Out.Write64(uint64(elf.R_AARCH64_ADR_GOT_PAGE) | uint64(elfsym)<<32)
		ctxt.Out.Write64(uint64(r.Xadd))
		ctxt.Out.Write64(uint64(sectoff + 4))
		ctxt.Out.Write64(uint64(elf.R_AARCH64_LD64_GOT_LO12_NC) | uint64(elfsym)<<32)
	case objabi.R_CALLARM64:
		if r.Siz != 4 {
			return false
		}
		ctxt.Out.Write64(uint64(elf.R_AARCH64_CALL26) | uint64(elfsym)<<32)
	}
	ctxt.Out.Write64(uint64(r.Xadd))

	return true
}

func machoreloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, sectoff int64) bool {
	var v uint32

	rs := r.Xsym

	if rs.Type == sym.SHOSTOBJ || r.Type == objabi.R_CALLARM64 || r.Type == objabi.R_ADDRARM64 {
		if rs.Dynid < 0 {
			ld.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", r.Type, sym.RelocName(arch, r.Type), rs.Name, rs.Type, rs.Type)
			return false
		}
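		// The values packed below follow the Mach-O relocation_info layout:
		// the symbol or section number occupies the low 24 bits, bit 24 is
		// the pc-relative flag, bits 25-26 encode the length (log2 of the
		// field size), bit 27 is the external flag, and bits 28-31 hold the
		// relocation type.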
		v = uint32(rs.Dynid)
		v |= 1 << 27 // external relocation
	} else {
		v = uint32(rs.Sect.Extnum)
		if v == 0 {
			ld.Errorf(s, "reloc %d (%s) to symbol %s in non-macho section %s type=%d (%s)", r.Type, sym.RelocName(arch, r.Type), rs.Name, rs.Sect.Name, rs.Type, rs.Type)
			return false
		}
	}

	switch r.Type {
	default:
		return false
	case objabi.R_ADDR:
		v |= ld.MACHO_ARM64_RELOC_UNSIGNED << 28
	case objabi.R_CALLARM64:
		if r.Xadd != 0 {
			ld.Errorf(s, "ld64 doesn't allow BR26 reloc with non-zero addend: %s+%d", rs.Name, r.Xadd)
		}

		v |= 1 << 24 // pc-relative bit
		v |= ld.MACHO_ARM64_RELOC_BRANCH26 << 28
	case objabi.R_ADDRARM64:
		r.Siz = 4
		// Two relocation entries: MACHO_ARM64_RELOC_PAGEOFF12 MACHO_ARM64_RELOC_PAGE21
		// if r.Xadd is non-zero, add two MACHO_ARM64_RELOC_ADDEND.
		if r.Xadd != 0 {
			out.Write32(uint32(sectoff + 4))
			out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff))
		}
		out.Write32(uint32(sectoff + 4))
		out.Write32(v | (ld.MACHO_ARM64_RELOC_PAGEOFF12 << 28) | (2 << 25))
		if r.Xadd != 0 {
			out.Write32(uint32(sectoff))
			out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff))
		}
		v |= 1 << 24 // pc-relative bit
		v |= ld.MACHO_ARM64_RELOC_PAGE21 << 28
	}

	switch r.Siz {
	default:
		return false
	case 1:
		v |= 0 << 25
	case 2:
		v |= 1 << 25
	case 4:
		v |= 2 << 25
	case 8:
		v |= 3 << 25
	}

	out.Write32(uint32(sectoff))
	out.Write32(v)
	return true
}

func archreloc2(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loader.Reloc2, rr *loader.ExtReloc, s loader.Sym, val int64) (int64, bool, bool) {
	const extRelocNeeded = true
	const extRelocNotNeeded = false
	const isOk = true

	rs := ldr.ResolveABIAlias(r.Sym())
	if target.IsExternal() {
		switch r.Type() {
		default:
		case objabi.R_ARM64_GOTPCREL,
			objabi.R_ADDRARM64:
			// set up addend for eventual relocation via outer symbol.
			rs, off := ld.FoldSubSymbolOffset(ldr, rs)
			rr.Xadd = r.Add() + off
			rst := ldr.SymType(rs)
			if rst != sym.SHOSTOBJ && rst != sym.SDYNIMPORT && ldr.SymSect(rs) == nil {
				ldr.Errorf(s, "missing section for %s", ldr.SymName(rs))
			}
			rr.Xsym = rs

			// Note: ld64 currently has a bug that any non-zero addend for BR26 relocation
			// will make the linking fail because it thinks the code is not PIC even though
			// the BR26 relocation should be fully resolved at link time.
			// That is the reason why the next if block is disabled. When the bug in ld64
			// is fixed, we can enable this block and also enable duff's device in cmd/7g.
			if false && target.IsDarwin() {
				var o0, o1 uint32

				if target.IsBigEndian() {
					o0 = uint32(val >> 32)
					o1 = uint32(val)
				} else {
					o0 = uint32(val)
					o1 = uint32(val >> 32)
				}

				// Mach-O wants the addend to be encoded in the instruction.
				// Note that although Mach-O supports ARM64_RELOC_ADDEND, it
				// can only encode 24 bits of signed addend, while the
				// instructions support a 33-bit signed addend, so we always
				// encode the addend in place.
				o0 |= (uint32((rr.Xadd>>12)&3) << 29) | (uint32((rr.Xadd>>12>>2)&0x7ffff) << 5)
				o1 |= uint32(rr.Xadd&0xfff) << 10
				rr.Xadd = 0
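				// The shifts above follow the AArch64 immediate layout: ADRP
				// keeps the low two bits of the page offset in bits 30:29
				// (immlo) and the remaining 19 bits in bits 23:5 (immhi),
				// while ADD takes its 12-bit immediate in bits 21:10.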
				// when laid out, the instruction order must always be o0, o1.
				if target.IsBigEndian() {
					val = int64(o0)<<32 | int64(o1)
				} else {
					val = int64(o1)<<32 | int64(o0)
				}
			}
			return val, extRelocNeeded, isOk
		case objabi.R_CALLARM64,
			objabi.R_ARM64_TLS_LE,
			objabi.R_ARM64_TLS_IE:
			rr.Xsym = rs
			rr.Xadd = r.Add()
			return val, extRelocNeeded, isOk
		}
	}

	switch r.Type() {
	case objabi.R_ADDRARM64:
		t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff)
		if t >= 1<<32 || t < -1<<32 {
			ldr.Errorf(s, "program too large, address relocation distance = %d", t)
		}

		var o0, o1 uint32

		if target.IsBigEndian() {
			o0 = uint32(val >> 32)
			o1 = uint32(val)
		} else {
			o0 = uint32(val)
			o1 = uint32(val >> 32)
		}

		o0 |= (uint32((t>>12)&3) << 29) | (uint32((t>>12>>2)&0x7ffff) << 5)
		o1 |= uint32(t&0xfff) << 10

		// when laid out, the instruction order must always be o0, o1.
		if target.IsBigEndian() {
			return int64(o0)<<32 | int64(o1), extRelocNotNeeded, true
		}
		return int64(o1)<<32 | int64(o0), extRelocNotNeeded, true

	case objabi.R_ARM64_TLS_LE:
		if target.IsDarwin() {
			ldr.Errorf(s, "TLS reloc on unsupported OS %v", target.HeadType)
		}
		// The TCB is two pointers. This is not documented anywhere, but is
		// de facto part of the ABI.
		v := ldr.SymValue(rs) + int64(2*target.Arch.PtrSize)
		if v < 0 || v >= 32768 {
			ldr.Errorf(s, "TLS offset out of range %d", v)
		}
		return val | (v << 5), extRelocNeeded, true

	case objabi.R_ARM64_TLS_IE:
		if target.IsPIE() && target.IsElf() {
			// We are linking the final executable, so we
			// can optimize any TLS IE relocation to LE.
			if !target.IsLinux() {
				ldr.Errorf(s, "TLS reloc on unsupported OS %v", target.HeadType)
			}
			// The TCB is two pointers. This is not documented anywhere, but is
			// de facto part of the ABI.
			v := ldr.SymAddr(rs) + int64(2*target.Arch.PtrSize) + r.Add()
			if v < 0 || v >= 32768 {
				ldr.Errorf(s, "TLS offset out of range %d", v)
			}

			var o0, o1 uint32
			if target.IsBigEndian() {
				o0 = uint32(val >> 32)
				o1 = uint32(val)
			} else {
				o0 = uint32(val)
				o1 = uint32(val >> 32)
			}

			// R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
			// turn ADRP to MOVZ
			o0 = 0xd2a00000 | uint32(o0&0x1f) | (uint32((v>>16)&0xffff) << 5)
			// R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
			// turn LD64 to MOVK
			if v&3 != 0 {
				ldr.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", v)
			}
			o1 = 0xf2800000 | uint32(o1&0x1f) | (uint32(v&0xffff) << 5)
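			// 0xd2a00000 is MOVZ Xd, #imm16, LSL #16 and 0xf2800000 is
			// MOVK Xd, #imm16; the low five bits of the original ADRP/LDR
			// are kept so that the destination register is preserved.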
			// when laid out, the instruction order must always be o0, o1.
			if target.IsBigEndian() {
				return int64(o0)<<32 | int64(o1), extRelocNotNeeded, isOk
			}
			return int64(o1)<<32 | int64(o0), extRelocNotNeeded, isOk
		} else {
			log.Fatalf("cannot handle R_ARM64_TLS_IE (sym %s) when linking internally", ldr.SymName(s))
		}

	case objabi.R_CALLARM64:
		var t int64
		if ldr.SymType(rs) == sym.SDYNIMPORT {
			t = (ldr.SymAddr(syms.PLT2) + r.Add()) - (ldr.SymValue(s) + int64(r.Off()))
		} else {
			t = (ldr.SymAddr(rs) + r.Add()) - (ldr.SymValue(s) + int64(r.Off()))
		}
		if t >= 1<<27 || t < -1<<27 {
			ldr.Errorf(s, "program too large, call relocation distance = %d", t)
		}
		return val | ((t >> 2) & 0x03ffffff), extRelocNotNeeded, true

	case objabi.R_ARM64_GOT:
		sData := ldr.Data(s)
		if sData[r.Off()+3]&0x9f == 0x90 {
			// R_AARCH64_ADR_GOT_PAGE
			// patch instruction: adrp
			t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff)
			if t >= 1<<32 || t < -1<<32 {
				ldr.Errorf(s, "program too large, address relocation distance = %d", t)
			}
			var o0 uint32
			o0 |= (uint32((t>>12)&3) << 29) | (uint32((t>>12>>2)&0x7ffff) << 5)
			return val | int64(o0), extRelocNotNeeded, isOk
		} else if sData[r.Off()+3] == 0xf9 {
			// R_AARCH64_LD64_GOT_LO12_NC
			// patch instruction: ldr
			t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff)
			if t&7 != 0 {
				ldr.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_LD64_GOT_LO12_NC", t)
			}
			var o1 uint32
			o1 |= uint32(t&0xfff) << (10 - 3)
			return val | int64(uint64(o1)), extRelocNotNeeded, isOk
		} else {
			ldr.Errorf(s, "unsupported instruction for %v R_GOTARM64", sData[r.Off():r.Off()+4])
		}

	case objabi.R_ARM64_PCREL:
		sData := ldr.Data(s)
		if sData[r.Off()+3]&0x9f == 0x90 {
			// R_AARCH64_ADR_PREL_PG_HI21
			// patch instruction: adrp
			t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff)
			if t >= 1<<32 || t < -1<<32 {
				ldr.Errorf(s, "program too large, address relocation distance = %d", t)
			}
			o0 := (uint32((t>>12)&3) << 29) | (uint32((t>>12>>2)&0x7ffff) << 5)
			return val | int64(o0), extRelocNotNeeded, isOk
		} else if sData[r.Off()+3]&0x91 == 0x91 {
			// R_AARCH64_ADD_ABS_LO12_NC
			// patch instruction: add
			t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff)
			o1 := uint32(t&0xfff) << 10
			return val | int64(o1), extRelocNotNeeded, isOk
		} else {
			ldr.Errorf(s, "unsupported instruction for %v R_PCRELARM64", sData[r.Off():r.Off()+4])
		}

	case objabi.R_ARM64_LDST8:
		t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff)
		o0 := uint32(t&0xfff) << 10
		return val | int64(o0), extRelocNotNeeded, true

	case objabi.R_ARM64_LDST32:
		t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff)
		if t&3 != 0 {
			ldr.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_LDST32_ABS_LO12_NC", t)
		}
		o0 := (uint32(t&0xfff) >> 2) << 10
		return val | int64(o0), extRelocNotNeeded, true

	case objabi.R_ARM64_LDST64:
		t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff)
		if t&7 != 0 {
			ldr.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_LDST64_ABS_LO12_NC", t)
		}
		o0 := (uint32(t&0xfff) >> 3) << 10
		return val | int64(o0), extRelocNotNeeded, true

	case objabi.R_ARM64_LDST128:
		t := ldr.SymAddr(rs) + r.Add() - ((ldr.SymValue(s) + int64(r.Off())) &^ 0xfff)
		if t&15 != 0 {
			ldr.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_LDST128_ABS_LO12_NC", t)
		}
		o0 := (uint32(t&0xfff) >> 4) << 10
		return val | int64(o0), extRelocNotNeeded, true
	}

	return val, false, false
}

func archrelocvariant(target *ld.Target, syms *ld.ArchSyms, r *sym.Reloc, s *sym.Symbol, t int64) int64 {
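	// arm64 defines no relocation variants, so reaching this function is an
	// internal error.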
	log.Fatalf("unexpected relocation variant")
	return -1
}

func elfsetupplt(ctxt *ld.Link, plt, gotplt *loader.SymbolBuilder, dynamic loader.Sym) {
	if plt.Size() == 0 {
		// stp     x16, x30, [sp, #-16]!
		// identifying information
		plt.AddUint32(ctxt.Arch, 0xa9bf7bf0)

		// the following two instructions (adrp + ldr) load *got[2] into x17
		// adrp    x16, &got[0]
		plt.AddSymRef(ctxt.Arch, gotplt.Sym(), 16, objabi.R_ARM64_GOT, 4)
		plt.SetUint32(ctxt.Arch, plt.Size()-4, 0x90000010)

		// <offset> is the offset value of &got[2] to &got[0], the same below
		// ldr     x17, [x16, <offset>]
		plt.AddSymRef(ctxt.Arch, gotplt.Sym(), 16, objabi.R_ARM64_GOT, 4)
		plt.SetUint32(ctxt.Arch, plt.Size()-4, 0xf9400211)

		// add     x16, x16, <offset>
		plt.AddSymRef(ctxt.Arch, gotplt.Sym(), 16, objabi.R_ARM64_PCREL, 4)
		plt.SetUint32(ctxt.Arch, plt.Size()-4, 0x91000210)

		// br      x17
		plt.AddUint32(ctxt.Arch, 0xd61f0220)

		// 3 nops as placeholders
		plt.AddUint32(ctxt.Arch, 0xd503201f)
		plt.AddUint32(ctxt.Arch, 0xd503201f)
		plt.AddUint32(ctxt.Arch, 0xd503201f)

		// check gotplt.size == 0
		if gotplt.Size() != 0 {
			ctxt.Errorf(gotplt.Sym(), "got.plt is not empty at the very beginning")
		}
		gotplt.AddAddrPlus(ctxt.Arch, dynamic, 0)

		gotplt.AddUint64(ctxt.Arch, 0)
		gotplt.AddUint64(ctxt.Arch, 0)
	}
}

func addpltsym2(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym) {
	if ldr.SymPlt(s) >= 0 {
		return
	}

	ld.Adddynsym2(ldr, target, syms, s)

	if target.IsElf() {
		plt := ldr.MakeSymbolUpdater(syms.PLT2)
		gotplt := ldr.MakeSymbolUpdater(syms.GOTPLT2)
		rela := ldr.MakeSymbolUpdater(syms.RelaPLT2)
		if plt.Size() == 0 {
			panic("plt is not set up")
		}

		// adrp    x16, &got.plt[0]
		plt.AddAddrPlus4(target.Arch, gotplt.Sym(), gotplt.Size())
		plt.SetUint32(target.Arch, plt.Size()-4, 0x90000010)
		relocs := plt.Relocs()
		plt.SetRelocType(relocs.Count()-1, objabi.R_ARM64_GOT)

		// <offset> is the offset value of &got.plt[n] to &got.plt[0]
		// ldr     x17, [x16, <offset>]
		plt.AddAddrPlus4(target.Arch, gotplt.Sym(), gotplt.Size())
		plt.SetUint32(target.Arch, plt.Size()-4, 0xf9400211)
		relocs = plt.Relocs()
		plt.SetRelocType(relocs.Count()-1, objabi.R_ARM64_GOT)

		// add     x16, x16, <offset>
		plt.AddAddrPlus4(target.Arch, gotplt.Sym(), gotplt.Size())
		plt.SetUint32(target.Arch, plt.Size()-4, 0x91000210)
		relocs = plt.Relocs()
		plt.SetRelocType(relocs.Count()-1, objabi.R_ARM64_PCREL)

		// br      x17
		plt.AddUint32(target.Arch, 0xd61f0220)

		// add to got.plt: pointer to plt[0]
		gotplt.AddAddrPlus(target.Arch, plt.Sym(), 0)

		// rela
		rela.AddAddrPlus(target.Arch, gotplt.Sym(), gotplt.Size()-8)
		sDynid := ldr.SymDynid(s)

		rela.AddUint64(target.Arch, ld.ELF64_R_INFO(uint32(sDynid), uint32(elf.R_AARCH64_JUMP_SLOT)))
		rela.AddUint64(target.Arch, 0)

		// The entry just written is 4 instructions (16 bytes), so the PLT
		// offset recorded for s is the start of that entry.
		ldr.SetPlt(s, int32(plt.Size()-16))
	} else {
		ldr.Errorf(s, "addpltsym: unsupported binary format")
	}
}

func addgotsym2(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym) {
	if ldr.SymGot(s) >= 0 {
		return
	}

	ld.Adddynsym2(ldr, target, syms, s)
	got := ldr.MakeSymbolUpdater(syms.GOT2)
	ldr.SetGot(s, int32(got.Size()))
	got.AddUint64(target.Arch, 0)

	if target.IsElf() {
		rela := ldr.MakeSymbolUpdater(syms.Rela2)
		rela.AddAddrPlus(target.Arch, got.Sym(), int64(ldr.SymGot(s)))
		rela.AddUint64(target.Arch, ld.ELF64_R_INFO(uint32(ldr.SymDynid(s)), uint32(elf.R_AARCH64_GLOB_DAT)))
		rela.AddUint64(target.Arch, 0)
	} else {
		ldr.Errorf(s, "addgotsym: unsupported binary format")
	}
}

func asmb(ctxt *ld.Link, _ *loader.Loader) {
	if ctxt.IsELF {
		ld.Asmbelfsetup()
	}

	var wg sync.WaitGroup
	sect := ld.Segtext.Sections[0]
	offset := sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff
	ld.WriteParallel(&wg, ld.Codeblk, ctxt, offset, sect.Vaddr, sect.Length)
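	// Any additional text sections, the data segments, and the DWARF segment
	// are queued below; wg.Wait ensures all parallel writes complete before
	// asmb returns.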
	for _, sect := range ld.Segtext.Sections[1:] {
		offset := sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff
		ld.WriteParallel(&wg, ld.Datblk, ctxt, offset, sect.Vaddr, sect.Length)
	}

	if ld.Segrodata.Filelen > 0 {
		ld.WriteParallel(&wg, ld.Datblk, ctxt, ld.Segrodata.Fileoff, ld.Segrodata.Vaddr, ld.Segrodata.Filelen)
	}

	if ld.Segrelrodata.Filelen > 0 {
		ld.WriteParallel(&wg, ld.Datblk, ctxt, ld.Segrelrodata.Fileoff, ld.Segrelrodata.Vaddr, ld.Segrelrodata.Filelen)
	}

	ld.WriteParallel(&wg, ld.Datblk, ctxt, ld.Segdata.Fileoff, ld.Segdata.Vaddr, ld.Segdata.Filelen)

	ld.WriteParallel(&wg, ld.Dwarfblk, ctxt, ld.Segdwarf.Fileoff, ld.Segdwarf.Vaddr, ld.Segdwarf.Filelen)

	wg.Wait()
}

func asmb2(ctxt *ld.Link) {
	machlink := uint32(0)
	if ctxt.HeadType == objabi.Hdarwin {
		machlink = uint32(ld.Domacholink(ctxt))
	}

	/* output symbol table */
	ld.Symsize = 0
	ld.Lcsize = 0
	symo := uint32(0)
	if !*ld.FlagS {
		// TODO: rationalize
		switch ctxt.HeadType {
		default:
			if ctxt.IsELF {
				symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
				symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound)))
			}

		case objabi.Hplan9:
			symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen)

		case objabi.Hdarwin:
			symo = uint32(ld.Segdwarf.Fileoff + uint64(ld.Rnd(int64(ld.Segdwarf.Filelen), int64(*ld.FlagRound))) + uint64(machlink))
		}

		ctxt.Out.SeekSet(int64(symo))
		switch ctxt.HeadType {
		default:
			if ctxt.IsELF {
				ld.Asmelfsym(ctxt)
				ctxt.Out.Write(ld.Elfstrdat)

				if ctxt.LinkMode == ld.LinkExternal {
					ld.Elfemitreloc(ctxt)
				}
			}

		case objabi.Hplan9:
			ld.Asmplan9sym(ctxt)

			sym := ctxt.Syms.Lookup("pclntab", 0)
			if sym != nil {
				ld.Lcsize = int32(len(sym.P))
				ctxt.Out.Write(sym.P)
			}

		case objabi.Hdarwin:
			if ctxt.LinkMode == ld.LinkExternal {
				ld.Machoemitreloc(ctxt)
			}
		}
	}

	ctxt.Out.SeekSet(0)
	switch ctxt.HeadType {
	default:
	case objabi.Hplan9: /* plan 9 */
		ctxt.Out.Write32(0x647)                      /* magic */
		ctxt.Out.Write32(uint32(ld.Segtext.Filelen)) /* sizes */
		ctxt.Out.Write32(uint32(ld.Segdata.Filelen))
		ctxt.Out.Write32(uint32(ld.Segdata.Length - ld.Segdata.Filelen))
		ctxt.Out.Write32(uint32(ld.Symsize))          /* nsyms */
		ctxt.Out.Write32(uint32(ld.Entryvalue(ctxt))) /* va of entry */
		ctxt.Out.Write32(0)
		ctxt.Out.Write32(uint32(ld.Lcsize))

	case objabi.Hlinux,
		objabi.Hfreebsd,
		objabi.Hnetbsd,
		objabi.Hopenbsd:
		ld.Asmbelf(ctxt, int64(symo))

	case objabi.Hdarwin:
		ld.Asmbmacho(ctxt)
	}

	if *ld.FlagC {
		fmt.Printf("textsize=%d\n", ld.Segtext.Filelen)
		fmt.Printf("datsize=%d\n", ld.Segdata.Filelen)
		fmt.Printf("bsssize=%d\n", ld.Segdata.Length-ld.Segdata.Filelen)
		fmt.Printf("symsize=%d\n", ld.Symsize)
		fmt.Printf("lcsize=%d\n", ld.Lcsize)
		fmt.Printf("total=%d\n", ld.Segtext.Filelen+ld.Segdata.Length+uint64(ld.Symsize)+uint64(ld.Lcsize))
	}
}