2015-03-08 04:32:55 -04:00
|
|
|
// Inferno utils/5l/asm.c
|
2016-08-28 17:04:46 -07:00
|
|
|
// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/5l/asm.c
|
2015-03-08 04:32:55 -04:00
|
|
|
//
|
|
|
|
|
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
|
|
|
|
|
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
|
|
|
|
|
// Portions Copyright © 1997-1999 Vita Nuova Limited
|
|
|
|
|
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
|
|
|
|
|
// Portions Copyright © 2004,2006 Bruce Ellis
|
|
|
|
|
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
|
|
|
|
|
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
|
2016-04-10 14:32:26 -07:00
|
|
|
// Portions Copyright © 2009 The Go Authors. All rights reserved.
|
2015-03-08 04:32:55 -04:00
|
|
|
//
|
|
|
|
|
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
|
// of this software and associated documentation files (the "Software"), to deal
|
|
|
|
|
// in the Software without restriction, including without limitation the rights
|
|
|
|
|
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
|
|
|
// copies of the Software, and to permit persons to whom the Software is
|
|
|
|
|
// furnished to do so, subject to the following conditions:
|
|
|
|
|
//
|
|
|
|
|
// The above copyright notice and this permission notice shall be included in
|
|
|
|
|
// all copies or substantial portions of the Software.
|
|
|
|
|
//
|
|
|
|
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
|
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
|
|
|
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
|
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
|
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
|
|
|
// THE SOFTWARE.
|
|
|
|
|
|
2015-05-21 13:28:10 -04:00
|
|
|
package arm64
|
2015-03-08 04:32:55 -04:00
|
|
|
|
|
|
|
|
import (
|
2017-04-18 12:53:25 -07:00
|
|
|
"cmd/internal/objabi"
|
2017-09-30 21:10:49 +00:00
|
|
|
"cmd/internal/sys"
|
2015-05-21 13:28:10 -04:00
|
|
|
"cmd/link/internal/ld"
|
2017-10-04 17:54:04 -04:00
|
|
|
"cmd/link/internal/sym"
|
2017-10-06 16:01:02 -04:00
|
|
|
"debug/elf"
|
2015-04-03 04:37:18 -04:00
|
|
|
"encoding/binary"
|
2015-03-08 04:32:55 -04:00
|
|
|
"fmt"
|
|
|
|
|
"log"
|
|
|
|
|
)
|
|
|
|
|
|
2016-08-21 13:52:23 -04:00
|
|
|
// gentext generates the module init stub "go.link.addmoduledata" when
// linking a dynamically-linked Go binary: a three-instruction text symbol
// (adrp; add; b) that materializes the address of the local moduledata and
// tail-calls runtime.addmoduledata, registered via an .init_array entry so
// the dynamic loader runs it at startup.
func gentext(ctxt *ld.Link) {
	if !ctxt.DynlinkingGo() {
		// Statically-linked Go: moduledata registration is handled by the
		// runtime itself; no init stub needed.
		return
	}
	addmoduledata := ctxt.Syms.Lookup("runtime.addmoduledata", 0)
	if addmoduledata.Type == sym.STEXT && ctxt.BuildMode != ld.BuildModePlugin {
		// we're linking a module containing the runtime -> no need for
		// an init function
		return
	}
	addmoduledata.Attr |= sym.AttrReachable
	initfunc := ctxt.Syms.Lookup("go.link.addmoduledata", 0)
	initfunc.Type = sym.STEXT
	initfunc.Attr |= sym.AttrLocal
	initfunc.Attr |= sym.AttrReachable
	// o appends one 4-byte AArch64 instruction to the stub body.
	o := func(op uint32) {
		initfunc.AddUint32(ctxt.Arch, op)
	}
	// 0000000000000000 <local.dso_init>:
	// 0: 90000000 	adrp	x0, 0 <runtime.firstmoduledata>
	// 	0: R_AARCH64_ADR_PREL_PG_HI21	local.moduledata
	// 4: 91000000 	add	x0, x0, #0x0
	// 	4: R_AARCH64_ADD_ABS_LO12_NC	local.moduledata
	o(0x90000000)
	o(0x91000000)
	// One 8-byte R_ADDRARM64 reloc covers both the adrp (HI21) and the
	// add (LO12) instructions at offset 0.
	rel := initfunc.AddRel()
	rel.Off = 0
	rel.Siz = 8
	rel.Sym = ctxt.Moduledata
	rel.Type = objabi.R_ADDRARM64

	// 8: 14000000 	b	0 <runtime.addmoduledata>
	// 	8: R_AARCH64_CALL26	runtime.addmoduledata
	o(0x14000000)
	rel = initfunc.AddRel()
	rel.Off = 8
	rel.Siz = 4
	rel.Sym = ctxt.Syms.Lookup("runtime.addmoduledata", 0)
	rel.Type = objabi.R_CALLARM64 // Really should be R_AARCH64_JUMP26 but doesn't seem to make any difference

	if ctxt.BuildMode == ld.BuildModePlugin {
		// Plugins don't contain the runtime, so the branch target must be
		// kept in the text section explicitly.
		ctxt.Textp = append(ctxt.Textp, addmoduledata)
	}
	ctxt.Textp = append(ctxt.Textp, initfunc)
	// Place the stub's address in an SINITARR entry so the loader's
	// .init_array processing invokes it.
	initarray_entry := ctxt.Syms.Lookup("go.link.addmoduledatainit", 0)
	initarray_entry.Attr |= sym.AttrReachable
	initarray_entry.Attr |= sym.AttrLocal
	initarray_entry.Type = sym.SINITARR
	initarray_entry.AddAddr(ctxt.Arch, initfunc)
}
|
2015-03-08 14:14:53 +01:00
|
|
|
|
2017-10-04 17:54:04 -04:00
|
|
|
// adddynrel rewrites relocation r in symbol s into a form the rest of the
// linker can process, adding PLT/GOT/dynamic-relocation machinery as
// needed. The first switch translates raw ELF relocation types (as read
// from ELF object files) into internal objabi.R_* types; the second switch
// handles internal types that may need dynamic-linking treatment. It
// reports whether the relocation was handled.
func adddynrel(ctxt *ld.Link, s *sym.Symbol, r *sym.Reloc) bool {
	targ := r.Sym

	switch r.Type {
	default:
		if r.Type >= objabi.ElfRelocOffset {
			// An ELF relocation type we don't know how to translate.
			ld.Errorf(s, "unexpected relocation type %d (%s)", r.Type, sym.RelocName(ctxt.Arch, r.Type))
			return false
		}

	// Handle relocations found in ELF object files.
	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_PREL32):
		if targ.Type == sym.SDYNIMPORT {
			ld.Errorf(s, "unexpected R_AARCH64_PREL32 relocation for dynamic symbol %s", targ.Name)
		}
		// TODO(mwhudson): the test of VisibilityHidden here probably doesn't make
		// sense and should be removed when someone has thought about it properly.
		if (targ.Type == 0 || targ.Type == sym.SXREF) && !targ.Attr.VisibilityHidden() {
			ld.Errorf(s, "unknown symbol %s in pcrel", targ.Name)
		}
		r.Type = objabi.R_PCREL
		// ELF PREL32 is relative to the reloc location; internal R_PCREL
		// is relative to the end of the field, hence +4.
		r.Add += 4
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_PREL64):
		if targ.Type == sym.SDYNIMPORT {
			ld.Errorf(s, "unexpected R_AARCH64_PREL64 relocation for dynamic symbol %s", targ.Name)
		}
		if targ.Type == 0 || targ.Type == sym.SXREF {
			ld.Errorf(s, "unknown symbol %s in pcrel", targ.Name)
		}
		r.Type = objabi.R_PCREL
		// As above, but for an 8-byte field.
		r.Add += 8
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_CALL26),
		objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_JUMP26):
		if targ.Type == sym.SDYNIMPORT {
			// Calls to dynamic symbols must go through the PLT.
			addpltsym(ctxt, targ)
			r.Sym = ctxt.Syms.Lookup(".plt", 0)
			r.Add += int64(targ.Plt())
		}
		if (targ.Type == 0 || targ.Type == sym.SXREF) && !targ.Attr.VisibilityHidden() {
			ld.Errorf(s, "unknown symbol %s in callarm64", targ.Name)
		}
		r.Type = objabi.R_CALLARM64
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_ADR_GOT_PAGE),
		objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_LD64_GOT_LO12_NC):
		if targ.Type != sym.SDYNIMPORT {
			// have symbol
			// TODO: turn LDR of GOT entry into ADR of symbol itself
		}

		// fall back to using GOT
		// TODO: just needs relocation, no need to put in .dynsym
		addgotsym(ctxt, targ)

		r.Type = objabi.R_ARM64_GOT
		r.Sym = ctxt.Syms.Lookup(".got", 0)
		r.Add += int64(targ.Got())
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_ADR_PREL_PG_HI21),
		objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_ADD_ABS_LO12_NC):
		if targ.Type == sym.SDYNIMPORT {
			ld.Errorf(s, "unexpected relocation for dynamic symbol %s", targ.Name)
		}
		if targ.Type == 0 || targ.Type == sym.SXREF {
			ld.Errorf(s, "unknown symbol %s", targ.Name)
		}
		r.Type = objabi.R_ARM64_PCREL
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_ABS64):
		if targ.Type == sym.SDYNIMPORT {
			ld.Errorf(s, "unexpected R_AARCH64_ABS64 relocation for dynamic symbol %s", targ.Name)
		}
		r.Type = objabi.R_ADDR
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_LDST8_ABS_LO12_NC):
		if targ.Type == sym.SDYNIMPORT {
			ld.Errorf(s, "unexpected relocation for dynamic symbol %s", targ.Name)
		}
		r.Type = objabi.R_ARM64_LDST8
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_LDST32_ABS_LO12_NC):
		if targ.Type == sym.SDYNIMPORT {
			ld.Errorf(s, "unexpected relocation for dynamic symbol %s", targ.Name)
		}
		r.Type = objabi.R_ARM64_LDST32
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_LDST64_ABS_LO12_NC):
		if targ.Type == sym.SDYNIMPORT {
			ld.Errorf(s, "unexpected relocation for dynamic symbol %s", targ.Name)
		}
		r.Type = objabi.R_ARM64_LDST64
		return true

	case objabi.ElfRelocOffset + objabi.RelocType(elf.R_AARCH64_LDST128_ABS_LO12_NC):
		if targ.Type == sym.SDYNIMPORT {
			ld.Errorf(s, "unexpected relocation for dynamic symbol %s", targ.Name)
		}
		r.Type = objabi.R_ARM64_LDST128
		return true
	}

	// Reached here only by the default case above: an internal relocation
	// type that may still need dynamic-linking handling.
	switch r.Type {
	case objabi.R_CALL,
		objabi.R_PCREL,
		objabi.R_CALLARM64:
		if targ.Type != sym.SDYNIMPORT {
			// nothing to do, the relocation will be laid out in reloc
			return true
		}
		if ctxt.LinkMode == ld.LinkExternal {
			// External linker will do this relocation.
			return true
		}

	case objabi.R_ADDR:
		if s.Type == sym.STEXT && ctxt.IsELF {
			// The code is asking for the address of an external
			// function. We provide it with the address of the
			// correspondent GOT symbol.
			addgotsym(ctxt, targ)

			r.Sym = ctxt.Syms.Lookup(".got", 0)
			r.Add += int64(targ.Got())
			return true
		}

		// Process dynamic relocations for the data sections.
		if ctxt.BuildMode == ld.BuildModePIE && ctxt.LinkMode == ld.LinkInternal {
			// When internally linking, generate dynamic relocations
			// for all typical R_ADDR relocations. The exception
			// are those R_ADDR that are created as part of generating
			// the dynamic relocations and must be resolved statically.
			//
			// There are three phases relevant to understanding this:
			//
			//	dodata()  // we are here
			//	address() // symbol address assignment
			//	reloc()   // resolution of static R_ADDR relocs
			//
			// At this point symbol addresses have not been
			// assigned yet (as the final size of the .rela section
			// will affect the addresses), and so we cannot write
			// the Elf64_Rela.r_offset now. Instead we delay it
			// until after the 'address' phase of the linker is
			// complete. We do this via Addaddrplus, which creates
			// a new R_ADDR relocation which will be resolved in
			// the 'reloc' phase.
			//
			// These synthetic static R_ADDR relocs must be skipped
			// now, or else we will be caught in an infinite loop
			// of generating synthetic relocs for our synthetic
			// relocs.
			//
			// Furthermore, the rela sections contain dynamic
			// relocations with R_ADDR relocations on
			// Elf64_Rela.r_offset. This field should contain the
			// symbol offset as determined by reloc(), not the
			// final dynamically linked address as a dynamic
			// relocation would provide.
			switch s.Name {
			case ".dynsym", ".rela", ".rela.plt", ".got.plt", ".dynamic":
				return false
			}
		} else {
			// Either internally linking a static executable,
			// in which case we can resolve these relocations
			// statically in the 'reloc' phase, or externally
			// linking, in which case the relocation will be
			// prepared in the 'reloc' phase and passed to the
			// external linker in the 'asmb' phase.
			if s.Type != sym.SDATA && s.Type != sym.SRODATA {
				break
			}
		}

		if ctxt.IsELF {
			// Emit an Elf64_Rela record (offset, info, addend) into .rela.
			// TODO: We generate a R_AARCH64_ABS64 relocation for every R_ADDR, even
			// though it would be more efficient (for the dynamic linker) if we
			// generated R_AARCH64_RELATIVE instead.
			ld.Adddynsym(ctxt, targ)
			rela := ctxt.Syms.Lookup(".rela", 0)
			rela.AddAddrPlus(ctxt.Arch, s, int64(r.Off))
			if r.Siz == 8 {
				rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(targ.Dynid), uint32(elf.R_AARCH64_ABS64)))
			} else {
				ld.Errorf(s, "unexpected relocation for dynamic symbol %s", targ.Name)
			}
			rela.AddUint64(ctxt.Arch, uint64(r.Add))
			r.Type = objabi.ElfRelocOffset // ignore during relocsym
			return true
		}
	}
	return false
}
|
|
|
|
|
|
2017-10-04 17:54:04 -04:00
|
|
|
// elfreloc1 writes one (or, for paired AArch64 relocations, two)
// Elf64_Rela records for relocation r to the output. Each record is three
// 64-bit words: r_offset, r_info (type in the low 32 bits, symbol index in
// the high 32), and r_addend. The first r_offset and the final r_addend
// are written outside the switch; paired cases write their first record's
// addend plus the second record's offset/info in between. It reports
// whether the relocation type was handled.
func elfreloc1(ctxt *ld.Link, r *sym.Reloc, sectoff int64) bool {
	ctxt.Out.Write64(uint64(sectoff))

	elfsym := r.Xsym.ElfsymForReloc()
	switch r.Type {
	default:
		return false
	case objabi.R_ADDR:
		switch r.Siz {
		case 4:
			ctxt.Out.Write64(uint64(elf.R_AARCH64_ABS32) | uint64(elfsym)<<32)
		case 8:
			ctxt.Out.Write64(uint64(elf.R_AARCH64_ABS64) | uint64(elfsym)<<32)
		default:
			return false
		}
	case objabi.R_ADDRARM64:
		// two relocations: R_AARCH64_ADR_PREL_PG_HI21 and R_AARCH64_ADD_ABS_LO12_NC
		ctxt.Out.Write64(uint64(elf.R_AARCH64_ADR_PREL_PG_HI21) | uint64(elfsym)<<32)
		ctxt.Out.Write64(uint64(r.Xadd))
		ctxt.Out.Write64(uint64(sectoff + 4))
		ctxt.Out.Write64(uint64(elf.R_AARCH64_ADD_ABS_LO12_NC) | uint64(elfsym)<<32)
	case objabi.R_ARM64_TLS_LE:
		ctxt.Out.Write64(uint64(elf.R_AARCH64_TLSLE_MOVW_TPREL_G0) | uint64(elfsym)<<32)
	case objabi.R_ARM64_TLS_IE:
		// Paired: page-relative GOT address of the TP offset, then the
		// low-12-bit load from that GOT slot at the next instruction.
		ctxt.Out.Write64(uint64(elf.R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) | uint64(elfsym)<<32)
		ctxt.Out.Write64(uint64(r.Xadd))
		ctxt.Out.Write64(uint64(sectoff + 4))
		ctxt.Out.Write64(uint64(elf.R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) | uint64(elfsym)<<32)
	case objabi.R_ARM64_GOTPCREL:
		// Paired: adrp of the GOT page, then ld64 of the GOT entry.
		ctxt.Out.Write64(uint64(elf.R_AARCH64_ADR_GOT_PAGE) | uint64(elfsym)<<32)
		ctxt.Out.Write64(uint64(r.Xadd))
		ctxt.Out.Write64(uint64(sectoff + 4))
		ctxt.Out.Write64(uint64(elf.R_AARCH64_LD64_GOT_LO12_NC) | uint64(elfsym)<<32)
	case objabi.R_CALLARM64:
		if r.Siz != 4 {
			return false
		}
		ctxt.Out.Write64(uint64(elf.R_AARCH64_CALL26) | uint64(elfsym)<<32)

	}
	// Addend for the (last) record emitted above.
	ctxt.Out.Write64(uint64(r.Xadd))

	return true
}
|
|
|
|
|
|
2017-10-04 17:54:04 -04:00
|
|
|
// machoreloc1 writes Mach-O arm64 relocation records for relocation r in
// symbol s at section offset sectoff. Each record is two 32-bit words: the
// address, then a packed word holding the symbol/section index (low 24
// bits), pc-relative bit (24), length field (bits 25-26), external bit
// (27), and relocation type (bits 28-31). R_ADDRARM64 expands into a
// PAGE21/PAGEOFF12 pair, each optionally preceded by an ADDEND record.
// It reports whether the relocation type was handled.
func machoreloc1(arch *sys.Arch, out *ld.OutBuf, s *sym.Symbol, r *sym.Reloc, sectoff int64) bool {
	var v uint32

	rs := r.Xsym

	if rs.Type == sym.SHOSTOBJ || r.Type == objabi.R_CALLARM64 || r.Type == objabi.R_ADDRARM64 {
		// External relocation: reference the symbol by dynamic symbol
		// table index.
		if rs.Dynid < 0 {
			ld.Errorf(s, "reloc %d (%s) to non-macho symbol %s type=%d (%s)", r.Type, sym.RelocName(arch, r.Type), rs.Name, rs.Type, rs.Type)
			return false
		}

		v = uint32(rs.Dynid)
		v |= 1 << 27 // external relocation
	} else {
		// Local relocation: reference the target by its section number.
		v = uint32(rs.Sect.Extnum)
		if v == 0 {
			ld.Errorf(s, "reloc %d (%s) to symbol %s in non-macho section %s type=%d (%s)", r.Type, sym.RelocName(arch, r.Type), rs.Name, rs.Sect.Name, rs.Type, rs.Type)
			return false
		}
	}

	switch r.Type {
	default:
		return false
	case objabi.R_ADDR:
		v |= ld.MACHO_ARM64_RELOC_UNSIGNED << 28
	case objabi.R_CALLARM64:
		if r.Xadd != 0 {
			// ld64 cannot encode an addend on a BR26; it must be folded
			// into the instruction or expressed differently.
			ld.Errorf(s, "ld64 doesn't allow BR26 reloc with non-zero addend: %s+%d", rs.Name, r.Xadd)
		}

		v |= 1 << 24 // pc-relative bit
		v |= ld.MACHO_ARM64_RELOC_BRANCH26 << 28
	case objabi.R_ADDRARM64:
		r.Siz = 4
		// Two relocation entries: MACHO_ARM64_RELOC_PAGEOFF12 MACHO_ARM64_RELOC_PAGE21
		// if r.Xadd is non-zero, add two MACHO_ARM64_RELOC_ADDEND.
		if r.Xadd != 0 {
			out.Write32(uint32(sectoff + 4))
			out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff))
		}
		// PAGEOFF12 record covers the second (add) instruction at +4.
		out.Write32(uint32(sectoff + 4))
		out.Write32(v | (ld.MACHO_ARM64_RELOC_PAGEOFF12 << 28) | (2 << 25))
		if r.Xadd != 0 {
			out.Write32(uint32(sectoff))
			out.Write32((ld.MACHO_ARM64_RELOC_ADDEND << 28) | (2 << 25) | uint32(r.Xadd&0xffffff))
		}
		// The record written after the switch becomes the PAGE21 entry
		// for the first (adrp) instruction.
		v |= 1 << 24 // pc-relative bit
		v |= ld.MACHO_ARM64_RELOC_PAGE21 << 28
	}

	// Encode the field size as log2(bytes) in bits 25-26.
	switch r.Siz {
	default:
		return false
	case 1:
		v |= 0 << 25
	case 2:
		v |= 1 << 25
	case 4:
		v |= 2 << 25
	case 8:
		v |= 3 << 25
	}

	out.Write32(uint32(sectoff))
	out.Write32(v)
	return true
}
|
|
|
|
|
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
func archreloc(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, val int64) (int64, bool) {
|
2017-10-05 10:20:17 -04:00
|
|
|
if ctxt.LinkMode == ld.LinkExternal {
|
2015-04-03 04:37:11 -04:00
|
|
|
switch r.Type {
|
|
|
|
|
default:
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
return val, false
|
2017-04-18 12:53:25 -07:00
|
|
|
case objabi.R_ARM64_GOTPCREL:
|
2015-11-03 10:23:56 +13:00
|
|
|
var o1, o2 uint32
|
2016-08-21 13:52:23 -04:00
|
|
|
if ctxt.Arch.ByteOrder == binary.BigEndian {
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
o1 = uint32(val >> 32)
|
|
|
|
|
o2 = uint32(val)
|
2015-11-03 10:23:56 +13:00
|
|
|
} else {
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
o1 = uint32(val)
|
|
|
|
|
o2 = uint32(val >> 32)
|
2015-11-03 10:23:56 +13:00
|
|
|
}
|
|
|
|
|
// Any relocation against a function symbol is redirected to
|
|
|
|
|
// be against a local symbol instead (see putelfsym in
|
|
|
|
|
// symtab.go) but unfortunately the system linker was buggy
|
|
|
|
|
// when confronted with a R_AARCH64_ADR_GOT_PAGE relocation
|
|
|
|
|
// against a local symbol until May 2015
|
|
|
|
|
// (https://sourceware.org/bugzilla/show_bug.cgi?id=18270). So
|
|
|
|
|
// we convert the adrp; ld64 + R_ARM64_GOTPCREL into adrp;
|
|
|
|
|
// add + R_ADDRARM64.
|
2018-10-19 17:42:11 -04:00
|
|
|
if !(r.Sym.IsFileLocal() || r.Sym.Attr.VisibilityHidden() || r.Sym.Attr.Local()) && r.Sym.Type == sym.STEXT && ctxt.DynlinkingGo() {
|
2015-11-03 10:23:56 +13:00
|
|
|
if o2&0xffc00000 != 0xf9400000 {
|
2016-09-17 09:39:33 -04:00
|
|
|
ld.Errorf(s, "R_ARM64_GOTPCREL against unexpected instruction %x", o2)
|
2015-11-03 10:23:56 +13:00
|
|
|
}
|
|
|
|
|
o2 = 0x91000000 | (o2 & 0x000003ff)
|
2017-04-18 12:53:25 -07:00
|
|
|
r.Type = objabi.R_ADDRARM64
|
2015-11-03 10:23:56 +13:00
|
|
|
}
|
2016-08-21 13:52:23 -04:00
|
|
|
if ctxt.Arch.ByteOrder == binary.BigEndian {
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
val = int64(o1)<<32 | int64(o2)
|
2015-11-03 10:23:56 +13:00
|
|
|
} else {
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
val = int64(o2)<<32 | int64(o1)
|
2015-11-03 10:23:56 +13:00
|
|
|
}
|
|
|
|
|
fallthrough
|
2017-04-18 12:53:25 -07:00
|
|
|
case objabi.R_ADDRARM64:
|
2017-08-27 22:00:00 +09:00
|
|
|
r.Done = false
|
2015-04-03 04:37:18 -04:00
|
|
|
|
|
|
|
|
// set up addend for eventual relocation via outer symbol.
|
|
|
|
|
rs := r.Sym
|
|
|
|
|
r.Xadd = r.Add
|
|
|
|
|
for rs.Outer != nil {
|
2016-09-17 09:39:33 -04:00
|
|
|
r.Xadd += ld.Symaddr(rs) - ld.Symaddr(rs.Outer)
|
2015-04-03 04:37:18 -04:00
|
|
|
rs = rs.Outer
|
|
|
|
|
}
|
|
|
|
|
|
2017-10-04 17:54:04 -04:00
|
|
|
if rs.Type != sym.SHOSTOBJ && rs.Type != sym.SDYNIMPORT && rs.Sect == nil {
|
2016-09-17 09:39:33 -04:00
|
|
|
ld.Errorf(s, "missing section for %s", rs.Name)
|
2015-04-03 04:37:18 -04:00
|
|
|
}
|
|
|
|
|
r.Xsym = rs
|
|
|
|
|
|
2015-04-10 21:28:09 -04:00
|
|
|
// Note: ld64 currently has a bug that any non-zero addend for BR26 relocation
|
|
|
|
|
// will make the linking fail because it thinks the code is not PIC even though
|
|
|
|
|
// the BR26 relocation should be fully resolved at link time.
|
|
|
|
|
// That is the reason why the next if block is disabled. When the bug in ld64
|
|
|
|
|
// is fixed, we can enable this block and also enable duff's device in cmd/7g.
|
2017-10-07 13:49:44 -04:00
|
|
|
if false && ctxt.HeadType == objabi.Hdarwin {
|
2015-08-03 14:08:17 +12:00
|
|
|
var o0, o1 uint32
|
|
|
|
|
|
2016-08-21 13:52:23 -04:00
|
|
|
if ctxt.Arch.ByteOrder == binary.BigEndian {
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
o0 = uint32(val >> 32)
|
|
|
|
|
o1 = uint32(val)
|
2015-08-03 14:08:17 +12:00
|
|
|
} else {
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
o0 = uint32(val)
|
|
|
|
|
o1 = uint32(val >> 32)
|
2015-08-03 14:08:17 +12:00
|
|
|
}
|
2015-04-10 21:28:09 -04:00
|
|
|
// Mach-O wants the addend to be encoded in the instruction
|
|
|
|
|
// Note that although Mach-O supports ARM64_RELOC_ADDEND, it
|
|
|
|
|
// can only encode 24-bit of signed addend, but the instructions
|
|
|
|
|
// supports 33-bit of signed addend, so we always encode the
|
|
|
|
|
// addend in place.
|
|
|
|
|
o0 |= (uint32((r.Xadd>>12)&3) << 29) | (uint32((r.Xadd>>12>>2)&0x7ffff) << 5)
|
|
|
|
|
o1 |= uint32(r.Xadd&0xfff) << 10
|
|
|
|
|
r.Xadd = 0
|
|
|
|
|
|
2015-08-03 14:08:17 +12:00
|
|
|
// when laid out, the instruction order must always be o1, o2.
|
2016-08-21 13:52:23 -04:00
|
|
|
if ctxt.Arch.ByteOrder == binary.BigEndian {
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
val = int64(o0)<<32 | int64(o1)
|
2015-08-03 14:08:17 +12:00
|
|
|
} else {
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
val = int64(o1)<<32 | int64(o0)
|
2015-08-03 14:08:17 +12:00
|
|
|
}
|
2015-04-10 21:28:09 -04:00
|
|
|
}
|
|
|
|
|
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
return val, true
|
2017-04-18 12:53:25 -07:00
|
|
|
case objabi.R_CALLARM64,
|
|
|
|
|
objabi.R_ARM64_TLS_LE,
|
|
|
|
|
objabi.R_ARM64_TLS_IE:
|
2017-08-27 22:00:00 +09:00
|
|
|
r.Done = false
|
2015-04-03 04:37:11 -04:00
|
|
|
r.Xsym = r.Sym
|
2015-08-03 14:08:17 +12:00
|
|
|
r.Xadd = r.Add
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
return val, true
|
2015-04-03 04:37:11 -04:00
|
|
|
}
|
2015-03-08 04:32:55 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
switch r.Type {
|
2017-04-18 12:53:25 -07:00
|
|
|
case objabi.R_CONST:
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
return r.Add, true
|
2018-09-26 10:12:18 +00:00
|
|
|
|
2017-04-18 12:53:25 -07:00
|
|
|
case objabi.R_GOTOFF:
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
return ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ctxt.Syms.Lookup(".got", 0)), true
|
2018-09-26 10:12:18 +00:00
|
|
|
|
2017-04-18 12:53:25 -07:00
|
|
|
case objabi.R_ADDRARM64:
|
2016-09-17 09:39:33 -04:00
|
|
|
t := ld.Symaddr(r.Sym) + r.Add - ((s.Value + int64(r.Off)) &^ 0xfff)
|
2015-11-13 08:32:33 +00:00
|
|
|
if t >= 1<<32 || t < -1<<32 {
|
2016-09-17 09:39:33 -04:00
|
|
|
ld.Errorf(s, "program too large, address relocation distance = %d", t)
|
2015-11-13 08:32:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
var o0, o1 uint32
|
|
|
|
|
|
2016-08-21 13:52:23 -04:00
|
|
|
if ctxt.Arch.ByteOrder == binary.BigEndian {
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
o0 = uint32(val >> 32)
|
|
|
|
|
o1 = uint32(val)
|
2015-11-13 08:32:33 +00:00
|
|
|
} else {
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
o0 = uint32(val)
|
|
|
|
|
o1 = uint32(val >> 32)
|
2015-11-13 08:32:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
o0 |= (uint32((t>>12)&3) << 29) | (uint32((t>>12>>2)&0x7ffff) << 5)
|
|
|
|
|
o1 |= uint32(t&0xfff) << 10
|
|
|
|
|
|
|
|
|
|
// when laid out, the instruction order must always be o1, o2.
|
2016-08-21 13:52:23 -04:00
|
|
|
if ctxt.Arch.ByteOrder == binary.BigEndian {
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
return int64(o0)<<32 | int64(o1), true
|
2015-11-13 08:32:33 +00:00
|
|
|
}
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
return int64(o1)<<32 | int64(o0), true
|
2018-09-26 10:12:18 +00:00
|
|
|
|
2017-04-18 12:53:25 -07:00
|
|
|
case objabi.R_ARM64_TLS_LE:
|
2017-08-27 22:00:00 +09:00
|
|
|
r.Done = false
|
2019-04-20 14:39:33 +00:00
|
|
|
if ctxt.HeadType == objabi.Hdarwin {
|
2017-10-07 13:49:44 -04:00
|
|
|
ld.Errorf(s, "TLS reloc on unsupported OS %v", ctxt.HeadType)
|
2015-08-11 14:10:03 +12:00
|
|
|
}
|
|
|
|
|
// The TCB is two pointers. This is not documented anywhere, but is
|
|
|
|
|
// de facto part of the ABI.
|
2017-09-30 21:10:49 +00:00
|
|
|
v := r.Sym.Value + int64(2*ctxt.Arch.PtrSize)
|
2015-08-11 14:10:03 +12:00
|
|
|
if v < 0 || v >= 32678 {
|
2016-09-17 09:39:33 -04:00
|
|
|
ld.Errorf(s, "TLS offset out of range %d", v)
|
2015-08-11 14:10:03 +12:00
|
|
|
}
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
return val | (v << 5), true
|
2018-09-26 10:12:18 +00:00
|
|
|
|
|
|
|
|
case objabi.R_ARM64_TLS_IE:
|
|
|
|
|
if ctxt.BuildMode == ld.BuildModePIE && ctxt.IsELF {
|
|
|
|
|
// We are linking the final executable, so we
|
|
|
|
|
// can optimize any TLS IE relocation to LE.
|
|
|
|
|
r.Done = false
|
|
|
|
|
if ctxt.HeadType != objabi.Hlinux {
|
|
|
|
|
ld.Errorf(s, "TLS reloc on unsupported OS %v", ctxt.HeadType)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// The TCB is two pointers. This is not documented anywhere, but is
|
|
|
|
|
// de facto part of the ABI.
|
|
|
|
|
v := ld.Symaddr(r.Sym) + int64(2*ctxt.Arch.PtrSize) + r.Add
|
|
|
|
|
if v < 0 || v >= 32678 {
|
|
|
|
|
ld.Errorf(s, "TLS offset out of range %d", v)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
var o0, o1 uint32
|
|
|
|
|
if ctxt.Arch.ByteOrder == binary.BigEndian {
|
|
|
|
|
o0 = uint32(val >> 32)
|
|
|
|
|
o1 = uint32(val)
|
|
|
|
|
} else {
|
|
|
|
|
o0 = uint32(val)
|
|
|
|
|
o1 = uint32(val >> 32)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
|
|
|
|
|
// turn ADRP to MOVZ
|
|
|
|
|
o0 = 0xd2a00000 | uint32(o0&0x1f) | (uint32((v>>16)&0xffff) << 5)
|
|
|
|
|
// R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
|
|
|
|
|
// turn LD64 to MOVK
|
|
|
|
|
if v&3 != 0 {
|
|
|
|
|
ld.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", v)
|
|
|
|
|
}
|
|
|
|
|
o1 = 0xf2800000 | uint32(o1&0x1f) | (uint32(v&0xffff) << 5)
|
|
|
|
|
|
|
|
|
|
// when laid out, the instruction order must always be o0, o1.
|
|
|
|
|
if ctxt.Arch.ByteOrder == binary.BigEndian {
|
|
|
|
|
return int64(o0)<<32 | int64(o1), true
|
|
|
|
|
}
|
|
|
|
|
return int64(o1)<<32 | int64(o0), true
|
|
|
|
|
} else {
|
|
|
|
|
log.Fatalf("cannot handle R_ARM64_TLS_IE (sym %s) when linking internally", s.Name)
|
|
|
|
|
}
|
|
|
|
|
|
2017-04-18 12:53:25 -07:00
|
|
|
case objabi.R_CALLARM64:
|
2018-09-26 10:12:18 +00:00
|
|
|
var t int64
|
|
|
|
|
if r.Sym.Type == sym.SDYNIMPORT {
|
|
|
|
|
t = (ld.Symaddr(ctxt.Syms.Lookup(".plt", 0)) + r.Add) - (s.Value + int64(r.Off))
|
|
|
|
|
} else {
|
|
|
|
|
t = (ld.Symaddr(r.Sym) + r.Add) - (s.Value + int64(r.Off))
|
|
|
|
|
}
|
2015-08-03 14:08:17 +12:00
|
|
|
if t >= 1<<27 || t < -1<<27 {
|
2016-09-17 09:39:33 -04:00
|
|
|
ld.Errorf(s, "program too large, call relocation distance = %d", t)
|
2015-08-03 14:08:17 +12:00
|
|
|
}
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
return val | ((t >> 2) & 0x03ffffff), true
|
2018-09-26 10:12:18 +00:00
|
|
|
|
|
|
|
|
case objabi.R_ARM64_GOT:
|
|
|
|
|
if s.P[r.Off+3]&0x9f == 0x90 {
|
|
|
|
|
// R_AARCH64_ADR_GOT_PAGE
|
|
|
|
|
// patch instruction: adrp
|
|
|
|
|
t := ld.Symaddr(r.Sym) + r.Add - ((s.Value + int64(r.Off)) &^ 0xfff)
|
|
|
|
|
if t >= 1<<32 || t < -1<<32 {
|
|
|
|
|
ld.Errorf(s, "program too large, address relocation distance = %d", t)
|
|
|
|
|
}
|
|
|
|
|
var o0 uint32
|
|
|
|
|
o0 |= (uint32((t>>12)&3) << 29) | (uint32((t>>12>>2)&0x7ffff) << 5)
|
|
|
|
|
return val | int64(o0), true
|
|
|
|
|
} else if s.P[r.Off+3] == 0xf9 {
|
|
|
|
|
// R_AARCH64_LD64_GOT_LO12_NC
|
|
|
|
|
// patch instruction: ldr
|
|
|
|
|
t := ld.Symaddr(r.Sym) + r.Add - ((s.Value + int64(r.Off)) &^ 0xfff)
|
|
|
|
|
if t&7 != 0 {
|
|
|
|
|
ld.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_LD64_GOT_LO12_NC", t)
|
|
|
|
|
}
|
|
|
|
|
var o1 uint32
|
|
|
|
|
o1 |= uint32(t&0xfff) << (10 - 3)
|
|
|
|
|
return val | int64(uint64(o1)), true
|
|
|
|
|
} else {
|
|
|
|
|
ld.Errorf(s, "unsupported instruction for %v R_GOTARM64", s.P[r.Off:r.Off+4])
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case objabi.R_ARM64_PCREL:
|
|
|
|
|
if s.P[r.Off+3]&0x9f == 0x90 {
|
|
|
|
|
// R_AARCH64_ADR_PREL_PG_HI21
|
|
|
|
|
// patch instruction: adrp
|
|
|
|
|
t := ld.Symaddr(r.Sym) + r.Add - ((s.Value + int64(r.Off)) &^ 0xfff)
|
|
|
|
|
if t >= 1<<32 || t < -1<<32 {
|
|
|
|
|
ld.Errorf(s, "program too large, address relocation distance = %d", t)
|
|
|
|
|
}
|
|
|
|
|
o0 := (uint32((t>>12)&3) << 29) | (uint32((t>>12>>2)&0x7ffff) << 5)
|
|
|
|
|
return val | int64(o0), true
|
|
|
|
|
} else if s.P[r.Off+3]&0x91 == 0x91 {
|
|
|
|
|
// R_AARCH64_ADD_ABS_LO12_NC
|
|
|
|
|
// patch instruction: add
|
|
|
|
|
t := ld.Symaddr(r.Sym) + r.Add - ((s.Value + int64(r.Off)) &^ 0xfff)
|
|
|
|
|
o1 := uint32(t&0xfff) << 10
|
|
|
|
|
return val | int64(o1), true
|
|
|
|
|
} else {
|
|
|
|
|
ld.Errorf(s, "unsupported instruction for %v R_PCRELARM64", s.P[r.Off:r.Off+4])
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
case objabi.R_ARM64_LDST8:
|
|
|
|
|
t := ld.Symaddr(r.Sym) + r.Add - ((s.Value + int64(r.Off)) &^ 0xfff)
|
|
|
|
|
o0 := uint32(t&0xfff) << 10
|
|
|
|
|
return val | int64(o0), true
|
|
|
|
|
|
|
|
|
|
case objabi.R_ARM64_LDST32:
|
|
|
|
|
t := ld.Symaddr(r.Sym) + r.Add - ((s.Value + int64(r.Off)) &^ 0xfff)
|
|
|
|
|
if t&3 != 0 {
|
|
|
|
|
ld.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_LDST32_ABS_LO12_NC", t)
|
|
|
|
|
}
|
|
|
|
|
o0 := (uint32(t&0xfff) >> 2) << 10
|
|
|
|
|
return val | int64(o0), true
|
|
|
|
|
|
|
|
|
|
case objabi.R_ARM64_LDST64:
|
|
|
|
|
t := ld.Symaddr(r.Sym) + r.Add - ((s.Value + int64(r.Off)) &^ 0xfff)
|
|
|
|
|
if t&7 != 0 {
|
|
|
|
|
ld.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_LDST64_ABS_LO12_NC", t)
|
|
|
|
|
}
|
|
|
|
|
o0 := (uint32(t&0xfff) >> 3) << 10
|
|
|
|
|
return val | int64(o0), true
|
2019-05-11 02:21:22 +10:00
|
|
|
|
|
|
|
|
case objabi.R_ARM64_LDST128:
|
|
|
|
|
t := ld.Symaddr(r.Sym) + r.Add - ((s.Value + int64(r.Off)) &^ 0xfff)
|
|
|
|
|
if t&15 != 0 {
|
|
|
|
|
ld.Errorf(s, "invalid address: %x for relocation type: R_AARCH64_LDST128_ABS_LO12_NC", t)
|
|
|
|
|
}
|
|
|
|
|
o0 := (uint32(t&0xfff) >> 4) << 10
|
|
|
|
|
return val | int64(o0), true
|
2015-03-08 04:32:55 -04:00
|
|
|
}
|
|
|
|
|
|
cmd/link: fewer allocs in ld.Arch.Archreloc
Archreloc had this signature:
func(*Link, *sym.Reloc, *sym.Symbol, *int64) bool
The last *int64 argument is used as out parameter.
Passed valus could be allocated on stack, but escape analysis
fails here, leading to high number of unwanted allocs.
If instead 4th arg is passed by value, and modified values is returned,
no problems with allocations arise:
func(*Link, *sym.Reloc, *sym.Symbol, int64) (int64, bool)
There are 2 benefits:
1. code becomes more readable.
2. less allocations.
For linking "hello world" example from net/http:
name old time/op new time/op delta
Linker-4 530ms ± 2% 520ms ± 2% -1.83% (p=0.001 n=17+16)
It's top 1 in alloc_objects from memprofile:
flat flat% sum% cum cum%
229379 33.05% 33.05% 229379 33.05% cmd/link/internal/ld.relocsym
...
list relocsym:
229379 229379 (flat, cum) 33.05% of Total
229379 229379 183: var o int64
After the patch, ~230k of int64 allocs (~ 1.75mb) removed.
Passes toolshash-check (toolstash cmp).
Change-Id: I25504fe27967bcff70c4b7338790f3921d15473d
Reviewed-on: https://go-review.googlesource.com/113637
Run-TryBot: Iskander Sharipov <iskander.sharipov@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
2018-05-17 19:50:29 +03:00
|
|
|
return val, false
|
2015-03-08 04:32:55 -04:00
|
|
|
}
|
|
|
|
|
|
2017-10-04 17:54:04 -04:00
|
|
|
// archrelocvariant applies a relocation variant to the value t.
// There are no relocation variants on arm64, so reaching this function
// indicates a linker bug; it aborts the link.
func archrelocvariant(ctxt *ld.Link, r *sym.Reloc, s *sym.Symbol, t int64) int64 {
	log.Fatalf("unexpected relocation variant")
	return -1 // unreachable: log.Fatalf exits the process
}
|
|
|
|
|
|
2018-09-26 10:12:18 +00:00
|
|
|
// elfsetupplt emits the PLT header (plt[0]) into .plt and the three
// reserved entries at the start of .got.plt, once per link. The header
// is the standard AArch64 lazy-binding stub: it saves x16/x30, loads
// the dynamic resolver address from got[2] into x17, and branches to it.
// It is a no-op if .plt is already non-empty.
func elfsetupplt(ctxt *ld.Link) {
	plt := ctxt.Syms.Lookup(".plt", 0)
	gotplt := ctxt.Syms.Lookup(".got.plt", 0)
	if plt.Size == 0 {
		// stp x16, x30, [sp, #-16]!
		// identifying information
		plt.AddUint32(ctxt.Arch, 0xa9bf7bf0)

		// The following two instructions (adrp + ldr) load *got[2] into x17.
		// Each AddAddrPlus4 records a relocation against got[2] (offset 16
		// into .got.plt), then SetUint32 overwrites the just-added word with
		// the instruction encoding; the relocation fills in the address bits.

		// adrp x16, &got[0]
		plt.AddAddrPlus4(gotplt, 16)
		plt.SetUint32(ctxt.Arch, plt.Size-4, 0x90000010)
		plt.R[len(plt.R)-1].Type = objabi.R_ARM64_GOT

		// <imm> is the offset value of &got[2] to &got[0], the same below
		// ldr x17, [x16, <imm>]
		plt.AddAddrPlus4(gotplt, 16)
		plt.SetUint32(ctxt.Arch, plt.Size-4, 0xf9400211)
		plt.R[len(plt.R)-1].Type = objabi.R_ARM64_GOT

		// add x16, x16, <imm>
		plt.AddAddrPlus4(gotplt, 16)
		plt.SetUint32(ctxt.Arch, plt.Size-4, 0x91000210)
		plt.R[len(plt.R)-1].Type = objabi.R_ARM64_PCREL

		// br x17
		plt.AddUint32(ctxt.Arch, 0xd61f0220)

		// 3 nop for place holder, padding the header to 8 words
		plt.AddUint32(ctxt.Arch, 0xd503201f)
		plt.AddUint32(ctxt.Arch, 0xd503201f)
		plt.AddUint32(ctxt.Arch, 0xd503201f)

		// check gotplt.size == 0: the reserved entries below must be the
		// very first contents of .got.plt.
		if gotplt.Size != 0 {
			ld.Errorf(gotplt, "got.plt is not empty at the very beginning")
		}
		// got[0]: address of .dynamic, for the dynamic linker's use.
		gotplt.AddAddrPlus(ctxt.Arch, ctxt.Syms.Lookup(".dynamic", 0), 0)

		// got[1], got[2]: reserved; filled in by the dynamic linker at load time.
		gotplt.AddUint64(ctxt.Arch, 0)
		gotplt.AddUint64(ctxt.Arch, 0)
	}
}
|
|
|
|
|
|
|
|
|
|
// addpltsym creates a PLT entry for symbol s if it does not already have
// one: a four-instruction stub in .plt that jumps through a new .got.plt
// slot, plus an R_AARCH64_JUMP_SLOT dynamic relocation in .rela.plt so
// the dynamic linker can fill in that slot. Only ELF output is supported.
func addpltsym(ctxt *ld.Link, s *sym.Symbol) {
	// Already assigned a PLT offset; nothing to do.
	if s.Plt() >= 0 {
		return
	}

	ld.Adddynsym(ctxt, s)

	if ctxt.IsELF {
		plt := ctxt.Syms.Lookup(".plt", 0)
		gotplt := ctxt.Syms.Lookup(".got.plt", 0)
		rela := ctxt.Syms.Lookup(".rela.plt", 0)
		if plt.Size == 0 {
			elfsetupplt(ctxt)
		}

		// The stub below references the .got.plt slot that is appended
		// further down; at this point gotplt.Size is exactly that slot's
		// offset. Each AddAddrPlus4 records the relocation, then SetUint32
		// overwrites the word with the instruction encoding.

		// adrp x16, &got.plt[0]
		plt.AddAddrPlus4(gotplt, gotplt.Size)
		plt.SetUint32(ctxt.Arch, plt.Size-4, 0x90000010)
		plt.R[len(plt.R)-1].Type = objabi.R_ARM64_GOT

		// <offset> is the offset value of &got.plt[n] to &got.plt[0]
		// ldr x17, [x16, <offset>]
		plt.AddAddrPlus4(gotplt, gotplt.Size)
		plt.SetUint32(ctxt.Arch, plt.Size-4, 0xf9400211)
		plt.R[len(plt.R)-1].Type = objabi.R_ARM64_GOT

		// add x16, x16, <offset>
		plt.AddAddrPlus4(gotplt, gotplt.Size)
		plt.SetUint32(ctxt.Arch, plt.Size-4, 0x91000210)
		plt.R[len(plt.R)-1].Type = objabi.R_ARM64_PCREL

		// br x17
		plt.AddUint32(ctxt.Arch, 0xd61f0220)

		// add to got.plt: pointer to plt[0], the lazy-resolution entry point.
		gotplt.AddAddrPlus(ctxt.Arch, plt, 0)

		// rela: JUMP_SLOT relocation against the slot just appended
		// (gotplt.Size-8 is its offset now that it has been added).
		rela.AddAddrPlus(ctxt.Arch, gotplt, gotplt.Size-8)
		rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(s.Dynid), uint32(elf.R_AARCH64_JUMP_SLOT)))
		rela.AddUint64(ctxt.Arch, 0)

		// Record the offset of this 16-byte stub within .plt.
		s.SetPlt(int32(plt.Size - 16))
	} else {
		ld.Errorf(s, "addpltsym: unsupported binary format")
	}
}
|
|
|
|
|
|
|
|
|
|
func addgotsym(ctxt *ld.Link, s *sym.Symbol) {
|
|
|
|
|
if s.Got() >= 0 {
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
ld.Adddynsym(ctxt, s)
|
|
|
|
|
got := ctxt.Syms.Lookup(".got", 0)
|
|
|
|
|
s.SetGot(int32(got.Size))
|
|
|
|
|
got.AddUint64(ctxt.Arch, 0)
|
|
|
|
|
|
|
|
|
|
if ctxt.IsELF {
|
|
|
|
|
rela := ctxt.Syms.Lookup(".rela", 0)
|
|
|
|
|
rela.AddAddrPlus(ctxt.Arch, got, int64(s.Got()))
|
|
|
|
|
rela.AddUint64(ctxt.Arch, ld.ELF64_R_INFO(uint32(s.Dynid), uint32(elf.R_AARCH64_GLOB_DAT)))
|
|
|
|
|
rela.AddUint64(ctxt.Arch, 0)
|
|
|
|
|
} else {
|
|
|
|
|
ld.Errorf(s, "addgotsym: unsupported binary format")
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-08-19 22:40:38 -04:00
|
|
|
func asmb(ctxt *ld.Link) {
|
2016-08-21 18:25:28 -04:00
|
|
|
if ctxt.Debugvlog != 0 {
|
2017-04-18 12:53:25 -07:00
|
|
|
ctxt.Logf("%5.2f asmb\n", ld.Cputime())
|
2015-03-08 04:32:55 -04:00
|
|
|
}
|
|
|
|
|
|
2017-10-07 13:43:38 -04:00
|
|
|
if ctxt.IsELF {
|
2016-09-20 15:57:53 +12:00
|
|
|
ld.Asmbelfsetup()
|
2015-03-08 04:32:55 -04:00
|
|
|
}
|
|
|
|
|
|
2017-04-18 21:52:06 +12:00
|
|
|
sect := ld.Segtext.Sections[0]
|
2017-10-01 02:37:20 +00:00
|
|
|
ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
|
2016-08-19 22:40:38 -04:00
|
|
|
ld.Codeblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
|
2017-04-18 21:52:06 +12:00
|
|
|
for _, sect = range ld.Segtext.Sections[1:] {
|
2017-10-01 02:37:20 +00:00
|
|
|
ctxt.Out.SeekSet(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
|
2016-08-19 22:40:38 -04:00
|
|
|
ld.Datblk(ctxt, int64(sect.Vaddr), int64(sect.Length))
|
2015-03-08 04:32:55 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if ld.Segrodata.Filelen > 0 {
|
2016-08-21 18:25:28 -04:00
|
|
|
if ctxt.Debugvlog != 0 {
|
2017-04-18 12:53:25 -07:00
|
|
|
ctxt.Logf("%5.2f rodatblk\n", ld.Cputime())
|
2015-03-08 04:32:55 -04:00
|
|
|
}
|
2017-10-01 02:37:20 +00:00
|
|
|
ctxt.Out.SeekSet(int64(ld.Segrodata.Fileoff))
|
2016-08-19 22:40:38 -04:00
|
|
|
ld.Datblk(ctxt, int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen))
|
2015-03-08 04:32:55 -04:00
|
|
|
}
|
2016-09-05 23:29:16 -04:00
|
|
|
if ld.Segrelrodata.Filelen > 0 {
|
|
|
|
|
if ctxt.Debugvlog != 0 {
|
2017-04-18 12:53:25 -07:00
|
|
|
ctxt.Logf("%5.2f relrodatblk\n", ld.Cputime())
|
2016-09-05 23:29:16 -04:00
|
|
|
}
|
2017-10-01 02:37:20 +00:00
|
|
|
ctxt.Out.SeekSet(int64(ld.Segrelrodata.Fileoff))
|
2016-09-05 23:29:16 -04:00
|
|
|
ld.Datblk(ctxt, int64(ld.Segrelrodata.Vaddr), int64(ld.Segrelrodata.Filelen))
|
|
|
|
|
}
|
2015-03-08 04:32:55 -04:00
|
|
|
|
2016-08-21 18:25:28 -04:00
|
|
|
if ctxt.Debugvlog != 0 {
|
2017-04-18 12:53:25 -07:00
|
|
|
ctxt.Logf("%5.2f datblk\n", ld.Cputime())
|
2015-03-08 04:32:55 -04:00
|
|
|
}
|
|
|
|
|
|
2017-10-01 02:37:20 +00:00
|
|
|
ctxt.Out.SeekSet(int64(ld.Segdata.Fileoff))
|
2016-08-19 22:40:38 -04:00
|
|
|
ld.Datblk(ctxt, int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
|
2015-03-08 04:32:55 -04:00
|
|
|
|
2017-10-01 02:37:20 +00:00
|
|
|
ctxt.Out.SeekSet(int64(ld.Segdwarf.Fileoff))
|
2016-08-19 22:40:38 -04:00
|
|
|
ld.Dwarfblk(ctxt, int64(ld.Segdwarf.Vaddr), int64(ld.Segdwarf.Filelen))
|
2019-04-03 22:41:48 -04:00
|
|
|
}
|
2016-03-14 09:23:04 -07:00
|
|
|
|
2019-04-03 22:41:48 -04:00
|
|
|
// asmb2 finishes writing the output file after asmb: it emits the symbol
// table (unless stripped with -s), external relocations where applicable,
// and the file header for the target format (Plan 9 a.out, ELF, or Mach-O).
func asmb2(ctxt *ld.Link) {
	// Size of the Mach-O link-edit data, needed to place the symbol
	// table on darwin.
	machlink := uint32(0)
	if ctxt.HeadType == objabi.Hdarwin {
		machlink = uint32(ld.Domacholink(ctxt))
	}

	/* output symbol table */
	ld.Symsize = 0

	ld.Lcsize = 0
	symo := uint32(0)
	// *ld.FlagS is the -s flag: omit the symbol table entirely.
	if !*ld.FlagS {
		// TODO: rationalize
		if ctxt.Debugvlog != 0 {
			ctxt.Logf("%5.2f sym\n", ld.Cputime())
		}
		// Compute symo, the file offset where the symbol table goes;
		// it depends on the output format's layout.
		switch ctxt.HeadType {
		default:
			if ctxt.IsELF {
				symo = uint32(ld.Segdwarf.Fileoff + ld.Segdwarf.Filelen)
				symo = uint32(ld.Rnd(int64(symo), int64(*ld.FlagRound)))
			}

		case objabi.Hplan9:
			symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen)

		case objabi.Hdarwin:
			symo = uint32(ld.Segdwarf.Fileoff + uint64(ld.Rnd(int64(ld.Segdwarf.Filelen), int64(*ld.FlagRound))) + uint64(machlink))
		}

		ctxt.Out.SeekSet(int64(symo))
		switch ctxt.HeadType {
		default:
			if ctxt.IsELF {
				if ctxt.Debugvlog != 0 {
					ctxt.Logf("%5.2f elfsym\n", ld.Cputime())
				}
				ld.Asmelfsym(ctxt)
				ctxt.Out.Flush()
				ctxt.Out.Write(ld.Elfstrdat)

				if ctxt.LinkMode == ld.LinkExternal {
					ld.Elfemitreloc(ctxt)
				}
			}

		case objabi.Hplan9:
			ld.Asmplan9sym(ctxt)
			ctxt.Out.Flush()

			// Plan 9 appends the pclntab contents after the symbol table;
			// its size is reported in the header via Lcsize below.
			sym := ctxt.Syms.Lookup("pclntab", 0)
			if sym != nil {
				ld.Lcsize = int32(len(sym.P))
				ctxt.Out.Write(sym.P)
				ctxt.Out.Flush()
			}

		case objabi.Hdarwin:
			if ctxt.LinkMode == ld.LinkExternal {
				ld.Machoemitreloc(ctxt)
			}
		}
	}

	// Finally, seek back to the start of the file and write the header.
	if ctxt.Debugvlog != 0 {
		ctxt.Logf("%5.2f header\n", ld.Cputime())
	}
	ctxt.Out.SeekSet(0)
	switch ctxt.HeadType {
	default:
	case objabi.Hplan9: /* plan 9 */
		ctxt.Out.Write32(0x647)                      /* magic */
		ctxt.Out.Write32(uint32(ld.Segtext.Filelen)) /* sizes */
		ctxt.Out.Write32(uint32(ld.Segdata.Filelen))
		ctxt.Out.Write32(uint32(ld.Segdata.Length - ld.Segdata.Filelen))
		ctxt.Out.Write32(uint32(ld.Symsize))          /* nsyms */
		ctxt.Out.Write32(uint32(ld.Entryvalue(ctxt))) /* va of entry */
		ctxt.Out.Write32(0)
		ctxt.Out.Write32(uint32(ld.Lcsize))

	case objabi.Hlinux,
		objabi.Hfreebsd,
		objabi.Hnetbsd,
		objabi.Hopenbsd,
		objabi.Hnacl:
		ld.Asmbelf(ctxt, int64(symo))

	case objabi.Hdarwin:
		ld.Asmbmacho(ctxt)
	}

	ctxt.Out.Flush()
	// -C flag: print section size summary.
	if *ld.FlagC {
		fmt.Printf("textsize=%d\n", ld.Segtext.Filelen)
		fmt.Printf("datsize=%d\n", ld.Segdata.Filelen)
		fmt.Printf("bsssize=%d\n", ld.Segdata.Length-ld.Segdata.Filelen)
		fmt.Printf("symsize=%d\n", ld.Symsize)
		fmt.Printf("lcsize=%d\n", ld.Lcsize)
		fmt.Printf("total=%d\n", ld.Segtext.Filelen+ld.Segdata.Length+uint64(ld.Symsize)+uint64(ld.Lcsize))
	}
}
|