go/src/cmd/link/internal/ld/decodesym.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ld

import (
	"cmd/internal/objabi"
	"cmd/internal/sys"
	"cmd/link/internal/loader"
	"cmd/link/internal/sym"
	"debug/elf"
	"log"
)

// Decoding the type.* symbols. This has to be in sync with
// ../../runtime/type.go, or more specifically, with what
// cmd/compile/internal/gc/reflect.go stuffs in these.

// tflag is documented in reflect/type.go.
//
// tflag values must be kept in sync with copies in:
//	cmd/compile/internal/gc/reflect.go
//	cmd/link/internal/ld/decodesym.go
//	reflect/type.go
//	runtime/type.go
const (
	tflagUncommon  = 1 << 0
	tflagExtraStar = 1 << 1
)
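
// decodeInuxi decodes the sz-byte (2, 4, or 8) unsigned integer at the
// start of p, using the target architecture's byte order. On a
// little-endian target, for example,
// decodeInuxi(arch, []byte{0x2a, 0, 0, 0}, 4) returns 42.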
func decodeInuxi(arch *sys.Arch, p []byte, sz int) uint64 {
	switch sz {
	case 2:
		return uint64(arch.ByteOrder.Uint16(p))
	case 4:
		return uint64(arch.ByteOrder.Uint32(p))
	case 8:
		return arch.ByteOrder.Uint64(p)
	default:
		Exitf("dwarf: decode inuxi %d", sz)
		panic("unreachable")
	}
}

func commonsize(arch *sys.Arch) int      { return 4*arch.PtrSize + 8 + 8 } // runtime._type
func structfieldSize(arch *sys.Arch) int { return 3 * arch.PtrSize }       // runtime.structfield
func uncommonSize() int                  { return 4 + 2 + 2 + 4 + 4 }      // runtime.uncommontype
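
// For reference, the runtime._type layout these sizes assume, with byte
// offsets for 4-byte / 8-byte pointers (a sketch; runtime/type.go is the
// authoritative definition):
//
//	size       uintptr // 0x0  / 0x0
//	ptrdata    uintptr // 0x4  / 0x8
//	hash       uint32  // 0x8  / 0x10
//	tflag      tflag   // 0xc  / 0x14
//	align      uint8   // 0xd  / 0x15
//	fieldAlign uint8   // 0xe  / 0x16
//	kind       uint8   // 0xf  / 0x17
//	equal      func    // 0x10 / 0x18
//	gcdata     *byte   // 0x14 / 0x20
//	str        nameOff // 0x18 / 0x28
//	ptrToThis  typeOff // 0x1c / 0x2c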

// Type.commonType.kind
func decodetypeKind(arch *sys.Arch, p []byte) uint8 {
	return p[2*arch.PtrSize+7] & objabi.KindMask // 0xf / 0x17
}

// Type.commonType.kind: the KindGCProg bit, set when the type's gcdata is a
// GC program rather than a plain pointer bitmap.
func decodetypeUsegcprog(arch *sys.Arch, p []byte) uint8 {
	return p[2*arch.PtrSize+7] & objabi.KindGCProg // 0xf / 0x17
}

// Type.commonType.size
func decodetypeSize(arch *sys.Arch, p []byte) int64 {
	return int64(decodeInuxi(arch, p, arch.PtrSize)) // 0x0 / 0x0
}

// Type.commonType.ptrdata
func decodetypePtrdata(arch *sys.Arch, p []byte) int64 {
	return int64(decodeInuxi(arch, p[arch.PtrSize:], arch.PtrSize)) // 0x4 / 0x8
}

// Type.commonType.tflag
func decodetypeHasUncommon(arch *sys.Arch, p []byte) bool {
	return p[2*arch.PtrSize+4]&tflagUncommon != 0
}

// Type.FuncType.dotdotdot
func decodetypeFuncDotdotdot(arch *sys.Arch, p []byte) bool {
	return uint16(decodeInuxi(arch, p[commonsize(arch)+2:], 2))&(1<<15) != 0
}

// Type.FuncType.inCount
func decodetypeFuncInCount(arch *sys.Arch, p []byte) int {
	return int(decodeInuxi(arch, p[commonsize(arch):], 2))
}
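
// Type.FuncType.outCount. The low 15 bits of this 16-bit word are the
// result count; the top bit is the dotdotdot flag decoded above. A
// variadic function with two results, for example, stores 0x8002 here.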
func decodetypeFuncOutCount(arch *sys.Arch, p []byte) int {
	return int(uint16(decodeInuxi(arch, p[commonsize(arch)+2:], 2)) & (1<<15 - 1))
}

// InterfaceType.methods.length
func decodetypeIfaceMethodCount(arch *sys.Arch, p []byte) int64 {
	return int64(decodeInuxi(arch, p[commonsize(arch)+2*arch.PtrSize:], arch.PtrSize))
}

// Matches runtime/typekind.go and reflect.Kind.
const (
	kindArray     = 17
	kindChan      = 18
	kindFunc      = 19
	kindInterface = 20
	kindMap       = 21
	kindPtr       = 22
	kindSlice     = 23
	kindStruct    = 25
	kindMask      = (1 << 5) - 1
)
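
// decodeReloc finds the relocation in relocs that applies at offset off of
// symIdx, returning the zero Reloc2 (whose Sym is 0) if there is none.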
func decodeReloc(ldr *loader.Loader, symIdx loader.Sym, relocs *loader.Relocs, off int32) loader.Reloc2 {
	for j := 0; j < relocs.Count(); j++ {
		rel := relocs.At2(j)
		if rel.Off() == off {
			return rel
		}
	}
	return loader.Reloc2{}
}

func decodeRelocSym(ldr *loader.Loader, symIdx loader.Sym, relocs *loader.Relocs, off int32) loader.Sym {
	return decodeReloc(ldr, symIdx, relocs, off).Sym()
}

// decodetypeName decodes the name from a reflect.name.
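//
// A reflect.name starts with one flag byte and a two-byte big-endian
// length, followed by the name bytes, so, for example,
// []byte{0, 0, 5, 'h', 'e', 'l', 'l', 'o'} carries the name "hello".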
func decodetypeName(ldr *loader.Loader, symIdx loader.Sym, relocs *loader.Relocs, off int) string {
	r := decodeRelocSym(ldr, symIdx, relocs, int32(off))
	if r == 0 {
		return ""
	}
	data := ldr.Data(r)
	namelen := int(uint16(data[1])<<8 | uint16(data[2]))
	return string(data[3 : 3+namelen])
}
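
// decodetypeFuncInType returns the type of the i'th parameter of a func
// type. The parameter and result types are stored as a pointer array that
// directly follows the funcType header (and the uncommon data, if
// present); uadd is the byte offset of that array.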
func decodetypeFuncInType(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, relocs *loader.Relocs, i int) loader.Sym {
	uadd := commonsize(arch) + 4
	if arch.PtrSize == 8 {
		uadd += 4
	}
	if decodetypeHasUncommon(arch, ldr.Data(symIdx)) {
		uadd += uncommonSize()
	}
	return decodeRelocSym(ldr, symIdx, relocs, int32(uadd+i*arch.PtrSize))
}

func decodetypeFuncOutType(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, relocs *loader.Relocs, i int) loader.Sym {
	return decodetypeFuncInType(ldr, arch, symIdx, relocs, i+decodetypeFuncInCount(arch, ldr.Data(symIdx)))
}

func decodetypeArrayElem(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym {
	relocs := ldr.Relocs(symIdx)
	return decodeRelocSym(ldr, symIdx, &relocs, int32(commonsize(arch))) // 0x20 / 0x30
}

func decodetypeArrayLen(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) int64 {
	data := ldr.Data(symIdx)
	return int64(decodeInuxi(arch, data[commonsize(arch)+2*arch.PtrSize:], arch.PtrSize))
}

func decodetypeChanElem(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym {
	relocs := ldr.Relocs(symIdx)
	return decodeRelocSym(ldr, symIdx, &relocs, int32(commonsize(arch))) // 0x20 / 0x30
}

func decodetypeMapKey(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym {
	relocs := ldr.Relocs(symIdx)
	return decodeRelocSym(ldr, symIdx, &relocs, int32(commonsize(arch))) // 0x20 / 0x30
}

func decodetypeMapValue(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym {
	relocs := ldr.Relocs(symIdx)
	return decodeRelocSym(ldr, symIdx, &relocs, int32(commonsize(arch))+int32(arch.PtrSize)) // 0x24 / 0x38
}

func decodetypePtrElem(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym {
	relocs := ldr.Relocs(symIdx)
	return decodeRelocSym(ldr, symIdx, &relocs, int32(commonsize(arch))) // 0x20 / 0x30
}

func decodetypeStructFieldCount(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) int {
	data := ldr.Data(symIdx)
	return int(decodeInuxi(arch, data[commonsize(arch)+2*arch.PtrSize:], arch.PtrSize))
}

func decodetypeStructFieldArrayOff(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, i int) int {
	data := ldr.Data(symIdx)
	off := commonsize(arch) + 4*arch.PtrSize
	if decodetypeHasUncommon(arch, data) {
		off += uncommonSize()
	}
	off += i * structfieldSize(arch)
	return off
}

func decodetypeStructFieldName(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, i int) string {
	off := decodetypeStructFieldArrayOff(ldr, arch, symIdx, i)
	relocs := ldr.Relocs(symIdx)
	return decodetypeName(ldr, symIdx, &relocs, off)
}

func decodetypeStructFieldType(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, i int) loader.Sym {
	off := decodetypeStructFieldArrayOff(ldr, arch, symIdx, i)
	relocs := ldr.Relocs(symIdx)
	return decodeRelocSym(ldr, symIdx, &relocs, int32(off+arch.PtrSize))
}
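
// decodetypeStructFieldOffsAnon returns the offsetAnon word of the i'th
// field, which packs the field's byte offset with its embedded flag as
// offset<<1 | isEmbedded.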
func decodetypeStructFieldOffsAnon(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym, i int) int64 {
	off := decodetypeStructFieldArrayOff(ldr, arch, symIdx, i)
	data := ldr.Data(symIdx)
	return int64(decodeInuxi(arch, data[off+2*arch.PtrSize:], arch.PtrSize))
}

// decodetypeStr returns the contents of an rtype's str field (a nameOff).
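// When tflagExtraStar is set, the stored name carries an extra "*" prefix
// (so a type and its pointer type can share the name data), which is
// stripped here.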
func decodetypeStr(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) string {
	relocs := ldr.Relocs(symIdx)
	str := decodetypeName(ldr, symIdx, &relocs, 4*arch.PtrSize+8)
	data := ldr.Data(symIdx)
	if data[2*arch.PtrSize+4]&tflagExtraStar != 0 {
		return str[1:]
	}
	return str
}
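
// decodetypeGcmask returns the GC pointer mask for type symbol s, either by
// reading it out of the shared library the symbol was loaded from or from
// the data of the symbol its gcdata field points to.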
func decodetypeGcmask(ctxt *Link, s loader.Sym) []byte {
	if ctxt.loader.SymType(s) == sym.SDYNIMPORT {
		symData := ctxt.loader.Data(s)
		addr := decodetypeGcprogShlib(ctxt, symData)
		ptrdata := decodetypePtrdata(ctxt.Arch, symData)
		sect := findShlibSection(ctxt, ctxt.loader.SymPkg(s), addr)
		if sect != nil {
			bits := ptrdata / int64(ctxt.Arch.PtrSize)
			r := make([]byte, (bits+7)/8)
			// ldshlibsyms avoids closing the ELF file so sect.ReadAt works.
			// If we remove this read (and the ones in decodetypeGcprog), we
			// can close the file.
			_, err := sect.ReadAt(r, int64(addr-sect.Addr))
			if err != nil {
				log.Fatal(err)
			}
			return r
		}
		Exitf("cannot find gcmask for %s", ctxt.loader.SymName(s))
		return nil
	}
	relocs := ctxt.loader.Relocs(s)
	mask := decodeRelocSym(ctxt.loader, s, &relocs, 2*int32(ctxt.Arch.PtrSize)+8+1*int32(ctxt.Arch.PtrSize))
	return ctxt.loader.Data(mask)
}

// Type.commonType.gc
func decodetypeGcprog(ctxt *Link, s loader.Sym) []byte {
	if ctxt.loader.SymType(s) == sym.SDYNIMPORT {
		symData := ctxt.loader.Data(s)
		addr := decodetypeGcprogShlib(ctxt, symData)
		sect := findShlibSection(ctxt, ctxt.loader.SymPkg(s), addr)
		if sect != nil {
			// A gcprog is a 4-byte uint32 indicating length, followed by
			// the actual program.
			progsize := make([]byte, 4)
			_, err := sect.ReadAt(progsize, int64(addr-sect.Addr))
			if err != nil {
				log.Fatal(err)
			}
			progbytes := make([]byte, ctxt.Arch.ByteOrder.Uint32(progsize))
			_, err = sect.ReadAt(progbytes, int64(addr-sect.Addr+4))
			if err != nil {
				log.Fatal(err)
			}
			return append(progsize, progbytes...)
		}
		Exitf("cannot find gcprog for %s", ctxt.loader.SymName(s))
		return nil
	}
	relocs := ctxt.loader.Relocs(s)
	rs := decodeRelocSym(ctxt.loader, s, &relocs, 2*int32(ctxt.Arch.PtrSize)+8+1*int32(ctxt.Arch.PtrSize))
	return ctxt.loader.Data(rs)
}

// Find the elf.Section of a given shared library that contains a given address.
func findShlibSection(ctxt *Link, path string, addr uint64) *elf.Section {
	for _, shlib := range ctxt.Shlibs {
		if shlib.Path == path {
			for _, sect := range shlib.File.Sections[1:] { // skip the NULL section
				if sect.Addr <= addr && addr <= sect.Addr+sect.Size {
					return sect
				}
			}
		}
	}
	return nil
}
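
// decodetypeGcprogShlib reads the gcdata field of a _type (an address in
// the shared library's image) from the raw symbol data; 2*PtrSize+8+PtrSize
// is the byte offset of gcdata in runtime._type.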
func decodetypeGcprogShlib(ctxt *Link, data []byte) uint64 {
	return decodeInuxi(ctxt.Arch, data[2*int32(ctxt.Arch.PtrSize)+8+1*int32(ctxt.Arch.PtrSize):], ctxt.Arch.PtrSize)
}