go/src/cmd/internal/obj/util.go


// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package obj

import (
	"bytes"
	"fmt"
	"log"
	"os"
	"strings"
	"time"
)

const REG_NONE = 0

var start time.Time

// Cputime returns the number of seconds elapsed since the first call
// to Cputime.
func Cputime() float64 {
	if start.IsZero() {
		start = time.Now()
	}
	return time.Since(start).Seconds()
}

// envOr returns the value of the environment variable key if it is
// set, and the fallback value otherwise.
func envOr(key, value string) string {
	if x := os.Getenv(key); x != "" {
		return x
	}
	return value
}

var (
	GOROOT  = envOr("GOROOT", defaultGOROOT)
	GOARCH  = envOr("GOARCH", defaultGOARCH)
	GOOS    = envOr("GOOS", defaultGOOS)
	GO386   = envOr("GO386", defaultGO386)
	GOARM   = goarm()
	Version = version
)

func goarm() int {
	switch v := envOr("GOARM", defaultGOARM); v {
	case "5":
		return 5
	case "6":
		return 6
	case "7":
		return 7
	}
	// Fail here, rather than validate at multiple call sites.
	log.Fatalf("Invalid GOARM value. Must be 5, 6, or 7.")
	panic("unreachable")
}

func Getgoextlinkenabled() string {
	return envOr("GO_EXTLINK_ENABLED", defaultGO_EXTLINK_ENABLED)
}

// Line returns the source position of p as a string, using the
// outermost position in the inlined call stack.
func (p *Prog) Line() string {
	return p.Ctxt.OutermostPos(p.Pos).String()
}

var armCondCode = []string{
	".EQ",
	".NE",
	".CS",
	".CC",
	".MI",
	".PL",
	".VS",
	".VC",
	".HI",
	".LS",
	".GE",
	".LT",
	".GT",
	".LE",
	"",
	".NV",
}

/* ARM scond byte */
const (
	C_SCOND     = (1 << 4) - 1
	C_SBIT      = 1 << 4
	C_PBIT      = 1 << 5
	C_WBIT      = 1 << 6
	C_FBIT      = 1 << 7
	C_UBIT      = 1 << 7
	C_SCOND_XOR = 14
)

// CConv formats ARM condition codes.
func CConv(s uint8) string {
	if s == 0 {
		return ""
	}
	sc := armCondCode[(s&C_SCOND)^C_SCOND_XOR]
	if s&C_SBIT != 0 {
		sc += ".S"
	}
	if s&C_PBIT != 0 {
		sc += ".P"
	}
	if s&C_WBIT != 0 {
		sc += ".W"
	}
	if s&C_UBIT != 0 { /* ambiguous with FBIT */
		sc += ".U"
	}
	return sc
}
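
// exampleCConv is an illustrative sketch, not part of the original
// file: ARM condition codes are stored in Prog.Scond XORed with
// C_SCOND_XOR, so the common AL ("always") condition encodes as zero
// and prints as the empty string.
func exampleCConv() {
	const eq = 0 ^ C_SCOND_XOR      // ARM condition 0 (EQ), as stored
	fmt.Println(CConv(eq))          // ".EQ"
	fmt.Println(CConv(eq | C_SBIT)) // ".EQ.S"
	fmt.Println(CConv(0))           // "" (AL)
}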

func (p *Prog) String() string {
	if p == nil {
		return "<nil Prog>"
	}
	if p.Ctxt == nil {
		return "<Prog without ctxt>"
	}

	sc := CConv(p.Scond)

	var buf bytes.Buffer

	fmt.Fprintf(&buf, "%.5d (%v)\t%v%s", p.Pc, p.Line(), p.As, sc)
	sep := "\t"
	quadOpAmd64 := p.RegTo2 == -1
	if quadOpAmd64 {
		fmt.Fprintf(&buf, "%s$%d", sep, p.From3.Offset)
		sep = ", "
	}
	if p.From.Type != TYPE_NONE {
		fmt.Fprintf(&buf, "%s%v", sep, Dconv(p, &p.From))
		sep = ", "
	}
	if p.Reg != REG_NONE {
		// Should not happen but might as well show it if it does.
		fmt.Fprintf(&buf, "%s%v", sep, Rconv(int(p.Reg)))
		sep = ", "
	}
	if p.From3Type() != TYPE_NONE {
		if p.From3.Type == TYPE_CONST && p.As == ATEXT {
			// Special case - omit $.
			fmt.Fprintf(&buf, "%s%d", sep, p.From3.Offset)
		} else if quadOpAmd64 {
			fmt.Fprintf(&buf, "%s%v", sep, Rconv(int(p.From3.Reg)))
		} else {
			fmt.Fprintf(&buf, "%s%v", sep, Dconv(p, p.From3))
		}
		sep = ", "
	}
	if p.To.Type != TYPE_NONE {
		fmt.Fprintf(&buf, "%s%v", sep, Dconv(p, &p.To))
	}
	if p.RegTo2 != REG_NONE && !quadOpAmd64 {
		fmt.Fprintf(&buf, "%s%v", sep, Rconv(int(p.RegTo2)))
	}
	return buf.String()
}
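
// Per the format string above, the result resembles an assembler
// listing line such as (a hypothetical example, not taken from the
// source):
//
//	00016 (main.go:5)	MOVQ	$0, AX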

// NewProg returns a new Prog associated with ctxt, taking it from the
// ctxt's preallocated pool when one is available.
func (ctxt *Link) NewProg() *Prog {
	var p *Prog
	if i := ctxt.allocIdx; i < len(ctxt.progs) {
		p = &ctxt.progs[i]
		ctxt.allocIdx = i + 1
	} else {
		p = new(Prog) // should be the only call to this; all others should use ctxt.NewProg
	}
	p.Ctxt = ctxt
	return p
}

// freeProgs resets the pool, zeroing the used entries so the garbage
// collector can reclaim anything they referenced.
func (ctxt *Link) freeProgs() {
	s := ctxt.progs[:ctxt.allocIdx]
	for i := range s {
		s[i] = Prog{}
	}
	ctxt.allocIdx = 0
}

func (ctxt *Link) Dconv(a *Addr) string {
	return Dconv(nil, a)
}

// Dconv formats the operand a as assembler syntax. p, if non-nil,
// supplies context for resolving branch targets.
func Dconv(p *Prog, a *Addr) string {
	var str string

	switch a.Type {
	default:
		str = fmt.Sprintf("type=%d", a.Type)

	case TYPE_NONE:
		str = ""
		if a.Name != NAME_NONE || a.Reg != 0 || a.Sym != nil {
			str = fmt.Sprintf("%v(%v)(NONE)", Mconv(a), Rconv(int(a.Reg)))
		}

	case TYPE_REG:
		// TODO(rsc): This special case is for x86 instructions like
		//	PINSRQ	CX,$1,X6
		// where the $1 is included in the p->to Addr.
		// Move into a new field.
		if a.Offset != 0 {
			str = fmt.Sprintf("$%d,%v", a.Offset, Rconv(int(a.Reg)))
			break
		}
		str = Rconv(int(a.Reg))
		if a.Name != NAME_NONE || a.Sym != nil {
			str = fmt.Sprintf("%v(%v)(REG)", Mconv(a), Rconv(int(a.Reg)))
		}

	case TYPE_BRANCH:
		if a.Sym != nil {
			str = fmt.Sprintf("%s(SB)", a.Sym.Name)
		} else if p != nil && p.Pcond != nil {
			str = fmt.Sprint(p.Pcond.Pc)
		} else if a.Val != nil {
			str = fmt.Sprint(a.Val.(*Prog).Pc)
		} else {
			str = fmt.Sprintf("%d(PC)", a.Offset)
		}

	case TYPE_INDIR:
		str = fmt.Sprintf("*%s", Mconv(a))

	case TYPE_MEM:
		str = Mconv(a)
		if a.Index != REG_NONE {
			str += fmt.Sprintf("(%v*%d)", Rconv(int(a.Index)), int(a.Scale))
		}

	case TYPE_CONST:
		if a.Reg != 0 {
			str = fmt.Sprintf("$%v(%v)", Mconv(a), Rconv(int(a.Reg)))
		} else {
			str = fmt.Sprintf("$%v", Mconv(a))
		}

	case TYPE_TEXTSIZE:
		if a.Val.(int32) == ArgsSizeUnknown {
			str = fmt.Sprintf("$%d", a.Offset)
		} else {
			str = fmt.Sprintf("$%d-%d", a.Offset, a.Val.(int32))
		}

	case TYPE_FCONST:
		str = fmt.Sprintf("%.17g", a.Val.(float64))
		// Make sure 1 prints as 1.0
		if !strings.ContainsAny(str, ".e") {
			str += ".0"
		}
		str = fmt.Sprintf("$(%s)", str)

	case TYPE_SCONST:
		str = fmt.Sprintf("$%q", a.Val.(string))

	case TYPE_ADDR:
		str = fmt.Sprintf("$%s", Mconv(a))

	case TYPE_SHIFT:
		v := int(a.Offset)
		ops := "<<>>->@>"
		switch GOARCH {
		case "arm":
			op := ops[((v>>5)&3)<<1:]
			if v&(1<<4) != 0 {
				str = fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
			} else {
				str = fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
			}
			if a.Reg != 0 {
				str += fmt.Sprintf("(%v)", Rconv(int(a.Reg)))
			}
		case "arm64":
			op := ops[((v>>22)&3)<<1:]
			str = fmt.Sprintf("R%d%c%c%d", (v>>16)&31, op[0], op[1], (v>>10)&63)
		default:
			panic("TYPE_SHIFT is not supported on " + GOARCH)
		}

	case TYPE_REGREG:
		str = fmt.Sprintf("(%v, %v)", Rconv(int(a.Reg)), Rconv(int(a.Offset)))

	case TYPE_REGREG2:
		str = fmt.Sprintf("%v, %v", Rconv(int(a.Reg)), Rconv(int(a.Offset)))

	case TYPE_REGLIST:
		str = regListConv(int(a.Offset))
	}

	return str
}
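
// exampleDconv is an illustrative sketch, not part of the original
// file, showing how a few operand kinds print; it relies only on the
// Addr fields already used above.
func exampleDconv() {
	fmt.Println(Dconv(nil, &Addr{Type: TYPE_CONST, Offset: 1}))  // "$1"
	fmt.Println(Dconv(nil, &Addr{Type: TYPE_FCONST, Val: 2.0}))  // "$(2.0)"
	fmt.Println(Dconv(nil, &Addr{Type: TYPE_SCONST, Val: "hi"})) // "$\"hi\""
}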

// Mconv formats a memory or symbol reference according to its
// addressing name.
func Mconv(a *Addr) string {
	var str string

	switch a.Name {
	default:
		str = fmt.Sprintf("name=%d", a.Name)

	case NAME_NONE:
		switch {
		case a.Reg == REG_NONE:
			str = fmt.Sprint(a.Offset)
		case a.Offset == 0:
			str = fmt.Sprintf("(%v)", Rconv(int(a.Reg)))
		case a.Offset != 0:
			str = fmt.Sprintf("%d(%v)", a.Offset, Rconv(int(a.Reg)))
		}

	// Note: a.Reg == REG_NONE encodes the default base register for the NAME_ type.
	case NAME_EXTERN:
		reg := "SB"
		if a.Reg != REG_NONE {
			reg = Rconv(int(a.Reg))
		}
		if a.Sym != nil {
			str = fmt.Sprintf("%s%s(%s)", a.Sym.Name, offConv(a.Offset), reg)
		} else {
			str = fmt.Sprintf("%s(%s)", offConv(a.Offset), reg)
		}

	case NAME_GOTREF:
		reg := "SB"
		if a.Reg != REG_NONE {
			reg = Rconv(int(a.Reg))
		}
		if a.Sym != nil {
			str = fmt.Sprintf("%s%s@GOT(%s)", a.Sym.Name, offConv(a.Offset), reg)
		} else {
			str = fmt.Sprintf("%s@GOT(%s)", offConv(a.Offset), reg)
		}

	case NAME_STATIC:
		reg := "SB"
		if a.Reg != REG_NONE {
			reg = Rconv(int(a.Reg))
		}
		if a.Sym != nil {
			str = fmt.Sprintf("%s<>%s(%s)", a.Sym.Name, offConv(a.Offset), reg)
		} else {
			str = fmt.Sprintf("<>%s(%s)", offConv(a.Offset), reg)
		}

	case NAME_AUTO:
		reg := "SP"
		if a.Reg != REG_NONE {
			reg = Rconv(int(a.Reg))
		}
		if a.Sym != nil {
			str = fmt.Sprintf("%s%s(%s)", a.Sym.Name, offConv(a.Offset), reg)
		} else {
			str = fmt.Sprintf("%s(%s)", offConv(a.Offset), reg)
		}

	case NAME_PARAM:
		reg := "FP"
		if a.Reg != REG_NONE {
			reg = Rconv(int(a.Reg))
		}
		if a.Sym != nil {
			str = fmt.Sprintf("%s%s(%s)", a.Sym.Name, offConv(a.Offset), reg)
		} else {
			str = fmt.Sprintf("%s(%s)", offConv(a.Offset), reg)
		}
	}

	return str
}
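
// For reference (a sketch derived from the cases above, using a
// hypothetical symbol x and offset 8), Mconv produces forms such as:
//
//	NAME_EXTERN	x+8(SB)
//	NAME_GOTREF	x+8@GOT(SB)
//	NAME_STATIC	x<>+8(SB)
//	NAME_AUTO	x+8(SP)
//	NAME_PARAM	x+8(FP)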

func offConv(off int64) string {
	if off == 0 {
		return ""
	}
	return fmt.Sprintf("%+d", off)
}

type regSet struct {
	lo    int
	hi    int
	Rconv func(int) string
}

// Few enough architectures that a linear scan is fastest.
// Not even worth sorting.
var regSpace []regSet

/*
	Each architecture defines a register space as a unique
	integer range.
	Here is the list of architectures and the bases of their
	register spaces.
*/
const (
	// Because of masking operations in the encodings, each register
	// space should start at 0 modulo some power of 2.
	RBase386   = 1 * 1024
	RBaseAMD64 = 2 * 1024
	RBaseARM   = 3 * 1024
	RBasePPC64 = 4 * 1024  // range [4k, 8k)
	RBaseARM64 = 8 * 1024  // range [8k, 13k)
	RBaseMIPS  = 13 * 1024 // range [13k, 14k)
	RBaseS390X = 14 * 1024 // range [14k, 15k)
)

// RegisterRegister binds a pretty-printer (Rconv) for register
// numbers to a given register number range. Lo is inclusive,
// hi exclusive (valid registers are lo through hi-1).
func RegisterRegister(lo, hi int, Rconv func(int) string) {
	regSpace = append(regSpace, regSet{lo, hi, Rconv})
}
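
// A hypothetical sketch, not from this file, of how an architecture
// package would bind its register space during init (rconvARM and the
// upper bound are assumptions for illustration):
//
//	func init() {
//		RegisterRegister(RBaseARM, RBaseARM+1024, rconvARM)
//	}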

// Rconv returns the string representation of register number reg,
// using the printer registered for the range containing it.
func Rconv(reg int) string {
	if reg == REG_NONE {
		return "NONE"
	}
	for i := range regSpace {
		rs := &regSpace[i]
		if rs.lo <= reg && reg < rs.hi {
			return rs.Rconv(reg)
		}
	}
	return fmt.Sprintf("R???%d", reg)
}

func regListConv(list int) string {
	str := ""
	for i := 0; i < 16; i++ { // TODO: 16 is ARM-specific.
		if list&(1<<uint(i)) != 0 {
			if str == "" {
				str += "["
			} else {
				str += ","
			}
			// This is ARM-specific; R10 is g.
			if i == 10 {
				str += "g"
			} else {
				str += fmt.Sprintf("R%d", i)
			}
		}
	}
	str += "]"
	return str
}
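
// For example (an illustrative note, not from the source), a bit mask
// with bits 0, 4, and 10 set prints as:
//
//	regListConv(1<<0 | 1<<4 | 1<<10) // "[R0,R4,g]"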

type opSet struct {
	lo    As
	names []string
}

// Not even worth sorting
var aSpace []opSet

// RegisterOpcode binds a list of instruction names
// to a given instruction number range.
func RegisterOpcode(lo As, Anames []string) {
	if len(Anames) > AllowedOpCodes {
		panic(fmt.Sprintf("too many instructions, have %d max %d", len(Anames), AllowedOpCodes))
	}
	aSpace = append(aSpace, opSet{lo, Anames})
}

func (a As) String() string {
	if 0 <= a && int(a) < len(Anames) {
		return Anames[a]
	}
	for i := range aSpace {
		as := &aSpace[i]
		if as.lo <= a && int(a-as.lo) < len(as.names) {
			return as.names[a-as.lo]
		}
	}
	return fmt.Sprintf("A???%d", a)
}
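
// A hypothetical sketch, mirroring RegisterRegister above, of how an
// architecture would bind its opcode names (ABaseARM and the
// architecture's armAnames table are assumptions for illustration):
//
//	func init() {
//		RegisterOpcode(ABaseARM, armAnames)
//	}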

var Anames = []string{
	"XXX",
	"CALL",
	"DUFFCOPY",
	"DUFFZERO",
	"END",
	"FUNCDATA",
	"JMP",
	"NOP",
	"PCDATA",
	"RET",
	"TEXT",
	"UNDEF",
	"VARDEF",
	"VARKILL",
	"VARLIVE",
}

// Bool2int converts a boolean to an int: 1 for true, 0 for false.
func Bool2int(b bool) int {
	// The compiler currently only optimizes this form.
	// See issue 6011.
	var i int
	if b {
		i = 1
	} else {
		i = 0
	}
	return i
}