cmd/5g etc: mechanical cleanup

Run rsc.io/grind rev a26569f on C->Go conversions.

The new change in grind is the inlining of goto targets.
If code says 'goto x' and the block starting at label x is unreachable
except through that goto and the code can be moved to where
the goto is without changing the meaning of its variable names,
grind does that move. Similarly, a goto to a plain return statement
turns into that return statement (even if there are other paths to
the return statement).
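
A toy sketch of both rewrites (made-up function, not code from this CL):

	// Before: the 'fail' block is reachable only through its goto,
	// and 'done' labels a plain return statement.
	func before(n int) int {
		if n < 0 {
			goto fail
		}
		if n == 0 {
			goto done
		}
		n *= 2
	done:
		return n
	fail:
		n = -1
		return n
	}

	// After grind: the fail block is inlined at its goto site, and the
	// goto to the bare return becomes that return.
	func after(n int) int {
		if n < 0 {
			n = -1
			return n
		}
		if n == 0 {
			return n
		}
		n *= 2
		return n
	}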

Combined, these remove many long-distance gotos, which in turn
makes it possible to reduce the scope of more variable declarations.
(Because gotos can't jump across declarations, the gotos were
keeping the declarations from moving.)
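
For example (again a made-up sketch, not from this CL), a declaration that a goto had pinned at the top of its function:

	// Before: n1 must be declared up front, because the 'goto ret'
	// below is not allowed to jump over a variable declaration.
	func withGoto(x *int) int {
		var n1 int
		if x == nil {
			goto ret
		}
		n1 = *x + 1
	ret:
		return n1
	}

	// After: once the goto is a plain return, nothing jumps over the
	// declaration, so it can move down to its first use.
	func withoutGoto(x *int) int {
		if x == nil {
			return 0
		}
		n1 := *x + 1
		return n1
	}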

Checked bit-for-bit compatibility with toolstash + buildall.

Reduces compiler runtime in html/template by about 12%.

Change-Id: Id727c0bd7763a61aa22f3daa00aeb8fccbc057a3
Reviewed-on: https://go-review.googlesource.com/6472
Reviewed-by: Aram Hăvărneanu <aram@mgk.ro>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Russ Cox 2015-03-02 12:35:15 -05:00
parent 190357d560
commit 79f727a70e
67 changed files with 4118 additions and 5445 deletions

View file

@ -25,15 +25,8 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.Dump("cgen-res", res)
}
var n1 gc.Node
var nr *gc.Node
var nl *gc.Node
var a int
var f1 gc.Node
var f0 gc.Node
var n2 gc.Node
if n == nil || n.Type == nil {
goto ret
return
}
if res == nil || res.Type == nil {
@ -81,7 +74,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.Tempname(&n1, n.Type)
cgen(n, &n1)
cgen(&n1, res)
goto ret
return
}
}
@ -90,7 +83,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
sgen(n, res, n.Type.Width)
goto ret
return
}
// update addressability for string, slice
@ -124,7 +117,7 @@ func cgen(n *gc.Node, res *gc.Node) {
regfree(&n1)
}
goto ret
return
}
// if both are not addressable, use a temporary.
@ -179,16 +172,16 @@ func cgen(n *gc.Node, res *gc.Node) {
}
sudoclean()
goto ret
return
}
}
// otherwise, the result is addressable but n is not.
// let's do some computation.
nl = n.Left
nl := n.Left
nr = n.Right
nr := n.Right
if nl != nil && nl.Ullman >= gc.UINF {
if nr != nil && nr.Ullman >= gc.UINF {
@ -198,7 +191,7 @@ func cgen(n *gc.Node, res *gc.Node) {
n2 := *n
n2.Left = &n1
cgen(&n2, res)
goto ret
return
}
}
@ -223,8 +216,34 @@ func cgen(n *gc.Node, res *gc.Node) {
}
}
var a int
var f0 gc.Node
var n1 gc.Node
var n2 gc.Node
if nl != nil && gc.Isfloat[n.Type.Etype] != 0 && gc.Isfloat[nl.Type.Etype] != 0 {
goto flt
// floating-point.
regalloc(&f0, nl.Type, res)
if nr != nil {
goto flt2
}
if n.Op == gc.OMINUS {
nr = gc.Nodintconst(-1)
gc.Convlit(&nr, n.Type)
n.Op = gc.OMUL
goto flt2
}
// unary
cgen(nl, &f0)
if n.Op != gc.OCONV && n.Op != gc.OPLUS {
gins(optoas(int(n.Op), n.Type), &f0, &f0)
}
gmove(&f0, res)
regfree(&f0)
return
}
switch n.Op {
default:
@ -255,11 +274,11 @@ func cgen(n *gc.Node, res *gc.Node) {
bgen(n, true, 0, p2)
gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
goto ret
return
case gc.OPLUS:
cgen(nl, res)
goto ret
return
// unary
case gc.OCOM:
@ -286,7 +305,13 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.OMUL:
a = optoas(int(n.Op), nl.Type)
goto sbop
// symmetric binary
if nl.Ullman < nr.Ullman {
r := nl
nl = nr
nr = r
}
goto abop
// asymmetric binary
case gc.OSUB:
@ -489,14 +514,7 @@ func cgen(n *gc.Node, res *gc.Node) {
goto abop
}
goto ret
sbop: // symmetric binary
if nl.Ullman < nr.Ullman {
r := nl
nl = nr
nr = r
}
return
// TODO(kaib): use fewer registers here.
abop: // asymmetric binary
@ -561,33 +579,10 @@ norm:
if n2.Op != gc.OLITERAL {
regfree(&n2)
}
goto ret
flt: // floating-point.
regalloc(&f0, nl.Type, res)
if nr != nil {
goto flt2
}
if n.Op == gc.OMINUS {
nr = gc.Nodintconst(-1)
gc.Convlit(&nr, n.Type)
n.Op = gc.OMUL
goto flt2
}
// unary
cgen(nl, &f0)
if n.Op != gc.OCONV && n.Op != gc.OPLUS {
gins(optoas(int(n.Op), n.Type), &f0, &f0)
}
gmove(&f0, res)
regfree(&f0)
goto ret
return
flt2: // binary
var f1 gc.Node
if nl.Ullman >= nr.Ullman {
cgen(nl, &f0)
regalloc(&f1, n.Type, nil)
@ -604,9 +599,7 @@ flt2: // binary
gmove(&f1, res)
regfree(&f0)
regfree(&f1)
goto ret
ret:
return
}
/*
@ -666,7 +659,6 @@ func agen(n *gc.Node, res *gc.Node) {
n = n.Left
}
var nl *gc.Node
if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
@ -682,7 +674,7 @@ func agen(n *gc.Node, res *gc.Node) {
gins(arm.AMOVW, &n1, &n2)
gmove(&n2, res)
regfree(&n2)
goto ret
return
}
if n.Addable != 0 {
@ -694,10 +686,10 @@ func agen(n *gc.Node, res *gc.Node) {
gins(arm.AMOVW, &n1, &n2)
gmove(&n2, res)
regfree(&n2)
goto ret
return
}
nl = n.Left
nl := n.Left
switch n.Op {
default:
@ -820,8 +812,6 @@ func agen(n *gc.Node, res *gc.Node) {
regfree(&n3)
}
}
ret:
}
/*
@ -1195,25 +1185,23 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
gc.Genlist(n.Ninit)
}
var et int
var nl *gc.Node
var nr *gc.Node
if n.Type == nil {
gc.Convlit(&n, gc.Types[gc.TBOOL])
if n.Type == nil {
goto ret
return
}
}
et = int(n.Type.Etype)
et := int(n.Type.Etype)
if et != gc.TBOOL {
gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
gc.Patch(gins(obj.AEND, nil, nil), to)
goto ret
return
}
nr = nil
nr := (*gc.Node)(nil)
var nl *gc.Node
switch n.Op {
default:
a := gc.ONE
@ -1221,14 +1209,14 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
a = gc.OEQ
}
gencmp0(n, n.Type, a, likely, to)
goto ret
return
// need to ask if it is bool?
case gc.OLITERAL:
if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(arm.AB, nil, 0), to)
}
goto ret
return
case gc.OANDAND,
gc.OOROR:
@ -1246,7 +1234,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
bgen(n.Right, true_, likely, to)
}
goto ret
return
case gc.OEQ,
gc.ONE,
@ -1256,7 +1244,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
gc.OGE:
nr = n.Right
if nr == nil || nr.Type == nil {
goto ret
return
}
fallthrough
@ -1264,14 +1252,14 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
nl = n.Left
if nl == nil || nl.Type == nil {
goto ret
return
}
}
switch n.Op {
case gc.ONOT:
bgen(nl, !true_, likely, to)
goto ret
return
case gc.OEQ,
gc.ONE,
@ -1293,7 +1281,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
n.Ninit = ll
gc.Patch(gc.Gbranch(arm.AB, nil, 0), to)
gc.Patch(p2, gc.Pc)
goto ret
return
}
a = gc.Brcom(a)
@ -1438,9 +1426,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
regfree(&n2)
}
goto ret
ret:
return
}
/*

View file

@ -329,7 +329,7 @@ func cgen_call(n *gc.Node, proc int) {
nod.Type = t
ginscall(&nod, proc)
regfree(&nod)
goto ret
return
}
// call pointer
@ -340,15 +340,13 @@ func cgen_call(n *gc.Node, proc int) {
nod.Type = t
ginscall(&nod, proc)
regfree(&nod)
goto ret
return
}
// call direct
n.Left.Method = 1
ginscall(n.Left, proc)
ret:
}
/*

View file

@ -331,10 +331,8 @@ func gmove(f *gc.Node, t *gc.Node) {
// cannot have two memory operands;
// except 64-bit, which always copies via registers anyway.
var flo gc.Node
var a int
var r1 gc.Node
var fhi gc.Node
if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
@ -387,7 +385,9 @@ func gmove(f *gc.Node, t *gc.Node) {
switch uint32(ft)<<16 | uint32(tt) {
default:
goto fatal
// should not happen
gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
return
/*
* integer copy and truncate
@ -784,6 +784,8 @@ hard:
// truncate 64 bit integer
trunc64:
var fhi gc.Node
var flo gc.Node
split64(f, &flo, &fhi)
regalloc(&r1, t.Type, nil)
@ -792,10 +794,6 @@ trunc64:
regfree(&r1)
splitclean()
return
// should not happen
fatal:
gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
}
func samaddr(f *gc.Node, t *gc.Node) bool {
@ -1273,12 +1271,6 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) bool {
*a = obj.Addr{}
var oary [10]int64
var nn *gc.Node
var reg *gc.Node
var n1 gc.Node
var reg1 *gc.Node
var o int
switch n.Op {
case gc.OLITERAL:
if !gc.Isconst(n, gc.CTINT) {
@ -1288,98 +1280,88 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) bool {
if v >= 32000 || v <= -32000 {
break
}
goto lit
switch as {
default:
return false
case arm.AADD,
arm.ASUB,
arm.AAND,
arm.AORR,
arm.AEOR,
arm.AMOVB,
arm.AMOVBS,
arm.AMOVBU,
arm.AMOVH,
arm.AMOVHS,
arm.AMOVHU,
arm.AMOVW:
break
}
cleani += 2
reg := &clean[cleani-1]
reg1 := &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
gc.Naddr(n, a, 1)
return true
case gc.ODOT,
gc.ODOTPTR:
cleani += 2
reg = &clean[cleani-1]
reg := &clean[cleani-1]
reg1 := &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
goto odot
var nn *gc.Node
var oary [10]int64
o := gc.Dotoffset(n, oary[:], &nn)
if nn == nil {
sudoclean()
return false
}
if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
// directly addressable set of DOTs
n1 := *nn
n1.Type = n.Type
n1.Xoffset += oary[0]
gc.Naddr(&n1, a, 1)
return true
}
regalloc(reg, gc.Types[gc.Tptr], nil)
n1 := *reg
n1.Op = gc.OINDREG
if oary[0] >= 0 {
agen(nn, reg)
n1.Xoffset = oary[0]
} else {
cgen(nn, reg)
gc.Cgen_checknil(reg)
n1.Xoffset = -(oary[0] + 1)
}
for i := 1; i < o; i++ {
if oary[i] >= 0 {
gc.Fatal("can't happen")
}
gins(arm.AMOVW, &n1, reg)
gc.Cgen_checknil(reg)
n1.Xoffset = -(oary[i] + 1)
}
a.Type = obj.TYPE_NONE
a.Name = obj.NAME_NONE
n1.Type = n.Type
gc.Naddr(&n1, a, 1)
return true
case gc.OINDEX:
return false
}
return false
lit:
switch as {
default:
return false
case arm.AADD,
arm.ASUB,
arm.AAND,
arm.AORR,
arm.AEOR,
arm.AMOVB,
arm.AMOVBS,
arm.AMOVBU,
arm.AMOVH,
arm.AMOVHS,
arm.AMOVHU,
arm.AMOVW:
break
}
cleani += 2
reg = &clean[cleani-1]
reg1 = &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
gc.Naddr(n, a, 1)
goto yes
odot:
o = gc.Dotoffset(n, oary[:], &nn)
if nn == nil {
goto no
}
if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
// directly addressable set of DOTs
n1 := *nn
n1.Type = n.Type
n1.Xoffset += oary[0]
gc.Naddr(&n1, a, 1)
goto yes
}
regalloc(reg, gc.Types[gc.Tptr], nil)
n1 = *reg
n1.Op = gc.OINDREG
if oary[0] >= 0 {
agen(nn, reg)
n1.Xoffset = oary[0]
} else {
cgen(nn, reg)
gc.Cgen_checknil(reg)
n1.Xoffset = -(oary[0] + 1)
}
for i := 1; i < o; i++ {
if oary[i] >= 0 {
gc.Fatal("can't happen")
}
gins(arm.AMOVW, &n1, reg)
gc.Cgen_checknil(reg)
n1.Xoffset = -(oary[i] + 1)
}
a.Type = obj.TYPE_NONE
a.Name = obj.NAME_NONE
n1.Type = n.Type
gc.Naddr(&n1, a, 1)
goto yes
yes:
return true
no:
sudoclean()
return false
}

View file

@ -257,9 +257,8 @@ func subprop(r0 *gc.Flow) bool {
if !regtyp(v2) {
return false
}
var r *gc.Flow
var info gc.ProgInfo
for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Uniqs(r) == nil {
break
}
@ -289,7 +288,32 @@ func subprop(r0 *gc.Flow) bool {
if p.To.Type == v1.Type {
if p.To.Reg == v1.Reg {
if p.Scond == arm.C_SCOND_NONE {
goto gotit
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
if p.From.Type == v2.Type {
fmt.Printf(" excise")
}
fmt.Printf("\n")
}
for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
p = r.Prog
copysub(&p.From, v1, v2, 1)
copysub1(p, v1, v2, 1)
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("%v\n", r.Prog)
}
}
t := int(int(v1.Reg))
v1.Reg = v2.Reg
v2.Reg = int16(t)
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
return true
}
}
}
@ -304,34 +328,6 @@ func subprop(r0 *gc.Flow) bool {
}
return false
gotit:
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
if p.From.Type == v2.Type {
fmt.Printf(" excise")
}
fmt.Printf("\n")
}
for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
p = r.Prog
copysub(&p.From, v1, v2, 1)
copysub1(p, v1, v2, 1)
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("%v\n", r.Prog)
}
}
t := int(int(v1.Reg))
v1.Reg = v2.Reg
v2.Reg = int16(t)
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
return true
}
/*

View file

@ -38,17 +38,14 @@ import (
import "cmd/internal/ld"
func needlib(name string) int {
var p string
var s *ld.LSym
if name[0] == '\x00' {
return 0
}
/* reuse hash code in symbol table */
p = fmt.Sprintf(".dynlib.%s", name)
p := fmt.Sprintf(".dynlib.%s", name)
s = ld.Linklookup(ld.Ctxt, p, 0)
s := ld.Linklookup(ld.Ctxt, p, 0)
if s.Type == 0 {
s.Type = 100 // avoid SDATA, etc.
@ -73,10 +70,7 @@ func adddynrela(rel *ld.LSym, s *ld.LSym, r *ld.Reloc) {
}
func adddynrel(s *ld.LSym, r *ld.Reloc) {
var targ *ld.LSym
var rel *ld.LSym
targ = r.Sym
targ := r.Sym
ld.Ctxt.Cursym = s
switch r.Type {
@ -203,7 +197,7 @@ func adddynrel(s *ld.LSym, r *ld.Reloc) {
}
if ld.Iself {
adddynsym(ld.Ctxt, targ)
rel = ld.Linklookup(ld.Ctxt, ".rel", 0)
rel := ld.Linklookup(ld.Ctxt, ".rel", 0)
ld.Addaddrplus(ld.Ctxt, rel, s, int64(r.Off))
ld.Adduint32(ld.Ctxt, rel, ld.ELF32_R_INFO(uint32(targ.Dynid), ld.R_ARM_GLOB_DAT)) // we need a nil + A dynmic reloc
r.Type = ld.R_CONST // write r->add during relocsym
@ -217,11 +211,9 @@ func adddynrel(s *ld.LSym, r *ld.Reloc) {
}
func elfreloc1(r *ld.Reloc, sectoff int64) int {
var elfsym int32
ld.Thearch.Lput(uint32(sectoff))
elfsym = r.Xsym.Elfsym
elfsym := r.Xsym.Elfsym
switch r.Type {
default:
return -1
@ -267,11 +259,8 @@ func elfreloc1(r *ld.Reloc, sectoff int64) int {
}
func elfsetupplt() {
var plt *ld.LSym
var got *ld.LSym
plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
got = ld.Linklookup(ld.Ctxt, ".got.plt", 0)
plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
got := ld.Linklookup(ld.Ctxt, ".got.plt", 0)
if plt.Size == 0 {
// str lr, [sp, #-4]!
ld.Adduint32(ld.Ctxt, plt, 0xe52de004)
@ -298,9 +287,8 @@ func elfsetupplt() {
func machoreloc1(r *ld.Reloc, sectoff int64) int {
var v uint32
var rs *ld.LSym
rs = r.Xsym
rs := r.Xsym
if rs.Type == ld.SHOSTOBJ || r.Type == ld.R_CALLARM {
if rs.Dynid < 0 {
@ -353,15 +341,13 @@ func machoreloc1(r *ld.Reloc, sectoff int64) int {
}
func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int {
var rs *ld.LSym
if ld.Linkmode == ld.LinkExternal {
switch r.Type {
case ld.R_CALLARM:
r.Done = 0
// set up addend for eventual relocation via outer symbol.
rs = r.Sym
rs := r.Sym
r.Xadd = r.Add
if r.Xadd&0x800000 != 0 {
@ -437,9 +423,7 @@ func archrelocvariant(r *ld.Reloc, s *ld.LSym, t int64) int64 {
}
func addpltreloc(ctxt *ld.Link, plt *ld.LSym, got *ld.LSym, sym *ld.LSym, typ int) *ld.Reloc {
var r *ld.Reloc
r = ld.Addrel(plt)
r := ld.Addrel(plt)
r.Sym = got
r.Off = int32(plt.Size)
r.Siz = 4
@ -454,10 +438,6 @@ func addpltreloc(ctxt *ld.Link, plt *ld.LSym, got *ld.LSym, sym *ld.LSym, typ in
}
func addpltsym(ctxt *ld.Link, s *ld.LSym) {
var plt *ld.LSym
var got *ld.LSym
var rel *ld.LSym
if s.Plt >= 0 {
return
}
@ -465,9 +445,9 @@ func addpltsym(ctxt *ld.Link, s *ld.LSym) {
adddynsym(ctxt, s)
if ld.Iself {
plt = ld.Linklookup(ctxt, ".plt", 0)
got = ld.Linklookup(ctxt, ".got.plt", 0)
rel = ld.Linklookup(ctxt, ".rel.plt", 0)
plt := ld.Linklookup(ctxt, ".plt", 0)
got := ld.Linklookup(ctxt, ".got.plt", 0)
rel := ld.Linklookup(ctxt, ".rel.plt", 0)
if plt.Size == 0 {
elfsetupplt()
}
@ -497,13 +477,11 @@ func addpltsym(ctxt *ld.Link, s *ld.LSym) {
}
func addgotsyminternal(ctxt *ld.Link, s *ld.LSym) {
var got *ld.LSym
if s.Got >= 0 {
return
}
got = ld.Linklookup(ctxt, ".got", 0)
got := ld.Linklookup(ctxt, ".got", 0)
s.Got = int32(got.Size)
ld.Addaddrplus(ctxt, got, s, 0)
@ -515,20 +493,17 @@ func addgotsyminternal(ctxt *ld.Link, s *ld.LSym) {
}
func addgotsym(ctxt *ld.Link, s *ld.LSym) {
var got *ld.LSym
var rel *ld.LSym
if s.Got >= 0 {
return
}
adddynsym(ctxt, s)
got = ld.Linklookup(ctxt, ".got", 0)
got := ld.Linklookup(ctxt, ".got", 0)
s.Got = int32(got.Size)
ld.Adduint32(ctxt, got, 0)
if ld.Iself {
rel = ld.Linklookup(ctxt, ".rel", 0)
rel := ld.Linklookup(ctxt, ".rel", 0)
ld.Addaddrplus(ctxt, rel, got, int64(s.Got))
ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_ARM_GLOB_DAT))
} else {
@ -537,10 +512,6 @@ func addgotsym(ctxt *ld.Link, s *ld.LSym) {
}
func adddynsym(ctxt *ld.Link, s *ld.LSym) {
var d *ld.LSym
var t int
var name string
if s.Dynid >= 0 {
return
}
@ -549,10 +520,10 @@ func adddynsym(ctxt *ld.Link, s *ld.LSym) {
s.Dynid = int32(ld.Nelfsym)
ld.Nelfsym++
d = ld.Linklookup(ctxt, ".dynsym", 0)
d := ld.Linklookup(ctxt, ".dynsym", 0)
/* name */
name = s.Extname
name := s.Extname
ld.Adduint32(ctxt, d, uint32(ld.Addstring(ld.Linklookup(ctxt, ".dynstr", 0), name)))
@ -567,7 +538,7 @@ func adddynsym(ctxt *ld.Link, s *ld.LSym) {
ld.Adduint32(ctxt, d, 0)
/* type */
t = ld.STB_GLOBAL << 4
t := ld.STB_GLOBAL << 4
if (s.Cgoexport&ld.CgoExportDynamic != 0) && s.Type&ld.SMASK == ld.STEXT {
t |= ld.STT_FUNC
@ -589,14 +560,12 @@ func adddynsym(ctxt *ld.Link, s *ld.LSym) {
}
func adddynlib(lib string) {
var s *ld.LSym
if needlib(lib) == 0 {
return
}
if ld.Iself {
s = ld.Linklookup(ld.Ctxt, ".dynstr", 0)
s := ld.Linklookup(ld.Ctxt, ".dynstr", 0)
if s.Size == 0 {
ld.Addstring(s, "")
}
@ -609,13 +578,6 @@ func adddynlib(lib string) {
}
func asmb() {
var symo uint32
var dwarfoff uint32
var machlink uint32
var sect *ld.Section
var sym *ld.LSym
var i int
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f asmb\n", obj.Cputime())
}
@ -625,7 +587,7 @@ func asmb() {
ld.Asmbelfsetup()
}
sect = ld.Segtext.Sect
sect := ld.Segtext.Sect
ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
ld.Codeblk(int64(sect.Vaddr), int64(sect.Length))
for sect = sect.Next; sect != nil; sect = sect.Next {
@ -651,14 +613,14 @@ func asmb() {
ld.Cseek(int64(ld.Segdata.Fileoff))
ld.Datblk(int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
machlink = 0
machlink := uint32(0)
if ld.HEADTYPE == ld.Hdarwin {
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f dwarf\n", obj.Cputime())
}
if ld.Debug['w'] == 0 {
dwarfoff = uint32(ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND)))
dwarfoff := uint32(ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND)))
ld.Cseek(int64(dwarfoff))
ld.Segdwarf.Fileoff = uint64(ld.Cpos())
@ -673,7 +635,7 @@ func asmb() {
ld.Symsize = 0
ld.Lcsize = 0
symo = 0
symo := uint32(0)
if ld.Debug['s'] == 0 {
// TODO: rationalize
if ld.Debug['v'] != 0 {
@ -719,10 +681,10 @@ func asmb() {
ld.Asmplan9sym()
ld.Cflush()
sym = ld.Linklookup(ld.Ctxt, "pclntab", 0)
sym := ld.Linklookup(ld.Ctxt, "pclntab", 0)
if sym != nil {
ld.Lcsize = int32(len(sym.P))
for i = 0; int32(i) < ld.Lcsize; i++ {
for i := 0; int32(i) < ld.Lcsize; i++ {
ld.Cput(uint8(sym.P[i]))
}

View file

@ -81,8 +81,6 @@ func linkarchinit() {
}
func archinit() {
var s *ld.LSym
// getgoextlinkenabled is based on GO_EXTLINK_ENABLED when
// Go was built; see ../../make.bash.
if ld.Linkmode == ld.LinkAuto && obj.Getgoextlinkenabled() == "0" {
@ -175,7 +173,7 @@ func archinit() {
}
// embed goarm to runtime.goarm
s = ld.Linklookup(ld.Ctxt, "runtime.goarm", 0)
s := ld.Linklookup(ld.Ctxt, "runtime.goarm", 0)
s.Type = ld.SRODATA
ld.Adduint8(ld.Ctxt, s, uint8(ld.Ctxt.Goarm))

View file

@ -29,13 +29,8 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.Dump("cgen-res", res)
}
var nl *gc.Node
var n1 gc.Node
var nr *gc.Node
var n2 gc.Node
var a int
if n == nil || n.Type == nil {
goto ret
return
}
if res == nil || res.Type == nil {
@ -60,7 +55,7 @@ func cgen(n *gc.Node, res *gc.Node) {
} else {
gc.Cgen_slice(n, res)
}
goto ret
return
case gc.OEFACE:
if res.Op != gc.ONAME || res.Addable == 0 {
@ -71,7 +66,7 @@ func cgen(n *gc.Node, res *gc.Node) {
} else {
gc.Cgen_eface(n, res)
}
goto ret
return
}
if n.Ullman >= gc.UINF {
@ -83,7 +78,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.Tempname(&n1, n.Type)
cgen(n, &n1)
cgen(&n1, res)
goto ret
return
}
}
@ -92,7 +87,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
sgen(n, res, n.Type.Width)
goto ret
return
}
if res.Addable == 0 {
@ -108,7 +103,7 @@ func cgen(n *gc.Node, res *gc.Node) {
cgen(&n1, res)
regfree(&n1)
goto ret
return
}
var f int
@ -118,7 +113,7 @@ func cgen(n *gc.Node, res *gc.Node) {
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
goto ret
return
}
f = 1 // gen thru register
@ -151,7 +146,7 @@ func cgen(n *gc.Node, res *gc.Node) {
fmt.Printf("%v [ignore previous line]\n", p1)
}
sudoclean()
goto ret
return
}
}
@ -160,7 +155,7 @@ func cgen(n *gc.Node, res *gc.Node) {
igen(res, &n1, nil)
cgen(n, &n1)
regfree(&n1)
goto ret
return
}
// update addressability for string, slice
@ -184,16 +179,16 @@ func cgen(n *gc.Node, res *gc.Node) {
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
goto ret
return
}
if n.Addable != 0 {
gmove(n, res)
goto ret
return
}
nl = n.Left
nr = n.Right
nl := n.Left
nr := n.Right
if nl != nil && nl.Ullman >= gc.UINF {
if nr != nil && nr.Ullman >= gc.UINF {
@ -203,7 +198,7 @@ func cgen(n *gc.Node, res *gc.Node) {
n2 := *n
n2.Left = &n1
cgen(&n2, res)
goto ret
return
}
}
@ -224,10 +219,11 @@ func cgen(n *gc.Node, res *gc.Node) {
}
sudoclean()
goto ret
return
}
}
var a int
switch n.Op {
default:
gc.Dump("cgen", n)
@ -252,11 +248,11 @@ func cgen(n *gc.Node, res *gc.Node) {
bgen(n, true, 0, p2)
gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
goto ret
return
case gc.OPLUS:
cgen(nl, res)
goto ret
return
// unary
case gc.OCOM:
@ -270,7 +266,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gins(a, &n2, &n1)
gmove(&n1, res)
regfree(&n1)
goto ret
return
case gc.OMINUS:
if gc.Isfloat[nl.Type.Etype] != 0 {
@ -280,8 +276,16 @@ func cgen(n *gc.Node, res *gc.Node) {
goto sbop
}
a = optoas(int(n.Op), nl.Type)
goto uop
a := optoas(int(n.Op), nl.Type)
// unary
var n1 gc.Node
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
gins(a, nil, &n1)
gmove(&n1, res)
regfree(&n1)
return
// symmetric binary
case gc.OAND,
@ -325,7 +329,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gmove(&n2, res)
regfree(&n2)
regfree(&n1)
goto ret
return
}
}
@ -517,7 +521,7 @@ func cgen(n *gc.Node, res *gc.Node) {
cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
}
goto ret
return
/*
* put simplest on right - we'll generate into left
@ -543,6 +547,8 @@ sbop: // symmetric binary
}
abop: // asymmetric binary
var n1 gc.Node
var n2 gc.Node
if nl.Ullman >= nr.Ullman {
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
@ -588,18 +594,7 @@ abop: // asymmetric binary
if n2.Op != gc.OLITERAL {
regfree(&n2)
}
goto ret
uop: // unary
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
gins(a, nil, &n1)
gmove(&n1, res)
regfree(&n1)
goto ret
ret:
return
}
/*
@ -878,7 +873,6 @@ func agen(n *gc.Node, res *gc.Node) {
n = n.Left
}
var nl *gc.Node
if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
@ -894,7 +888,7 @@ func agen(n *gc.Node, res *gc.Node) {
gins(x86.ALEAQ, &n1, &n2)
gmove(&n2, res)
regfree(&n2)
goto ret
return
}
if n.Addable != 0 {
@ -903,10 +897,10 @@ func agen(n *gc.Node, res *gc.Node) {
gins(x86.ALEAQ, n, &n1)
gmove(&n1, res)
regfree(&n1)
goto ret
return
}
nl = n.Left
nl := n.Left
switch n.Op {
default:
@ -981,8 +975,6 @@ func agen(n *gc.Node, res *gc.Node) {
ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
}
}
ret:
}
/*
@ -1108,27 +1100,21 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
gc.Genlist(n.Ninit)
}
var a int
var et int
var nl *gc.Node
var n1 gc.Node
var nr *gc.Node
var n2 gc.Node
if n.Type == nil {
gc.Convlit(&n, gc.Types[gc.TBOOL])
if n.Type == nil {
goto ret
return
}
}
et = int(n.Type.Etype)
et := int(n.Type.Etype)
if et != gc.TBOOL {
gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
gc.Patch(gins(obj.AEND, nil, nil), to)
goto ret
return
}
nr = nil
nr := (*gc.Node)(nil)
for n.Op == gc.OCONVNOP {
n = n.Left
@ -1137,6 +1123,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
}
}
var nl *gc.Node
switch n.Op {
default:
goto def
@ -1146,7 +1133,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(obj.AJMP, nil, likely), to)
}
goto ret
return
case gc.ONAME:
if n.Addable == 0 {
@ -1160,7 +1147,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
a = x86.AJEQ
}
gc.Patch(gc.Gbranch(a, n.Type, likely), to)
goto ret
return
case gc.OANDAND,
gc.OOROR:
@ -1178,7 +1165,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
bgen(n.Right, true_, likely, to)
}
goto ret
return
case gc.OEQ,
gc.ONE,
@ -1188,7 +1175,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
gc.OGE:
nr = n.Right
if nr == nil || nr.Type == nil {
goto ret
return
}
fallthrough
@ -1196,14 +1183,14 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
nl = n.Left
if nl == nil || nl.Type == nil {
goto ret
return
}
}
switch n.Op {
case gc.ONOT:
bgen(nl, !true_, likely, to)
goto ret
return
case gc.OEQ,
gc.ONE,
@ -1225,7 +1212,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
n.Ninit = ll
gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
gc.Patch(p2, gc.Pc)
goto ret
return
}
a = gc.Brcom(a)
@ -1352,22 +1339,22 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
regfree(&n2)
}
goto ret
return
def:
var n1 gc.Node
regalloc(&n1, n.Type, nil)
cgen(n, &n1)
var n2 gc.Node
gc.Nodconst(&n2, n.Type, 0)
gins(optoas(gc.OCMP, n.Type), &n1, &n2)
a = x86.AJNE
a := x86.AJNE
if !true_ {
a = x86.AJEQ
}
gc.Patch(gc.Gbranch(a, n.Type, likely), to)
regfree(&n1)
goto ret
ret:
return
}
/*

View file

@ -786,14 +786,6 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var n1 gc.Node
var n2 gc.Node
var n3 gc.Node
var cx gc.Node
var oldcx gc.Node
var rcx int
var tcount *gc.Type
a := optoas(op, nl.Type)
if nr.Op == gc.OLITERAL {
@ -813,7 +805,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
}
gmove(&n1, res)
regfree(&n1)
goto ret
return
}
if nl.Ullman >= gc.UINF {
@ -830,24 +822,27 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
nr = &n5
}
rcx = int(reg[x86.REG_CX])
rcx := int(reg[x86.REG_CX])
var n1 gc.Node
gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
// Allow either uint32 or uint64 as shift type,
// to avoid unnecessary conversion from uint32 to uint64
// just to do the comparison.
tcount = gc.Types[gc.Simtype[nr.Type.Etype]]
tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
if tcount.Etype < gc.TUINT32 {
tcount = gc.Types[gc.TUINT32]
}
regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
regalloc(&n3, tcount, &n1) // to clear high bits of CX
var n3 gc.Node
regalloc(&n3, tcount, &n1) // to clear high bits of CX
var cx gc.Node
gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)
oldcx = gc.Node{}
oldcx := gc.Node{}
if rcx > 0 && !gc.Samereg(&cx, res) {
regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
gmove(&cx, &oldcx)
@ -855,6 +850,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
cx.Type = tcount
var n2 gc.Node
if gc.Samereg(&cx, res) {
regalloc(&n2, nl.Type, nil)
} else {
@ -900,8 +896,6 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
regfree(&n1)
regfree(&n2)
ret:
}
/*

View file

@ -1349,12 +1349,6 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
*a = obj.Addr{}
var o int
var n1 gc.Node
var oary [10]int64
var nn *gc.Node
var reg *gc.Node
var reg1 *gc.Node
switch n.Op {
case gc.OLITERAL:
if !gc.Isconst(n, gc.CTINT) {
@ -1364,118 +1358,108 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
if v >= 32000 || v <= -32000 {
break
}
goto lit
switch as {
default:
return false
case x86.AADDB,
x86.AADDW,
x86.AADDL,
x86.AADDQ,
x86.ASUBB,
x86.ASUBW,
x86.ASUBL,
x86.ASUBQ,
x86.AANDB,
x86.AANDW,
x86.AANDL,
x86.AANDQ,
x86.AORB,
x86.AORW,
x86.AORL,
x86.AORQ,
x86.AXORB,
x86.AXORW,
x86.AXORL,
x86.AXORQ,
x86.AINCB,
x86.AINCW,
x86.AINCL,
x86.AINCQ,
x86.ADECB,
x86.ADECW,
x86.ADECL,
x86.ADECQ,
x86.AMOVB,
x86.AMOVW,
x86.AMOVL,
x86.AMOVQ:
break
}
cleani += 2
reg := &clean[cleani-1]
reg1 := &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
gc.Naddr(n, a, 1)
return true
case gc.ODOT,
gc.ODOTPTR:
cleani += 2
reg = &clean[cleani-1]
reg := &clean[cleani-1]
reg1 := &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
goto odot
var nn *gc.Node
var oary [10]int64
o := gc.Dotoffset(n, oary[:], &nn)
if nn == nil {
sudoclean()
return false
}
if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
// directly addressable set of DOTs
n1 := *nn
n1.Type = n.Type
n1.Xoffset += oary[0]
gc.Naddr(&n1, a, 1)
return true
}
regalloc(reg, gc.Types[gc.Tptr], nil)
n1 := *reg
n1.Op = gc.OINDREG
if oary[0] >= 0 {
agen(nn, reg)
n1.Xoffset = oary[0]
} else {
cgen(nn, reg)
gc.Cgen_checknil(reg)
n1.Xoffset = -(oary[0] + 1)
}
for i := 1; i < o; i++ {
if oary[i] >= 0 {
gc.Fatal("can't happen")
}
gins(movptr, &n1, reg)
gc.Cgen_checknil(reg)
n1.Xoffset = -(oary[i] + 1)
}
a.Type = obj.TYPE_NONE
a.Index = obj.TYPE_NONE
fixlargeoffset(&n1)
gc.Naddr(&n1, a, 1)
return true
case gc.OINDEX:
return false
}
return false
lit:
switch as {
default:
return false
case x86.AADDB,
x86.AADDW,
x86.AADDL,
x86.AADDQ,
x86.ASUBB,
x86.ASUBW,
x86.ASUBL,
x86.ASUBQ,
x86.AANDB,
x86.AANDW,
x86.AANDL,
x86.AANDQ,
x86.AORB,
x86.AORW,
x86.AORL,
x86.AORQ,
x86.AXORB,
x86.AXORW,
x86.AXORL,
x86.AXORQ,
x86.AINCB,
x86.AINCW,
x86.AINCL,
x86.AINCQ,
x86.ADECB,
x86.ADECW,
x86.ADECL,
x86.ADECQ,
x86.AMOVB,
x86.AMOVW,
x86.AMOVL,
x86.AMOVQ:
break
}
cleani += 2
reg = &clean[cleani-1]
reg1 = &clean[cleani-2]
reg.Op = gc.OEMPTY
reg1.Op = gc.OEMPTY
gc.Naddr(n, a, 1)
goto yes
odot:
o = gc.Dotoffset(n, oary[:], &nn)
if nn == nil {
goto no
}
if nn.Addable != 0 && o == 1 && oary[0] >= 0 {
// directly addressable set of DOTs
n1 := *nn
n1.Type = n.Type
n1.Xoffset += oary[0]
gc.Naddr(&n1, a, 1)
goto yes
}
regalloc(reg, gc.Types[gc.Tptr], nil)
n1 = *reg
n1.Op = gc.OINDREG
if oary[0] >= 0 {
agen(nn, reg)
n1.Xoffset = oary[0]
} else {
cgen(nn, reg)
gc.Cgen_checknil(reg)
n1.Xoffset = -(oary[0] + 1)
}
for i := 1; i < o; i++ {
if oary[i] >= 0 {
gc.Fatal("can't happen")
}
gins(movptr, &n1, reg)
gc.Cgen_checknil(reg)
n1.Xoffset = -(oary[i] + 1)
}
a.Type = obj.TYPE_NONE
a.Index = obj.TYPE_NONE
fixlargeoffset(&n1)
gc.Naddr(&n1, a, 1)
goto yes
yes:
return true
no:
sudoclean()
return false
}

View file

@ -563,8 +563,7 @@ func subprop(r0 *gc.Flow) bool {
}
var info gc.ProgInfo
var r *gc.Flow
for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\t? %v\n", r.Prog)
}
@ -595,7 +594,31 @@ func subprop(r0 *gc.Flow) bool {
}
if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
goto gotit
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
fmt.Printf(" excise")
}
fmt.Printf("\n")
}
for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
p = r.Prog
copysub(&p.From, v1, v2, 1)
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("%v\n", r.Prog)
}
}
t := int(int(v1.Reg))
v1.Reg = v2.Reg
v2.Reg = int16(t)
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
return true
}
if copyau(&p.From, v2) || copyau(&p.To, v2) {
@ -617,33 +640,6 @@ func subprop(r0 *gc.Flow) bool {
fmt.Printf("\tran off end; return 0\n")
}
return false
gotit:
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
fmt.Printf(" excise")
}
fmt.Printf("\n")
}
for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
p = r.Prog
copysub(&p.From, v1, v2, 1)
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("%v\n", r.Prog)
}
}
t := int(int(v1.Reg))
v1.Reg = v2.Reg
v2.Reg = int16(t)
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
return true
}
/*

View file

@ -44,17 +44,14 @@ func PADDR(x uint32) uint32 {
var zeroes string
func needlib(name string) int {
var p string
var s *ld.LSym
if name[0] == '\x00' {
return 0
}
/* reuse hash code in symbol table */
p = fmt.Sprintf(".elfload.%s", name)
p := fmt.Sprintf(".elfload.%s", name)
s = ld.Linklookup(ld.Ctxt, p, 0)
s := ld.Linklookup(ld.Ctxt, p, 0)
if s.Type == 0 {
s.Type = 100 // avoid SDATA, etc.
@ -74,11 +71,7 @@ func adddynrela(rela *ld.LSym, s *ld.LSym, r *ld.Reloc) {
}
func adddynrel(s *ld.LSym, r *ld.Reloc) {
var targ *ld.LSym
var rela *ld.LSym
var got *ld.LSym
targ = r.Sym
targ := r.Sym
ld.Ctxt.Cursym = s
switch r.Type {
@ -233,7 +226,7 @@ func adddynrel(s *ld.LSym, r *ld.Reloc) {
}
if ld.Iself {
adddynsym(ld.Ctxt, targ)
rela = ld.Linklookup(ld.Ctxt, ".rela", 0)
rela := ld.Linklookup(ld.Ctxt, ".rela", 0)
ld.Addaddrplus(ld.Ctxt, rela, s, int64(r.Off))
if r.Siz == 8 {
ld.Adduint64(ld.Ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_X86_64_64))
@ -258,7 +251,7 @@ func adddynrel(s *ld.LSym, r *ld.Reloc) {
// but we only need to support cgo and that's all it needs.
adddynsym(ld.Ctxt, targ)
got = ld.Linklookup(ld.Ctxt, ".got", 0)
got := ld.Linklookup(ld.Ctxt, ".got", 0)
s.Type = got.Type | ld.SSUB
s.Outer = got
s.Sub = got.Sub
@ -276,11 +269,9 @@ func adddynrel(s *ld.LSym, r *ld.Reloc) {
}
func elfreloc1(r *ld.Reloc, sectoff int64) int {
var elfsym int32
ld.Thearch.Vput(uint64(sectoff))
elfsym = r.Xsym.Elfsym
elfsym := r.Xsym.Elfsym
switch r.Type {
default:
return -1
@ -337,9 +328,8 @@ func elfreloc1(r *ld.Reloc, sectoff int64) int {
func machoreloc1(r *ld.Reloc, sectoff int64) int {
var v uint32
var rs *ld.LSym
rs = r.Xsym
rs := r.Xsym
if rs.Type == ld.SHOSTOBJ || r.Type == ld.R_PCREL {
if rs.Dynid < 0 {
@ -406,11 +396,8 @@ func archrelocvariant(r *ld.Reloc, s *ld.LSym, t int64) int64 {
}
func elfsetupplt() {
var plt *ld.LSym
var got *ld.LSym
plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
got = ld.Linklookup(ld.Ctxt, ".got.plt", 0)
plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
got := ld.Linklookup(ld.Ctxt, ".got.plt", 0)
if plt.Size == 0 {
// pushq got+8(IP)
ld.Adduint8(ld.Ctxt, plt, 0xff)
@ -443,13 +430,9 @@ func addpltsym(s *ld.LSym) {
adddynsym(ld.Ctxt, s)
if ld.Iself {
var plt *ld.LSym
var got *ld.LSym
var rela *ld.LSym
plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
got = ld.Linklookup(ld.Ctxt, ".got.plt", 0)
rela = ld.Linklookup(ld.Ctxt, ".rela.plt", 0)
plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
got := ld.Linklookup(ld.Ctxt, ".got.plt", 0)
rela := ld.Linklookup(ld.Ctxt, ".rela.plt", 0)
if plt.Size == 0 {
elfsetupplt()
}
@ -491,10 +474,8 @@ func addpltsym(s *ld.LSym) {
// http://networkpx.blogspot.com/2009/09/about-lcdyldinfoonly-command.html
// has details about what we're avoiding.
var plt *ld.LSym
addgotsym(s)
plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
ld.Adduint32(ld.Ctxt, ld.Linklookup(ld.Ctxt, ".linkedit.plt", 0), uint32(s.Dynid))
@ -510,20 +491,17 @@ func addpltsym(s *ld.LSym) {
}
func addgotsym(s *ld.LSym) {
var got *ld.LSym
var rela *ld.LSym
if s.Got >= 0 {
return
}
adddynsym(ld.Ctxt, s)
got = ld.Linklookup(ld.Ctxt, ".got", 0)
got := ld.Linklookup(ld.Ctxt, ".got", 0)
s.Got = int32(got.Size)
ld.Adduint64(ld.Ctxt, got, 0)
if ld.Iself {
rela = ld.Linklookup(ld.Ctxt, ".rela", 0)
rela := ld.Linklookup(ld.Ctxt, ".rela", 0)
ld.Addaddrplus(ld.Ctxt, rela, got, int64(s.Got))
ld.Adduint64(ld.Ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_X86_64_GLOB_DAT))
ld.Adduint64(ld.Ctxt, rela, 0)
@ -535,10 +513,6 @@ func addgotsym(s *ld.LSym) {
}
func adddynsym(ctxt *ld.Link, s *ld.LSym) {
var d *ld.LSym
var t int
var name string
if s.Dynid >= 0 {
return
}
@ -547,13 +521,13 @@ func adddynsym(ctxt *ld.Link, s *ld.LSym) {
s.Dynid = int32(ld.Nelfsym)
ld.Nelfsym++
d = ld.Linklookup(ctxt, ".dynsym", 0)
d := ld.Linklookup(ctxt, ".dynsym", 0)
name = s.Extname
name := s.Extname
ld.Adduint32(ctxt, d, uint32(ld.Addstring(ld.Linklookup(ctxt, ".dynstr", 0), name)))
/* type */
t = ld.STB_GLOBAL << 4
t := ld.STB_GLOBAL << 4
if s.Cgoexport != 0 && s.Type&ld.SMASK == ld.STEXT {
t |= ld.STT_FUNC
@ -595,14 +569,12 @@ func adddynsym(ctxt *ld.Link, s *ld.LSym) {
}
func adddynlib(lib string) {
var s *ld.LSym
if needlib(lib) == 0 {
return
}
if ld.Iself {
s = ld.Linklookup(ld.Ctxt, ".dynstr", 0)
s := ld.Linklookup(ld.Ctxt, ".dynstr", 0)
if s.Size == 0 {
ld.Addstring(s, "")
}
@ -615,15 +587,6 @@ func adddynlib(lib string) {
}
func asmb() {
var magic int32
var i int
var vl int64
var symo int64
var dwarfoff int64
var machlink int64
var sect *ld.Section
var sym *ld.LSym
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f asmb\n", obj.Cputime())
}
@ -638,7 +601,7 @@ func asmb() {
ld.Asmbelfsetup()
}
sect = ld.Segtext.Sect
sect := ld.Segtext.Sect
ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
ld.Codeblk(int64(sect.Vaddr), int64(sect.Length))
for sect = sect.Next; sect != nil; sect = sect.Next {
@ -664,13 +627,13 @@ func asmb() {
ld.Cseek(int64(ld.Segdata.Fileoff))
ld.Datblk(int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
machlink = 0
machlink := int64(0)
if ld.HEADTYPE == ld.Hdarwin {
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f dwarf\n", obj.Cputime())
}
dwarfoff = ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND))
dwarfoff := ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND))
ld.Cseek(dwarfoff)
ld.Segdwarf.Fileoff = uint64(ld.Cpos())
@ -708,7 +671,7 @@ func asmb() {
ld.Symsize = 0
ld.Spsize = 0
ld.Lcsize = 0
symo = 0
symo := int64(0)
if ld.Debug['s'] == 0 {
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f sym\n", obj.Cputime())
@ -763,10 +726,10 @@ func asmb() {
ld.Asmplan9sym()
ld.Cflush()
sym = ld.Linklookup(ld.Ctxt, "pclntab", 0)
sym := ld.Linklookup(ld.Ctxt, "pclntab", 0)
if sym != nil {
ld.Lcsize = int32(len(sym.P))
for i = 0; int32(i) < ld.Lcsize; i++ {
for i := 0; int32(i) < ld.Lcsize; i++ {
ld.Cput(uint8(sym.P[i]))
}
@ -795,7 +758,7 @@ func asmb() {
switch ld.HEADTYPE {
default:
case ld.Hplan9: /* plan9 */
magic = 4*26*26 + 7
magic := int32(4*26*26 + 7)
magic |= 0x00008000 /* fat header */
ld.Lputb(uint32(magic)) /* magic */
@ -803,7 +766,7 @@ func asmb() {
ld.Lputb(uint32(ld.Segdata.Filelen))
ld.Lputb(uint32(ld.Segdata.Length - ld.Segdata.Filelen))
ld.Lputb(uint32(ld.Symsize)) /* nsyms */
vl = ld.Entryvalue()
vl := ld.Entryvalue()
ld.Lputb(PADDR(uint32(vl))) /* va of entry */
ld.Lputb(uint32(ld.Spsize)) /* sp offsets */
ld.Lputb(uint32(ld.Lcsize)) /* line offsets */

View file

@ -254,8 +254,15 @@ func cgen(n *gc.Node, res *gc.Node) {
case gc.OMINUS,
gc.OCOM:
a = optoas(int(n.Op), nl.Type)
goto uop
a := optoas(int(n.Op), nl.Type)
// unary
var n1 gc.Node
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
gins(a, nil, &n1)
gmove(&n1, res)
return
// symmetric binary
case gc.OAND,
@ -270,7 +277,13 @@ func cgen(n *gc.Node, res *gc.Node) {
break
}
goto sbop
// symmetric binary
if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
r := nl
nl = nr
nr = r
}
goto abop
// asymmetric binary
case gc.OSUB:
@ -443,13 +456,6 @@ func cgen(n *gc.Node, res *gc.Node) {
return
sbop: // symmetric binary
if nl.Ullman < nr.Ullman || nl.Op == gc.OLITERAL {
r := nl
nl = nr
nr = r
}
abop: // asymmetric binary
if gc.Smallintconst(nr) {
var n1 gc.Node
@ -488,15 +494,6 @@ abop: // asymmetric binary
}
return
uop: // unary
var n1 gc.Node
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
gins(a, nil, &n1)
gmove(&n1, res)
return
}
/*

View file

@ -199,7 +199,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
gins(i386.AMOVL, ncon(0), &lo2)
gins(i386.AMOVL, ncon(0), &hi2)
splitclean()
goto out
return
}
if v >= 32 {
@ -215,7 +215,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
gins(i386.AMOVL, ncon(0), &lo2)
splitclean()
splitclean()
goto out
return
}
// general shift
@ -296,7 +296,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
}
splitclean()
goto out
return
}
if v >= 32 {
@ -316,7 +316,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
}
splitclean()
splitclean()
goto out
return
}
// general shift
@ -482,7 +482,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
splitclean()
splitclean()
goto out
return
}
gins(i386.AMOVL, &lo1, &ax)
@ -500,8 +500,6 @@ func cgen64(n *gc.Node, res *gc.Node) {
gins(i386.AMOVL, &ax, &lo1)
gins(i386.AMOVL, &dx, &hi1)
splitclean()
out:
}
/*

View file

@ -934,7 +934,27 @@ func cgen_float387(n *gc.Node, res *gc.Node) {
gc.Nodreg(&f0, nl.Type, i386.REG_F0)
gc.Nodreg(&f1, n.Type, i386.REG_F0+1)
if nr != nil {
goto flt2
// binary
if nl.Ullman >= nr.Ullman {
cgen(nl, &f0)
if nr.Addable != 0 {
gins(foptoas(int(n.Op), n.Type, 0), nr, &f0)
} else {
cgen(nr, &f0)
gins(foptoas(int(n.Op), n.Type, Fpop), &f0, &f1)
}
} else {
cgen(nr, &f0)
if nl.Addable != 0 {
gins(foptoas(int(n.Op), n.Type, Frev), nl, &f0)
} else {
cgen(nl, &f0)
gins(foptoas(int(n.Op), n.Type, Frev|Fpop), &f0, &f1)
}
}
gmove(&f0, res)
return
}
// unary
@ -945,28 +965,6 @@ func cgen_float387(n *gc.Node, res *gc.Node) {
}
gmove(&f0, res)
return
flt2: // binary
if nl.Ullman >= nr.Ullman {
cgen(nl, &f0)
if nr.Addable != 0 {
gins(foptoas(int(n.Op), n.Type, 0), nr, &f0)
} else {
cgen(nr, &f0)
gins(foptoas(int(n.Op), n.Type, Fpop), &f0, &f1)
}
} else {
cgen(nr, &f0)
if nl.Addable != 0 {
gins(foptoas(int(n.Op), n.Type, Frev), nl, &f0)
} else {
cgen(nl, &f0)
gins(foptoas(int(n.Op), n.Type, Frev|Fpop), &f0, &f1)
}
}
gmove(&f0, res)
return
}
func cgen_floatsse(n *gc.Node, res *gc.Node) {
@ -1064,7 +1062,47 @@ func bgen_float(n *gc.Node, true_ int, likely int, to *obj.Prog) {
var n2 gc.Node
var ax gc.Node
if gc.Use_sse != 0 {
goto sse
if nl.Addable == 0 {
var n1 gc.Node
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
}
if nr.Addable == 0 {
var tmp gc.Node
gc.Tempname(&tmp, nr.Type)
cgen(nr, &tmp)
nr = &tmp
}
var n2 gc.Node
regalloc(&n2, nr.Type, nil)
gmove(nr, &n2)
nr = &n2
if nl.Op != gc.OREGISTER {
var n3 gc.Node
regalloc(&n3, nl.Type, nil)
gmove(nl, &n3)
nl = &n3
}
if a == gc.OGE || a == gc.OGT {
// only < and <= work right with NaN; reverse if needed
r := nr
nr = nl
nl = r
a = gc.Brrev(a)
}
gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr)
if nl.Op == gc.OREGISTER {
regfree(nl)
}
regfree(nr)
goto ret
} else {
goto x87
}
@ -1118,47 +1156,6 @@ x87:
goto ret
sse:
if nl.Addable == 0 {
var n1 gc.Node
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
}
if nr.Addable == 0 {
var tmp gc.Node
gc.Tempname(&tmp, nr.Type)
cgen(nr, &tmp)
nr = &tmp
}
regalloc(&n2, nr.Type, nil)
gmove(nr, &n2)
nr = &n2
if nl.Op != gc.OREGISTER {
var n3 gc.Node
regalloc(&n3, nl.Type, nil)
gmove(nl, &n3)
nl = &n3
}
if a == gc.OGE || a == gc.OGT {
// only < and <= work right with NaN; reverse if needed
r := nr
nr = nl
nl = r
a = gc.Brrev(a)
}
gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr)
if nl.Op == gc.OREGISTER {
regfree(nl)
}
regfree(nr)
ret:
if a == gc.OEQ {
// neither NE nor P

View file

@ -403,7 +403,48 @@ func foptoas(op int, t *gc.Type, flg int) int {
et := int(gc.Simtype[t.Etype])
if gc.Use_sse != 0 {
goto sse
switch uint32(op)<<16 | uint32(et) {
default:
gc.Fatal("foptoas-sse: no entry %v-%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
case gc.OCMP<<16 | gc.TFLOAT32:
a = i386.AUCOMISS
case gc.OCMP<<16 | gc.TFLOAT64:
a = i386.AUCOMISD
case gc.OAS<<16 | gc.TFLOAT32:
a = i386.AMOVSS
case gc.OAS<<16 | gc.TFLOAT64:
a = i386.AMOVSD
case gc.OADD<<16 | gc.TFLOAT32:
a = i386.AADDSS
case gc.OADD<<16 | gc.TFLOAT64:
a = i386.AADDSD
case gc.OSUB<<16 | gc.TFLOAT32:
a = i386.ASUBSS
case gc.OSUB<<16 | gc.TFLOAT64:
a = i386.ASUBSD
case gc.OMUL<<16 | gc.TFLOAT32:
a = i386.AMULSS
case gc.OMUL<<16 | gc.TFLOAT64:
a = i386.AMULSD
case gc.ODIV<<16 | gc.TFLOAT32:
a = i386.ADIVSS
case gc.ODIV<<16 | gc.TFLOAT64:
a = i386.ADIVSD
}
return a
}
// If we need Fpop, it means we're working on
@ -499,50 +540,6 @@ func foptoas(op int, t *gc.Type, flg int) int {
gc.Fatal("foptoas %v %v %#x", gc.Oconv(int(op), 0), gc.Tconv(t, 0), flg)
return 0
sse:
switch uint32(op)<<16 | uint32(et) {
default:
gc.Fatal("foptoas-sse: no entry %v-%v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
case gc.OCMP<<16 | gc.TFLOAT32:
a = i386.AUCOMISS
case gc.OCMP<<16 | gc.TFLOAT64:
a = i386.AUCOMISD
case gc.OAS<<16 | gc.TFLOAT32:
a = i386.AMOVSS
case gc.OAS<<16 | gc.TFLOAT64:
a = i386.AMOVSD
case gc.OADD<<16 | gc.TFLOAT32:
a = i386.AADDSS
case gc.OADD<<16 | gc.TFLOAT64:
a = i386.AADDSD
case gc.OSUB<<16 | gc.TFLOAT32:
a = i386.ASUBSS
case gc.OSUB<<16 | gc.TFLOAT64:
a = i386.ASUBSD
case gc.OMUL<<16 | gc.TFLOAT32:
a = i386.AMULSS
case gc.OMUL<<16 | gc.TFLOAT64:
a = i386.AMULSD
case gc.ODIV<<16 | gc.TFLOAT32:
a = i386.ADIVSS
case gc.ODIV<<16 | gc.TFLOAT64:
a = i386.ADIVSD
}
return a
}
var resvd = []int{
@ -928,7 +925,9 @@ func gmove(f *gc.Node, t *gc.Node) {
switch uint32(ft)<<16 | uint32(tt) {
default:
goto fatal
// should not happen
gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
return
/*
* integer copy and truncate
@ -1164,10 +1163,6 @@ hard:
gmove(&r1, t)
regfree(&r1)
return
// should not happen
fatal:
gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
}
func floatmove(f *gc.Node, t *gc.Node) {

View file

@ -371,8 +371,7 @@ func subprop(r0 *gc.Flow) bool {
return false
}
var info gc.ProgInfo
var r *gc.Flow
for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\t? %v\n", r.Prog)
}
@ -393,7 +392,31 @@ func subprop(r0 *gc.Flow) bool {
}
if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
goto gotit
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
fmt.Printf(" excise")
}
fmt.Printf("\n")
}
for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
p = r.Prog
copysub(&p.From, v1, v2, 1)
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("%v\n", r.Prog)
}
}
t := int(v1.Reg)
v1.Reg = v2.Reg
v2.Reg = int16(t)
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
return true
}
if copyau(&p.From, v2) || copyau(&p.To, v2) {
@ -405,33 +428,6 @@ func subprop(r0 *gc.Flow) bool {
}
return false
gotit:
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
if p.From.Type == v2.Type && p.From.Reg == v2.Reg {
fmt.Printf(" excise")
}
fmt.Printf("\n")
}
for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
p = r.Prog
copysub(&p.From, v1, v2, 1)
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("%v\n", r.Prog)
}
}
t := int(v1.Reg)
v1.Reg = v2.Reg
v2.Reg = int16(t)
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
return true
}
/*

View file

@ -38,17 +38,14 @@ import (
import "cmd/internal/ld"
func needlib(name string) int {
var p string
var s *ld.LSym
if name[0] == '\x00' {
return 0
}
/* reuse hash code in symbol table */
p = fmt.Sprintf(".dynlib.%s", name)
p := fmt.Sprintf(".dynlib.%s", name)
s = ld.Linklookup(ld.Ctxt, p, 0)
s := ld.Linklookup(ld.Ctxt, p, 0)
if s.Type == 0 {
s.Type = 100 // avoid SDATA, etc.
@ -66,11 +63,7 @@ func adddynrela(rela *ld.LSym, s *ld.LSym, r *ld.Reloc) {
}
func adddynrel(s *ld.LSym, r *ld.Reloc) {
var targ *ld.LSym
var rel *ld.LSym
var got *ld.LSym
targ = r.Sym
targ := r.Sym
ld.Ctxt.Cursym = s
switch r.Type {
@ -210,7 +203,7 @@ func adddynrel(s *ld.LSym, r *ld.Reloc) {
}
if ld.Iself {
adddynsym(ld.Ctxt, targ)
rel = ld.Linklookup(ld.Ctxt, ".rel", 0)
rel := ld.Linklookup(ld.Ctxt, ".rel", 0)
ld.Addaddrplus(ld.Ctxt, rel, s, int64(r.Off))
ld.Adduint32(ld.Ctxt, rel, ld.ELF32_R_INFO(uint32(targ.Dynid), ld.R_386_32))
r.Type = ld.R_CONST // write r->add during relocsym
@ -231,7 +224,7 @@ func adddynrel(s *ld.LSym, r *ld.Reloc) {
// but we only need to support cgo and that's all it needs.
adddynsym(ld.Ctxt, targ)
got = ld.Linklookup(ld.Ctxt, ".got", 0)
got := ld.Linklookup(ld.Ctxt, ".got", 0)
s.Type = got.Type | ld.SSUB
s.Outer = got
s.Sub = got.Sub
@ -249,11 +242,9 @@ func adddynrel(s *ld.LSym, r *ld.Reloc) {
}
func elfreloc1(r *ld.Reloc, sectoff int64) int {
var elfsym int32
ld.Thearch.Lput(uint32(sectoff))
elfsym = r.Xsym.Elfsym
elfsym := r.Xsym.Elfsym
switch r.Type {
default:
return -1
@ -287,9 +278,8 @@ func elfreloc1(r *ld.Reloc, sectoff int64) int {
func machoreloc1(r *ld.Reloc, sectoff int64) int {
var v uint32
var rs *ld.LSym
rs = r.Xsym
rs := r.Xsym
if rs.Type == ld.SHOSTOBJ {
if rs.Dynid < 0 {
@ -365,11 +355,8 @@ func archrelocvariant(r *ld.Reloc, s *ld.LSym, t int64) int64 {
}
func elfsetupplt() {
var plt *ld.LSym
var got *ld.LSym
plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
got = ld.Linklookup(ld.Ctxt, ".got.plt", 0)
plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
got := ld.Linklookup(ld.Ctxt, ".got.plt", 0)
if plt.Size == 0 {
// pushl got+4
ld.Adduint8(ld.Ctxt, plt, 0xff)
@ -395,10 +382,6 @@ func elfsetupplt() {
}
func addpltsym(ctxt *ld.Link, s *ld.LSym) {
var plt *ld.LSym
var got *ld.LSym
var rel *ld.LSym
if s.Plt >= 0 {
return
}
@ -406,9 +389,9 @@ func addpltsym(ctxt *ld.Link, s *ld.LSym) {
adddynsym(ctxt, s)
if ld.Iself {
plt = ld.Linklookup(ctxt, ".plt", 0)
got = ld.Linklookup(ctxt, ".got.plt", 0)
rel = ld.Linklookup(ctxt, ".rel.plt", 0)
plt := ld.Linklookup(ctxt, ".plt", 0)
got := ld.Linklookup(ctxt, ".got.plt", 0)
rel := ld.Linklookup(ctxt, ".rel.plt", 0)
if plt.Size == 0 {
elfsetupplt()
}
@ -441,9 +424,7 @@ func addpltsym(ctxt *ld.Link, s *ld.LSym) {
} else if ld.HEADTYPE == ld.Hdarwin {
// Same laziness as in 6l.
var plt *ld.LSym
plt = ld.Linklookup(ctxt, ".plt", 0)
plt := ld.Linklookup(ctxt, ".plt", 0)
addgotsym(ctxt, s)
@ -461,20 +442,17 @@ func addpltsym(ctxt *ld.Link, s *ld.LSym) {
}
func addgotsym(ctxt *ld.Link, s *ld.LSym) {
var got *ld.LSym
var rel *ld.LSym
if s.Got >= 0 {
return
}
adddynsym(ctxt, s)
got = ld.Linklookup(ctxt, ".got", 0)
got := ld.Linklookup(ctxt, ".got", 0)
s.Got = int32(got.Size)
ld.Adduint32(ctxt, got, 0)
if ld.Iself {
rel = ld.Linklookup(ctxt, ".rel", 0)
rel := ld.Linklookup(ctxt, ".rel", 0)
ld.Addaddrplus(ctxt, rel, got, int64(s.Got))
ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_386_GLOB_DAT))
} else if ld.HEADTYPE == ld.Hdarwin {
@ -485,10 +463,6 @@ func addgotsym(ctxt *ld.Link, s *ld.LSym) {
}
func adddynsym(ctxt *ld.Link, s *ld.LSym) {
var d *ld.LSym
var t int
var name string
if s.Dynid >= 0 {
return
}
@ -497,10 +471,10 @@ func adddynsym(ctxt *ld.Link, s *ld.LSym) {
s.Dynid = int32(ld.Nelfsym)
ld.Nelfsym++
d = ld.Linklookup(ctxt, ".dynsym", 0)
d := ld.Linklookup(ctxt, ".dynsym", 0)
/* name */
name = s.Extname
name := s.Extname
ld.Adduint32(ctxt, d, uint32(ld.Addstring(ld.Linklookup(ctxt, ".dynstr", 0), name)))
@ -515,7 +489,7 @@ func adddynsym(ctxt *ld.Link, s *ld.LSym) {
ld.Adduint32(ctxt, d, 0)
/* type */
t = ld.STB_GLOBAL << 4
t := ld.STB_GLOBAL << 4
if s.Cgoexport != 0 && s.Type&ld.SMASK == ld.STEXT {
t |= ld.STT_FUNC
@ -541,14 +515,12 @@ func adddynsym(ctxt *ld.Link, s *ld.LSym) {
}
func adddynlib(lib string) {
var s *ld.LSym
if needlib(lib) == 0 {
return
}
if ld.Iself {
s = ld.Linklookup(ld.Ctxt, ".dynstr", 0)
s := ld.Linklookup(ld.Ctxt, ".dynstr", 0)
if s.Size == 0 {
ld.Addstring(s, "")
}
@ -561,14 +533,6 @@ func adddynlib(lib string) {
}
func asmb() {
var magic int32
var symo uint32
var dwarfoff uint32
var machlink uint32
var sect *ld.Section
var sym *ld.LSym
var i int
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f asmb\n", obj.Cputime())
}
@ -578,7 +542,7 @@ func asmb() {
ld.Asmbelfsetup()
}
sect = ld.Segtext.Sect
sect := ld.Segtext.Sect
ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
ld.Codeblk(int64(sect.Vaddr), int64(sect.Length))
for sect = sect.Next; sect != nil; sect = sect.Next {
@ -604,13 +568,13 @@ func asmb() {
ld.Cseek(int64(ld.Segdata.Fileoff))
ld.Datblk(int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
machlink = 0
machlink := uint32(0)
if ld.HEADTYPE == ld.Hdarwin {
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f dwarf\n", obj.Cputime())
}
dwarfoff = uint32(ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND)))
dwarfoff := uint32(ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), int64(ld.INITRND)))
ld.Cseek(int64(dwarfoff))
ld.Segdwarf.Fileoff = uint64(ld.Cpos())
@ -623,7 +587,7 @@ func asmb() {
ld.Symsize = 0
ld.Spsize = 0
ld.Lcsize = 0
symo = 0
symo := uint32(0)
if ld.Debug['s'] == 0 {
// TODO: rationalize
if ld.Debug['v'] != 0 {
@ -673,10 +637,10 @@ func asmb() {
ld.Asmplan9sym()
ld.Cflush()
sym = ld.Linklookup(ld.Ctxt, "pclntab", 0)
sym := ld.Linklookup(ld.Ctxt, "pclntab", 0)
if sym != nil {
ld.Lcsize = int32(len(sym.P))
for i = 0; int32(i) < ld.Lcsize; i++ {
for i := 0; int32(i) < ld.Lcsize; i++ {
ld.Cput(uint8(sym.P[i]))
}
@ -704,7 +668,7 @@ func asmb() {
switch ld.HEADTYPE {
default:
case ld.Hplan9: /* plan9 */
magic = 4*11*11 + 7
magic := int32(4*11*11 + 7)
ld.Lputb(uint32(magic)) /* magic */
ld.Lputb(uint32(ld.Segtext.Filelen)) /* sizes */

View file

@ -26,13 +26,8 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.Dump("cgen-res", res)
}
var a int
var nr *gc.Node
var nl *gc.Node
var n1 gc.Node
var n2 gc.Node
if n == nil || n.Type == nil {
goto ret
return
}
if res == nil || res.Type == nil {
@ -57,7 +52,7 @@ func cgen(n *gc.Node, res *gc.Node) {
} else {
gc.Cgen_slice(n, res)
}
goto ret
return
case gc.OEFACE:
if res.Op != gc.ONAME || res.Addable == 0 {
@ -68,7 +63,7 @@ func cgen(n *gc.Node, res *gc.Node) {
} else {
gc.Cgen_eface(n, res)
}
goto ret
return
}
if n.Ullman >= gc.UINF {
@ -80,7 +75,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.Tempname(&n1, n.Type)
cgen(n, &n1)
cgen(&n1, res)
goto ret
return
}
}
@ -89,7 +84,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
sgen(n, res, n.Type.Width)
goto ret
return
}
if res.Addable == 0 {
@ -105,7 +100,7 @@ func cgen(n *gc.Node, res *gc.Node) {
cgen(&n1, res)
regfree(&n1)
goto ret
return
}
var f int
@ -115,7 +110,7 @@ func cgen(n *gc.Node, res *gc.Node) {
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
goto ret
return
}
f = 1 // gen thru register
@ -148,7 +143,7 @@ func cgen(n *gc.Node, res *gc.Node) {
fmt.Printf("%v [ignore previous line]\n", p1)
}
sudoclean()
goto ret
return
}
}
@ -157,7 +152,7 @@ func cgen(n *gc.Node, res *gc.Node) {
igen(res, &n1, nil)
cgen(n, &n1)
regfree(&n1)
goto ret
return
}
// update addressability for string, slice
@ -181,7 +176,7 @@ func cgen(n *gc.Node, res *gc.Node) {
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
goto ret
return
}
// if both are addressable, move
@ -196,11 +191,11 @@ func cgen(n *gc.Node, res *gc.Node) {
regfree(&n1)
}
goto ret
return
}
nl = n.Left
nr = n.Right
nl := n.Left
nr := n.Right
if nl != nil && nl.Ullman >= gc.UINF {
if nr != nil && nr.Ullman >= gc.UINF {
@ -210,7 +205,7 @@ func cgen(n *gc.Node, res *gc.Node) {
n2 := *n
n2.Left = &n1
cgen(&n2, res)
goto ret
return
}
}
@ -231,7 +226,7 @@ func cgen(n *gc.Node, res *gc.Node) {
}
sudoclean()
goto ret
return
}
}
@ -239,6 +234,7 @@ func cgen(n *gc.Node, res *gc.Node) {
// OGE, OLE, and ONE ourselves.
// if(nl != N && isfloat[n->type->etype] && isfloat[nl->type->etype]) goto flt;
var a int
switch n.Op {
default:
gc.Dump("cgen", n)
@ -263,11 +259,11 @@ func cgen(n *gc.Node, res *gc.Node) {
bgen(n, true, 0, p2)
gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
goto ret
return
case gc.OPLUS:
cgen(nl, res)
goto ret
return
// unary
case gc.OCOM:
@ -281,7 +277,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gins(a, &n2, &n1)
gmove(&n1, res)
regfree(&n1)
goto ret
return
case gc.OMINUS:
if gc.Isfloat[nl.Type.Etype] != 0 {
@ -291,8 +287,16 @@ func cgen(n *gc.Node, res *gc.Node) {
goto sbop
}
a = optoas(int(n.Op), nl.Type)
goto uop
a := optoas(int(n.Op), nl.Type)
// unary
var n1 gc.Node
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
gins(a, nil, &n1)
gmove(&n1, res)
regfree(&n1)
return
// symmetric binary
case gc.OAND,
@ -331,7 +335,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gmove(&n2, res)
regfree(&n2)
regfree(&n1)
goto ret
return
}
}
@ -523,7 +527,7 @@ func cgen(n *gc.Node, res *gc.Node) {
cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
}
goto ret
return
/*
* put simplest on right - we'll generate into left
@ -549,6 +553,8 @@ sbop: // symmetric binary
}
abop: // asymmetric binary
var n1 gc.Node
var n2 gc.Node
if nl.Ullman >= nr.Ullman {
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
@ -610,18 +616,7 @@ abop: // asymmetric binary
if n2.Op != gc.OLITERAL {
regfree(&n2)
}
goto ret
uop: // unary
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
gins(a, nil, &n1)
gmove(&n1, res)
regfree(&n1)
goto ret
ret:
return
}
/*
@ -887,7 +882,6 @@ func agen(n *gc.Node, res *gc.Node) {
n = n.Left
}
var nl *gc.Node
if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
@ -906,7 +900,7 @@ func agen(n *gc.Node, res *gc.Node) {
gins(ppc64.AMOVD, &n3, &n2)
gmove(&n2, res)
regfree(&n2)
goto ret
return
}
if n.Addable != 0 {
@ -918,10 +912,10 @@ func agen(n *gc.Node, res *gc.Node) {
gins(ppc64.AMOVD, &n1, &n2)
gmove(&n2, res)
regfree(&n2)
goto ret
return
}
nl = n.Left
nl := n.Left
switch n.Op {
default:
@ -999,8 +993,6 @@ func agen(n *gc.Node, res *gc.Node) {
ginsadd(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, res)
}
}
ret:
}
/*
@ -1126,24 +1118,21 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
gc.Genlist(n.Ninit)
}
var et int
var nl *gc.Node
var nr *gc.Node
if n.Type == nil {
gc.Convlit(&n, gc.Types[gc.TBOOL])
if n.Type == nil {
goto ret
return
}
}
et = int(n.Type.Etype)
et := int(n.Type.Etype)
if et != gc.TBOOL {
gc.Yyerror("cgen: bad type %v for %v", gc.Tconv(n.Type, 0), gc.Oconv(int(n.Op), 0))
gc.Patch(gins(obj.AEND, nil, nil), to)
goto ret
return
}
nr = nil
nr := (*gc.Node)(nil)
for n.Op == gc.OCONVNOP {
n = n.Left
@ -1152,6 +1141,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
}
}
var nl *gc.Node
switch n.Op {
default:
var n1 gc.Node
@ -1166,14 +1156,14 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
}
gc.Patch(gc.Gbranch(a, n.Type, likely), to)
regfree(&n1)
goto ret
return
// need to ask if it is bool?
case gc.OLITERAL:
if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(ppc64.ABR, nil, likely), to)
}
goto ret
return
case gc.OANDAND,
gc.OOROR:
@ -1191,7 +1181,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
bgen(n.Right, true_, likely, to)
}
goto ret
return
case gc.OEQ,
gc.ONE,
@ -1201,7 +1191,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
gc.OGE:
nr = n.Right
if nr == nil || nr.Type == nil {
goto ret
return
}
fallthrough
@ -1209,14 +1199,14 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
nl = n.Left
if nl == nil || nl.Type == nil {
goto ret
return
}
}
switch n.Op {
case gc.ONOT:
bgen(nl, !true_, likely, to)
goto ret
return
case gc.OEQ,
gc.ONE,
@ -1238,7 +1228,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
n.Ninit = ll
gc.Patch(gc.Gbranch(ppc64.ABR, nil, 0), to)
gc.Patch(p2, gc.Pc)
goto ret
return
}
a = gc.Brcom(a)
@ -1365,9 +1355,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
regfree(&n2)
}
goto ret
ret:
return
}
/*
View file
@ -572,10 +572,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// TODO(minux): enable division by magic multiply (also need to fix longmod below)
//if(nr->op != OLITERAL)
goto longdiv
// division and mod using (slow) hardware instruction
longdiv:
dodiv(op, nl, nr, res)
return
@ -639,11 +636,6 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
* res = nl >> nr
*/
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var n1 gc.Node
var n2 gc.Node
var n3 gc.Node
var tcount *gc.Type
a := int(optoas(op, nl.Type))
if nr.Op == gc.OLITERAL {
@ -663,7 +655,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
}
gmove(&n1, res)
regfree(&n1)
goto ret
return
}
if nl.Ullman >= gc.UINF {
@ -683,15 +675,18 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// Allow either uint32 or uint64 as shift type,
// to avoid unnecessary conversion from uint32 to uint64
// just to do the comparison.
tcount = gc.Types[gc.Simtype[nr.Type.Etype]]
tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
if tcount.Etype < gc.TUINT32 {
tcount = gc.Types[gc.TUINT32]
}
var n1 gc.Node
regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
regalloc(&n3, tcount, &n1) // to clear high bits of CX
var n3 gc.Node
regalloc(&n3, tcount, &n1) // to clear high bits of CX
var n2 gc.Node
regalloc(&n2, nl.Type, res)
if nl.Ullman >= nr.Ullman {
@ -728,8 +723,6 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
regfree(&n1)
regfree(&n2)
ret:
}
func clearfat(nl *gc.Node) {
@ -759,9 +752,8 @@ func clearfat(nl *gc.Node) {
agen(nl, &dst)
var boff uint64
var p *obj.Prog
if q > 128 {
p = gins(ppc64.ASUB, nil, &dst)
p := gins(ppc64.ASUB, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
@ -784,7 +776,7 @@ func clearfat(nl *gc.Node) {
// The loop leaves R3 on the last zeroed dword
boff = 8
} else if q >= 4 {
p = gins(ppc64.ASUB, nil, &dst)
p := gins(ppc64.ASUB, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
f := (*gc.Node)(gc.Sysfunc("duffzero"))
@ -797,6 +789,7 @@ func clearfat(nl *gc.Node) {
// duffzero leaves R3 on the last zeroed dword
boff = 8
} else {
var p *obj.Prog
for t := uint64(0); t < q; t++ {
p = gins(ppc64.AMOVD, &r0, &dst)
p.To.Type = obj.TYPE_MEM
@ -806,6 +799,7 @@ func clearfat(nl *gc.Node) {
boff = 8 * q
}
var p *obj.Prog
for t := uint64(0); t < c; t++ {
p = gins(ppc64.AMOVB, &r0, &dst)
p.To.Type = obj.TYPE_MEM
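clearfat above zeroes q doublewords first (with an inline loop or a duffzero call) and then the remaining c bytes one at a time. A minimal portable sketch of the same word-then-byte split (clearBytes is an illustrative helper, not compiler code):

package main

import (
    "encoding/binary"
    "fmt"
)

// Zero 8-byte words first, then finish the tail a byte at a time,
// mirroring the doubleword-then-byte split used by clearfat.
func clearBytes(b []byte) {
    i := 0
    for ; i+8 <= len(b); i += 8 {
        binary.LittleEndian.PutUint64(b[i:], 0)
    }
    for ; i < len(b); i++ {
        b[i] = 0
    }
}

func main() {
    b := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
    clearBytes(b)
    fmt.Println(b) // all zeros
}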
View file
@ -407,9 +407,8 @@ func subprop(r0 *gc.Flow) bool {
if !regtyp(v2) {
return false
}
var r *gc.Flow
var info gc.ProgInfo
for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Uniqs(r) == nil {
break
}
@ -425,7 +424,32 @@ func subprop(r0 *gc.Flow) bool {
if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
if p.To.Type == v1.Type {
if p.To.Reg == v1.Reg {
goto gotit
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
if p.From.Type == v2.Type {
fmt.Printf(" excise")
}
fmt.Printf("\n")
}
for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
p = r.Prog
copysub(&p.From, v1, v2, 1)
copysub1(p, v1, v2, 1)
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("%v\n", r.Prog)
}
}
t := int(int(v1.Reg))
v1.Reg = v2.Reg
v2.Reg = int16(t)
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
return true
}
}
}
@ -439,34 +463,6 @@ func subprop(r0 *gc.Flow) bool {
}
return false
gotit:
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
if p.From.Type == v2.Type {
fmt.Printf(" excise")
}
fmt.Printf("\n")
}
for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
p = r.Prog
copysub(&p.From, v1, v2, 1)
copysub1(p, v1, v2, 1)
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("%v\n", r.Prog)
}
}
t := int(int(v1.Reg))
v1.Reg = v2.Reg
v2.Reg = int16(t)
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
return true
}
/*
View file
@ -39,17 +39,14 @@ import (
import "cmd/internal/ld"
func needlib(name string) int {
var p string
var s *ld.LSym
if name[0] == '\x00' {
return 0
}
/* reuse hash code in symbol table */
p = fmt.Sprintf(".dynlib.%s", name)
p := fmt.Sprintf(".dynlib.%s", name)
s = ld.Linklookup(ld.Ctxt, p, 0)
s := ld.Linklookup(ld.Ctxt, p, 0)
if s.Type == 0 {
s.Type = 100 // avoid SDATA, etc.
@ -163,16 +160,13 @@ func gentext() {
// Construct a call stub in stub that calls symbol targ via its PLT
// entry.
func gencallstub(abicase int, stub *ld.LSym, targ *ld.LSym) {
var plt *ld.LSym
var r *ld.Reloc
if abicase != 1 {
// If we see R_PPC64_TOCSAVE or R_PPC64_REL24_NOTOC
// relocations, we'll need to implement cases 2 and 3.
log.Fatalf("gencallstub only implements case 1 calls")
}
plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
stub.Type = ld.STEXT
@ -180,7 +174,7 @@ func gencallstub(abicase int, stub *ld.LSym, targ *ld.LSym) {
ld.Adduint32(ld.Ctxt, stub, 0xf8410018) // std r2,24(r1)
// Load the function pointer from the PLT.
r = ld.Addrel(stub)
r := ld.Addrel(stub)
r.Off = int32(stub.Size)
r.Sym = plt
@ -214,10 +208,7 @@ func adddynrela(rel *ld.LSym, s *ld.LSym, r *ld.Reloc) {
}
func adddynrel(s *ld.LSym, r *ld.Reloc) {
var targ *ld.LSym
var rela *ld.LSym
targ = r.Sym
targ := r.Sym
ld.Ctxt.Cursym = s
switch r.Type {
@ -251,7 +242,7 @@ func adddynrel(s *ld.LSym, r *ld.Reloc) {
// These happen in .toc sections
adddynsym(ld.Ctxt, targ)
rela = ld.Linklookup(ld.Ctxt, ".rela", 0)
rela := ld.Linklookup(ld.Ctxt, ".rela", 0)
ld.Addaddrplus(ld.Ctxt, rela, s, int64(r.Off))
ld.Adduint64(ld.Ctxt, rela, ld.ELF64_R_INFO(uint32(targ.Dynid), ld.R_PPC64_ADDR64))
ld.Adduint64(ld.Ctxt, rela, uint64(r.Add))
@ -325,9 +316,7 @@ func elfreloc1(r *ld.Reloc, sectoff int64) int {
}
func elfsetupplt() {
var plt *ld.LSym
plt = ld.Linklookup(ld.Ctxt, ".plt", 0)
plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
if plt.Size == 0 {
// The dynamic linker stores the address of the
// dynamic resolver and the DSO identifier in the two
@ -360,10 +349,6 @@ func symtoc(s *ld.LSym) int64 {
}
func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int {
var o1 uint32
var o2 uint32
var t int64
if ld.Linkmode == ld.LinkExternal {
// TODO(minux): translate R_ADDRPOWER and R_CALLPOWER into standard ELF relocations.
// R_ADDRPOWER corresponds to R_PPC_ADDR16_HA and R_PPC_ADDR16_LO.
@ -386,9 +371,9 @@ func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int {
// The encoding of the immediate x<<16 + y,
// where x is the low 16 bits of the first instruction and y is the low 16
// bits of the second. Both x and y are signed (int16, not uint16).
o1 = uint32(r.Add >> 32)
o2 = uint32(r.Add)
t = ld.Symaddr(r.Sym)
o1 := uint32(r.Add >> 32)
o2 := uint32(r.Add)
t := ld.Symaddr(r.Sym)
if t < 0 {
ld.Ctxt.Diag("relocation for %s is too big (>=2G): %d", s.Name, ld.Symaddr(r.Sym))
}
@ -410,13 +395,14 @@ func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int {
case ld.R_CALLPOWER:
// Bits 6 through 29 = (S + A - P) >> 2
var o1 uint32
if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
o1 = ld.Be32(s.P[r.Off:])
} else {
o1 = ld.Le32(s.P[r.Off:])
}
t = ld.Symaddr(r.Sym) + r.Add - (s.Value + int64(r.Off))
t := ld.Symaddr(r.Sym) + r.Add - (s.Value + int64(r.Off))
if t&3 != 0 {
ld.Ctxt.Diag("relocation for %s+%d is not aligned: %d", r.Sym.Name, r.Off, t)
}
@ -439,7 +425,6 @@ func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int {
}
func archrelocvariant(r *ld.Reloc, s *ld.LSym, t int64) int64 {
var o1 uint32
switch r.Variant & ld.RV_TYPE_MASK {
default:
ld.Diag("unexpected relocation variant %d", r.Variant)
@ -452,6 +437,7 @@ func archrelocvariant(r *ld.Reloc, s *ld.LSym, t int64) int64 {
if r.Variant&ld.RV_CHECK_OVERFLOW != 0 {
// Whether to check for signed or unsigned
// overflow depends on the instruction
var o1 uint32
if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
o1 = ld.Be32(s.P[r.Off-2:])
} else {
@ -485,6 +471,7 @@ func archrelocvariant(r *ld.Reloc, s *ld.LSym, t int64) int64 {
if r.Variant&ld.RV_CHECK_OVERFLOW != 0 {
// Whether to check for signed or unsigned
// overflow depends on the instruction
var o1 uint32
if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
o1 = ld.Be32(s.P[r.Off-2:])
} else {
@ -508,6 +495,7 @@ func archrelocvariant(r *ld.Reloc, s *ld.LSym, t int64) int64 {
return int64(int16(t))
case ld.RV_POWER_DS:
var o1 uint32
if ld.Ctxt.Arch.ByteOrder == binary.BigEndian {
o1 = uint32(ld.Be16(s.P[r.Off:]))
} else {
@ -535,23 +523,18 @@ func addpltsym(ctxt *ld.Link, s *ld.LSym) {
adddynsym(ctxt, s)
if ld.Iself {
var plt *ld.LSym
var rela *ld.LSym
var glink *ld.LSym
var r *ld.Reloc
plt = ld.Linklookup(ctxt, ".plt", 0)
rela = ld.Linklookup(ctxt, ".rela.plt", 0)
plt := ld.Linklookup(ctxt, ".plt", 0)
rela := ld.Linklookup(ctxt, ".rela.plt", 0)
if plt.Size == 0 {
elfsetupplt()
}
// Create the glink resolver if necessary
glink = ensureglinkresolver()
glink := ensureglinkresolver()
// Write symbol resolver stub (just a branch to the
// glink resolver stub)
r = ld.Addrel(glink)
r := ld.Addrel(glink)
r.Sym = glink
r.Off = int32(glink.Size)
@ -579,11 +562,7 @@ func addpltsym(ctxt *ld.Link, s *ld.LSym) {
// Generate the glink resolver stub if necessary and return the .glink section
func ensureglinkresolver() *ld.LSym {
var glink *ld.LSym
var s *ld.LSym
var r *ld.Reloc
glink = ld.Linklookup(ld.Ctxt, ".glink", 0)
glink := ld.Linklookup(ld.Ctxt, ".glink", 0)
if glink.Size != 0 {
return glink
}
@ -610,7 +589,7 @@ func ensureglinkresolver() *ld.LSym {
ld.Adduint32(ld.Ctxt, glink, 0x7800f082) // srdi r0,r0,2
// r11 = address of the first byte of the PLT
r = ld.Addrel(glink)
r := ld.Addrel(glink)
r.Off = int32(glink.Size)
r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
@ -636,7 +615,7 @@ func ensureglinkresolver() *ld.LSym {
// Add DT_PPC64_GLINK .dynamic entry, which points to 32 bytes
// before the first symbol resolver stub.
s = ld.Linklookup(ld.Ctxt, ".dynamic", 0)
s := ld.Linklookup(ld.Ctxt, ".dynamic", 0)
ld.Elfwritedynentsymplus(s, ld.DT_PPC64_GLINK, glink, glink.Size-32)
@ -644,10 +623,6 @@ func ensureglinkresolver() *ld.LSym {
}
func adddynsym(ctxt *ld.Link, s *ld.LSym) {
var d *ld.LSym
var t int
var name string
if s.Dynid >= 0 {
return
}
@ -656,13 +631,13 @@ func adddynsym(ctxt *ld.Link, s *ld.LSym) {
s.Dynid = int32(ld.Nelfsym)
ld.Nelfsym++
d = ld.Linklookup(ctxt, ".dynsym", 0)
d := ld.Linklookup(ctxt, ".dynsym", 0)
name = s.Extname
name := s.Extname
ld.Adduint32(ctxt, d, uint32(ld.Addstring(ld.Linklookup(ctxt, ".dynstr", 0), name)))
/* type */
t = ld.STB_GLOBAL << 4
t := ld.STB_GLOBAL << 4
if s.Cgoexport != 0 && s.Type&ld.SMASK == ld.STEXT {
t |= ld.STT_FUNC
@ -696,14 +671,12 @@ func adddynsym(ctxt *ld.Link, s *ld.LSym) {
}
func adddynlib(lib string) {
var s *ld.LSym
if needlib(lib) == 0 {
return
}
if ld.Iself {
s = ld.Linklookup(ld.Ctxt, ".dynstr", 0)
s := ld.Linklookup(ld.Ctxt, ".dynstr", 0)
if s.Size == 0 {
ld.Addstring(s, "")
}
@ -714,11 +687,6 @@ func adddynlib(lib string) {
}
func asmb() {
var symo uint32
var sect *ld.Section
var sym *ld.LSym
var i int
if ld.Debug['v'] != 0 {
fmt.Fprintf(&ld.Bso, "%5.2f asmb\n", obj.Cputime())
}
@ -728,7 +696,7 @@ func asmb() {
ld.Asmbelfsetup()
}
sect = ld.Segtext.Sect
sect := ld.Segtext.Sect
ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
ld.Codeblk(int64(sect.Vaddr), int64(sect.Length))
for sect = sect.Next; sect != nil; sect = sect.Next {
@ -758,7 +726,7 @@ func asmb() {
ld.Symsize = 0
ld.Lcsize = 0
symo = 0
symo := uint32(0)
if ld.Debug['s'] == 0 {
// TODO: rationalize
if ld.Debug['v'] != 0 {
@ -801,10 +769,10 @@ func asmb() {
ld.Asmplan9sym()
ld.Cflush()
sym = ld.Linklookup(ld.Ctxt, "pclntab", 0)
sym := ld.Linklookup(ld.Ctxt, "pclntab", 0)
if sym != nil {
ld.Lcsize = int32(len(sym.P))
for i = 0; int32(i) < ld.Lcsize; i++ {
for i := 0; int32(i) < ld.Lcsize; i++ {
ld.Cput(uint8(sym.P[i]))
}
View file
@ -163,11 +163,9 @@ func usage() {
}
func Main() {
var p string
// Allow GOARCH=Thestring or GOARCH=Thestringsuffix,
// but not other values.
p = obj.Getgoarch()
p := obj.Getgoarch()
if !strings.HasPrefix(p, Thestring) {
log.Fatalf("cannot use %cc with GOARCH=%s", Thechar, p)
@ -219,8 +217,6 @@ func Main() {
}
func assemble(file string) int {
var i int
if outfile == "" {
outfile = strings.TrimSuffix(filepath.Base(file), ".s") + "." + string(Thechar)
}
@ -235,6 +231,7 @@ func assemble(file string) int {
fmt.Fprintf(&obuf, "go object %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion())
fmt.Fprintf(&obuf, "!\n")
var i int
for Pass = 1; Pass <= 2; Pass++ {
pinit(file)
for i = 0; i < len(Dlist); i++ {
View file
@ -45,61 +45,50 @@ import (
* common code for all the assemblers
*/
func pragpack() {
for getnsc() != '\n' {
}
}
func pragvararg() {
for getnsc() != '\n' {
}
}
func pragcgo(name string) {
for getnsc() != '\n' {
}
}
func pragfpround() {
for getnsc() != '\n' {
}
}
func pragtextflag() {
for getnsc() != '\n' {
}
}
func pragdataflag() {
for getnsc() != '\n' {
}
}
func pragprofile() {
for getnsc() != '\n' {
}
}
func pragincomplete() {
for getnsc() != '\n' {
}
}
func setinclude(p string) {
var i int
if p == "" {
return
}
for i = 1; i < len(include); i++ {
for i := 1; i < len(include); i++ {
if p == include[i] {
return
}
@ -117,9 +106,7 @@ func errorexit() {
}
func pushio() {
var i *Io
i = iostack
i := iostack
if i == nil {
Yyerror("botch in pushio")
errorexit()
@ -129,10 +116,9 @@ func pushio() {
}
func newio() {
var i *Io
var pushdepth int = 0
i = iofree
i := iofree
if i == nil {
pushdepth++
if pushdepth > 1000 {
@ -149,9 +135,7 @@ func newio() {
}
func newfile(s string, f *os.File) {
var i *Io
i = ionext
i := ionext
i.Link = iostack
iostack = i
i.F = f
@ -175,16 +159,13 @@ func Settext(s *obj.LSym) {
}
func LabelLookup(s *Sym) *Sym {
var p string
var lab *Sym
if thetext == nil {
s.Labelname = s.Name
return s
}
p = string(fmt.Sprintf("%s.%s", thetext.Name, s.Name))
lab = Lookup(p)
p := string(fmt.Sprintf("%s.%s", thetext.Name, s.Name))
lab := Lookup(p)
lab.Labelname = s.Name
return lab
@ -249,11 +230,10 @@ type Yylval struct {
}
func Yylex(yylval *Yylval) int {
var c int
var c1 int
var s *Sym
c = peekc
c := peekc
if c != IGN {
peekc = IGN
goto l1
@ -282,7 +262,48 @@ l1:
goto aloop
}
if isdigit(c) {
goto tnum
yybuf.Reset()
if c != '0' {
goto dc
}
yybuf.WriteByte(byte(c))
c = GETC()
c1 = 3
if c == 'x' || c == 'X' {
c1 = 4
c = GETC()
} else if c < '0' || c > '7' {
goto dc
}
yylval.Lval = 0
for {
if c >= '0' && c <= '9' {
if c > '7' && c1 == 3 {
break
}
yylval.Lval = int64(uint64(yylval.Lval) << uint(c1))
yylval.Lval += int64(c) - '0'
c = GETC()
continue
}
if c1 == 3 {
break
}
if c >= 'A' && c <= 'F' {
c += 'a' - 'A'
}
if c >= 'a' && c <= 'f' {
yylval.Lval = int64(uint64(yylval.Lval) << uint(c1))
yylval.Lval += int64(c) - 'a' + 10
c = GETC()
continue
}
break
}
goto ncu
}
switch c {
case '\n':
@ -458,50 +479,6 @@ aloop:
yylval.Sval = last
return int(s.Type)
tnum:
yybuf.Reset()
if c != '0' {
goto dc
}
yybuf.WriteByte(byte(c))
c = GETC()
c1 = 3
if c == 'x' || c == 'X' {
c1 = 4
c = GETC()
} else if c < '0' || c > '7' {
goto dc
}
yylval.Lval = 0
for {
if c >= '0' && c <= '9' {
if c > '7' && c1 == 3 {
break
}
yylval.Lval = int64(uint64(yylval.Lval) << uint(c1))
yylval.Lval += int64(c) - '0'
c = GETC()
continue
}
if c1 == 3 {
break
}
if c >= 'A' && c <= 'F' {
c += 'a' - 'A'
}
if c >= 'a' && c <= 'f' {
yylval.Lval = int64(uint64(yylval.Lval) << uint(c1))
yylval.Lval += int64(c) - 'a' + 10
c = GETC()
continue
}
break
}
goto ncu
dc:
for {
if !(isdigit(c)) {
@ -529,9 +506,7 @@ ncu:
}
func getc() int {
var c int
c = peekc
c := peekc
if c != IGN {
peekc = IGN
if c == '\n' {
@ -571,11 +546,10 @@ func unget(c int) {
}
func escchar(e int) int {
var c int
var l int
loop:
c = getc()
c := getc()
if c == '\n' {
Yyerror("newline in string")
return EOF
@ -643,11 +617,10 @@ func pinit(f string) {
}
func filbuf() int {
var i *Io
var n int
loop:
i = iostack
i := iostack
if i == nil {
return EOF
}
@ -705,11 +678,10 @@ func prfile(l int32) {
}
func GETC() int {
var c int
if len(fi.P) == 0 {
return filbuf()
}
c = int(fi.P[0])
c := int(fi.P[0])
fi.P = fi.P[1:]
return c
}
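The constant scanner above accumulates octal digits by shifting the value 3 bits per digit (c1 = 3) and hex digits 4 bits per digit (c1 = 4). A small self-contained sketch of that accumulation, with illustrative helper names rather than the assembler's own:

package main

import "fmt"

// digitVal returns the value of a 0-9a-fA-F digit, or -1 otherwise.
func digitVal(c byte) int {
    switch {
    case c >= '0' && c <= '9':
        return int(c - '0')
    case c >= 'a' && c <= 'f':
        return int(c-'a') + 10
    case c >= 'A' && c <= 'F':
        return int(c-'A') + 10
    }
    return -1
}

// parseShift accumulates digits by shifting: 3 bits per digit for octal,
// 4 bits per digit for hex, the same trick the lexer above uses.
func parseShift(s string, bitsPerDigit uint) (uint64, bool) {
    var v uint64
    for i := 0; i < len(s); i++ {
        d := digitVal(s[i])
        if d < 0 || d >= 1<<bitsPerDigit {
            return 0, false
        }
        v = v<<bitsPerDigit | uint64(d)
    }
    return v, true
}

func main() {
    v, _ := parseShift("755", 3) // octal
    fmt.Println(v)               // 493
    v, _ = parseShift("ff", 4)   // hex
    fmt.Println(v)               // 255
}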
View file
@ -43,14 +43,11 @@ const (
)
func getnsn() int32 {
var n int32
var c int
c = getnsc()
c := getnsc()
if c < '0' || c > '9' {
return -1
}
n = 0
n := int32(0)
for c >= '0' && c <= '9' {
n = n*10 + int32(c) - '0'
c = getc()
@ -61,9 +58,7 @@ func getnsn() int32 {
}
func getsym() *Sym {
var c int
c = getnsc()
c := getnsc()
if !isalpha(c) && c != '_' && c < 0x80 {
unget(c)
return nil
@ -84,15 +79,12 @@ func getsym() *Sym {
}
func getsymdots(dots *int) *Sym {
var c int
var s *Sym
s = getsym()
s := getsym()
if s != nil {
return s
}
c = getnsc()
c := getnsc()
if c != '.' {
unget(c)
return nil
@ -153,10 +145,9 @@ func getcom() int {
func dodefine(cp string) {
var s *Sym
var p string
if i := strings.Index(cp, "="); i >= 0 {
p = cp[i+1:]
p := cp[i+1:]
cp = cp[:i]
s = Lookup(cp)
s.Macro = &Macro{Text: p}
@ -186,14 +177,11 @@ var mactab = []struct {
}
func domacro() {
var i int
var s *Sym
s = getsym()
s := getsym()
if s == nil {
s = Lookup("endif")
}
for i = 0; i < len(mactab); i++ {
for i := 0; i < len(mactab); i++ {
if s.Name == mactab[i].Macname {
if mactab[i].Macf != nil {
mactab[i].Macf()
@ -209,9 +197,7 @@ func domacro() {
}
func macund() {
var s *Sym
s = getsym()
s := getsym()
macend()
if s == nil {
Yyerror("syntax in #undef")
@ -226,8 +212,6 @@ const (
)
func macdef() {
var s *Sym
var a *Sym
var args [NARG]string
var n int
var i int
@ -236,7 +220,7 @@ func macdef() {
var ischr int
var base bytes.Buffer
s = getsym()
s := getsym()
if s == nil {
goto bad
}
@ -251,6 +235,8 @@ func macdef() {
c = getnsc()
if c != ')' {
unget(c)
var a *Sym
var c int
for {
a = getsymdots(&dots)
if a == nil {
@ -316,7 +302,6 @@ func macdef() {
ischr = 0
}
} else {
if c == '"' || c == '\'' {
base.WriteByte(byte(c))
ischr = c
@ -417,20 +402,12 @@ bad:
if s == nil {
Yyerror("syntax in #define")
} else {
Yyerror("syntax in #define: %s", s.Name)
}
macend()
}
func macexpand(s *Sym) []byte {
var l int
var c int
var arg []string
var out bytes.Buffer
var buf bytes.Buffer
var cp string
if s.Macro.Narg == 0 {
if debug['m'] != 0 {
fmt.Printf("#expand %s %s\n", s.Name, s.Macro.Text)
@ -441,14 +418,19 @@ func macexpand(s *Sym) []byte {
nargs := s.Macro.Narg - 1
dots := s.Macro.Dots
c = getnsc()
c := getnsc()
var arg []string
var cp string
var out bytes.Buffer
if c != '(' {
goto bad
}
c = getc()
if c != ')' {
unget(c)
l = 0
l := 0
var buf bytes.Buffer
var c int
for {
c = getc()
if c == '"' {
@ -595,16 +577,14 @@ bad:
}
func macinc() {
var c0 int
var c int
var i int
var buf bytes.Buffer
var f *os.File
var hp string
var str string
var symb string
c0 = getnsc()
c0 := getnsc()
if c0 != '"' {
c = c0
if c0 != '<' {
@ -630,7 +610,7 @@ func macinc() {
goto bad
}
for i = 0; i < len(include); i++ {
for i := 0; i < len(include); i++ {
if i == 0 && c0 == '>' {
continue
}
@ -663,13 +643,11 @@ bad:
}
func maclin() {
var c int
var n int32
var buf bytes.Buffer
var symb string
n = getnsn()
c = getc()
n := getnsn()
c := getc()
if n < 0 {
goto bad
}
@ -783,16 +761,43 @@ bad:
}
func macprag() {
var s *Sym
var c0 int
var c int
var buf bytes.Buffer
var symb string
s = getsym()
s := getsym()
if s != nil && s.Name == "lib" {
goto praglib
c0 := getnsc()
if c0 != '"' {
c = c0
if c0 != '<' {
goto bad
}
c0 = '>'
}
var buf bytes.Buffer
for {
c = getc()
if c == c0 {
break
}
if c == '\n' {
goto bad
}
buf.WriteByte(byte(c))
}
symb := buf.String()
c = getcom()
if c != '\n' {
goto bad
}
/*
* put pragma-line in as a funny history
*/
obj.Linklinehist(Ctxt, int(Lineno), symb, -1)
return
}
if s != nil && s.Name == "pack" {
pragpack()
@ -830,43 +835,9 @@ func macprag() {
}
for getnsc() != '\n' {
}
return
praglib:
c0 = getnsc()
if c0 != '"' {
c = c0
if c0 != '<' {
goto bad
}
c0 = '>'
}
for {
c = getc()
if c == c0 {
break
}
if c == '\n' {
goto bad
}
buf.WriteByte(byte(c))
}
symb = buf.String()
c = getcom()
if c != '\n' {
goto bad
}
/*
* put pragma-line in as a funny history
*/
obj.Linklinehist(Ctxt, int(Lineno), symb, -1)
return
bad:
unget(c)
Yyerror("syntax in #pragma lib")
View file
@ -582,7 +582,96 @@ func evconst(n *Node) {
var v Val
var norig *Node
if nr == nil {
goto unary
// copy numeric value to avoid modifying
// nl, in case someone still refers to it (e.g. iota).
v = nl.Val
if wl == TIDEAL {
v = copyval(v)
}
switch uint32(n.Op)<<16 | uint32(v.Ctype) {
default:
if n.Diag == 0 {
Yyerror("illegal constant expression %v %v", Oconv(int(n.Op), 0), Tconv(nl.Type, 0))
n.Diag = 1
}
return
case OCONV<<16 | CTNIL,
OARRAYBYTESTR<<16 | CTNIL:
if n.Type.Etype == TSTRING {
v = tostr(v)
nl.Type = n.Type
break
}
fallthrough
// fall through
case OCONV<<16 | CTINT,
OCONV<<16 | CTRUNE,
OCONV<<16 | CTFLT,
OCONV<<16 | CTSTR:
convlit1(&nl, n.Type, true)
v = nl.Val
case OPLUS<<16 | CTINT,
OPLUS<<16 | CTRUNE:
break
case OMINUS<<16 | CTINT,
OMINUS<<16 | CTRUNE:
mpnegfix(v.U.Xval)
case OCOM<<16 | CTINT,
OCOM<<16 | CTRUNE:
et := Txxx
if nl.Type != nil {
et = int(nl.Type.Etype)
}
// calculate the mask in b
// result will be (a ^ mask)
var b Mpint
switch et {
// signed guys change sign
default:
Mpmovecfix(&b, -1)
// unsigned guys invert their bits
case TUINT8,
TUINT16,
TUINT32,
TUINT64,
TUINT,
TUINTPTR:
mpmovefixfix(&b, Maxintval[et])
}
mpxorfixfix(v.U.Xval, &b)
case OPLUS<<16 | CTFLT:
break
case OMINUS<<16 | CTFLT:
mpnegflt(v.U.Fval)
case OPLUS<<16 | CTCPLX:
break
case OMINUS<<16 | CTCPLX:
mpnegflt(&v.U.Cval.Real)
mpnegflt(&v.U.Cval.Imag)
case ONOT<<16 | CTBOOL:
if v.U.Bval == 0 {
goto settrue
}
goto setfalse
}
goto ret
}
if nr.Type == nil {
return
@ -944,97 +1033,6 @@ func evconst(n *Node) {
goto ret
// copy numeric value to avoid modifying
// nl, in case someone still refers to it (e.g. iota).
unary:
v = nl.Val
if wl == TIDEAL {
v = copyval(v)
}
switch uint32(n.Op)<<16 | uint32(v.Ctype) {
default:
if n.Diag == 0 {
Yyerror("illegal constant expression %v %v", Oconv(int(n.Op), 0), Tconv(nl.Type, 0))
n.Diag = 1
}
return
case OCONV<<16 | CTNIL,
OARRAYBYTESTR<<16 | CTNIL:
if n.Type.Etype == TSTRING {
v = tostr(v)
nl.Type = n.Type
break
}
fallthrough
// fall through
case OCONV<<16 | CTINT,
OCONV<<16 | CTRUNE,
OCONV<<16 | CTFLT,
OCONV<<16 | CTSTR:
convlit1(&nl, n.Type, true)
v = nl.Val
case OPLUS<<16 | CTINT,
OPLUS<<16 | CTRUNE:
break
case OMINUS<<16 | CTINT,
OMINUS<<16 | CTRUNE:
mpnegfix(v.U.Xval)
case OCOM<<16 | CTINT,
OCOM<<16 | CTRUNE:
et := Txxx
if nl.Type != nil {
et = int(nl.Type.Etype)
}
// calculate the mask in b
// result will be (a ^ mask)
var b Mpint
switch et {
// signed guys change sign
default:
Mpmovecfix(&b, -1)
// unsigned guys invert their bits
case TUINT8,
TUINT16,
TUINT32,
TUINT64,
TUINT,
TUINTPTR:
mpmovefixfix(&b, Maxintval[et])
}
mpxorfixfix(v.U.Xval, &b)
case OPLUS<<16 | CTFLT:
break
case OMINUS<<16 | CTFLT:
mpnegflt(v.U.Fval)
case OPLUS<<16 | CTCPLX:
break
case OMINUS<<16 | CTCPLX:
mpnegflt(&v.U.Cval.Real)
mpnegflt(&v.U.Cval.Imag)
case ONOT<<16 | CTBOOL:
if v.U.Bval == 0 {
goto settrue
}
goto setfalse
}
ret:
norig = saveorig(n)
*n = *nl
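The OCOM cases above compute the complement as value ^ mask, where the mask is -1 for signed types and the type's maximum value for unsigned ones. A quick check of that identity in plain Go (not the compiler's multiprecision arithmetic):

package main

import "fmt"

func main() {
    var u uint8 = 0x0F
    var s int32 = 7
    fmt.Println(^u == u^0xFF) // true: unsigned complement is XOR with the max value
    fmt.Println(^s == s^-1)   // true: signed complement is XOR with -1
}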
View file
@ -255,10 +255,12 @@ func Complexop(n *Node, res *Node) bool {
}
if n.Op == OREAL || n.Op == OIMAG {
goto yes
//dump("\ncomplex-yes", n);
return true
}
goto no
//dump("\ncomplex-no", n);
return false
maybe:
switch n.Op {
@ -270,23 +272,20 @@ maybe:
OCOMPLEX,
OREAL,
OIMAG:
goto yes
//dump("\ncomplex-yes", n);
return true
case ODOT,
ODOTPTR,
OINDEX,
OIND,
ONAME:
goto yes
//dump("\ncomplex-yes", n);
return true
}
//dump("\ncomplex-no", n);
no:
return false
//dump("\ncomplex-yes", n);
yes:
return true
}
func Complexmove(f *Node, t *Node) {
View file
@ -844,7 +844,6 @@ type Loophack struct {
var _yylex_lstk *Loophack
func _yylex(yylval *yySymType) int32 {
var c int
var c1 int
var escflag int
var v int64
@ -857,7 +856,7 @@ func _yylex(yylval *yySymType) int32 {
prevlineno = lineno
l0:
c = getc()
c := getc()
if yy_isspace(c) {
if c == '\n' && curio.nlsemi != 0 {
ungetc(c)
@ -887,7 +886,82 @@ l0:
}
if yy_isdigit(c) {
goto tnum
cp = &lexbuf
cp.Reset()
if c != '0' {
for {
cp.WriteByte(byte(c))
c = getc()
if yy_isdigit(c) {
continue
}
if c == '.' {
goto casedot
}
if c == 'e' || c == 'E' || c == 'p' || c == 'P' {
goto caseep
}
if c == 'i' {
goto casei
}
goto ncu
}
}
cp.WriteByte(byte(c))
c = getc()
if c == 'x' || c == 'X' {
for {
cp.WriteByte(byte(c))
c = getc()
if yy_isdigit(c) {
continue
}
if c >= 'a' && c <= 'f' {
continue
}
if c >= 'A' && c <= 'F' {
continue
}
if lexbuf.Len() == 2 {
Yyerror("malformed hex constant")
}
if c == 'p' {
goto caseep
}
goto ncu
}
}
if c == 'p' { // 0p begins floating point zero
goto caseep
}
c1 = 0
for {
if !yy_isdigit(c) {
break
}
if c < '0' || c > '7' {
c1 = 1 // not octal
}
cp.WriteByte(byte(c))
c = getc()
}
if c == '.' {
goto casedot
}
if c == 'e' || c == 'E' {
goto caseep
}
if c == 'i' {
goto casei
}
if c1 != 0 {
Yyerror("malformed octal constant")
}
goto ncu
}
switch c {
@ -1321,86 +1395,6 @@ talph:
yylval.sym = s
return int32(s.Lexical)
tnum:
cp = &lexbuf
cp.Reset()
if c != '0' {
for {
cp.WriteByte(byte(c))
c = getc()
if yy_isdigit(c) {
continue
}
goto dc
}
}
cp.WriteByte(byte(c))
c = getc()
if c == 'x' || c == 'X' {
for {
cp.WriteByte(byte(c))
c = getc()
if yy_isdigit(c) {
continue
}
if c >= 'a' && c <= 'f' {
continue
}
if c >= 'A' && c <= 'F' {
continue
}
if lexbuf.Len() == 2 {
Yyerror("malformed hex constant")
}
if c == 'p' {
goto caseep
}
goto ncu
}
}
if c == 'p' { // 0p begins floating point zero
goto caseep
}
c1 = 0
for {
if !yy_isdigit(c) {
break
}
if c < '0' || c > '7' {
c1 = 1 // not octal
}
cp.WriteByte(byte(c))
c = getc()
}
if c == '.' {
goto casedot
}
if c == 'e' || c == 'E' {
goto caseep
}
if c == 'i' {
goto casei
}
if c1 != 0 {
Yyerror("malformed octal constant")
}
goto ncu
dc:
if c == '.' {
goto casedot
}
if c == 'e' || c == 'E' || c == 'p' || c == 'P' {
goto caseep
}
if c == 'i' {
goto casei
}
ncu:
cp = nil
ungetc(c)
@ -1523,31 +1517,90 @@ func more(pp *string) bool {
*/
func getlinepragma() int {
var cmd, verb, name string
var n int
var cp *bytes.Buffer
var linep int
c := int(getr())
if c == 'g' {
goto go_
cp := &lexbuf
cp.Reset()
cp.WriteByte('g') // already read
for {
c = int(getr())
if c == EOF || c >= utf8.RuneSelf {
return c
}
if c == '\n' {
break
}
cp.WriteByte(byte(c))
}
cp = nil
if strings.HasPrefix(lexbuf.String(), "go:cgo_") {
pragcgo(lexbuf.String())
}
cmd = lexbuf.String()
verb = cmd
if i := strings.Index(verb, " "); i >= 0 {
verb = verb[:i]
}
if verb == "go:linkname" {
if imported_unsafe == 0 {
Yyerror("//go:linkname only allowed in Go files that import \"unsafe\"")
}
f := strings.Fields(cmd)
if len(f) != 3 {
Yyerror("usage: //go:linkname localname linkname")
return c
}
Lookup(f[1]).Linkname = f[2]
return c
}
if verb == "go:nointerface" && obj.Fieldtrack_enabled != 0 {
nointerface = true
return c
}
if verb == "go:noescape" {
noescape = true
return c
}
if verb == "go:nosplit" {
nosplit = true
return c
}
if verb == "go:nowritebarrier" {
if compiling_runtime == 0 {
Yyerror("//go:nowritebarrier only allowed in runtime")
}
nowritebarrier = true
return c
}
return c
}
if c != 'l' {
goto out
return c
}
for i := 1; i < 5; i++ {
c = int(getr())
if c != int("line "[i]) {
goto out
return c
}
}
cp = &lexbuf
cp := &lexbuf
cp.Reset()
linep = 0
linep := 0
for {
c = int(getr())
if c == EOF {
goto out
return c
}
if c == '\n' {
break
@ -1564,9 +1617,9 @@ func getlinepragma() int {
cp = nil
if linep == 0 {
goto out
return c
}
n = 0
n := 0
for _, c := range lexbuf.String()[linep:] {
if c < '0' || c > '9' {
goto out
@ -1579,7 +1632,7 @@ func getlinepragma() int {
}
if n <= 0 {
goto out
return c
}
// try to avoid allocating file name over and over
@ -1587,76 +1640,12 @@ func getlinepragma() int {
for h := Ctxt.Hist; h != nil; h = h.Link {
if h.Name != "" && h.Name == name {
linehist(h.Name, int32(n), 0)
goto out
return c
}
}
linehist(name, int32(n), 0)
goto out
go_:
cp = &lexbuf
cp.Reset()
cp.WriteByte('g') // already read
for {
c = int(getr())
if c == EOF || c >= utf8.RuneSelf {
goto out
}
if c == '\n' {
break
}
cp.WriteByte(byte(c))
}
cp = nil
if strings.HasPrefix(lexbuf.String(), "go:cgo_") {
pragcgo(lexbuf.String())
}
cmd = lexbuf.String()
verb = cmd
if i := strings.Index(verb, " "); i >= 0 {
verb = verb[:i]
}
if verb == "go:linkname" {
if imported_unsafe == 0 {
Yyerror("//go:linkname only allowed in Go files that import \"unsafe\"")
}
f := strings.Fields(cmd)
if len(f) != 3 {
Yyerror("usage: //go:linkname localname linkname")
goto out
}
Lookup(f[1]).Linkname = f[2]
goto out
}
if verb == "go:nointerface" && obj.Fieldtrack_enabled != 0 {
nointerface = true
goto out
}
if verb == "go:noescape" {
noescape = true
goto out
}
if verb == "go:nosplit" {
nosplit = true
goto out
}
if verb == "go:nowritebarrier" {
if compiling_runtime == 0 {
Yyerror("//go:nowritebarrier only allowed in runtime")
}
nowritebarrier = true
goto out
}
return c
out:
return c
@ -1708,14 +1697,12 @@ func pragcgo(text string) {
var p string
p, ok = getquoted(&q)
if !ok {
goto err1
Yyerror("usage: //go:cgo_dynamic_linker \"path\"")
return
}
pragcgobuf += fmt.Sprintf("cgo_dynamic_linker %v\n", plan9quote(p))
goto out
return
err1:
Yyerror("usage: //go:cgo_dynamic_linker \"path\"")
goto out
}
if verb == "dynexport" {
@ -1729,7 +1716,7 @@ func pragcgo(text string) {
}
if !more(&q) {
pragcgobuf += fmt.Sprintf("%s %v\n", verb, plan9quote(local))
goto out
return
}
remote = getimpsym(&q)
@ -1737,11 +1724,11 @@ func pragcgo(text string) {
goto err2
}
pragcgobuf += fmt.Sprintf("%s %v %v\n", verb, plan9quote(local), plan9quote(remote))
goto out
return
err2:
Yyerror("usage: //go:%s local [remote]", verb)
goto out
return
}
if verb == "cgo_import_dynamic" || verb == "dynimport" {
@ -1754,7 +1741,7 @@ func pragcgo(text string) {
}
if !more(&q) {
pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v\n", plan9quote(local))
goto out
return
}
remote = getimpsym(&q)
@ -1763,7 +1750,7 @@ func pragcgo(text string) {
}
if !more(&q) {
pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v %v\n", plan9quote(local), plan9quote(remote))
goto out
return
}
p, ok = getquoted(&q)
@ -1771,24 +1758,22 @@ func pragcgo(text string) {
goto err3
}
pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v %v %v\n", plan9quote(local), plan9quote(remote), plan9quote(p))
goto out
return
err3:
Yyerror("usage: //go:cgo_import_dynamic local [remote [\"library\"]]")
goto out
return
}
if verb == "cgo_import_static" {
local := getimpsym(&q)
if local == "" || more(&q) {
goto err4
Yyerror("usage: //go:cgo_import_static local")
return
}
pragcgobuf += fmt.Sprintf("cgo_import_static %v\n", plan9quote(local))
goto out
return
err4:
Yyerror("usage: //go:cgo_import_static local")
goto out
}
if verb == "cgo_ldflag" {
@ -1796,17 +1781,13 @@ func pragcgo(text string) {
var p string
p, ok = getquoted(&q)
if !ok {
goto err5
Yyerror("usage: //go:cgo_ldflag \"arg\"")
return
}
pragcgobuf += fmt.Sprintf("cgo_ldflag %v\n", plan9quote(p))
goto out
return
err5:
Yyerror("usage: //go:cgo_ldflag \"arg\"")
goto out
}
out:
}
type yy struct{}
@ -1983,7 +1964,6 @@ func escchar(e int, escflg *int, val *int64) bool {
u := 0
c = int(getr())
var l int64
var i int
switch c {
case 'x':
@ -2010,7 +1990,24 @@ func escchar(e int, escflg *int, val *int64) bool {
'6',
'7':
*escflg = 1 // it's a byte
goto oct
l := int64(c) - '0'
for i := 2; i > 0; i-- {
c = getc()
if c >= '0' && c <= '7' {
l = l*8 + int64(c) - '0'
continue
}
Yyerror("non-octal character in escape sequence: %c", c)
ungetc(c)
}
if l > 255 {
Yyerror("octal escape value > 255: %d", l)
}
*val = l
return false
case 'a':
c = '\a'
@ -2039,7 +2036,7 @@ func escchar(e int, escflg *int, val *int64) bool {
return false
hex:
l = 0
l := int64(0)
for ; i > 0; i-- {
c = getc()
if c >= '0' && c <= '9' {
@ -2067,26 +2064,6 @@ hex:
l = utf8.RuneError
}
*val = l
return false
oct:
l = int64(c) - '0'
for i := 2; i > 0; i-- {
c = getc()
if c >= '0' && c <= '7' {
l = l*8 + int64(c) - '0'
continue
}
Yyerror("non-octal character in escape sequence: %c", c)
ungetc(c)
}
if l > 255 {
Yyerror("octal escape value > 255: %d", l)
}
*val = l
return false
}
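The octal-escape branch above folds up to three octal digits with l = l*8 + digit and rejects values over 255. A hedged standalone sketch of the same rule (octalEscape is an illustrative name, not a compiler function):

package main

import "fmt"

// Parse a '\ddd' octal escape: up to three octal digits, value <= 255.
func octalEscape(digits string) (byte, bool) {
    if len(digits) == 0 || len(digits) > 3 {
        return 0, false
    }
    v := 0
    for i := 0; i < len(digits); i++ {
        c := digits[i]
        if c < '0' || c > '7' {
            return 0, false
        }
        v = v*8 + int(c-'0')
    }
    if v > 255 {
        return 0, false // e.g. \777 = 511 does not fit in a byte
    }
    return byte(v), true
}

func main() {
    b, ok := octalEscape("101")
    fmt.Println(b, ok) // 65 true ('A')
    _, ok = octalEscape("777")
    fmt.Println(ok) // false
}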
View file
@ -451,7 +451,6 @@ bad:
//
func mpatofix(a *Mpint, as string) {
var c int
var s0 string
s := as
f := 0
@ -471,7 +470,43 @@ func mpatofix(a *Mpint, as string) {
fallthrough
case '0':
goto oct
var c int
c, s = intstarstringplusplus(s)
if c == 'x' || c == 'X' {
s0 := s
var c int
c, _ = intstarstringplusplus(s)
for c != 0 {
if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') {
s = s[1:]
c, _ = intstarstringplusplus(s)
continue
}
Yyerror("malformed hex constant: %s", as)
goto bad
}
mphextofix(a, s0)
if a.Ovf != 0 {
Yyerror("constant too large: %s", as)
goto bad
}
goto out
}
for c != 0 {
if c >= '0' && c <= '7' {
mpmulcfix(a, 8)
mpaddcfix(a, int64(c)-'0')
c, s = intstarstringplusplus(s)
continue
}
Yyerror("malformed octal constant: %s", as)
goto bad
}
goto out
}
for c != 0 {
@ -488,45 +523,6 @@ func mpatofix(a *Mpint, as string) {
goto out
oct:
c, s = intstarstringplusplus(s)
if c == 'x' || c == 'X' {
goto hex
}
for c != 0 {
if c >= '0' && c <= '7' {
mpmulcfix(a, 8)
mpaddcfix(a, int64(c)-'0')
c, s = intstarstringplusplus(s)
continue
}
Yyerror("malformed octal constant: %s", as)
goto bad
}
goto out
hex:
s0 = s
c, _ = intstarstringplusplus(s)
for c != 0 {
if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') {
s = s[1:]
c, _ = intstarstringplusplus(s)
continue
}
Yyerror("malformed hex constant: %s", as)
goto bad
}
mphextofix(a, s0)
if a.Ovf != 0 {
Yyerror("constant too large: %s", as)
goto bad
}
out:
if f != 0 {
mpnegfix(a)
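mpatofix above builds the constant by multiply-accumulate: mpmulcfix scales the accumulator by the base and mpaddcfix adds the next digit. The same idea with math/big, for illustration only (the compiler uses its own Mpint type):

package main

import (
    "fmt"
    "math/big"
)

// Accumulate digits as a = a*base + d. Assumes s contains only digits
// that are valid for the given base.
func atofix(s string, base int64) *big.Int {
    a := new(big.Int)
    b := big.NewInt(base)
    for i := 0; i < len(s); i++ {
        a.Mul(a, b)
        a.Add(a, big.NewInt(int64(s[i]-'0')))
    }
    return a
}

func main() {
    fmt.Println(atofix("755", 8))   // 493
    fmt.Println(atofix("1234", 10)) // 1234
}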
View file
@ -187,12 +187,44 @@ func mpaddfixfix(a *Mpint, b *Mpint, quiet int) {
}
c := 0
var x int
if a.Neg != b.Neg {
goto sub
// perform a-b
switch mpcmp(a, b) {
case 0:
Mpmovecfix(a, 0)
case 1:
var x int
for i := 0; i < Mpprec; i++ {
x = a.A[i] - b.A[i] - c
c = 0
if x < 0 {
x += Mpbase
c = 1
}
a.A[i] = x
}
case -1:
a.Neg ^= 1
var x int
for i := 0; i < Mpprec; i++ {
x = b.A[i] - a.A[i] - c
c = 0
if x < 0 {
x += Mpbase
c = 1
}
a.A[i] = x
}
}
return
}
// perform a+b
var x int
for i := 0; i < Mpprec; i++ {
x = a.A[i] + b.A[i] + c
c = 0
@ -210,40 +242,6 @@ func mpaddfixfix(a *Mpint, b *Mpint, quiet int) {
}
return
// perform a-b
sub:
switch mpcmp(a, b) {
case 0:
Mpmovecfix(a, 0)
case 1:
var x int
for i := 0; i < Mpprec; i++ {
x = a.A[i] - b.A[i] - c
c = 0
if x < 0 {
x += Mpbase
c = 1
}
a.A[i] = x
}
case -1:
a.Neg ^= 1
var x int
for i := 0; i < Mpprec; i++ {
x = b.A[i] - a.A[i] - c
c = 0
if x < 0 {
x += Mpbase
c = 1
}
a.A[i] = x
}
}
}
func mpmulfixfix(a *Mpint, b *Mpint) {
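When the signs match, mpaddfixfix above adds limb by limb and propagates a carry; when they differ it subtracts the smaller magnitude and fixes the sign. A textbook sketch of the carry loop, using 32-bit limbs purely for illustration (Mpint has its own Mpbase/Mpprec layout):

package main

import "fmt"

// Add two equal-length limb slices, least significant limb first,
// propagating a carry; the extra output limb holds any final carry.
func addLimbs(a, b []uint32) []uint32 {
    out := make([]uint32, len(a)+1)
    var carry uint64
    for i := range a {
        s := uint64(a[i]) + uint64(b[i]) + carry
        out[i] = uint32(s)
        carry = s >> 32
    }
    out[len(a)] = uint32(carry)
    return out
}

func main() {
    // 0xFFFFFFFF + 1: the low limb wraps to 0 and the carry moves up.
    fmt.Println(addLimbs([]uint32{0xFFFFFFFF}, []uint32{1})) // [0 1]
}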
View file
@ -657,7 +657,7 @@ Next:
PPARAMOUT:
pos, ok := to.Node.(*Node).Opt.(int32) // index in vars
if !ok {
goto Next1
return
}
if pos >= int32(len(vars)) || vars[pos] != to.Node {
Fatal("bad bookkeeping in liveness %v %d", Nconv(to.Node.(*Node), 0), pos)
@ -690,8 +690,6 @@ Next:
}
}
}
Next1:
}
View file

@ -185,32 +185,22 @@ func overlap_reg(o1 int64, w1 int, o2 int64, w2 int) bool {
}
func mkvar(f *Flow, a *obj.Addr) Bits {
var v *Var
var i int
var n int
var et int
var flag int
var w int64
var o int64
var bit Bits
var node *Node
var r *Reg
/*
* mark registers used
*/
if a.Type == obj.TYPE_NONE {
goto none
return zbits
}
r = f.Data.(*Reg)
r := f.Data.(*Reg)
r.use1.b[0] |= Thearch.Doregbits(int(a.Index)) // TODO: Use RtoB
var n int
switch a.Type {
default:
regu := Thearch.Doregbits(int(a.Reg)) | Thearch.RtoB(int(a.Reg)) // TODO: Use RtoB
if regu == 0 {
goto none
return zbits
}
bit := zbits
bit.b[0] = regu
@ -227,7 +217,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
setaddrs(bit)
a.Type = obj.TYPE_ADDR
Ostats.Naddr++
goto none
return zbits
memcase:
fallthrough
@ -243,7 +233,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
*/
switch a.Name {
default:
goto none
return zbits
case obj.NAME_EXTERN,
obj.NAME_STATIC,
@ -253,25 +243,27 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
}
}
var node *Node
node, _ = a.Node.(*Node)
if node == nil || node.Op != ONAME || node.Orig == nil {
goto none
return zbits
}
node = node.Orig
if node.Orig != node {
Fatal("%v: bad node", Ctxt.Dconv(a))
}
if node.Sym == nil || node.Sym.Name[0] == '.' {
goto none
return zbits
}
et = int(a.Etype)
o = a.Offset
w = a.Width
et := int(a.Etype)
o := a.Offset
w := a.Width
if w < 0 {
Fatal("bad width %d for %v", w, Ctxt.Dconv(a))
}
flag = 0
flag := 0
var v *Var
for i := 0; i < nvar; i++ {
v = &var_[i:][0]
if v.node == node && int(v.name) == n {
@ -299,7 +291,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
switch et {
case 0,
TFUNC:
goto none
return zbits
}
if nvar >= NVAR {
@ -319,10 +311,10 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
}
}
goto none
return zbits
}
i = nvar
i := nvar
nvar++
v = &var_[i:][0]
v.id = i
@ -341,7 +333,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
node.Opt = v
bit = blsh(uint(i))
bit := blsh(uint(i))
if n == obj.NAME_EXTERN || n == obj.NAME_STATIC {
for z := 0; z < BITS; z++ {
externs.b[z] |= bit.b[z]
@ -401,9 +393,6 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
Ostats.Nvar++
return bit
none:
return zbits
}
func prop(f *Flow, ref Bits, cal Bits) {
View file
@ -533,21 +533,18 @@ func isliteral(n *Node) bool {
func simplename(n *Node) bool {
if n.Op != ONAME {
goto no
return false
}
if n.Addable == 0 {
goto no
return false
}
if n.Class&PHEAP != 0 {
goto no
return false
}
if n.Class == PPARAMREF {
goto no
return false
}
return true
no:
return false
}
func litas(l *Node, r *Node, init **NodeList) {
@ -1191,48 +1188,48 @@ func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
}
func oaslit(n *Node, init **NodeList) bool {
var ctxt int
if n.Left == nil || n.Right == nil {
goto no
// not a special composite literal assignment
return false
}
if n.Left.Type == nil || n.Right.Type == nil {
goto no
// not a special composite literal assignment
return false
}
if !simplename(n.Left) {
goto no
// not a special composite literal assignment
return false
}
if !Eqtype(n.Left.Type, n.Right.Type) {
goto no
// not a special composite literal assignment
return false
}
// context is init() function.
// implies generated data executed
// exactly once and not subject to races.
ctxt = 0
ctxt := 0
// if(n->dodata == 1)
// ctxt = 1;
switch n.Right.Op {
default:
goto no
// not a special composite literal assignment
return false
case OSTRUCTLIT,
OARRAYLIT,
OMAPLIT:
if vmatch1(n.Left, n.Right) {
goto no
// not a special composite literal assignment
return false
}
anylit(ctxt, n.Right, n.Left, init)
}
n.Op = OEMPTY
return true
// not a special composite literal assignment
no:
return false
}
func getlit(lit *Node) int {
@ -1244,7 +1241,7 @@ func getlit(lit *Node) int {
func stataddr(nam *Node, n *Node) bool {
if n == nil {
goto no
return false
}
switch n.Op {
@ -1281,7 +1278,6 @@ func stataddr(nam *Node, n *Node) bool {
return true
}
no:
return false
}
@ -1420,7 +1416,6 @@ func gen_as_init(n *Node) bool {
var nr *Node
var nl *Node
var nam Node
var nod1 Node
if n.Dodata == 0 {
goto no
@ -1436,7 +1431,7 @@ func gen_as_init(n *Node) bool {
if nam.Class != PEXTERN {
goto no
}
goto yes
return true
}
if nr.Type == nil || !Eqtype(nl.Type, nr.Type) {
@ -1466,7 +1461,33 @@ func gen_as_init(n *Node) bool {
case OSLICEARR:
if nr.Right.Op == OKEY && nr.Right.Left == nil && nr.Right.Right == nil {
nr = nr.Left
goto slice
gused(nil) // in case the data is the dest of a goto
nl := nr
if nr == nil || nr.Op != OADDR {
goto no
}
nr = nr.Left
if nr == nil || nr.Op != ONAME {
goto no
}
// nr is the array being converted to a slice
if nr.Type == nil || nr.Type.Etype != TARRAY || nr.Type.Bound < 0 {
goto no
}
nam.Xoffset += int64(Array_array)
gdata(&nam, nl, int(Types[Tptr].Width))
nam.Xoffset += int64(Array_nel) - int64(Array_array)
var nod1 Node
Nodconst(&nod1, Types[TINT], nr.Type.Bound)
gdata(&nam, &nod1, Widthint)
nam.Xoffset += int64(Array_cap) - int64(Array_nel)
gdata(&nam, &nod1, Widthint)
return true
}
goto no
@ -1505,37 +1526,8 @@ func gen_as_init(n *Node) bool {
gdatastring(&nam, nr.Val.U.Sval)
}
yes:
return true
slice:
gused(nil) // in case the data is the dest of a goto
nl = nr
if nr == nil || nr.Op != OADDR {
goto no
}
nr = nr.Left
if nr == nil || nr.Op != ONAME {
goto no
}
// nr is the array being converted to a slice
if nr.Type == nil || nr.Type.Etype != TARRAY || nr.Type.Bound < 0 {
goto no
}
nam.Xoffset += int64(Array_array)
gdata(&nam, nl, int(Types[Tptr].Width))
nam.Xoffset += int64(Array_nel) - int64(Array_array)
Nodconst(&nod1, Types[TINT], nr.Type.Bound)
gdata(&nam, &nod1, Widthint)
nam.Xoffset += int64(Array_cap) - int64(Array_nel)
gdata(&nam, &nod1, Widthint)
goto yes
no:
if n.Dodata == 2 {
Dump("\ngen_as_init", n)
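The slice branch above writes three words at the Array_array, Array_nel and Array_cap offsets: the backing-array pointer, the length and the capacity. In source terms it covers package-level initializers like the one below, which can be laid out entirely in the data segment:

package main

import "fmt"

var backing = [3]int{1, 2, 3}
var s = backing[:] // the kind of slice-of-global-array assignment handled statically

func main() {
    fmt.Println(len(s), cap(s), &s[0] == &backing[0]) // 3 3 true
}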
View file
@ -1014,14 +1014,14 @@ func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) bool {
Fatal("struct/interface missing field: %v %v", Tconv(t1, 0), Tconv(t2, 0))
}
if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !eqtype1(t1.Type, t2.Type, &l) || !eqnote(t1.Note, t2.Note) {
goto no
return false
}
}
if t1 == nil && t2 == nil {
goto yes
return true
}
goto no
return false
// Loop over structs: receiver, in, out.
case TFUNC:
@ -1043,40 +1043,34 @@ func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) bool {
Fatal("func struct missing field: %v %v", Tconv(ta, 0), Tconv(tb, 0))
}
if ta.Isddd != tb.Isddd || !eqtype1(ta.Type, tb.Type, &l) {
goto no
return false
}
}
if ta != nil || tb != nil {
goto no
return false
}
}
if t1 == nil && t2 == nil {
goto yes
return true
}
goto no
return false
case TARRAY:
if t1.Bound != t2.Bound {
goto no
return false
}
case TCHAN:
if t1.Chan != t2.Chan {
goto no
return false
}
}
if eqtype1(t1.Down, t2.Down, &l) && eqtype1(t1.Type, t2.Type, &l) {
goto yes
return true
}
goto no
yes:
return true
no:
return false
}
@ -1376,10 +1370,8 @@ func assignconv(n *Node, t *Type, context string) *Node {
}
func subtype(stp **Type, t *Type, d int) bool {
var st *Type
loop:
st = *stp
st := *stp
if st == nil {
return false
}
@ -1762,7 +1754,7 @@ func Structfirst(s *Iter, nn **Type) *Type {
t = n.Type
if t == nil {
goto rnil
return nil
}
if t.Etype != TFIELD {
@ -1775,7 +1767,6 @@ func Structfirst(s *Iter, nn **Type) *Type {
bad:
Fatal("structfirst: not struct %v", Tconv(n, 0))
rnil:
return nil
}
@ -1783,21 +1774,17 @@ func structnext(s *Iter) *Type {
n := s.T
t := n.Down
if t == nil {
goto rnil
return nil
}
if t.Etype != TFIELD {
goto bad
Fatal("structnext: not struct %v", Tconv(n, 0))
return nil
}
s.T = t
return t
bad:
Fatal("structnext: not struct %v", Tconv(n, 0))
rnil:
return nil
}
/*
@ -2135,54 +2122,47 @@ out:
// will give shortest unique addressing.
// modify the tree with missing type names.
func adddot(n *Node) *Node {
var s *Sym
var c int
var d int
typecheck(&n.Left, Etype|Erv)
n.Diag |= n.Left.Diag
t := n.Left.Type
if t == nil {
goto ret
}
if n.Left.Op == OTYPE {
goto ret
}
if n.Right.Op != ONAME {
goto ret
}
s = n.Right.Sym
if s == nil {
goto ret
}
for d = 0; d < len(dotlist); d++ {
c = adddot1(s, t, d, nil, 0)
if c > 0 {
goto out
}
}
goto ret
out:
if c > 1 {
Yyerror("ambiguous selector %v", Nconv(n, 0))
n.Left = nil
return n
}
// rebuild elided dots
for c := d - 1; c >= 0; c-- {
if n.Left.Type != nil && Isptr[n.Left.Type.Etype] != 0 {
n.Left.Implicit = 1
}
n.Left = Nod(ODOT, n.Left, newname(dotlist[c].field.Sym))
if n.Left.Op == OTYPE {
return n
}
if n.Right.Op != ONAME {
return n
}
s := n.Right.Sym
if s == nil {
return n
}
var c int
for d := 0; d < len(dotlist); d++ {
c = adddot1(s, t, d, nil, 0)
if c > 0 {
if c > 1 {
Yyerror("ambiguous selector %v", Nconv(n, 0))
n.Left = nil
return n
}
// rebuild elided dots
for c := d - 1; c >= 0; c-- {
if n.Left.Type != nil && Isptr[n.Left.Type.Etype] != 0 {
n.Left.Implicit = 1
}
n.Left = Nod(ODOT, n.Left, newname(dotlist[c].field.Sym))
}
return n
}
}
ret:
return n
}
@ -3301,18 +3281,15 @@ func structcount(t *Type) int {
* 1000+ if it is a -(power of 2)
*/
func powtwo(n *Node) int {
var v uint64
var b uint64
if n == nil || n.Op != OLITERAL || n.Type == nil {
goto no
return -1
}
if Isint[n.Type.Etype] == 0 {
goto no
return -1
}
v = uint64(Mpgetfix(n.Val.U.Xval))
b = 1
v := uint64(Mpgetfix(n.Val.U.Xval))
b := uint64(1)
for i := 0; i < 64; i++ {
if b == v {
return i
@ -3321,7 +3298,7 @@ func powtwo(n *Node) int {
}
if Issigned[n.Type.Etype] == 0 {
goto no
return -1
}
v = -v
@ -3333,7 +3310,6 @@ func powtwo(n *Node) int {
b = b << 1
}
no:
return -1
}
@ -3592,22 +3568,19 @@ func pathtoprefix(s string) string {
for i := 0; i < len(s); i++ {
c := s[i]
if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
goto escape
var buf bytes.Buffer
for i := 0; i < len(s); i++ {
c := s[i]
if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
fmt.Fprintf(&buf, "%%%02x", c)
continue
}
buf.WriteByte(c)
}
return buf.String()
}
}
return s
escape:
var buf bytes.Buffer
for i := 0; i < len(s); i++ {
c := s[i]
if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
fmt.Fprintf(&buf, "%%%02x", c)
continue
}
buf.WriteByte(c)
}
return buf.String()
}
func mkpkg(path_ *Strlit) *Pkg {
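powtwo above walks 64 candidate bits to report i when the constant is 2**i (and, per its comment, 1000+ for a negative power of two). For the positive case the same test can be written with math/bits; this is a sketch, not the compiler's Node-based version:

package main

import (
    "fmt"
    "math/bits"
)

// Report i such that v == 1<<i, or -1 if v is not a power of two.
func powTwo(v uint64) int {
    if v != 0 && v&(v-1) == 0 {
        return bits.TrailingZeros64(v)
    }
    return -1
}

func main() {
    fmt.Println(powTwo(1), powTwo(64), powTwo(12)) // 0 6 -1
}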
View file
@ -1805,7 +1805,25 @@ reswitch:
arith:
if op == OLSH || op == ORSH {
goto shift
defaultlit(&r, Types[TUINT])
n.Right = r
t := r.Type
if Isint[t.Etype] == 0 || Issigned[t.Etype] != 0 {
Yyerror("invalid operation: %v (shift count type %v, must be unsigned integer)", Nconv(n, 0), Tconv(r.Type, 0))
goto error
}
t = l.Type
if t != nil && t.Etype != TIDEAL && Isint[t.Etype] == 0 {
Yyerror("invalid operation: %v (shift of type %v)", Nconv(n, 0), Tconv(t, 0))
goto error
}
// no defaultlit for left
// the outer context gives the type
n.Type = l.Type
goto ret
}
// ideal mixed with non-ideal
@ -1993,27 +2011,6 @@ arith:
n.Type = t
goto ret
shift:
defaultlit(&r, Types[TUINT])
n.Right = r
t = r.Type
if Isint[t.Etype] == 0 || Issigned[t.Etype] != 0 {
Yyerror("invalid operation: %v (shift count type %v, must be unsigned integer)", Nconv(n, 0), Tconv(r.Type, 0))
goto error
}
t = l.Type
if t != nil && t.Etype != TIDEAL && Isint[t.Etype] == 0 {
Yyerror("invalid operation: %v (shift of type %v)", Nconv(n, 0), Tconv(t, 0))
goto error
}
// no defaultlit for left
// the outer context gives the type
n.Type = l.Type
goto ret
doconv:
ok |= Erv
saveorignode(n)
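The shift branch above requires an unsigned shift count and deliberately skips defaultlit on the left operand, because the result takes the left operand's type. That typing rule is visible directly in ordinary Go source:

package main

import "fmt"

func main() {
    var x int8 = 1
    var s uint = 3
    y := x << s
    fmt.Printf("%T %v\n", y, y) // int8 8: the shift result has the left operand's type
}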
View file
@ -14,31 +14,28 @@ import "cmd/internal/obj"
* rewrite with a constant
*/
func unsafenmagic(nn *Node) *Node {
var r *Node
var s *Sym
var v int64
fn := nn.Left
args := nn.List
if safemode != 0 || fn == nil || fn.Op != ONAME {
goto no
return nil
}
s = fn.Sym
s := fn.Sym
if s == nil {
goto no
return nil
}
if s.Pkg != unsafepkg {
goto no
return nil
}
if args == nil {
Yyerror("missing argument for %v", Sconv(s, 0))
goto no
return nil
}
r = args.N
r := args.N
var v int64
if s.Name == "Sizeof" {
typecheck(&r, Erv)
defaultlit(&r, nil)
@ -127,7 +124,6 @@ func unsafenmagic(nn *Node) *Node {
goto yes
}
no:
return nil
bad:
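unsafenmagic above folds calls such as unsafe.Sizeof, unsafe.Offsetof and unsafe.Alignof into constant nodes; at the language level that is what lets them initialize constants, for example:

package main

import (
    "fmt"
    "unsafe"
)

// unsafe.Sizeof is evaluated at compile time, so it can initialize a constant.
const ptrSize = unsafe.Sizeof(uintptr(0))

func main() {
    fmt.Println(ptrSize) // 8 on 64-bit platforms
}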
View file
@ -39,17 +39,14 @@ func isdigit(c int) bool {
func plan9quote(s string) string {
if s == "" {
goto needquote
return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
for i := 0; i < len(s); i++ {
if s[i] <= ' ' || s[i] == '\'' {
goto needquote
return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
}
return s
needquote:
return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
// simulation of int(*s++) in C
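plan9quote above returns the string unchanged unless it is empty or contains a single quote or a byte <= ' '; otherwise it wraps the string in single quotes and doubles embedded quotes. A standalone sketch of the same rule with sample output:

package main

import (
    "fmt"
    "strings"
)

// Quote for an rc-style shell: wrap in single quotes and double any
// embedded single quote, but only when quoting is actually needed.
func quote(s string) string {
    need := s == ""
    for i := 0; i < len(s); i++ {
        if s[i] <= ' ' || s[i] == '\'' {
            need = true
            break
        }
    }
    if !need {
        return s
    }
    return "'" + strings.Replace(s, "'", "''", -1) + "'"
}

func main() {
    fmt.Println(quote("plain"))     // plain
    fmt.Println(quote("it's here")) // 'it''s here'
}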
View file
@ -3653,7 +3653,23 @@ func walkrotate(np **Node) {
if sl >= 0 {
sr := int(Mpgetfix(r.Right.Val.U.Xval))
if sr >= 0 && sl+sr == w {
goto yes
// Rewrite left shift half to left rotate.
if l.Op == OLSH {
n = l
} else {
n = r
}
n.Op = OLROT
// Remove rotate 0 and rotate w.
s := int(Mpgetfix(n.Right.Val.U.Xval))
if s == 0 || s == w {
n = n.Left
}
*np = n
return
}
}
return
@ -3661,25 +3677,6 @@ func walkrotate(np **Node) {
// TODO: Could allow s and 32-s if s is bounded (maybe s&31 and 32-s&31).
return
// Rewrite left shift half to left rotate.
yes:
if l.Op == OLSH {
n = l
} else {
n = r
}
n.Op = OLROT
// Remove rotate 0 and rotate w.
s := int(Mpgetfix(n.Right.Val.U.Xval))
if s == 0 || s == w {
n = n.Left
}
*np = n
return
}
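The rewrite above turns an OLSH/ORSH pair whose shift counts sum to the operand width into a single OLROT, relying on x<<s | x>>(w-s) being a left rotation of an unsigned x. A quick check against math/bits:

package main

import (
    "fmt"
    "math/bits"
)

// Rotate left by composing the two shifts the compiler pattern-matches on.
func rotl32(x uint32, s uint) uint32 {
    return x<<(s&31) | x>>((32-s)&31)
}

func main() {
    x := uint32(0x80000001)
    fmt.Println(rotl32(x, 1) == bits.RotateLeft32(x, 1)) // true
    fmt.Println(rotl32(x, 0) == x)                       // true
}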
/*
@ -3793,11 +3790,124 @@ func walkdiv(np **Node, init **NodeList) {
return
}
var n1 *Node
var m Magic
var n2 *Node
if pow < 0 {
goto divbymul
// try to do division by multiply by (2^w)/d
// see hacker's delight chapter 10
// TODO: support 64-bit magic multiply here.
var m Magic
m.W = w
if Issigned[nl.Type.Etype] != 0 {
m.Sd = Mpgetfix(nr.Val.U.Xval)
Smagic(&m)
} else {
m.Ud = uint64(Mpgetfix(nr.Val.U.Xval))
Umagic(&m)
}
if m.Bad != 0 {
return
}
// We have a quick division method so use it
// for modulo too.
if n.Op == OMOD {
// rewrite as A%B = A - (A/B*B).
n1 := Nod(ODIV, nl, nr)
n2 := Nod(OMUL, n1, nr)
n = Nod(OSUB, nl, n2)
goto ret
}
switch Simtype[nl.Type.Etype] {
default:
return
// n1 = nl * magic >> w (HMUL)
case TUINT8,
TUINT16,
TUINT32:
nc := Nod(OXXX, nil, nil)
Nodconst(nc, nl.Type, int64(m.Um))
n1 := Nod(OMUL, nl, nc)
typecheck(&n1, Erv)
n1.Op = OHMUL
if m.Ua != 0 {
// Select a Go type with (at least) twice the width.
var twide *Type
switch Simtype[nl.Type.Etype] {
default:
return
case TUINT8,
TUINT16:
twide = Types[TUINT32]
case TUINT32:
twide = Types[TUINT64]
case TINT8,
TINT16:
twide = Types[TINT32]
case TINT32:
twide = Types[TINT64]
}
// add numerator (might overflow).
// n2 = (n1 + nl)
n2 := Nod(OADD, conv(n1, twide), conv(nl, twide))
// shift by m.s
nc := Nod(OXXX, nil, nil)
Nodconst(nc, Types[TUINT], int64(m.S))
n = conv(Nod(ORSH, n2, nc), nl.Type)
} else {
// n = n1 >> m.s
nc := Nod(OXXX, nil, nil)
Nodconst(nc, Types[TUINT], int64(m.S))
n = Nod(ORSH, n1, nc)
}
// n1 = nl * magic >> w
case TINT8,
TINT16,
TINT32:
nc := Nod(OXXX, nil, nil)
Nodconst(nc, nl.Type, m.Sm)
n1 := Nod(OMUL, nl, nc)
typecheck(&n1, Erv)
n1.Op = OHMUL
if m.Sm < 0 {
// add the numerator.
n1 = Nod(OADD, n1, nl)
}
// shift by m.s
nc = Nod(OXXX, nil, nil)
Nodconst(nc, Types[TUINT], int64(m.S))
n2 := conv(Nod(ORSH, n1, nc), nl.Type)
// add 1 iff n1 is negative.
nc = Nod(OXXX, nil, nil)
Nodconst(nc, Types[TUINT], int64(w)-1)
n3 := Nod(ORSH, nl, nc) // n4 = -1 iff n1 is negative.
n = Nod(OSUB, n2, n3)
// apply sign.
if m.Sd < 0 {
n = Nod(OMINUS, n, nil)
}
}
goto ret
}
switch pow {
@ -3905,127 +4015,6 @@ func walkdiv(np **Node, init **NodeList) {
goto ret
// try to do division by multiply by (2^w)/d
// see hacker's delight chapter 10
// TODO: support 64-bit magic multiply here.
divbymul:
m.W = w
if Issigned[nl.Type.Etype] != 0 {
m.Sd = Mpgetfix(nr.Val.U.Xval)
Smagic(&m)
} else {
m.Ud = uint64(Mpgetfix(nr.Val.U.Xval))
Umagic(&m)
}
if m.Bad != 0 {
return
}
// We have a quick division method so use it
// for modulo too.
if n.Op == OMOD {
goto longmod
}
switch Simtype[nl.Type.Etype] {
default:
return
// n1 = nl * magic >> w (HMUL)
case TUINT8,
TUINT16,
TUINT32:
nc := Nod(OXXX, nil, nil)
Nodconst(nc, nl.Type, int64(m.Um))
n1 := Nod(OMUL, nl, nc)
typecheck(&n1, Erv)
n1.Op = OHMUL
if m.Ua != 0 {
// Select a Go type with (at least) twice the width.
var twide *Type
switch Simtype[nl.Type.Etype] {
default:
return
case TUINT8,
TUINT16:
twide = Types[TUINT32]
case TUINT32:
twide = Types[TUINT64]
case TINT8,
TINT16:
twide = Types[TINT32]
case TINT32:
twide = Types[TINT64]
}
// add numerator (might overflow).
// n2 = (n1 + nl)
n2 := Nod(OADD, conv(n1, twide), conv(nl, twide))
// shift by m.s
nc := Nod(OXXX, nil, nil)
Nodconst(nc, Types[TUINT], int64(m.S))
n = conv(Nod(ORSH, n2, nc), nl.Type)
} else {
// n = n1 >> m.s
nc := Nod(OXXX, nil, nil)
Nodconst(nc, Types[TUINT], int64(m.S))
n = Nod(ORSH, n1, nc)
}
// n1 = nl * magic >> w
case TINT8,
TINT16,
TINT32:
nc := Nod(OXXX, nil, nil)
Nodconst(nc, nl.Type, m.Sm)
n1 := Nod(OMUL, nl, nc)
typecheck(&n1, Erv)
n1.Op = OHMUL
if m.Sm < 0 {
// add the numerator.
n1 = Nod(OADD, n1, nl)
}
// shift by m.s
nc = Nod(OXXX, nil, nil)
Nodconst(nc, Types[TUINT], int64(m.S))
n2 := conv(Nod(ORSH, n1, nc), nl.Type)
// add 1 iff n1 is negative.
nc = Nod(OXXX, nil, nil)
Nodconst(nc, Types[TUINT], int64(w)-1)
n3 := Nod(ORSH, nl, nc) // n4 = -1 iff n1 is negative.
n = Nod(OSUB, n2, n3)
// apply sign.
if m.Sd < 0 {
n = Nod(OMINUS, n, nil)
}
}
goto ret
// rewrite as A%B = A - (A/B*B).
longmod:
n1 = Nod(ODIV, nl, nr)
n2 = Nod(OMUL, n1, nr)
n = Nod(OSUB, nl, n2)
goto ret
ret:
typecheck(&n, Erv)
walkexpr(&n, init)
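walkdiv above replaces division by a constant with a high multiply by a precomputed magic number plus shifts (Hacker's Delight, chapter 10) and rewrites A%B as A - (A/B)*B so modulo takes the same fast path. A small sketch using the commonly cited magic constant for an unsigned 32-bit divide by 3 (this particular constant and shift are an assumption for illustration, not taken from the compiler):

package main

import "fmt"

// 0xAAAAAAAB is ceil(2^33/3), so (x*m)>>33 equals x/3 for every uint32 x.
func div3(x uint32) uint32 {
    return uint32((uint64(x) * 0xAAAAAAAB) >> 33)
}

func mod3(x uint32) uint32 {
    return x - div3(x)*3 // the A - (A/B)*B rewrite
}

func main() {
    for _, x := range []uint32{0, 1, 2, 3, 100, 4294967295} {
        fmt.Println(x, div3(x) == x/3, mod3(x) == x%3)
    }
}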
View file
@ -81,9 +81,7 @@ func setuintxx(ctxt *Link, s *LSym, off int64, v uint64, wid int64) int64 {
}
func adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
var off int64
off = s.Size
off := s.Size
setuintxx(ctxt, s, off, v, int64(wid))
return off
}
@ -113,17 +111,14 @@ func setuint32(ctxt *Link, s *LSym, r int64, v uint32) int64 {
}
func Addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
var i int64
var r *Reloc
if s.Type == 0 {
s.Type = SDATA
}
s.Reachable = true
i = s.Size
i := s.Size
s.Size += int64(ctxt.Arch.Ptrsize)
Symgrow(ctxt, s, s.Size)
r = Addrel(s)
r := Addrel(s)
r.Sym = t
r.Off = int32(i)
r.Siz = uint8(ctxt.Arch.Ptrsize)
@ -133,17 +128,14 @@ func Addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
}
func Addpcrelplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
var i int64
var r *Reloc
if s.Type == 0 {
s.Type = SDATA
}
s.Reachable = true
i = s.Size
i := s.Size
s.Size += 4
Symgrow(ctxt, s, s.Size)
r = Addrel(s)
r := Addrel(s)
r.Sym = t
r.Off = int32(i)
r.Add = add
@ -157,8 +149,6 @@ func Addaddr(ctxt *Link, s *LSym, t *LSym) int64 {
}
func setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
var r *Reloc
if s.Type == 0 {
s.Type = SDATA
}
@ -168,7 +158,7 @@ func setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
Symgrow(ctxt, s, s.Size)
}
r = Addrel(s)
r := Addrel(s)
r.Sym = t
r.Off = int32(off)
r.Siz = uint8(ctxt.Arch.Ptrsize)
@ -182,17 +172,14 @@ func setaddr(ctxt *Link, s *LSym, off int64, t *LSym) int64 {
}
func addsize(ctxt *Link, s *LSym, t *LSym) int64 {
var i int64
var r *Reloc
if s.Type == 0 {
s.Type = SDATA
}
s.Reachable = true
i = s.Size
i := s.Size
s.Size += int64(ctxt.Arch.Ptrsize)
Symgrow(ctxt, s, s.Size)
r = Addrel(s)
r := Addrel(s)
r.Sym = t
r.Off = int32(i)
r.Siz = uint8(ctxt.Arch.Ptrsize)
@ -201,17 +188,14 @@ func addsize(ctxt *Link, s *LSym, t *LSym) int64 {
}
func addaddrplus4(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
var i int64
var r *Reloc
if s.Type == 0 {
s.Type = SDATA
}
s.Reachable = true
i = s.Size
i := s.Size
s.Size += 4
Symgrow(ctxt, s, s.Size)
r = Addrel(s)
r := Addrel(s)
r.Sym = t
r.Off = int32(i)
r.Siz = 4
@ -253,16 +237,12 @@ func listsubp(s *LSym) **LSym {
}
func listsort(l *LSym, cmp func(*LSym, *LSym) int, nextp func(*LSym) **LSym) *LSym {
var l1 *LSym
var l2 *LSym
var le *LSym
if l == nil || *nextp(l) == nil {
return l
}
l1 = l
l2 = l
l1 := l
l2 := l
for {
l2 = *nextp(l2)
if l2 == nil {
@ -289,7 +269,7 @@ func listsort(l *LSym, cmp func(*LSym, *LSym) int, nextp func(*LSym) **LSym) *LS
l2 = *nextp(l2)
}
le = l
le := l
for {
if l1 == nil {
@ -332,14 +312,13 @@ func relocsym(s *LSym) {
var r *Reloc
var rs *LSym
var i16 int16
var ri int32
var off int32
var siz int32
var fl int32
var o int64
Ctxt.Cursym = s
for ri = 0; ri < int32(len(s.R)); ri++ {
for ri := int32(0); ri < int32(len(s.R)); ri++ {
r = &s.R[ri]
r.Done = 1
off = r.Off
@ -591,34 +570,28 @@ func relocsym(s *LSym) {
}
func reloc() {
var s *LSym
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "%5.2f reloc\n", obj.Cputime())
}
Bflush(&Bso)
for s = Ctxt.Textp; s != nil; s = s.Next {
for s := Ctxt.Textp; s != nil; s = s.Next {
relocsym(s)
}
for s = datap; s != nil; s = s.Next {
for s := datap; s != nil; s = s.Next {
relocsym(s)
}
}
func dynrelocsym(s *LSym) {
var ri int
var r *Reloc
if HEADTYPE == Hwindows {
var rel *LSym
var targ *LSym
rel = Linklookup(Ctxt, ".rel", 0)
rel := Linklookup(Ctxt, ".rel", 0)
if s == rel {
return
}
for ri = 0; ri < len(s.R); ri++ {
var r *Reloc
var targ *LSym
for ri := 0; ri < len(s.R); ri++ {
r = &s.R[ri]
targ = r.Sym
if targ == nil {
@ -655,7 +628,8 @@ func dynrelocsym(s *LSym) {
return
}
for ri = 0; ri < len(s.R); ri++ {
var r *Reloc
for ri := 0; ri < len(s.R); ri++ {
r = &s.R[ri]
if r.Sym != nil && r.Sym.Type == SDYNIMPORT || r.Type >= 256 {
if r.Sym != nil && !r.Sym.Reachable {
@ -667,8 +641,6 @@ func dynrelocsym(s *LSym) {
}
func dynreloc() {
var s *LSym
// -d suppresses dynamic loader format, so we may as well not
// compute these sections or mark their symbols as reachable.
if Debug['d'] != 0 && HEADTYPE != Hwindows {
@ -679,10 +651,10 @@ func dynreloc() {
}
Bflush(&Bso)
for s = Ctxt.Textp; s != nil; s = s.Next {
for s := Ctxt.Textp; s != nil; s = s.Next {
dynrelocsym(s)
}
for s = datap; s != nil; s = s.Next {
for s := datap; s != nil; s = s.Next {
dynrelocsym(s)
}
if Iself {
@ -692,9 +664,6 @@ func dynreloc() {
func blk(start *LSym, addr int64, size int64) {
var sym *LSym
var eaddr int64
var p []byte
var ep []byte
for sym = start; sym != nil; sym = sym.Next {
if sym.Type&SSUB == 0 && sym.Value >= addr {
@ -702,7 +671,9 @@ func blk(start *LSym, addr int64, size int64) {
}
}
eaddr = addr + size
eaddr := addr + size
var ep []byte
var p []byte
for ; sym != nil; sym = sym.Next {
if sym.Type&SSUB != 0 {
continue
@ -746,11 +717,6 @@ func blk(start *LSym, addr int64, size int64) {
}
func Codeblk(addr int64, size int64) {
var sym *LSym
var eaddr int64
var n int64
var q []byte
if Debug['a'] != 0 {
fmt.Fprintf(&Bso, "codeblk [%#x,%#x) at offset %#x\n", addr, addr+size, Cpos())
}
@ -762,6 +728,7 @@ func Codeblk(addr int64, size int64) {
return
}
var sym *LSym
for sym = Ctxt.Textp; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
@ -771,7 +738,9 @@ func Codeblk(addr int64, size int64) {
}
}
eaddr = addr + size
eaddr := addr + size
var n int64
var q []byte
for ; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
@ -816,15 +785,6 @@ func Codeblk(addr int64, size int64) {
}
func Datblk(addr int64, size int64) {
var sym *LSym
var i int64
var eaddr int64
var p []byte
var ep []byte
var typ string
var rsname string
var r *Reloc
if Debug['a'] != 0 {
fmt.Fprintf(&Bso, "datblk [%#x,%#x) at offset %#x\n", addr, addr+size, Cpos())
}
@ -836,13 +796,20 @@ func Datblk(addr int64, size int64) {
return
}
var sym *LSym
for sym = datap; sym != nil; sym = sym.Next {
if sym.Value >= addr {
break
}
}
eaddr = addr + size
eaddr := addr + size
var ep []byte
var i int64
var p []byte
var r *Reloc
var rsname string
var typ string
for ; sym != nil; sym = sym.Next {
if sym.Value >= eaddr {
break
@ -922,21 +889,16 @@ func addstrdata1(arg string) {
}
func addstrdata(name string, value string) {
var s *LSym
var sp *LSym
var p string
var reachable bool
p = fmt.Sprintf("%s.str", name)
sp = Linklookup(Ctxt, p, 0)
p := fmt.Sprintf("%s.str", name)
sp := Linklookup(Ctxt, p, 0)
Addstring(sp, value)
sp.Type = SRODATA
s = Linklookup(Ctxt, name, 0)
s := Linklookup(Ctxt, name, 0)
s.Size = 0
s.Dupok = 1
reachable = s.Reachable
reachable := s.Reachable
Addaddr(Ctxt, s, sp)
adduintxx(Ctxt, s, uint64(len(value)), Thearch.Ptrsize)
@ -949,15 +911,12 @@ func addstrdata(name string, value string) {
}
func Addstring(s *LSym, str string) int64 {
var n int
var r int32
if s.Type == 0 {
s.Type = SNOPTRDATA
}
s.Reachable = true
r = int32(s.Size)
n = len(str) + 1
r := int32(s.Size)
n := len(str) + 1
if s.Name == ".shstrtab" {
elfsetstring(str, int(r))
}
@ -969,9 +928,7 @@ func Addstring(s *LSym, str string) int64 {
}
func dosymtype() {
var s *LSym
for s = Ctxt.Allsym; s != nil; s = s.Allsym {
for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if len(s.P) > 0 {
if s.Type == SBSS {
s.Type = SDATA
@ -984,13 +941,11 @@ func dosymtype() {
}
func symalign(s *LSym) int32 {
var align int32
if s.Align != 0 {
return s.Align
}
align = int32(Thearch.Maxalign)
align := int32(Thearch.Maxalign)
for int64(align) > s.Size && align > 1 {
align >>= 1
}
@ -1008,9 +963,8 @@ func aligndatsize(datsize int64, s *LSym) int64 {
// the list of symbols s; the list stops when s->type exceeds type.
func maxalign(s *LSym, type_ int) int32 {
var align int32
var max int32
max = 0
max := int32(0)
for ; s != nil && int(s.Type) <= type_; s = s.Next {
align = symalign(s)
if max < align {
@ -1042,16 +996,13 @@ func proggenemit(g *ProgGen, v uint8) {
// Writes insData block from g->data.
func proggendataflush(g *ProgGen) {
var i int32
var s int32
if g.datasize == 0 {
return
}
proggenemit(g, obj.InsData)
proggenemit(g, uint8(g.datasize))
s = (g.datasize + obj.PointersPerByte - 1) / obj.PointersPerByte
for i = 0; i < s; i++ {
s := (g.datasize + obj.PointersPerByte - 1) / obj.PointersPerByte
for i := int32(0); i < s; i++ {
proggenemit(g, g.data[i])
}
g.datasize = 0
@ -1068,9 +1019,7 @@ func proggendata(g *ProgGen, d uint8) {
// Skip v bytes due to alignment, etc.
func proggenskip(g *ProgGen, off int64, v int64) {
var i int64
for i = off; i < off+v; i++ {
for i := off; i < off+v; i++ {
if (i % int64(Thearch.Ptrsize)) == 0 {
proggendata(g, obj.BitsScalar)
}
@ -1101,11 +1050,6 @@ func proggenfini(g *ProgGen, size int64) {
// This function generates GC pointer info for global variables.
func proggenaddsym(g *ProgGen, s *LSym) {
var gcprog *LSym
var mask []byte
var i int64
var size int64
if s.Size == 0 {
return
}
@ -1128,10 +1072,10 @@ func proggenaddsym(g *ProgGen, s *LSym) {
if (s.Size%int64(Thearch.Ptrsize) != 0) || (g.pos%int64(Thearch.Ptrsize) != 0) {
Diag("proggenaddsym: unaligned conservative symbol %s: size=%d pos=%d", s.Name, s.Size, g.pos)
}
size = (s.Size + int64(Thearch.Ptrsize) - 1) / int64(Thearch.Ptrsize) * int64(Thearch.Ptrsize)
size := (s.Size + int64(Thearch.Ptrsize) - 1) / int64(Thearch.Ptrsize) * int64(Thearch.Ptrsize)
if size < int64(32*Thearch.Ptrsize) {
// Emit small symbols as data.
for i = 0; i < size/int64(Thearch.Ptrsize); i++ {
for i := int64(0); i < size/int64(Thearch.Ptrsize); i++ {
proggendata(g, obj.BitsPointer)
}
} else {
@ -1148,7 +1092,7 @@ func proggenaddsym(g *ProgGen, s *LSym) {
if s.Size < int64(32*Thearch.Ptrsize) {
// Emit small symbols as data.
// This case also handles unaligned and tiny symbols, so tread carefully.
for i = s.Value; i < s.Value+s.Size; i++ {
for i := s.Value; i < s.Value+s.Size; i++ {
if (i % int64(Thearch.Ptrsize)) == 0 {
proggendata(g, obj.BitsScalar)
}
@ -1168,24 +1112,24 @@ func proggenaddsym(g *ProgGen, s *LSym) {
// gc program, copy directly
proggendataflush(g)
gcprog = decodetype_gcprog(s.Gotype)
size = decodetype_size(s.Gotype)
gcprog := decodetype_gcprog(s.Gotype)
size := decodetype_size(s.Gotype)
if (size%int64(Thearch.Ptrsize) != 0) || (g.pos%int64(Thearch.Ptrsize) != 0) {
Diag("proggenaddsym: unaligned gcprog symbol %s: size=%d pos=%d", s.Name, s.Size, g.pos)
}
for i = 0; i < int64(len(gcprog.P)-1); i++ {
for i := int64(0); i < int64(len(gcprog.P)-1); i++ {
proggenemit(g, uint8(gcprog.P[i]))
}
g.pos = s.Value + size
} else {
// gc mask, it's small so emit as data
mask = decodetype_gcmask(s.Gotype)
mask := decodetype_gcmask(s.Gotype)
size = decodetype_size(s.Gotype)
size := decodetype_size(s.Gotype)
if (size%int64(Thearch.Ptrsize) != 0) || (g.pos%int64(Thearch.Ptrsize) != 0) {
Diag("proggenaddsym: unaligned gcmask symbol %s: size=%d pos=%d", s.Name, s.Size, g.pos)
}
for i = 0; i < size; i += int64(Thearch.Ptrsize) {
for i := int64(0); i < size; i += int64(Thearch.Ptrsize) {
proggendata(g, uint8((mask[i/int64(Thearch.Ptrsize)/2]>>uint64((i/int64(Thearch.Ptrsize)%2)*4+2))&obj.BitsMask))
}
g.pos = s.Value + size
@ -1193,9 +1137,7 @@ func proggenaddsym(g *ProgGen, s *LSym) {
}
func growdatsize(datsizep *int64, s *LSym) {
var datsize int64
datsize = *datsizep
datsize := *datsizep
if s.Size < 0 {
Diag("negative size (datsize = %d, s->size = %d)", datsize, s.Size)
}
@ -1206,27 +1148,15 @@ func growdatsize(datsizep *int64, s *LSym) {
}
func dodata() {
var n int32
var datsize int64
var sect *Section
var segro *Segment
var s *LSym
var last *LSym
var l **LSym
var toc *LSym
var gcdata *LSym
var gcbss *LSym
var gen ProgGen
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "%5.2f dodata\n", obj.Cputime())
}
Bflush(&Bso)
last = nil
last := (*LSym)(nil)
datap = nil
for s = Ctxt.Allsym; s != nil; s = s.Allsym {
for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if !s.Reachable || s.Special != 0 {
continue
}
@ -1245,7 +1175,7 @@ func dodata() {
}
}
for s = datap; s != nil; s = s.Next {
for s := datap; s != nil; s = s.Next {
if int64(len(s.P)) > s.Size {
Diag("%s: initialize bounds (%d < %d)", s.Name, int64(s.Size), len(s.P))
}
@ -1265,6 +1195,8 @@ func dodata() {
dynreloc()
/* some symbols may no longer belong in datap (Mach-O) */
var l **LSym
var s *LSym
for l = &datap; ; {
s = *l
if s == nil {
@ -1299,8 +1231,9 @@ func dodata() {
}
/* writable ELF sections */
datsize = 0
datsize := int64(0)
var sect *Section
for ; s != nil && s.Type < SELFGOT; s = s.Next {
sect = addsection(&Segdata, s.Name, 06)
sect.Align = symalign(s)
@ -1315,10 +1248,11 @@ func dodata() {
/* .got (and .toc on ppc64) */
if s.Type == SELFGOT {
sect = addsection(&Segdata, ".got", 06)
sect := addsection(&Segdata, ".got", 06)
sect.Align = maxalign(s, SELFGOT)
datsize = Rnd(datsize, int64(sect.Align))
sect.Vaddr = uint64(datsize)
var toc *LSym
for ; s != nil && s.Type == SELFGOT; s = s.Next {
datsize = aligndatsize(datsize, s)
s.Sect = sect
@ -1363,7 +1297,7 @@ func dodata() {
/* shared library initializer */
if Flag_shared != 0 {
sect = addsection(&Segdata, ".init_array", 06)
sect := addsection(&Segdata, ".init_array", 06)
sect.Align = maxalign(s, SINITARR)
datsize = Rnd(datsize, int64(sect.Align))
sect.Vaddr = uint64(datsize)
@ -1385,7 +1319,8 @@ func dodata() {
sect.Vaddr = uint64(datsize)
Linklookup(Ctxt, "runtime.data", 0).Sect = sect
Linklookup(Ctxt, "runtime.edata", 0).Sect = sect
gcdata = Linklookup(Ctxt, "runtime.gcdata", 0)
gcdata := Linklookup(Ctxt, "runtime.gcdata", 0)
var gen ProgGen
proggeninit(&gen, gcdata)
for ; s != nil && s.Type < SBSS; s = s.Next {
if s.Type == SINITARR {
@ -1412,7 +1347,7 @@ func dodata() {
sect.Vaddr = uint64(datsize)
Linklookup(Ctxt, "runtime.bss", 0).Sect = sect
Linklookup(Ctxt, "runtime.ebss", 0).Sect = sect
gcbss = Linklookup(Ctxt, "runtime.gcbss", 0)
gcbss := Linklookup(Ctxt, "runtime.gcbss", 0)
proggeninit(&gen, gcbss)
for ; s != nil && s.Type < SNOPTRBSS; s = s.Next {
s.Sect = sect
@ -1449,7 +1384,7 @@ func dodata() {
}
if Iself && Linkmode == LinkExternal && s != nil && s.Type == STLSBSS && HEADTYPE != Hopenbsd {
sect = addsection(&Segdata, ".tbss", 06)
sect := addsection(&Segdata, ".tbss", 06)
sect.Align = int32(Thearch.Ptrsize)
sect.Vaddr = 0
datsize = 0
@ -1486,6 +1421,7 @@ func dodata() {
* since it's not our decision; that code expects the sections in
* segtext.
*/
var segro *Segment
if Iself && Linkmode == LinkInternal {
segro = &Segrodata
} else {
@ -1600,17 +1536,17 @@ func dodata() {
}
/* number the sections */
n = 1
n := int32(1)
for sect = Segtext.Sect; sect != nil; sect = sect.Next {
for sect := Segtext.Sect; sect != nil; sect = sect.Next {
sect.Extnum = int16(n)
n++
}
for sect = Segrodata.Sect; sect != nil; sect = sect.Next {
for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
sect.Extnum = int16(n)
n++
}
for sect = Segdata.Sect; sect != nil; sect = sect.Next {
for sect := Segdata.Sect; sect != nil; sect = sect.Next {
sect.Extnum = int16(n)
n++
}
@ -1618,9 +1554,6 @@ func dodata() {
// assign addresses to text
func textaddress() {
var va uint64
var sect *Section
var sym *LSym
var sub *LSym
addsection(&Segtext, ".text", 05)
@ -1628,14 +1561,14 @@ func textaddress() {
// Assign PCs in text segment.
// Could parallelize, by assigning to text
// and then letting threads copy down, but probably not worth it.
sect = Segtext.Sect
sect := Segtext.Sect
sect.Align = int32(Funcalign)
Linklookup(Ctxt, "runtime.text", 0).Sect = sect
Linklookup(Ctxt, "runtime.etext", 0).Sect = sect
va = uint64(INITTEXT)
va := uint64(INITTEXT)
sect.Vaddr = va
for sym = Ctxt.Textp; sym != nil; sym = sym.Next {
for sym := Ctxt.Textp; sym != nil; sym = sym.Next {
sym.Sect = sect
if sym.Type&SSUB != 0 {
continue
@ -1664,26 +1597,11 @@ func textaddress() {
// assign addresses
func address() {
var s *Section
var text *Section
var data *Section
var rodata *Section
var symtab *Section
var pclntab *Section
var noptr *Section
var bss *Section
var noptrbss *Section
var typelink *Section
var sym *LSym
var sub *LSym
var va uint64
var vlen int64
va = uint64(INITTEXT)
va := uint64(INITTEXT)
Segtext.Rwx = 05
Segtext.Vaddr = va
Segtext.Fileoff = uint64(HEADR)
for s = Segtext.Sect; s != nil; s = s.Next {
for s := Segtext.Sect; s != nil; s = s.Next {
va = uint64(Rnd(int64(va), int64(s.Align)))
s.Vaddr = va
va += s.Length
@ -1704,7 +1622,7 @@ func address() {
Segrodata.Vaddr = va
Segrodata.Fileoff = va - Segtext.Vaddr + Segtext.Fileoff
Segrodata.Filelen = 0
for s = Segrodata.Sect; s != nil; s = s.Next {
for s := Segrodata.Sect; s != nil; s = s.Next {
va = uint64(Rnd(int64(va), int64(s.Align)))
s.Vaddr = va
va += s.Length
@ -1725,11 +1643,12 @@ func address() {
if HEADTYPE == Hplan9 {
Segdata.Fileoff = Segtext.Fileoff + Segtext.Filelen
}
data = nil
noptr = nil
bss = nil
noptrbss = nil
for s = Segdata.Sect; s != nil; s = s.Next {
data := (*Section)(nil)
noptr := (*Section)(nil)
bss := (*Section)(nil)
noptrbss := (*Section)(nil)
var vlen int64
for s := Segdata.Sect; s != nil; s = s.Next {
vlen = int64(s.Length)
if s.Next != nil {
vlen = int64(s.Next.Vaddr - s.Vaddr)
@ -1753,17 +1672,19 @@ func address() {
Segdata.Filelen = bss.Vaddr - Segdata.Vaddr
text = Segtext.Sect
text := Segtext.Sect
var rodata *Section
if Segrodata.Sect != nil {
rodata = Segrodata.Sect
} else {
rodata = text.Next
}
typelink = rodata.Next
symtab = typelink.Next
pclntab = symtab.Next
typelink := rodata.Next
symtab := typelink.Next
pclntab := symtab.Next
for sym = datap; sym != nil; sym = sym.Next {
var sub *LSym
for sym := datap; sym != nil; sym = sym.Next {
Ctxt.Cursym = sym
if sym.Sect != nil {
sym.Value += int64((sym.Sect.(*Section)).Vaddr)
@ -1780,7 +1701,7 @@ func address() {
xdefine("runtime.typelink", SRODATA, int64(typelink.Vaddr))
xdefine("runtime.etypelink", SRODATA, int64(typelink.Vaddr+typelink.Length))
sym = Linklookup(Ctxt, "runtime.gcdata", 0)
sym := Linklookup(Ctxt, "runtime.gcdata", 0)
xdefine("runtime.egcdata", SRODATA, Symaddr(sym)+sym.Size)
Linklookup(Ctxt, "runtime.egcdata", 0).Sect = sym.Sect

View file

@ -11,9 +11,7 @@ import "cmd/internal/obj"
// ../gc/reflect.c stuffs in these.
func decode_reloc(s *LSym, off int32) *Reloc {
var i int
for i = 0; i < len(s.R); i++ {
for i := 0; i < len(s.R); i++ {
if s.R[i].Off == off {
return &s.R[i:][0]
}
@ -22,9 +20,7 @@ func decode_reloc(s *LSym, off int32) *Reloc {
}
func decode_reloc_sym(s *LSym, off int32) *LSym {
var r *Reloc
r = decode_reloc(s, off)
r := decode_reloc(s, off)
if r == nil {
return nil
}
@ -75,9 +71,7 @@ func decodetype_gcprog(s *LSym) *LSym {
}
func decodetype_gcmask(s *LSym) []byte {
var mask *LSym
mask = decode_reloc_sym(s, 1*int32(Thearch.Ptrsize)+8+1*int32(Thearch.Ptrsize))
mask := decode_reloc_sym(s, 1*int32(Thearch.Ptrsize)+8+1*int32(Thearch.Ptrsize))
return mask.P
}
@ -124,9 +118,7 @@ func decodetype_funcoutcount(s *LSym) int {
}
func decodetype_funcintype(s *LSym, i int) *LSym {
var r *Reloc
r = decode_reloc(s, int32(commonsize())+int32(Thearch.Ptrsize))
r := decode_reloc(s, int32(commonsize())+int32(Thearch.Ptrsize))
if r == nil {
return nil
}
@ -134,9 +126,7 @@ func decodetype_funcintype(s *LSym, i int) *LSym {
}
func decodetype_funcouttype(s *LSym, i int) *LSym {
var r *Reloc
r = decode_reloc(s, int32(commonsize())+2*int32(Thearch.Ptrsize)+2*int32(Thearch.Intsize))
r := decode_reloc(s, int32(commonsize())+2*int32(Thearch.Ptrsize)+2*int32(Thearch.Intsize))
if r == nil {
return nil
}
@ -154,16 +144,14 @@ func structfieldsize() int {
// Type.StructType.fields[]-> name, typ and offset.
func decodetype_structfieldname(s *LSym, i int) string {
var r *Reloc
// go.string."foo" 0x28 / 0x40
s = decode_reloc_sym(s, int32(commonsize())+int32(Thearch.Ptrsize)+2*int32(Thearch.Intsize)+int32(i)*int32(structfieldsize()))
if s == nil { // embedded structs have a nil name.
return ""
}
r = decode_reloc(s, 0) // s has a pointer to the string data at offset 0
if r == nil { // shouldn't happen.
r := decode_reloc(s, 0) // s has a pointer to the string data at offset 0
if r == nil { // shouldn't happen.
return ""
}
return cstring(r.Sym.P[r.Add:])

File diff suppressed because it is too large

View file

@ -804,13 +804,11 @@ func elf64phdr(e *ElfPhdr) {
}
func elf32phdr(e *ElfPhdr) {
var frag int
if e.type_ == PT_LOAD {
// Correct ELF loaders will do this implicitly,
// but buggy ELF loaders like the one in some
// versions of QEMU won't.
frag = int(e.vaddr & (e.align - 1))
frag := int(e.vaddr & (e.align - 1))
e.off -= uint64(frag)
e.vaddr -= uint64(frag)
@ -856,16 +854,14 @@ func elf32shdr(e *ElfShdr) {
}
func elfwriteshdrs() uint32 {
var i int
if elf64 != 0 {
for i = 0; i < int(ehdr.shnum); i++ {
for i := 0; i < int(ehdr.shnum); i++ {
elf64shdr(shdr[i])
}
return uint32(ehdr.shnum) * ELF64SHDRSIZE
}
for i = 0; i < int(ehdr.shnum); i++ {
for i := 0; i < int(ehdr.shnum); i++ {
elf32shdr(shdr[i])
}
return uint32(ehdr.shnum) * ELF32SHDRSIZE
@ -883,25 +879,21 @@ func elfsetstring(s string, off int) {
}
func elfwritephdrs() uint32 {
var i int
if elf64 != 0 {
for i = 0; i < int(ehdr.phnum); i++ {
for i := 0; i < int(ehdr.phnum); i++ {
elf64phdr(phdr[i])
}
return uint32(ehdr.phnum) * ELF64PHDRSIZE
}
for i = 0; i < int(ehdr.phnum); i++ {
for i := 0; i < int(ehdr.phnum); i++ {
elf32phdr(phdr[i])
}
return uint32(ehdr.phnum) * ELF32PHDRSIZE
}
func newElfPhdr() *ElfPhdr {
var e *ElfPhdr
e = new(ElfPhdr)
e := new(ElfPhdr)
if ehdr.phnum >= NSECT {
Diag("too many phdrs")
} else {
@ -917,9 +909,7 @@ func newElfPhdr() *ElfPhdr {
}
func newElfShdr(name int64) *ElfShdr {
var e *ElfShdr
e = new(ElfShdr)
e := new(ElfShdr)
e.name = uint32(name)
e.shnum = int(ehdr.shnum)
if ehdr.shnum >= NSECT {
@ -937,9 +927,7 @@ func getElfEhdr() *ElfEhdr {
}
func elf64writehdr() uint32 {
var i int
for i = 0; i < EI_NIDENT; i++ {
for i := 0; i < EI_NIDENT; i++ {
Cput(ehdr.ident[i])
}
Thearch.Wput(ehdr.type_)
@ -959,9 +947,7 @@ func elf64writehdr() uint32 {
}
func elf32writehdr() uint32 {
var i int
for i = 0; i < EI_NIDENT; i++ {
for i := 0; i < EI_NIDENT; i++ {
Cput(ehdr.ident[i])
}
Thearch.Wput(ehdr.type_)
@ -1037,10 +1023,8 @@ func elfwritedynentsymsize(s *LSym, tag int, t *LSym) {
}
func elfinterp(sh *ElfShdr, startva uint64, resoff uint64, p string) int {
var n int
interp = p
n = len(interp) + 1
n := len(interp) + 1
sh.addr = startva + resoff - uint64(n)
sh.off = resoff - uint64(n)
sh.size = uint64(n)
@ -1049,9 +1033,7 @@ func elfinterp(sh *ElfShdr, startva uint64, resoff uint64, p string) int {
}
func elfwriteinterp() int {
var sh *ElfShdr
sh = elfshname(".interp")
sh := elfshname(".interp")
Cseek(int64(sh.off))
coutbuf.w.WriteString(interp)
Cput(0)
@ -1059,9 +1041,7 @@ func elfwriteinterp() int {
}
func elfnote(sh *ElfShdr, startva uint64, resoff uint64, sz int) int {
var n uint64
n = 3*4 + uint64(sz) + resoff%4
n := 3*4 + uint64(sz) + resoff%4
sh.type_ = SHT_NOTE
sh.flags = SHF_ALLOC
@ -1074,9 +1054,7 @@ func elfnote(sh *ElfShdr, startva uint64, resoff uint64, sz int) int {
}
func elfwritenotehdr(str string, namesz uint32, descsz uint32, tag uint32) *ElfShdr {
var sh *ElfShdr
sh = elfshname(str)
sh := elfshname(str)
// Write Elf_Note header.
Cseek(int64(sh.off))
@ -1099,17 +1077,13 @@ const (
var ELF_NOTE_NETBSD_NAME = []byte("NetBSD\x00")
func elfnetbsdsig(sh *ElfShdr, startva uint64, resoff uint64) int {
var n int
n = int(Rnd(ELF_NOTE_NETBSD_NAMESZ, 4) + Rnd(ELF_NOTE_NETBSD_DESCSZ, 4))
n := int(Rnd(ELF_NOTE_NETBSD_NAMESZ, 4) + Rnd(ELF_NOTE_NETBSD_DESCSZ, 4))
return elfnote(sh, startva, resoff, n)
}
func elfwritenetbsdsig() int {
var sh *ElfShdr
// Write Elf_Note header.
sh = elfwritenotehdr(".note.netbsd.ident", ELF_NOTE_NETBSD_NAMESZ, ELF_NOTE_NETBSD_DESCSZ, ELF_NOTE_NETBSD_TAG)
sh := elfwritenotehdr(".note.netbsd.ident", ELF_NOTE_NETBSD_NAMESZ, ELF_NOTE_NETBSD_DESCSZ, ELF_NOTE_NETBSD_TAG)
if sh == nil {
return 0
@ -1135,17 +1109,13 @@ const (
var ELF_NOTE_OPENBSD_NAME = []byte("OpenBSD\x00")
func elfopenbsdsig(sh *ElfShdr, startva uint64, resoff uint64) int {
var n int
n = ELF_NOTE_OPENBSD_NAMESZ + ELF_NOTE_OPENBSD_DESCSZ
n := ELF_NOTE_OPENBSD_NAMESZ + ELF_NOTE_OPENBSD_DESCSZ
return elfnote(sh, startva, resoff, n)
}
func elfwriteopenbsdsig() int {
var sh *ElfShdr
// Write Elf_Note header.
sh = elfwritenotehdr(".note.openbsd.ident", ELF_NOTE_OPENBSD_NAMESZ, ELF_NOTE_OPENBSD_DESCSZ, ELF_NOTE_OPENBSD_TAG)
sh := elfwritenotehdr(".note.openbsd.ident", ELF_NOTE_OPENBSD_NAMESZ, ELF_NOTE_OPENBSD_DESCSZ, ELF_NOTE_OPENBSD_TAG)
if sh == nil {
return 0
@ -1160,9 +1130,6 @@ func elfwriteopenbsdsig() int {
}
func addbuildinfo(val string) {
var ov string
var i int
var b int
var j int
if val[0] != '0' || val[1] != 'x' {
@ -1170,9 +1137,10 @@ func addbuildinfo(val string) {
Exit(2)
}
ov = val
ov := val
val = val[2:]
i = 0
i := 0
var b int
for val != "" {
if len(val) == 1 {
fmt.Fprintf(os.Stderr, "%s: -B argument must have even number of digits: %s\n", os.Args[0], ov)
@ -1216,16 +1184,12 @@ const (
var ELF_NOTE_BUILDINFO_NAME = []byte("GNU\x00")
func elfbuildinfo(sh *ElfShdr, startva uint64, resoff uint64) int {
var n int
n = int(ELF_NOTE_BUILDINFO_NAMESZ + Rnd(int64(len(buildinfo)), 4))
n := int(ELF_NOTE_BUILDINFO_NAMESZ + Rnd(int64(len(buildinfo)), 4))
return elfnote(sh, startva, resoff, n)
}
func elfwritebuildinfo() int {
var sh *ElfShdr
sh = elfwritenotehdr(".note.gnu.build-id", ELF_NOTE_BUILDINFO_NAMESZ, uint32(len(buildinfo)), ELF_NOTE_BUILDINFO_TAG)
sh := elfwritenotehdr(".note.gnu.build-id", ELF_NOTE_BUILDINFO_NAMESZ, uint32(len(buildinfo)), ELF_NOTE_BUILDINFO_TAG)
if sh == nil {
return 0
}
@ -1254,7 +1218,6 @@ type Elflib struct {
func addelflib(list **Elflib, file string, vers string) *Elfaux {
var lib *Elflib
var aux *Elfaux
for lib = *list; lib != nil; lib = lib.next {
if lib.file == file {
@ -1267,75 +1230,59 @@ func addelflib(list **Elflib, file string, vers string) *Elfaux {
*list = lib
havelib:
for aux = lib.aux; aux != nil; aux = aux.next {
for aux := lib.aux; aux != nil; aux = aux.next {
if aux.vers == vers {
goto haveaux
return aux
}
}
aux = new(Elfaux)
aux := new(Elfaux)
aux.next = lib.aux
aux.vers = vers
lib.aux = aux
haveaux:
return aux
}
func elfdynhash() {
var s *LSym
var sy *LSym
var dynstr *LSym
var i int
var j int
var nbucket int
var b int
var nfile int
var hc uint32
var chain []uint32
var buckets []uint32
var nsym int
var name string
var need []*Elfaux
var needlib *Elflib
var l *Elflib
var x *Elfaux
if !Iself {
return
}
nsym = Nelfsym
s = Linklookup(Ctxt, ".hash", 0)
nsym := Nelfsym
s := Linklookup(Ctxt, ".hash", 0)
s.Type = SELFROSECT
s.Reachable = true
i = nsym
nbucket = 1
i := nsym
nbucket := 1
for i > 0 {
nbucket++
i >>= 1
}
needlib = nil
need = make([]*Elfaux, nsym)
chain = make([]uint32, nsym)
buckets = make([]uint32, nbucket)
needlib := (*Elflib)(nil)
need := make([]*Elfaux, nsym)
chain := make([]uint32, nsym)
buckets := make([]uint32, nbucket)
if need == nil || chain == nil || buckets == nil {
Ctxt.Cursym = nil
Diag("out of memory")
Errorexit()
}
for i = 0; i < nsym; i++ {
for i := 0; i < nsym; i++ {
need[i] = nil
}
for i = 0; i < nsym; i++ {
for i := 0; i < nsym; i++ {
chain[i] = 0
}
for i = 0; i < nbucket; i++ {
for i := 0; i < nbucket; i++ {
buckets[i] = 0
}
for sy = Ctxt.Allsym; sy != nil; sy = sy.Allsym {
var b int
var hc uint32
var name string
for sy := Ctxt.Allsym; sy != nil; sy = sy.Allsym {
if sy.Dynid <= 0 {
continue
}
@ -1354,20 +1301,22 @@ func elfdynhash() {
Adduint32(Ctxt, s, uint32(nbucket))
Adduint32(Ctxt, s, uint32(nsym))
for i = 0; i < nbucket; i++ {
for i := 0; i < nbucket; i++ {
Adduint32(Ctxt, s, buckets[i])
}
for i = 0; i < nsym; i++ {
for i := 0; i < nsym; i++ {
Adduint32(Ctxt, s, chain[i])
}
// version symbols
dynstr = Linklookup(Ctxt, ".dynstr", 0)
dynstr := Linklookup(Ctxt, ".dynstr", 0)
s = Linklookup(Ctxt, ".gnu.version_r", 0)
i = 2
nfile = 0
for l = needlib; l != nil; l = l.next {
nfile := 0
var j int
var x *Elfaux
for l := needlib; l != nil; l = l.next {
nfile++
// header
@ -1405,7 +1354,7 @@ func elfdynhash() {
// version references
s = Linklookup(Ctxt, ".gnu.version", 0)
for i = 0; i < nsym; i++ {
for i := 0; i < nsym; i++ {
if i == 0 {
Adduint16(Ctxt, s, 0) // first entry - no symbol
} else if need[i] == nil {
@ -1424,14 +1373,14 @@ func elfdynhash() {
}
if Thearch.Thechar == '6' || Thearch.Thechar == '9' {
sy = Linklookup(Ctxt, ".rela.plt", 0)
sy := Linklookup(Ctxt, ".rela.plt", 0)
if sy.Size > 0 {
Elfwritedynent(s, DT_PLTREL, DT_RELA)
elfwritedynentsymsize(s, DT_PLTRELSZ, sy)
elfwritedynentsym(s, DT_JMPREL, sy)
}
} else {
sy = Linklookup(Ctxt, ".rel.plt", 0)
sy := Linklookup(Ctxt, ".rel.plt", 0)
if sy.Size > 0 {
Elfwritedynent(s, DT_PLTREL, DT_REL)
elfwritedynentsymsize(s, DT_PLTRELSZ, sy)
@ -1443,9 +1392,7 @@ func elfdynhash() {
}
func elfphload(seg *Segment) *ElfPhdr {
var ph *ElfPhdr
ph = newElfPhdr()
ph := newElfPhdr()
ph.type_ = PT_LOAD
if seg.Rwx&4 != 0 {
ph.flags |= PF_R
@ -1467,45 +1414,37 @@ func elfphload(seg *Segment) *ElfPhdr {
}
func elfshname(name string) *ElfShdr {
var i int
var off int
var sh *ElfShdr
for i = 0; i < nelfstr; i++ {
for i := 0; i < nelfstr; i++ {
if name == elfstr[i].s {
off = elfstr[i].off
goto found
for i = 0; i < int(ehdr.shnum); i++ {
sh = shdr[i]
if sh.name == uint32(off) {
return sh
}
}
sh = newElfShdr(int64(off))
return sh
}
}
Diag("cannot find elf name %s", name)
Errorexit()
return nil
found:
for i = 0; i < int(ehdr.shnum); i++ {
sh = shdr[i]
if sh.name == uint32(off) {
return sh
}
}
sh = newElfShdr(int64(off))
return sh
}
func elfshalloc(sect *Section) *ElfShdr {
var sh *ElfShdr
sh = elfshname(sect.Name)
sh := elfshname(sect.Name)
sect.Elfsect = sh
return sh
}
func elfshbits(sect *Section) *ElfShdr {
var sh *ElfShdr
sh = elfshalloc(sect)
sh := elfshalloc(sect)
if sh.type_ > 0 {
return sh
}
@ -1540,11 +1479,6 @@ func elfshbits(sect *Section) *ElfShdr {
}
func elfshreloc(sect *Section) *ElfShdr {
var typ int
var sh *ElfShdr
var prefix string
var buf string
// If main section is SHT_NOBITS, nothing to relocate.
// Also nothing to relocate in .shstrtab.
if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen {
@ -1554,6 +1488,8 @@ func elfshreloc(sect *Section) *ElfShdr {
return nil
}
var prefix string
var typ int
if Thearch.Thechar == '6' || Thearch.Thechar == '9' {
prefix = ".rela"
typ = SHT_RELA
@ -1562,8 +1498,8 @@ func elfshreloc(sect *Section) *ElfShdr {
typ = SHT_REL
}
buf = fmt.Sprintf("%s%s", prefix, sect.Name)
sh = elfshname(buf)
buf := fmt.Sprintf("%s%s", prefix, sect.Name)
sh := elfshname(buf)
sh.type_ = uint32(typ)
sh.entsize = uint64(Thearch.Regsize) * 2
if typ == SHT_RELA {
@ -1578,11 +1514,6 @@ func elfshreloc(sect *Section) *ElfShdr {
}
func elfrelocsect(sect *Section, first *LSym) {
var ri int
var sym *LSym
var eaddr int32
var r *Reloc
// If main section is SHT_NOBITS, nothing to relocate.
// Also nothing to relocate in .shstrtab.
if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen {
@ -1593,6 +1524,7 @@ func elfrelocsect(sect *Section, first *LSym) {
}
sect.Reloff = uint64(Cpos())
var sym *LSym
for sym = first; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
@ -1602,7 +1534,9 @@ func elfrelocsect(sect *Section, first *LSym) {
}
}
eaddr = int32(sect.Vaddr + sect.Length)
eaddr := int32(sect.Vaddr + sect.Length)
var r *Reloc
var ri int
for ; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
@ -1635,35 +1569,29 @@ func elfrelocsect(sect *Section, first *LSym) {
}
func Elfemitreloc() {
var sect *Section
for Cpos()&7 != 0 {
Cput(0)
}
elfrelocsect(Segtext.Sect, Ctxt.Textp)
for sect = Segtext.Sect.Next; sect != nil; sect = sect.Next {
for sect := Segtext.Sect.Next; sect != nil; sect = sect.Next {
elfrelocsect(sect, datap)
}
for sect = Segrodata.Sect; sect != nil; sect = sect.Next {
for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
elfrelocsect(sect, datap)
}
for sect = Segdata.Sect; sect != nil; sect = sect.Next {
for sect := Segdata.Sect; sect != nil; sect = sect.Next {
elfrelocsect(sect, datap)
}
}
func doelf() {
var s *LSym
var shstrtab *LSym
var dynstr *LSym
if !Iself {
return
}
/* predefine strings we need for section headers */
shstrtab = Linklookup(Ctxt, ".shstrtab", 0)
shstrtab := Linklookup(Ctxt, ".shstrtab", 0)
shstrtab.Type = SELFROSECT
shstrtab.Reachable = true
@ -1767,7 +1695,7 @@ func doelf() {
Addstring(shstrtab, ".gnu.version_r")
/* dynamic symbol table - first entry all zeros */
s = Linklookup(Ctxt, ".dynsym", 0)
s := Linklookup(Ctxt, ".dynsym", 0)
s.Type = SELFROSECT
s.Reachable = true
@ -1785,7 +1713,7 @@ func doelf() {
if s.Size == 0 {
Addstring(s, "")
}
dynstr = s
dynstr := s
/* relocation table */
if Thearch.Thechar == '6' || Thearch.Thechar == '9' {
@ -1804,7 +1732,7 @@ func doelf() {
/* ppc64 glink resolver */
if Thearch.Thechar == '9' {
s = Linklookup(Ctxt, ".glink", 0)
s := Linklookup(Ctxt, ".glink", 0)
s.Reachable = true
s.Type = SELFRXSECT
}
@ -1901,8 +1829,7 @@ func doelf() {
// Do not write DT_NULL. elfdynhash will finish it.
func shsym(sh *ElfShdr, s *LSym) {
var addr int64
addr = Symaddr(s)
addr := Symaddr(s)
if sh.flags&SHF_ALLOC != 0 {
sh.addr = uint64(addr)
}
@ -1920,35 +1847,22 @@ func phsh(ph *ElfPhdr, sh *ElfShdr) {
}
func Asmbelfsetup() {
var sect *Section
/* This null SHdr must appear before all others */
elfshname("")
for sect = Segtext.Sect; sect != nil; sect = sect.Next {
for sect := Segtext.Sect; sect != nil; sect = sect.Next {
elfshalloc(sect)
}
for sect = Segrodata.Sect; sect != nil; sect = sect.Next {
for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
elfshalloc(sect)
}
for sect = Segdata.Sect; sect != nil; sect = sect.Next {
for sect := Segdata.Sect; sect != nil; sect = sect.Next {
elfshalloc(sect)
}
}
func Asmbelf(symo int64) {
var a int64
var o int64
var startva int64
var resoff int64
var eh *ElfEhdr
var ph *ElfPhdr
var pph *ElfPhdr
var pnote *ElfPhdr
var sh *ElfShdr
var sect *Section
eh = getElfEhdr()
eh := getElfEhdr()
switch Thearch.Thechar {
default:
Diag("unknown architecture in asmbelf")
@ -1968,10 +1882,11 @@ func Asmbelf(symo int64) {
eh.machine = EM_PPC64
}
startva = INITTEXT - int64(HEADR)
resoff = ELFRESERVE
startva := INITTEXT - int64(HEADR)
resoff := int64(ELFRESERVE)
pph = nil
pph := (*ElfPhdr)(nil)
var pnote *ElfPhdr
if Linkmode == LinkExternal {
/* skip program headers */
eh.phoff = 0
@ -1996,7 +1911,7 @@ func Asmbelf(symo int64) {
* Except on NaCl where it must not be loaded.
*/
if HEADTYPE != Hnacl {
o = int64(Segtext.Vaddr - pph.vaddr)
o := int64(Segtext.Vaddr - pph.vaddr)
Segtext.Vaddr -= uint64(o)
Segtext.Length += uint64(o)
o = int64(Segtext.Fileoff - pph.off)
@ -2006,7 +1921,7 @@ func Asmbelf(symo int64) {
if Debug['d'] == 0 {
/* interpreter */
sh = elfshname(".interp")
sh := elfshname(".interp")
sh.type_ = SHT_PROGBITS
sh.flags = SHF_ALLOC
@ -2035,7 +1950,7 @@ func Asmbelf(symo int64) {
resoff -= int64(elfinterp(sh, uint64(startva), uint64(resoff), interpreter))
ph = newElfPhdr()
ph := newElfPhdr()
ph.type_ = PT_INTERP
ph.flags = PF_R
phsh(ph, sh)
@ -2043,7 +1958,7 @@ func Asmbelf(symo int64) {
pnote = nil
if HEADTYPE == Hnetbsd || HEADTYPE == Hopenbsd {
sh = nil
sh := (*ElfShdr)(nil)
switch HEADTYPE {
case Hnetbsd:
sh = elfshname(".note.netbsd.ident")
@ -2061,7 +1976,7 @@ func Asmbelf(symo int64) {
}
if len(buildinfo) > 0 {
sh = elfshname(".note.gnu.build-id")
sh := elfshname(".note.gnu.build-id")
resoff -= int64(elfbuildinfo(sh, uint64(startva), uint64(resoff)))
if pnote == nil {
@ -2083,7 +1998,7 @@ func Asmbelf(symo int64) {
/* Dynamic linking sections */
if Debug['d'] == 0 {
sh = elfshname(".dynsym")
sh := elfshname(".dynsym")
sh.type_ = SHT_DYNSYM
sh.flags = SHF_ALLOC
if elf64 != 0 {
@ -2104,7 +2019,7 @@ func Asmbelf(symo int64) {
shsym(sh, Linklookup(Ctxt, ".dynstr", 0))
if elfverneed != 0 {
sh = elfshname(".gnu.version")
sh := elfshname(".gnu.version")
sh.type_ = SHT_GNU_VERSYM
sh.flags = SHF_ALLOC
sh.addralign = 2
@ -2124,7 +2039,7 @@ func Asmbelf(symo int64) {
switch eh.machine {
case EM_X86_64,
EM_PPC64:
sh = elfshname(".rela.plt")
sh := elfshname(".rela.plt")
sh.type_ = SHT_RELA
sh.flags = SHF_ALLOC
sh.entsize = ELF64RELASIZE
@ -2142,7 +2057,7 @@ func Asmbelf(symo int64) {
shsym(sh, Linklookup(Ctxt, ".rela", 0))
default:
sh = elfshname(".rel.plt")
sh := elfshname(".rel.plt")
sh.type_ = SHT_REL
sh.flags = SHF_ALLOC
sh.entsize = ELF32RELSIZE
@ -2160,7 +2075,7 @@ func Asmbelf(symo int64) {
}
if eh.machine == EM_PPC64 {
sh = elfshname(".glink")
sh := elfshname(".glink")
sh.type_ = SHT_PROGBITS
sh.flags = SHF_ALLOC + SHF_EXECINSTR
sh.addralign = 4
@ -2188,7 +2103,7 @@ func Asmbelf(symo int64) {
// On ppc64, .got comes from the input files, so don't
// create it here, and .got.plt is not used.
if eh.machine != EM_PPC64 {
sh = elfshname(".got")
sh := elfshname(".got")
sh.type_ = SHT_PROGBITS
sh.flags = SHF_ALLOC + SHF_WRITE
sh.entsize = uint64(Thearch.Regsize)
@ -2220,7 +2135,7 @@ func Asmbelf(symo int64) {
sh.addralign = uint64(Thearch.Regsize)
sh.link = uint32(elfshname(".dynstr").shnum)
shsym(sh, Linklookup(Ctxt, ".dynamic", 0))
ph = newElfPhdr()
ph := newElfPhdr()
ph.type_ = PT_DYNAMIC
ph.flags = PF_R + PF_W
phsh(ph, sh)
@ -2232,7 +2147,7 @@ func Asmbelf(symo int64) {
// not currently support it. This is handled
// appropriately in runtime/cgo.
if Ctxt.Tlsoffset != 0 && HEADTYPE != Hopenbsd {
ph = newElfPhdr()
ph := newElfPhdr()
ph.type_ = PT_TLS
ph.flags = PF_R
ph.memsz = uint64(-Ctxt.Tlsoffset)
@ -2241,7 +2156,7 @@ func Asmbelf(symo int64) {
}
if HEADTYPE == Hlinux {
ph = newElfPhdr()
ph := newElfPhdr()
ph.type_ = PT_GNU_STACK
ph.flags = PF_W + PF_R
ph.align = uint64(Thearch.Regsize)
@ -2253,7 +2168,7 @@ func Asmbelf(symo int64) {
}
elfobj:
sh = elfshname(".shstrtab")
sh := elfshname(".shstrtab")
sh.type_ = SHT_STRTAB
sh.addralign = 1
shsym(sh, Linklookup(Ctxt, ".shstrtab", 0))
@ -2265,29 +2180,29 @@ elfobj:
elfshname(".strtab")
}
for sect = Segtext.Sect; sect != nil; sect = sect.Next {
for sect := Segtext.Sect; sect != nil; sect = sect.Next {
elfshbits(sect)
}
for sect = Segrodata.Sect; sect != nil; sect = sect.Next {
for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
elfshbits(sect)
}
for sect = Segdata.Sect; sect != nil; sect = sect.Next {
for sect := Segdata.Sect; sect != nil; sect = sect.Next {
elfshbits(sect)
}
if Linkmode == LinkExternal {
for sect = Segtext.Sect; sect != nil; sect = sect.Next {
for sect := Segtext.Sect; sect != nil; sect = sect.Next {
elfshreloc(sect)
}
for sect = Segrodata.Sect; sect != nil; sect = sect.Next {
for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
elfshreloc(sect)
}
for sect = Segdata.Sect; sect != nil; sect = sect.Next {
for sect := Segdata.Sect; sect != nil; sect = sect.Next {
elfshreloc(sect)
}
// add a .note.GNU-stack section to mark the stack as non-executable
sh = elfshname(".note.GNU-stack")
sh := elfshname(".note.GNU-stack")
sh.type_ = SHT_PROGBITS
sh.addralign = 1
@ -2297,7 +2212,7 @@ elfobj:
// generate .tbss section for dynamic internal linking (except for OpenBSD)
// external linking generates .tbss in data.c
if Linkmode == LinkInternal && Debug['d'] == 0 && HEADTYPE != Hopenbsd {
sh = elfshname(".tbss")
sh := elfshname(".tbss")
sh.type_ = SHT_NOBITS
sh.addralign = uint64(Thearch.Regsize)
sh.size = uint64(-Ctxt.Tlsoffset)
@ -2305,7 +2220,7 @@ elfobj:
}
if Debug['s'] == 0 {
sh = elfshname(".symtab")
sh := elfshname(".symtab")
sh.type_ = SHT_SYMTAB
sh.off = uint64(symo)
sh.size = uint64(Symsize)
@ -2368,7 +2283,7 @@ elfobj:
}
Cseek(0)
a = 0
a := int64(0)
a += int64(elfwritehdr())
a += int64(elfwritephdrs())
a += int64(elfwriteshdrs())
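
elfdynhash earlier in this file's diff fills the .hash section with a classic bucket/chain table: each dynamic symbol's hash picks a bucket, the symbol index becomes the new bucket head, and its chain slot points at the previous head. A toy version of that construction, separate from the diff (the hash function is a stand-in, not the ELF SysV hash, and the symbol names are made up):

	package main

	import "fmt"

	func hash(name string) uint32 {
		h := uint32(0)
		for i := 0; i < len(name); i++ {
			h = h*31 + uint32(name[i])
		}
		return h
	}

	func main() {
		syms := []string{"", "runtime.morestack", "main.main", "fmt.Println"} // index 0 is the null symbol
		nsym := len(syms)
		nbucket := 3

		buckets := make([]uint32, nbucket)
		chain := make([]uint32, nsym)
		for i := 1; i < nsym; i++ { // symbol 0 stays unhashed, as in the real table
			b := hash(syms[i]) % uint32(nbucket)
			chain[i] = buckets[b] // new head remembers the old head
			buckets[b] = uint32(i)
		}

		fmt.Println("nbucket:", nbucket, "nchain:", nsym)
		fmt.Println("buckets:", buckets)
		fmt.Println("chain:  ", chain)
	}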

View file

@ -54,11 +54,8 @@ var ihash [NIHASH]*Import
var nimport int
func hashstr(name string) int {
var h uint32
var cp string
h = 0
for cp = name; cp != ""; cp = cp[1:] {
h := uint32(0)
for cp := name; cp != ""; cp = cp[1:] {
h = h*1119 + uint32(cp[0])
}
h &= 0xffffff
@ -66,16 +63,13 @@ func hashstr(name string) int {
}
func ilookup(name string) *Import {
var h int
var x *Import
h = hashstr(name) % NIHASH
for x = ihash[h]; x != nil; x = x.hash {
h := hashstr(name) % NIHASH
for x := ihash[h]; x != nil; x = x.hash {
if x.name[0] == name[0] && x.name == name {
return x
}
}
x = new(Import)
x := new(Import)
x.name = name
x.hash = ihash[h]
ihash[h] = x
@ -84,10 +78,7 @@ func ilookup(name string) *Import {
}
func ldpkg(f *Biobuf, pkg string, length int64, filename string, whence int) {
var bdata []byte
var data string
var p0, p1 int
var name string
if Debug['g'] != 0 {
return
@ -101,7 +92,7 @@ func ldpkg(f *Biobuf, pkg string, length int64, filename string, whence int) {
return
}
bdata = make([]byte, length)
bdata := make([]byte, length)
if int64(Bread(f, bdata)) != length {
fmt.Fprintf(os.Stderr, "%s: short pkg read %s\n", os.Args[0], filename)
if Debug['u'] != 0 {
@ -109,7 +100,7 @@ func ldpkg(f *Biobuf, pkg string, length int64, filename string, whence int) {
}
return
}
data = string(bdata)
data := string(bdata)
// first \n$$ marks beginning of exports - skip rest of line
p0 = strings.Index(data, "\n$$")
@ -153,7 +144,7 @@ func ldpkg(f *Biobuf, pkg string, length int64, filename string, whence int) {
for p0 < p1 && (data[p0] == ' ' || data[p0] == '\t' || data[p0] == '\n') {
p0++
}
name = data[p0:]
name := data[p0:]
for p0 < p1 && data[p0] != ' ' && data[p0] != '\t' && data[p0] != '\n' {
p0++
}
@ -221,14 +212,13 @@ func ldpkg(f *Biobuf, pkg string, length int64, filename string, whence int) {
}
func loadpkgdata(file string, pkg string, data string) {
var p string
var prefix string
var name string
var def string
var x *Import
file = file
p = data
p := data
for parsepkgdata(file, pkg, &p, &prefix, &name, &def) > 0 {
x = ilookup(name)
if x.prefix == "" {
@ -250,15 +240,10 @@ func loadpkgdata(file string, pkg string, data string) {
}
func parsepkgdata(file string, pkg string, pp *string, prefixp *string, namep *string, defp *string) int {
var p string
var prefix string
var name string
var def string
var meth string
var inquote bool
// skip white space
p = *pp
p := *pp
loop:
for len(p) > 0 && (p[0] == ' ' || p[0] == '\t' || p[0] == '\n') {
@ -310,9 +295,9 @@ loop:
prefix = prefix[:len(prefix)-len(p)-1]
// name: a.b followed by space
name = p
name := p
inquote = false
inquote := false
for len(p) > 0 {
if p[0] == ' ' && !inquote {
break
@ -334,7 +319,7 @@ loop:
p = p[1:]
// def: free form to new line
def = p
def := p
for len(p) > 0 && p[0] != '\n' {
p = p[1:]
@ -347,6 +332,7 @@ loop:
p = p[1:]
// include methods on successive lines in def of named type
var meth string
for parsemethod(&p, &meth) > 0 {
if defbuf == nil {
defbuf = new(bytes.Buffer)
@ -372,10 +358,8 @@ loop:
}
func parsemethod(pp *string, methp *string) int {
var p string
// skip white space
p = *pp
p := *pp
for len(p) > 0 && (p[0] == ' ' || p[0] == '\t') {
p = p[1:]
@ -415,7 +399,6 @@ useline:
func loadcgo(file string, pkg string, p string) {
var next string
var p0 string
var q string
var f []string
var local string
@ -423,7 +406,7 @@ func loadcgo(file string, pkg string, p string) {
var lib string
var s *LSym
p0 = ""
p0 := ""
for ; p != ""; p = next {
if i := strings.Index(p, "\n"); i >= 0 {
p, next = p[:i], p[i+1:]
@ -610,10 +593,9 @@ func mark(s *LSym) {
func markflood() {
var a *Auto
var s *LSym
var i int
for s = markq; s != nil; s = s.Queue {
for s := markq; s != nil; s = s.Queue {
if s.Type == STEXT {
if Debug['v'] > 1 {
fmt.Fprintf(&Bso, "marktext %s\n", s.Name)
@ -659,38 +641,32 @@ var markextra = []string{
}
func deadcode() {
var i int
var s *LSym
var last *LSym
var p *LSym
var fmt_ string
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "%5.2f deadcode\n", obj.Cputime())
}
mark(Linklookup(Ctxt, INITENTRY, 0))
for i = 0; i < len(markextra); i++ {
for i := 0; i < len(markextra); i++ {
mark(Linklookup(Ctxt, markextra[i], 0))
}
for i = 0; i < len(dynexp); i++ {
for i := 0; i < len(dynexp); i++ {
mark(dynexp[i])
}
markflood()
// keep each beginning with 'typelink.' if the symbol it points at is being kept.
for s = Ctxt.Allsym; s != nil; s = s.Allsym {
for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if strings.HasPrefix(s.Name, "go.typelink.") {
s.Reachable = len(s.R) == 1 && s.R[0].Sym.Reachable
}
}
// remove dead text but keep file information (z symbols).
last = nil
last := (*LSym)(nil)
for s = Ctxt.Textp; s != nil; s = s.Next {
for s := Ctxt.Textp; s != nil; s = s.Next {
if !s.Reachable {
continue
}
@ -710,7 +686,7 @@ func deadcode() {
last.Next = nil
}
for s = Ctxt.Allsym; s != nil; s = s.Allsym {
for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if strings.HasPrefix(s.Name, "go.weak.") {
s.Special = 1 // do not lay out in data segment
s.Reachable = true
@ -719,9 +695,10 @@ func deadcode() {
}
// record field tracking references
fmt_ = ""
fmt_ := ""
for s = Ctxt.Allsym; s != nil; s = s.Allsym {
var p *LSym
for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if strings.HasPrefix(s.Name, "go.track.") {
s.Special = 1 // do not lay out in data segment
s.Hide = 1
@ -741,7 +718,7 @@ func deadcode() {
if tracksym == "" {
return
}
s = Linklookup(Ctxt, tracksym, 0)
s := Linklookup(Ctxt, tracksym, 0)
if !s.Reachable {
return
}
@ -749,12 +726,11 @@ func deadcode() {
}
func doweak() {
var s *LSym
var t *LSym
// resolve weak references only if
// target symbol will be in binary anyway.
for s = Ctxt.Allsym; s != nil; s = s.Allsym {
for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if strings.HasPrefix(s.Name, "go.weak.") {
t = Linkrlookup(Ctxt, s.Name[8:], int(s.Version))
if t != nil && t.Type != 0 && t.Reachable {
@ -772,13 +748,11 @@ func doweak() {
}
func addexport() {
var i int
if HEADTYPE == Hdarwin {
return
}
for i = 0; i < len(dynexp); i++ {
for i := 0; i < len(dynexp); i++ {
Thearch.Adddynsym(Ctxt, dynexp[i])
}
}
@ -840,16 +814,13 @@ var phash [1024]*Pkg
var pkgall *Pkg
func getpkg(path_ string) *Pkg {
var p *Pkg
var h int
h = hashstr(path_) % len(phash)
for p = phash[h]; p != nil; p = p.next {
h := hashstr(path_) % len(phash)
for p := phash[h]; p != nil; p = p.next {
if p.path_ == path_ {
return p
}
}
p = new(Pkg)
p := new(Pkg)
p.path_ = path_
p.next = phash[h]
phash[h] = p
@ -859,24 +830,18 @@ func getpkg(path_ string) *Pkg {
}
func imported(pkg string, import_ string) {
var p *Pkg
var i *Pkg
// everyone imports runtime, even runtime.
if import_ == "\"runtime\"" {
return
}
pkg = fmt.Sprintf("\"%v\"", Zconv(pkg, 0)) // turn pkg path into quoted form, freed below
p = getpkg(pkg)
i = getpkg(import_)
p := getpkg(pkg)
i := getpkg(import_)
i.impby = append(i.impby, p)
}
func cycle(p *Pkg) *Pkg {
var i int
var bad *Pkg
if p.checked != 0 {
return nil
}
@ -889,7 +854,8 @@ func cycle(p *Pkg) *Pkg {
}
p.mark = 1
for i = 0; i < len(p.impby); i++ {
var bad *Pkg
for i := 0; i < len(p.impby); i++ {
bad = cycle(p.impby[i])
if bad != nil {
p.mark = 0
@ -908,9 +874,7 @@ func cycle(p *Pkg) *Pkg {
}
func importcycles() {
var p *Pkg
for p = pkgall; p != nil; p = p.all {
for p := pkgall; p != nil; p = p.all {
cycle(p)
}
}
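
hashstr, ilookup, and getpkg above all use the same pattern: a small multiplicative string hash (h = h*1119 + c, masked to 24 bits) feeding a chained table, with new entries inserted at the head of their bucket on a miss. A self-contained sketch of that pattern, separate from the diff (the table size and entry type are illustrative):

	package main

	import "fmt"

	// hashstr mirrors the hash visible in the diff above.
	func hashstr(name string) int {
		h := uint32(0)
		for i := 0; i < len(name); i++ {
			h = h*1119 + uint32(name[i])
		}
		h &= 0xffffff
		return int(h)
	}

	type entry struct {
		name string
		next *entry
	}

	var buckets [1024]*entry

	// lookup finds name in its bucket's chain, inserting it at the head on a miss,
	// the same shape as ilookup and getpkg.
	func lookup(name string) *entry {
		h := hashstr(name) % len(buckets)
		for e := buckets[h]; e != nil; e = e.next {
			if e.name == name {
				return e
			}
		}
		e := &entry{name: name, next: buckets[h]}
		buckets[h] = e
		return e
	}

	func main() {
		a := lookup("container/vector")
		b := lookup("container/vector")
		fmt.Println(a == b) // true: the second call finds the entry the first inserted
	}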

View file

@ -86,10 +86,7 @@ func addlib(ctxt *Link, src string, obj string, pathname string) {
* pkg: package import path, e.g. container/vector
*/
func addlibpath(ctxt *Link, srcref string, objref string, file string, pkg string) {
var i int
var l *Library
for i = 0; i < len(ctxt.Library); i++ {
for i := 0; i < len(ctxt.Library); i++ {
if file == ctxt.Library[i].File {
return
}
@ -100,7 +97,7 @@ func addlibpath(ctxt *Link, srcref string, objref string, file string, pkg strin
}
ctxt.Library = append(ctxt.Library, Library{})
l = &ctxt.Library[len(ctxt.Library)-1]
l := &ctxt.Library[len(ctxt.Library)-1]
l.Objref = objref
l.Srcref = srcref
l.File = file

View file

@ -285,39 +285,35 @@ func valuecmp(a *LSym, b *LSym) int {
}
func ldelf(f *Biobuf, pkg string, length int64, pn string) {
var err error
var base int32
var add uint64
var info uint64
var name string
var i int
var j int
var rela int
var is64 int
var n int
var flag int
var hdrbuf [64]uint8
var p []byte
var hdr *ElfHdrBytes
var elfobj *ElfObj
var sect *ElfSect
var rsect *ElfSect
var sym ElfSym
var e binary.ByteOrder
var r []Reloc
var rp *Reloc
var s *LSym
var symbols []*LSym
symbols = nil
symbols := []*LSym(nil)
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "%5.2f ldelf %s\n", obj.Cputime(), pn)
}
Ctxt.Version++
base = int32(Boffset(f))
base := int32(Boffset(f))
var add uint64
var e binary.ByteOrder
var elfobj *ElfObj
var err error
var flag int
var hdr *ElfHdrBytes
var hdrbuf [64]uint8
var info uint64
var is64 int
var j int
var n int
var name string
var p []byte
var r []Reloc
var rela int
var rp *Reloc
var rsect *ElfSect
var s *LSym
var sect *ElfSect
var sym ElfSym
if Bread(f, hdrbuf[:]) != len(hdrbuf) {
goto bad
}
@ -348,10 +344,8 @@ func ldelf(f *Biobuf, pkg string, length int64, pn string) {
is64 = 0
if hdr.Ident[4] == ElfClass64 {
var hdr *ElfHdrBytes64
is64 = 1
hdr = new(ElfHdrBytes64)
hdr := new(ElfHdrBytes64)
binary.Read(bytes.NewReader(hdrbuf[:]), binary.BigEndian, hdr) // only byte arrays; byte order doesn't matter
elfobj.type_ = uint32(e.Uint16(hdr.Type[:]))
elfobj.machine = uint32(e.Uint16(hdr.Machine[:]))
@ -426,7 +420,7 @@ func ldelf(f *Biobuf, pkg string, length int64, pn string) {
elfobj.sect = make([]ElfSect, elfobj.shnum)
elfobj.nsect = uint(elfobj.shnum)
for i = 0; uint(i) < elfobj.nsect; i++ {
for i := 0; uint(i) < elfobj.nsect; i++ {
if Bseek(f, int64(uint64(base)+elfobj.shoff+uint64(int64(i)*int64(elfobj.shentsize))), 0) < 0 {
goto bad
}
@ -478,7 +472,7 @@ func ldelf(f *Biobuf, pkg string, length int64, pn string) {
if err = elfmap(elfobj, sect); err != nil {
goto bad
}
for i = 0; uint(i) < elfobj.nsect; i++ {
for i := 0; uint(i) < elfobj.nsect; i++ {
if elfobj.sect[i].nameoff != 0 {
elfobj.sect[i].name = cstring(sect.base[elfobj.sect[i].nameoff:])
}
@ -517,7 +511,7 @@ func ldelf(f *Biobuf, pkg string, length int64, pn string) {
// as well use one large chunk.
// create symbols for elfmapped sections
for i = 0; uint(i) < elfobj.nsect; i++ {
for i := 0; uint(i) < elfobj.nsect; i++ {
sect = &elfobj.sect[i]
if (sect.type_ != ElfSectProgbits && sect.type_ != ElfSectNobits) || sect.flags&ElfSectFlagAlloc == 0 {
continue
@ -572,7 +566,7 @@ func ldelf(f *Biobuf, pkg string, length int64, pn string) {
Errorexit()
}
for i = 1; i < elfobj.nsymtab; i++ {
for i := 1; i < elfobj.nsymtab; i++ {
if err = readelfsym(elfobj, i, &sym, 1); err != nil {
goto bad
}
@ -645,7 +639,7 @@ func ldelf(f *Biobuf, pkg string, length int64, pn string) {
// Sort outer lists by address, adding to textp.
// This keeps textp in increasing address order.
for i = 0; uint(i) < elfobj.nsect; i++ {
for i := 0; uint(i) < elfobj.nsect; i++ {
s = elfobj.sect[i].sym
if s == nil {
continue
@ -676,7 +670,7 @@ func ldelf(f *Biobuf, pkg string, length int64, pn string) {
}
// load relocations
for i = 0; uint(i) < elfobj.nsect; i++ {
for i := 0; uint(i) < elfobj.nsect; i++ {
rsect = &elfobj.sect[i]
if rsect.type_ != ElfSectRela && rsect.type_ != ElfSectRel {
continue
@ -782,9 +776,7 @@ bad:
}
func section(elfobj *ElfObj, name string) *ElfSect {
var i int
for i = 0; uint(i) < elfobj.nsect; i++ {
for i := 0; uint(i) < elfobj.nsect; i++ {
if elfobj.sect[i].name != "" && name != "" && elfobj.sect[i].name == name {
return &elfobj.sect[i]
}
@ -812,8 +804,6 @@ func elfmap(elfobj *ElfObj, sect *ElfSect) (err error) {
}
func readelfsym(elfobj *ElfObj, i int, sym *ElfSym, needSym int) (err error) {
var s *LSym
if i >= elfobj.nsymtab || i < 0 {
err = fmt.Errorf("invalid elf symbol index")
return err
@ -845,7 +835,7 @@ func readelfsym(elfobj *ElfObj, i int, sym *ElfSym, needSym int) (err error) {
sym.other = b.Other
}
s = nil
s := (*LSym)(nil)
if sym.name == "_GLOBAL_OFFSET_TABLE_" {
sym.name = ".got"
}
@ -940,11 +930,8 @@ func (x rbyoff) Swap(i, j int) {
}
func (x rbyoff) Less(i, j int) bool {
var a *Reloc
var b *Reloc
a = &x[i]
b = &x[j]
a := &x[i]
b := &x[j]
if a.Off < b.Off {
return true
}

View file

@ -172,13 +172,8 @@ const (
)
func unpackcmd(p []byte, m *LdMachoObj, c *LdMachoCmd, type_ uint, sz uint) int {
var e4 func([]byte) uint32
var e8 func([]byte) uint64
var s *LdMachoSect
var i int
e4 = m.e.Uint32
e8 = m.e.Uint64
e4 := m.e.Uint32
e8 := m.e.Uint64
c.type_ = int(type_)
c.size = uint32(sz)
@ -204,7 +199,8 @@ func unpackcmd(p []byte, m *LdMachoObj, c *LdMachoCmd, type_ uint, sz uint) int
return -1
}
p = p[56:]
for i = 0; uint32(i) < c.seg.nsect; i++ {
var s *LdMachoSect
for i := 0; uint32(i) < c.seg.nsect; i++ {
s = &c.seg.sect[i]
s.name = cstring(p[0:16])
s.segname = cstring(p[16:32])
@ -238,7 +234,8 @@ func unpackcmd(p []byte, m *LdMachoObj, c *LdMachoCmd, type_ uint, sz uint) int
return -1
}
p = p[72:]
for i = 0; uint32(i) < c.seg.nsect; i++ {
var s *LdMachoSect
for i := 0; uint32(i) < c.seg.nsect; i++ {
s = &c.seg.sect[i]
s.name = cstring(p[0:16])
s.segname = cstring(p[16:32])
@ -293,24 +290,19 @@ func unpackcmd(p []byte, m *LdMachoObj, c *LdMachoCmd, type_ uint, sz uint) int
}
func macholoadrel(m *LdMachoObj, sect *LdMachoSect) int {
var rel []LdMachoRel
var r *LdMachoRel
var buf []byte
var p []byte
var i int
var n int
var v uint32
if sect.rel != nil || sect.nreloc == 0 {
return 0
}
rel = make([]LdMachoRel, sect.nreloc)
n = int(sect.nreloc * 8)
buf = make([]byte, n)
rel := make([]LdMachoRel, sect.nreloc)
n := int(sect.nreloc * 8)
buf := make([]byte, n)
if Bseek(m.f, m.base+int64(sect.reloff), 0) < 0 || Bread(m.f, buf) != n {
return -1
}
for i = 0; uint32(i) < sect.nreloc; i++ {
var p []byte
var r *LdMachoRel
var v uint32
for i := 0; uint32(i) < sect.nreloc; i++ {
r = &rel[i]
p = buf[i*8:]
r.addr = m.e.Uint32(p)
@ -347,56 +339,44 @@ func macholoadrel(m *LdMachoObj, sect *LdMachoSect) int {
}
func macholoaddsym(m *LdMachoObj, d *LdMachoDysymtab) int {
var p []byte
var i int
var n int
n := int(d.nindirectsyms)
n = int(d.nindirectsyms)
p = make([]byte, n*4)
p := make([]byte, n*4)
if Bseek(m.f, m.base+int64(d.indirectsymoff), 0) < 0 || Bread(m.f, p) != len(p) {
return -1
}
d.indir = make([]uint32, n)
for i = 0; i < n; i++ {
for i := 0; i < n; i++ {
d.indir[i] = m.e.Uint32(p[4*i:])
}
return 0
}
func macholoadsym(m *LdMachoObj, symtab *LdMachoSymtab) int {
var strbuf []byte
var symbuf []byte
var p []byte
var i int
var n int
var symsize int
var sym []LdMachoSym
var s *LdMachoSym
var v uint32
if symtab.sym != nil {
return 0
}
strbuf = make([]byte, symtab.strsize)
strbuf := make([]byte, symtab.strsize)
if Bseek(m.f, m.base+int64(symtab.stroff), 0) < 0 || Bread(m.f, strbuf) != len(strbuf) {
return -1
}
symsize = 12
symsize := 12
if m.is64 {
symsize = 16
}
n = int(symtab.nsym * uint32(symsize))
symbuf = make([]byte, n)
n := int(symtab.nsym * uint32(symsize))
symbuf := make([]byte, n)
if Bseek(m.f, m.base+int64(symtab.symoff), 0) < 0 || Bread(m.f, symbuf) != len(symbuf) {
return -1
}
sym = make([]LdMachoSym, symtab.nsym)
p = symbuf
for i = 0; uint32(i) < symtab.nsym; i++ {
sym := make([]LdMachoSym, symtab.nsym)
p := symbuf
var s *LdMachoSym
var v uint32
for i := 0; uint32(i) < symtab.nsym; i++ {
s = &sym[i]
v = m.e.Uint32(p)
if v >= symtab.strsize {
@ -421,13 +401,11 @@ func macholoadsym(m *LdMachoObj, symtab *LdMachoSymtab) int {
func ldmacho(f *Biobuf, pkg string, length int64, pn string) {
var err error
var i int
var j int
var is64 bool
var secaddr uint64
var hdr [7 * 4]uint8
var cmdp []byte
var tmp [4]uint8
var dat []byte
var ncmd uint32
var cmdsz uint32
@ -436,7 +414,6 @@ func ldmacho(f *Biobuf, pkg string, length int64, pn string) {
var off uint32
var m *LdMachoObj
var e binary.ByteOrder
var base int64
var sect *LdMachoSect
var rel *LdMachoRel
var rpi int
@ -452,7 +429,7 @@ func ldmacho(f *Biobuf, pkg string, length int64, pn string) {
var name string
Ctxt.Version++
base = Boffset(f)
base := Boffset(f)
if Bread(f, hdr[:]) != len(hdr) {
goto bad
}
@ -475,6 +452,7 @@ func ldmacho(f *Biobuf, pkg string, length int64, pn string) {
}
if is64 {
var tmp [4]uint8
Bread(f, tmp[:4]) // skip reserved word in header
}
@ -524,7 +502,7 @@ func ldmacho(f *Biobuf, pkg string, length int64, pn string) {
symtab = nil
dsymtab = nil
for i = 0; uint32(i) < ncmd; i++ {
for i := 0; uint32(i) < ncmd; i++ {
ty = e.Uint32(cmdp)
sz = e.Uint32(cmdp[4:])
m.cmd[i].off = off
@ -581,7 +559,7 @@ func ldmacho(f *Biobuf, pkg string, length int64, pn string) {
goto bad
}
for i = 0; uint32(i) < c.seg.nsect; i++ {
for i := 0; uint32(i) < c.seg.nsect; i++ {
sect = &c.seg.sect[i]
if sect.segname != "__TEXT" && sect.segname != "__DATA" {
continue
@ -623,8 +601,7 @@ func ldmacho(f *Biobuf, pkg string, length int64, pn string) {
// enter sub-symbols into symbol table.
// have to guess sizes from next symbol.
for i = 0; uint32(i) < symtab.nsym; i++ {
var v int
for i := 0; uint32(i) < symtab.nsym; i++ {
sym = &symtab.sym[i]
if sym.type_&N_STAB != 0 {
continue
@ -636,7 +613,7 @@ func ldmacho(f *Biobuf, pkg string, length int64, pn string) {
if name[0] == '_' && name[1] != '\x00' {
name = name[1:]
}
v = 0
v := 0
if sym.type_&N_EXT == 0 {
v = Ctxt.Version
}
@ -688,7 +665,7 @@ func ldmacho(f *Biobuf, pkg string, length int64, pn string) {
// Sort outer lists by address, adding to textp.
// This keeps textp in increasing address order.
for i = 0; uint32(i) < c.seg.nsect; i++ {
for i := 0; uint32(i) < c.seg.nsect; i++ {
sect = &c.seg.sect[i]
s = sect.sym
if s == nil {
@ -730,7 +707,7 @@ func ldmacho(f *Biobuf, pkg string, length int64, pn string) {
}
// load relocations
for i = 0; uint32(i) < c.seg.nsect; i++ {
for i := 0; uint32(i) < c.seg.nsect; i++ {
sect = &c.seg.sect[i]
s = sect.sym
if s == nil {
@ -746,9 +723,6 @@ func ldmacho(f *Biobuf, pkg string, length int64, pn string) {
rp = &r[rpi]
rel = &sect.rel[j]
if rel.scattered != 0 {
var k int
var ks *LdMachoSect
if Thearch.Thechar != '8' {
// mach-o only uses scattered relocation on 32-bit platforms
Diag("unexpected scattered relocation")
@ -792,54 +766,53 @@ func ldmacho(f *Biobuf, pkg string, length int64, pn string) {
// now consider the desired symbol.
// find the section where it lives.
for k = 0; uint32(k) < c.seg.nsect; k++ {
var ks *LdMachoSect
for k := 0; uint32(k) < c.seg.nsect; k++ {
ks = &c.seg.sect[k]
if ks.addr <= uint64(rel.value) && uint64(rel.value) < ks.addr+ks.size {
goto foundk
if ks.sym != nil {
rp.Sym = ks.sym
rp.Add += int64(uint64(rel.value) - ks.addr)
} else if ks.segname == "__IMPORT" && ks.name == "__pointers" {
// handle reference to __IMPORT/__pointers.
// how much worse can this get?
// why are we supporting 386 on the mac anyway?
rp.Type = 512 + MACHO_FAKE_GOTPCREL
// figure out which pointer this is a reference to.
k = int(uint64(ks.res1) + (uint64(rel.value)-ks.addr)/4)
// load indirect table for __pointers
// fetch symbol number
if dsymtab == nil || k < 0 || uint32(k) >= dsymtab.nindirectsyms || dsymtab.indir == nil {
err = fmt.Errorf("invalid scattered relocation: indirect symbol reference out of range")
goto bad
}
k = int(dsymtab.indir[k])
if k < 0 || uint32(k) >= symtab.nsym {
err = fmt.Errorf("invalid scattered relocation: symbol reference out of range")
goto bad
}
rp.Sym = symtab.sym[k].sym
} else {
err = fmt.Errorf("unsupported scattered relocation: reference to %s/%s", ks.segname, ks.name)
goto bad
}
rpi++
// skip #1 of 2 rel; continue skips #2 of 2.
j++
continue
}
}
err = fmt.Errorf("unsupported scattered relocation: invalid address %#x", rel.addr)
goto bad
foundk:
if ks.sym != nil {
rp.Sym = ks.sym
rp.Add += int64(uint64(rel.value) - ks.addr)
} else if ks.segname == "__IMPORT" && ks.name == "__pointers" {
// handle reference to __IMPORT/__pointers.
// how much worse can this get?
// why are we supporting 386 on the mac anyway?
rp.Type = 512 + MACHO_FAKE_GOTPCREL
// figure out which pointer this is a reference to.
k = int(uint64(ks.res1) + (uint64(rel.value)-ks.addr)/4)
// load indirect table for __pointers
// fetch symbol number
if dsymtab == nil || k < 0 || uint32(k) >= dsymtab.nindirectsyms || dsymtab.indir == nil {
err = fmt.Errorf("invalid scattered relocation: indirect symbol reference out of range")
goto bad
}
k = int(dsymtab.indir[k])
if k < 0 || uint32(k) >= symtab.nsym {
err = fmt.Errorf("invalid scattered relocation: symbol reference out of range")
goto bad
}
rp.Sym = symtab.sym[k].sym
} else {
err = fmt.Errorf("unsupported scattered relocation: reference to %s/%s", ks.segname, ks.name)
goto bad
}
rpi++
// skip #1 of 2 rel; continue skips #2 of 2.
j++
continue
}
rp.Siz = rel.length


@ -127,36 +127,31 @@ type PeObj struct {
}
func ldpe(f *Biobuf, pkg string, length int64, pn string) {
var err error
var name string
var base int32
var l uint32
var i int
var j int
var numaux int
var peobj *PeObj
var sect *PeSect
var rsect *PeSect
var symbuf [18]uint8
var s *LSym
var r []Reloc
var rp *Reloc
var sym *PeSym
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "%5.2f ldpe %s\n", obj.Cputime(), pn)
}
sect = nil
sect := (*PeSect)(nil)
Ctxt.Version++
base = int32(Boffset(f))
base := int32(Boffset(f))
peobj = new(PeObj)
peobj := new(PeObj)
peobj.f = f
peobj.base = uint32(base)
peobj.name = pn
// read header
var err error
var j int
var l uint32
var name string
var numaux int
var r []Reloc
var rp *Reloc
var rsect *PeSect
var s *LSym
var sym *PeSym
var symbuf [18]uint8
if err = binary.Read(f, binary.LittleEndian, &peobj.fh); err != nil {
goto bad
}
@ -165,7 +160,7 @@ func ldpe(f *Biobuf, pkg string, length int64, pn string) {
peobj.sect = make([]PeSect, peobj.fh.NumberOfSections)
peobj.nsect = uint(peobj.fh.NumberOfSections)
for i = 0; i < int(peobj.fh.NumberOfSections); i++ {
for i := 0; i < int(peobj.fh.NumberOfSections); i++ {
if err = binary.Read(f, binary.LittleEndian, &peobj.sect[i].sh); err != nil {
goto bad
}
@ -189,7 +184,7 @@ func ldpe(f *Biobuf, pkg string, length int64, pn string) {
}
// rewrite section names if they start with /
for i = 0; i < int(peobj.fh.NumberOfSections); i++ {
for i := 0; i < int(peobj.fh.NumberOfSections); i++ {
if peobj.sect[i].name == "" {
continue
}
@ -205,7 +200,7 @@ func ldpe(f *Biobuf, pkg string, length int64, pn string) {
peobj.npesym = uint(peobj.fh.NumberOfSymbols)
Bseek(f, int64(base)+int64(peobj.fh.PointerToSymbolTable), 0)
for i = 0; uint32(i) < peobj.fh.NumberOfSymbols; i += numaux + 1 {
for i := 0; uint32(i) < peobj.fh.NumberOfSymbols; i += numaux + 1 {
Bseek(f, int64(base)+int64(peobj.fh.PointerToSymbolTable)+int64(len(symbuf))*int64(i), 0)
if Bread(f, symbuf[:]) != len(symbuf) {
goto bad
@ -230,7 +225,7 @@ func ldpe(f *Biobuf, pkg string, length int64, pn string) {
}
// create symbols for mapped sections
for i = 0; uint(i) < peobj.nsect; i++ {
for i := 0; uint(i) < peobj.nsect; i++ {
sect = &peobj.sect[i]
if sect.sh.Characteristics&IMAGE_SCN_MEM_DISCARDABLE != 0 {
continue
@ -277,7 +272,7 @@ func ldpe(f *Biobuf, pkg string, length int64, pn string) {
}
// load relocations
for i = 0; uint(i) < peobj.nsect; i++ {
for i := 0; uint(i) < peobj.nsect; i++ {
rsect = &peobj.sect[i]
if rsect.sym == nil || rsect.sh.NumberOfRelocations == 0 {
continue
@ -298,12 +293,9 @@ func ldpe(f *Biobuf, pkg string, length int64, pn string) {
if Bread(f, symbuf[:10]) != 10 {
goto bad
}
var rva uint32
var symindex uint32
var type_ uint16
rva = Le32(symbuf[0:])
symindex = Le32(symbuf[4:])
type_ = Le16(symbuf[8:])
rva := Le32(symbuf[0:])
symindex := Le32(symbuf[4:])
type_ := Le16(symbuf[8:])
if err = readpesym(peobj, int(symindex), &sym); err != nil {
goto bad
}
@ -360,7 +352,7 @@ func ldpe(f *Biobuf, pkg string, length int64, pn string) {
}
// enter sub-symbols into symbol table.
for i = 0; uint(i) < peobj.npesym; i++ {
for i := 0; uint(i) < peobj.npesym; i++ {
if peobj.pesym[i].name == "" {
continue
}
@ -429,7 +421,7 @@ func ldpe(f *Biobuf, pkg string, length int64, pn string) {
// Sort outer lists by address, adding to textp.
// This keeps textp in increasing address order.
for i = 0; uint(i) < peobj.nsect; i++ {
for i := 0; uint(i) < peobj.nsect; i++ {
s = peobj.sect[i].sym
if s == nil {
continue
@ -486,18 +478,15 @@ func issect(s *PeSym) bool {
}
func readpesym(peobj *PeObj, i int, y **PeSym) (err error) {
var s *LSym
var sym *PeSym
var name string
if uint(i) >= peobj.npesym || i < 0 {
err = fmt.Errorf("invalid pe symbol index")
return err
}
sym = &peobj.pesym[i]
sym := &peobj.pesym[i]
*y = sym
var name string
if issect(sym) {
name = peobj.sect[sym.sectnum-1].sym.Name
} else {
@ -515,6 +504,7 @@ func readpesym(peobj *PeObj, i int, y **PeSym) (err error) {
name = name[:i]
}
var s *LSym
switch sym.type_ {
default:
err = fmt.Errorf("%s: invalid symbol type %d", sym.name, sym.type_)


@ -294,16 +294,13 @@ func mayberemoveoutfile() {
}
func libinit() {
var suffix string
var suffixsep string
Funcalign = Thearch.Funcalign
mywhatsys() // get goroot, goarch, goos
// add goroot to the end of the libdir list.
suffix = ""
suffix := ""
suffixsep = ""
suffixsep := ""
if flag_installsuffix != "" {
suffixsep = "_"
suffix = flag_installsuffix
@ -353,11 +350,9 @@ func Errorexit() {
func loadinternal(name string) {
var pname string
var i int
var found int
found = 0
for i = 0; i < len(Ctxt.Libdir); i++ {
found := 0
for i := 0; i < len(Ctxt.Libdir); i++ {
pname = fmt.Sprintf("%s/%s.a", Ctxt.Libdir[i], name)
if Debug['v'] != 0 {
fmt.Fprintf(&Bso, "searching for %s.a in %s\n", name, pname)
@ -375,15 +370,8 @@ func loadinternal(name string) {
}
func loadlib() {
var i int
var w int
var x int
var s *LSym
var tlsg *LSym
var cgostrsym string
if Flag_shared != 0 {
s = Linklookup(Ctxt, "runtime.islibrary", 0)
s := Linklookup(Ctxt, "runtime.islibrary", 0)
s.Dupok = 1
Adduint8(Ctxt, s, 1)
}
@ -396,6 +384,7 @@ func loadlib() {
loadinternal("runtime/race")
}
var i int
for i = 0; i < len(Ctxt.Library); i++ {
if Debug['v'] > 1 {
fmt.Fprintf(&Bso, "%5.2f autolib: %s (from %s)\n", obj.Cputime(), Ctxt.Library[i].File, Ctxt.Library[i].Objref)
@ -438,7 +427,7 @@ func loadlib() {
}
// Pretend that we really imported the package.
s = Linklookup(Ctxt, "go.importpath.runtime/cgo.", 0)
s := Linklookup(Ctxt, "go.importpath.runtime/cgo.", 0)
s.Type = SDATA
s.Dupok = 1
@ -446,10 +435,10 @@ func loadlib() {
// Provided by the code that imports the package.
// Since we are simulating the import, we have to provide this string.
cgostrsym = "go.string.\"runtime/cgo\""
cgostrsym := "go.string.\"runtime/cgo\""
if Linkrlookup(Ctxt, cgostrsym, 0) == nil {
s = Linklookup(Ctxt, cgostrsym, 0)
s := Linklookup(Ctxt, cgostrsym, 0)
s.Type = SRODATA
s.Reachable = true
addstrdata(cgostrsym, "runtime/cgo")
@ -459,7 +448,7 @@ func loadlib() {
if Linkmode == LinkInternal {
// Drop all the cgo_import_static declarations.
// Turns out we won't be needing them.
for s = Ctxt.Allsym; s != nil; s = s.Allsym {
for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if s.Type == SHOSTOBJ {
// If a symbol was marked both
// cgo_import_static and cgo_import_dynamic,
@ -474,7 +463,7 @@ func loadlib() {
}
}
tlsg = Linklookup(Ctxt, "runtime.tlsg", 0)
tlsg := Linklookup(Ctxt, "runtime.tlsg", 0)
// For most ports, runtime.tlsg is a placeholder symbol for TLS
// relocation. However, the Android and Darwin arm ports need it
@ -492,13 +481,13 @@ func loadlib() {
Ctxt.Tlsg = tlsg
// Now that we know the link mode, trim the dynexp list.
x = CgoExportDynamic
x := CgoExportDynamic
if Linkmode == LinkExternal {
x = CgoExportStatic
}
w = 0
for i = 0; i < len(dynexp); i++ {
w := 0
for i := 0; i < len(dynexp); i++ {
if int(dynexp[i].Cgoexport)&x != 0 {
dynexp[w] = dynexp[i]
w++
@ -564,12 +553,6 @@ func nextar(bp *Biobuf, off int64, a *ArHdr) int64 {
}
func objfile(file string, pkg string) {
var off int64
var l int64
var f *Biobuf
var pname string
var arhdr ArHdr
pkg = pathtoprefix(pkg)
if Debug['v'] > 1 {
@ -577,6 +560,7 @@ func objfile(file string, pkg string) {
}
Bflush(&Bso)
var err error
var f *Biobuf
f, err = Bopenr(file)
if err != nil {
Diag("cannot open file %s: %v", file, err)
@ -586,7 +570,7 @@ func objfile(file string, pkg string) {
magbuf := make([]byte, len(ARMAG))
if Bread(f, magbuf) != len(magbuf) || !strings.HasPrefix(string(magbuf), ARMAG) {
/* load it as a regular file */
l = Bseek(f, 0, 2)
l := Bseek(f, 0, 2)
Bseek(f, 0, 0)
ldobj(f, pkg, l, file, file, FileObj)
@ -596,9 +580,11 @@ func objfile(file string, pkg string) {
}
/* skip over optional __.GOSYMDEF and process __.PKGDEF */
off = Boffset(f)
off := Boffset(f)
l = nextar(f, off, &arhdr)
var arhdr ArHdr
l := nextar(f, off, &arhdr)
var pname string
if l <= 0 {
Diag("%s: short read on archive file symbol header", file)
goto out
@ -684,12 +670,8 @@ var internalpkg = []string{
}
func ldhostobj(ld func(*Biobuf, string, int64, string), f *Biobuf, pkg string, length int64, pn string, file string) {
var i int
var isinternal int
var h *Hostobj
isinternal = 0
for i = 0; i < len(internalpkg); i++ {
isinternal := 0
for i := 0; i < len(internalpkg); i++ {
if pkg == internalpkg[i] {
isinternal = 1
break
@ -713,7 +695,7 @@ func ldhostobj(ld func(*Biobuf, string, int64, string), f *Biobuf, pkg string, l
}
hostobj = append(hostobj, Hostobj{})
h = &hostobj[len(hostobj)-1]
h := &hostobj[len(hostobj)-1]
h.ld = ld
h.pkg = pkg
h.pn = pn
@ -723,11 +705,10 @@ func ldhostobj(ld func(*Biobuf, string, int64, string), f *Biobuf, pkg string, l
}
func hostobjs() {
var i int
var f *Biobuf
var h *Hostobj
for i = 0; i < len(hostobj); i++ {
for i := 0; i < len(hostobj); i++ {
h = &hostobj[i]
var err error
f, err = Bopenr(h.file)
@ -750,8 +731,6 @@ func rmtemp() {
}
func hostlinksetup() {
var p string
if Linkmode != LinkExternal {
return
}
@ -769,7 +748,7 @@ func hostlinksetup() {
// change our output to temporary object file
cout.Close()
p = fmt.Sprintf("%s/go.o", tmpdir)
p := fmt.Sprintf("%s/go.o", tmpdir)
var err error
cout, err = os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0775)
if err != nil {
@ -783,14 +762,6 @@ func hostlinksetup() {
var hostlink_buf = make([]byte, 64*1024)
func hostlink() {
var p string
var argv []string
var i int
var n int
var length int
var h *Hostobj
var f *Biobuf
if Linkmode != LinkExternal || nerrors > 0 {
return
}
@ -798,6 +769,7 @@ func hostlink() {
if extld == "" {
extld = "gcc"
}
var argv []string
argv = append(argv, extld)
switch Thearch.Thechar {
case '8':
@ -851,7 +823,12 @@ func hostlink() {
// already wrote main object file
// copy host objects to temporary directory
for i = 0; i < len(hostobj); i++ {
var f *Biobuf
var h *Hostobj
var length int
var n int
var p string
for i := 0; i < len(hostobj); i++ {
h = &hostobj[i]
var err error
f, err = Bopenr(h.file)
@ -886,7 +863,7 @@ func hostlink() {
length -= n
}
if err = w.Close(); err != nil {
if err := w.Close(); err != nil {
Ctxt.Cursym = nil
Diag("cannot write %s: %v", p, err)
Errorexit()
@ -896,6 +873,7 @@ func hostlink() {
}
argv = append(argv, fmt.Sprintf("%s/go.o", tmpdir))
var i int
for i = 0; i < len(ldflag); i++ {
argv = append(argv, ldflag[i])
}
@ -935,30 +913,18 @@ func hostlink() {
}
func ldobj(f *Biobuf, pkg string, length int64, pn string, file string, whence int) {
var line string
var c1 int
var c2 int
var c3 int
var c4 int
var magic uint32
var import0 int64
var import1 int64
var eof int64
var start int64
var t string
eof = Boffset(f) + length
eof := Boffset(f) + length
pn = pn
start = Boffset(f)
c1 = Bgetc(f)
c2 = Bgetc(f)
c3 = Bgetc(f)
c4 = Bgetc(f)
start := Boffset(f)
c1 := Bgetc(f)
c2 := Bgetc(f)
c3 := Bgetc(f)
c4 := Bgetc(f)
Bseek(f, start, 0)
magic = uint32(c1)<<24 | uint32(c2)<<16 | uint32(c3)<<8 | uint32(c4)
magic := uint32(c1)<<24 | uint32(c2)<<16 | uint32(c3)<<8 | uint32(c4)
if magic == 0x7f454c46 { // \x7F E L F
ldhostobj(ldelf, f, pkg, length, pn, file)
return
@ -975,8 +941,11 @@ func ldobj(f *Biobuf, pkg string, length int64, pn string, file string, whence i
}
/* check the header */
line = Brdline(f, '\n')
line := Brdline(f, '\n')
var import0 int64
var import1 int64
var t string
if line == "" {
if Blinelen(f) > 0 {
Diag("%s: not an object file", pn)
@ -1055,9 +1024,7 @@ eof:
}
func zerosig(sp string) {
var s *LSym
s = Linklookup(Ctxt, sp, 0)
s := Linklookup(Ctxt, sp, 0)
s.Sig = 0
}
@ -1097,44 +1064,40 @@ func pathtoprefix(s string) string {
for i := 0; i < len(s); i++ {
c := s[i]
if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
goto escape
var buf bytes.Buffer
for i := 0; i < len(s); i++ {
c := s[i]
if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
fmt.Fprintf(&buf, "%%%02x", c)
continue
}
buf.WriteByte(c)
}
return buf.String()
}
}
return s
escape:
var buf bytes.Buffer
for i := 0; i < len(s); i++ {
c := s[i]
if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
fmt.Fprintf(&buf, "%%%02x", c)
continue
}
buf.WriteByte(c)
}
return buf.String()
}
func iconv(p string) string {
var fp string
if p == "" {
var fp string
fp += "<nil>"
return fp
}
p = pathtoprefix(p)
var fp string
fp += p
return fp
}
func addsection(seg *Segment, name string, rwx int) *Section {
var l **Section
var sect *Section
for l = &seg.Sect; *l != nil; l = &(*l).Next {
}
sect = new(Section)
sect := new(Section)
sect.Rwx = uint8(rwx)
sect.Name = name
sect.Seg = seg
@ -1197,7 +1160,6 @@ func callsize() int {
func dostkcheck() {
var ch Chain
var s *LSym
morestack = Linklookup(Ctxt, "runtime.morestack", 0)
newstack = Linklookup(Ctxt, "runtime.newstack", 0)
@ -1215,7 +1177,7 @@ func dostkcheck() {
// Check every function, but do the nosplit functions in a first pass,
// to make the printed failure chains as short as possible.
for s = Ctxt.Textp; s != nil; s = s.Next {
for s := Ctxt.Textp; s != nil; s = s.Next {
// runtime.racesymbolizethunk is called from gcc-compiled C
// code running on the operating system thread stack.
// It uses more than the usual amount of stack but that's okay.
@ -1230,7 +1192,7 @@ func dostkcheck() {
}
}
for s = Ctxt.Textp; s != nil; s = s.Next {
for s := Ctxt.Textp; s != nil; s = s.Next {
if s.Nosplit == 0 {
Ctxt.Cursym = s
ch.sym = s
@ -1240,17 +1202,8 @@ func dostkcheck() {
}
func stkcheck(up *Chain, depth int) int {
var ch Chain
var ch1 Chain
var s *LSym
var limit int
var r *Reloc
var ri int
var endr int
var pcsp Pciter
limit = up.limit
s = up.sym
limit := up.limit
s := up.sym
// Don't duplicate work: only need to consider each
// function at top of safe zone once.
@ -1288,12 +1241,16 @@ func stkcheck(up *Chain, depth int) int {
return 0
}
var ch Chain
ch.up = up
// Walk through sp adjustments in function, consuming relocs.
ri = 0
ri := 0
endr = len(s.R)
endr := len(s.R)
var ch1 Chain
var pcsp Pciter
var r *Reloc
for pciterinit(Ctxt, &pcsp, &s.Pcln.Pcsp); pcsp.done == 0; pciternext(&pcsp) {
// pcsp.value is in effect for [pcsp.pc, pcsp.nextpc).
@ -1384,16 +1341,12 @@ func stkprint(ch *Chain, limit int) {
func Yconv(s *LSym) string {
var fp string
var fmt_ string
var i int
var str string
if s == nil {
fp += fmt.Sprintf("<nil>")
} else {
fmt_ = ""
fmt_ := ""
fmt_ += fmt.Sprintf("%s @0x%08x [%d]", s.Name, int64(s.Value), int64(s.Size))
for i = 0; int64(i) < s.Size; i++ {
for i := 0; int64(i) < s.Size; i++ {
if i%8 == 0 {
fmt_ += fmt.Sprintf("\n\t0x%04x ", i)
}
@ -1401,11 +1354,11 @@ func Yconv(s *LSym) string {
}
fmt_ += fmt.Sprintf("\n")
for i = 0; i < len(s.R); i++ {
for i := 0; i < len(s.R); i++ {
fmt_ += fmt.Sprintf("\t0x%04x[%x] %d %s[%x]\n", s.R[i].Off, s.R[i].Siz, s.R[i].Type, s.R[i].Sym.Name, int64(s.R[i].Add))
}
str = fmt_
str := fmt_
fp += str
}
@ -1439,9 +1392,7 @@ func usage() {
}
func setheadtype(s string) {
var h int
h = headtype(s)
h := headtype(s)
if h < 0 {
fmt.Fprintf(os.Stderr, "unknown header type -H %s\n", s)
Errorexit()
@ -1462,13 +1413,9 @@ func doversion() {
}
func genasmsym(put func(*LSym, string, int, int64, int64, int, *LSym)) {
var a *Auto
var s *LSym
var off int32
// These symbols won't show up in the first loop below because we
// skip STEXT symbols. Normal STEXT symbols are emitted by walking textp.
s = Linklookup(Ctxt, "runtime.text", 0)
s := Linklookup(Ctxt, "runtime.text", 0)
if s.Type == STEXT {
put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), nil)
@ -1478,7 +1425,7 @@ func genasmsym(put func(*LSym, string, int, int64, int64, int, *LSym)) {
put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), nil)
}
for s = Ctxt.Allsym; s != nil; s = s.Allsym {
for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if s.Hide != 0 || (s.Name[0] == '.' && s.Version == 0 && s.Name != ".rathole") {
continue
}
@ -1518,7 +1465,9 @@ func genasmsym(put func(*LSym, string, int, int64, int64, int, *LSym)) {
}
}
for s = Ctxt.Textp; s != nil; s = s.Next {
var a *Auto
var off int32
for s := Ctxt.Textp; s != nil; s = s.Next {
put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), s.Gotype)
// NOTE(ality): acid can't produce a stack trace without .frame symbols
@ -1568,9 +1517,7 @@ func Symaddr(s *LSym) int64 {
}
func xdefine(p string, t int, v int64) {
var s *LSym
s = Linklookup(Ctxt, p, 0)
s := Linklookup(Ctxt, p, 0)
s.Type = int16(t)
s.Value = v
s.Reachable = true
@ -1589,14 +1536,11 @@ func datoff(addr int64) int64 {
}
func Entryvalue() int64 {
var a string
var s *LSym
a = INITENTRY
a := INITENTRY
if a[0] >= '0' && a[0] <= '9' {
return atolwhex(a)
}
s = Linklookup(Ctxt, a, 0)
s := Linklookup(Ctxt, a, 0)
if s.Type == 0 {
return INITTEXT
}
@ -1607,11 +1551,10 @@ func Entryvalue() int64 {
}
func undefsym(s *LSym) {
var i int
var r *Reloc
Ctxt.Cursym = s
for i = 0; i < len(s.R); i++ {
for i := 0; i < len(s.R); i++ {
r = &s.R[i]
if r.Sym == nil { // happens for some external ARM relocs
continue
@ -1626,12 +1569,10 @@ func undefsym(s *LSym) {
}
func undef() {
var s *LSym
for s = Ctxt.Textp; s != nil; s = s.Next {
for s := Ctxt.Textp; s != nil; s = s.Next {
undefsym(s)
}
for s = datap; s != nil; s = s.Next {
for s := datap; s != nil; s = s.Next {
undefsym(s)
}
if nerrors > 0 {
@ -1640,15 +1581,13 @@ func undef() {
}
func callgraph() {
var s *LSym
var r *Reloc
var i int
if Debug['c'] == 0 {
return
}
for s = Ctxt.Textp; s != nil; s = s.Next {
var i int
var r *Reloc
for s := Ctxt.Textp; s != nil; s = s.Next {
for i = 0; i < len(s.R); i++ {
r = &s.R[i]
if r.Sym == nil {
@ -1678,11 +1617,6 @@ func Diag(format string, args ...interface{}) {
}
func checkgo() {
var s *LSym
var r *Reloc
var i int
var changed int
if Debug['C'] == 0 {
return
}
@ -1691,6 +1625,10 @@ func checkgo() {
// which would simplify this logic quite a bit.
// Mark every Go-called C function with cfunc=2, recursively.
var changed int
var i int
var r *Reloc
var s *LSym
for {
changed = 0
for s = Ctxt.Textp; s != nil; s = s.Next {
@ -1716,7 +1654,7 @@ func checkgo() {
// Complain about Go-called C functions that can split the stack
// (that can be preempted for garbage collection or trigger a stack copy).
for s = Ctxt.Textp; s != nil; s = s.Next {
for s := Ctxt.Textp; s != nil; s = s.Next {
if s.Cfunc == 0 || (s.Cfunc == 2 && s.Nosplit != 0) {
for i = 0; i < len(s.R); i++ {
r = &s.R[i]
@ -1736,13 +1674,11 @@ func checkgo() {
}
func Rnd(v int64, r int64) int64 {
var c int64
if r <= 0 {
return v
}
v += r - 1
c = v % r
c := v % r
if c < 0 {
c += r
}

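A side note on the Rnd helper whose body closes the lib.go excerpt above: it rounds v up to the next multiple of r, and the negative-remainder fixup keeps that true for negative v as well. Below is a minimal standalone sketch; the trailing subtraction and return are assumed, since the excerpt cuts off right after the fixup, and the lowercase name is only illustrative.

package main

import "fmt"

// rnd mirrors the Rnd helper above: round v up to a multiple of r.
// The final "v - c" and return are assumed from context; the excerpt
// stops after the negative-remainder adjustment.
func rnd(v int64, r int64) int64 {
    if r <= 0 {
        return v
    }
    v += r - 1
    c := v % r
    if c < 0 {
        c += r
    }
    return v - c
}

func main() {
    fmt.Println(rnd(10, 8)) // 16
    fmt.Println(rnd(16, 8)) // 16
    fmt.Println(rnd(-3, 8)) // 0
}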

@ -156,14 +156,12 @@ func newMachoLoad(type_ uint32, ndata uint32) *MachoLoad {
}
func newMachoSeg(name string, msect int) *MachoSeg {
var s *MachoSeg
if nseg >= len(seg) {
Diag("too many segs")
Errorexit()
}
s = &seg[nseg]
s := &seg[nseg]
nseg++
s.name = name
s.msect = uint32(msect)
@ -172,14 +170,12 @@ func newMachoSeg(name string, msect int) *MachoSeg {
}
func newMachoSect(seg *MachoSeg, name string, segname string) *MachoSect {
var s *MachoSect
if seg.nsect >= seg.msect {
Diag("too many sects in segment %s", seg.name)
Errorexit()
}
s = &seg.sect[seg.nsect]
s := &seg.sect[seg.nsect]
seg.nsect++
s.name = name
s.segname = segname
@ -196,18 +192,10 @@ var ndylib int
var linkoff int64
func machowrite() int {
var o1 int64
var loadsize int
var i int
var j int
var s *MachoSeg
var t *MachoSect
var l *MachoLoad
o1 := Cpos()
o1 = Cpos()
loadsize = 4 * 4 * ndebug
for i = 0; i < len(load); i++ {
loadsize := 4 * 4 * ndebug
for i := 0; i < len(load); i++ {
loadsize += 4 * (len(load[i].data) + 2)
}
if macho64 {
@ -237,7 +225,10 @@ func machowrite() int {
Thearch.Lput(0) /* reserved */
}
for i = 0; i < nseg; i++ {
var j int
var s *MachoSeg
var t *MachoSect
for i := 0; i < nseg; i++ {
s = &seg[i]
if macho64 {
Thearch.Lput(25) /* segment 64 */
@ -296,7 +287,8 @@ func machowrite() int {
}
}
for i = 0; i < len(load); i++ {
var l *MachoLoad
for i := 0; i < len(load); i++ {
l = &load[i]
Thearch.Lput(l.type_)
Thearch.Lput(4 * (uint32(len(l.data)) + 2))
@ -309,14 +301,12 @@ func machowrite() int {
}
func domacho() {
var s *LSym
if Debug['d'] != 0 {
return
}
// empirically, string table must begin with " \x00".
s = Linklookup(Ctxt, ".machosymstr", 0)
s := Linklookup(Ctxt, ".machosymstr", 0)
s.Type = SMACHOSYMSTR
s.Reachable = true
@ -328,7 +318,7 @@ func domacho() {
s.Reachable = true
if Linkmode != LinkExternal {
s = Linklookup(Ctxt, ".plt", 0) // will be __symbol_stub
s := Linklookup(Ctxt, ".plt", 0) // will be __symbol_stub
s.Type = SMACHOPLT
s.Reachable = true
@ -364,12 +354,9 @@ func Machoadddynlib(lib string) {
}
func machoshbits(mseg *MachoSeg, sect *Section, segname string) {
var msect *MachoSect
var buf string
buf := "__" + strings.Replace(sect.Name[1:], ".", "_", -1)
buf = "__" + strings.Replace(sect.Name[1:], ".", "_", -1)
msect = newMachoSect(mseg, buf, segname)
msect := newMachoSect(mseg, buf, segname)
if sect.Rellen > 0 {
msect.reloc = uint32(sect.Reloff)
msect.nreloc = uint32(sect.Rellen / 8)
@ -413,20 +400,10 @@ func machoshbits(mseg *MachoSeg, sect *Section, segname string) {
}
func Asmbmacho() {
var v int64
var w int64
var va int64
var a int
var i int
var mh *MachoHdr
var ms *MachoSeg
var ml *MachoLoad
var sect *Section
/* apple MACH */
va = INITTEXT - int64(HEADR)
va := INITTEXT - int64(HEADR)
mh = getMachoHdr()
mh := getMachoHdr()
switch Thearch.Thechar {
default:
Diag("unknown mach architecture")
@ -446,7 +423,7 @@ func Asmbmacho() {
mh.subcpu = MACHO_SUBCPU_X86
}
ms = nil
ms := (*MachoSeg)(nil)
if Linkmode == LinkExternal {
/* segment for entire file */
ms = newMachoSeg("", 40)
@ -462,7 +439,7 @@ func Asmbmacho() {
}
/* text */
v = Rnd(int64(uint64(HEADR)+Segtext.Length), int64(INITRND))
v := Rnd(int64(uint64(HEADR)+Segtext.Length), int64(INITRND))
if Linkmode != LinkExternal {
ms = newMachoSeg("__TEXT", 20)
@ -474,13 +451,13 @@ func Asmbmacho() {
ms.prot2 = 5
}
for sect = Segtext.Sect; sect != nil; sect = sect.Next {
for sect := Segtext.Sect; sect != nil; sect = sect.Next {
machoshbits(ms, sect, "__TEXT")
}
/* data */
if Linkmode != LinkExternal {
w = int64(Segdata.Length)
w := int64(Segdata.Length)
ms = newMachoSeg("__DATA", 20)
ms.vaddr = uint64(va) + uint64(v)
ms.vsize = uint64(w)
@ -490,7 +467,7 @@ func Asmbmacho() {
ms.prot2 = 3
}
for sect = Segdata.Sect; sect != nil; sect = sect.Next {
for sect := Segdata.Sect; sect != nil; sect = sect.Next {
machoshbits(ms, sect, "__DATA")
}
@ -502,20 +479,20 @@ func Asmbmacho() {
fallthrough
case '5':
ml = newMachoLoad(5, 17+2) /* unix thread */
ml := newMachoLoad(5, 17+2) /* unix thread */
ml.data[0] = 1 /* thread type */
ml.data[1] = 17 /* word count */
ml.data[2+15] = uint32(Entryvalue()) /* start pc */
case '6':
ml = newMachoLoad(5, 42+2) /* unix thread */
ml := newMachoLoad(5, 42+2) /* unix thread */
ml.data[0] = 4 /* thread type */
ml.data[1] = 42 /* word count */
ml.data[2+32] = uint32(Entryvalue()) /* start pc */
ml.data[2+32+1] = uint32(Entryvalue() >> 16 >> 16) // hide >>32 for 8l
case '8':
ml = newMachoLoad(5, 16+2) /* unix thread */
ml := newMachoLoad(5, 16+2) /* unix thread */
ml.data[0] = 1 /* thread type */
ml.data[1] = 16 /* word count */
ml.data[2+10] = uint32(Entryvalue()) /* start pc */
@ -523,20 +500,15 @@ func Asmbmacho() {
}
if Debug['d'] == 0 {
var s1 *LSym
var s2 *LSym
var s3 *LSym
var s4 *LSym
// must match domacholink below
s1 = Linklookup(Ctxt, ".machosymtab", 0)
s1 := Linklookup(Ctxt, ".machosymtab", 0)
s2 = Linklookup(Ctxt, ".linkedit.plt", 0)
s3 = Linklookup(Ctxt, ".linkedit.got", 0)
s4 = Linklookup(Ctxt, ".machosymstr", 0)
s2 := Linklookup(Ctxt, ".linkedit.plt", 0)
s3 := Linklookup(Ctxt, ".linkedit.got", 0)
s4 := Linklookup(Ctxt, ".machosymstr", 0)
if Linkmode != LinkExternal {
ms = newMachoSeg("__LINKEDIT", 0)
ms := newMachoSeg("__LINKEDIT", 0)
ms.vaddr = uint64(va) + uint64(v) + uint64(Rnd(int64(Segdata.Length), int64(INITRND)))
ms.vsize = uint64(s1.Size) + uint64(s2.Size) + uint64(s3.Size) + uint64(s4.Size)
ms.fileoffset = uint64(linkoff)
@ -545,7 +517,7 @@ func Asmbmacho() {
ms.prot2 = 3
}
ml = newMachoLoad(2, 4) /* LC_SYMTAB */
ml := newMachoLoad(2, 4) /* LC_SYMTAB */
ml.data[0] = uint32(linkoff) /* symoff */
ml.data[1] = uint32(nsortsym) /* nsyms */
ml.data[2] = uint32(linkoff + s1.Size + s2.Size + s3.Size) /* stroff */
@ -554,11 +526,11 @@ func Asmbmacho() {
machodysymtab()
if Linkmode != LinkExternal {
ml = newMachoLoad(14, 6) /* LC_LOAD_DYLINKER */
ml.data[0] = 12 /* offset to string */
ml := newMachoLoad(14, 6) /* LC_LOAD_DYLINKER */
ml.data[0] = 12 /* offset to string */
stringtouint32(ml.data[1:], "/usr/lib/dyld")
for i = 0; i < len(dylib); i++ {
for i := 0; i < len(dylib); i++ {
ml = newMachoLoad(12, 4+(uint32(len(dylib[i]))+1+7)/8*2) /* LC_LOAD_DYLIB */
ml.data[0] = 24 /* offset of string from beginning of load */
ml.data[1] = 0 /* time stamp */
@ -574,7 +546,7 @@ func Asmbmacho() {
dwarfaddmachoheaders()
}
a = machowrite()
a := machowrite()
if int32(a) > HEADR {
Diag("HEADR too small: %d > %d", a, HEADR)
}
@ -624,16 +596,11 @@ func (x machoscmp) Swap(i, j int) {
}
func (x machoscmp) Less(i, j int) bool {
var s1 *LSym
var s2 *LSym
var k1 int
var k2 int
s1 := x[i]
s2 := x[j]
s1 = x[i]
s2 = x[j]
k1 = symkind(s1)
k2 = symkind(s2)
k1 := symkind(s1)
k2 := symkind(s2)
if k1 != k2 {
return k1-k2 < 0
}
@ -642,10 +609,8 @@ func (x machoscmp) Less(i, j int) bool {
}
func machogenasmsym(put func(*LSym, string, int, int64, int64, int, *LSym)) {
var s *LSym
genasmsym(put)
for s = Ctxt.Allsym; s != nil; s = s.Allsym {
for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if s.Type == SDYNIMPORT || s.Type == SHOSTOBJ {
if s.Reachable {
put(s, "", 'D', 0, 0, 0, nil)
@ -655,12 +620,10 @@ func machogenasmsym(put func(*LSym, string, int, int64, int64, int, *LSym)) {
}
func machosymorder() {
var i int
// On Mac OS X Mountain Lion, we must sort exported symbols
// So we sort them here and pre-allocate dynid for them
// See http://golang.org/issue/4029
for i = 0; i < len(dynexp); i++ {
for i := 0; i < len(dynexp); i++ {
dynexp[i].Reachable = true
}
machogenasmsym(addsym)
@ -668,23 +631,20 @@ func machosymorder() {
nsortsym = 0
machogenasmsym(addsym)
sort.Sort(machoscmp(sortsym[:nsortsym]))
for i = 0; i < nsortsym; i++ {
for i := 0; i < nsortsym; i++ {
sortsym[i].Dynid = int32(i)
}
}
func machosymtab() {
var i int
var symtab *LSym
var symstr *LSym
var s *LSym
var o *LSym
var p string
symtab = Linklookup(Ctxt, ".machosymtab", 0)
symstr = Linklookup(Ctxt, ".machosymstr", 0)
symtab := Linklookup(Ctxt, ".machosymtab", 0)
symstr := Linklookup(Ctxt, ".machosymstr", 0)
for i = 0; i < nsortsym; i++ {
for i := 0; i < nsortsym; i++ {
s = sortsym[i]
Adduint32(Ctxt, symtab, uint32(symstr.Size))
@ -737,15 +697,9 @@ func machosymtab() {
}
func machodysymtab() {
var n int
var ml *MachoLoad
var s1 *LSym
var s2 *LSym
var s3 *LSym
ml := newMachoLoad(11, 18) /* LC_DYSYMTAB */
ml = newMachoLoad(11, 18) /* LC_DYSYMTAB */
n = 0
n := 0
ml.data[0] = uint32(n) /* ilocalsym */
ml.data[1] = uint32(nkind[SymKindLocal]) /* nlocalsym */
n += nkind[SymKindLocal]
@ -765,10 +719,10 @@ func machodysymtab() {
ml.data[11] = 0 /* nextrefsyms */
// must match domacholink below
s1 = Linklookup(Ctxt, ".machosymtab", 0)
s1 := Linklookup(Ctxt, ".machosymtab", 0)
s2 = Linklookup(Ctxt, ".linkedit.plt", 0)
s3 = Linklookup(Ctxt, ".linkedit.got", 0)
s2 := Linklookup(Ctxt, ".linkedit.plt", 0)
s3 := Linklookup(Ctxt, ".linkedit.got", 0)
ml.data[12] = uint32(linkoff + s1.Size) /* indirectsymoff */
ml.data[13] = uint32((s2.Size + s3.Size) / 4) /* nindirectsyms */
@ -779,20 +733,14 @@ func machodysymtab() {
}
func Domacholink() int64 {
var size int
var s1 *LSym
var s2 *LSym
var s3 *LSym
var s4 *LSym
machosymtab()
// write data that will be linkedit section
s1 = Linklookup(Ctxt, ".machosymtab", 0)
s1 := Linklookup(Ctxt, ".machosymtab", 0)
s2 = Linklookup(Ctxt, ".linkedit.plt", 0)
s3 = Linklookup(Ctxt, ".linkedit.got", 0)
s4 = Linklookup(Ctxt, ".machosymstr", 0)
s2 := Linklookup(Ctxt, ".linkedit.plt", 0)
s3 := Linklookup(Ctxt, ".linkedit.got", 0)
s4 := Linklookup(Ctxt, ".machosymstr", 0)
// Force the linkedit section to end on a 16-byte
// boundary. This allows pure (non-cgo) Go binaries
@ -815,7 +763,7 @@ func Domacholink() int64 {
Adduint8(Ctxt, s4, 0)
}
size = int(s1.Size + s2.Size + s3.Size + s4.Size)
size := int(s1.Size + s2.Size + s3.Size + s4.Size)
if size > 0 {
linkoff = Rnd(int64(uint64(HEADR)+Segtext.Length), int64(INITRND)) + Rnd(int64(Segdata.Filelen), int64(INITRND)) + Rnd(int64(Segdwarf.Filelen), int64(INITRND))
@ -831,17 +779,13 @@ func Domacholink() int64 {
}
func machorelocsect(sect *Section, first *LSym) {
var sym *LSym
var eaddr int32
var ri int
var r *Reloc
// If main section has no bits, nothing to relocate.
if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen {
return
}
sect.Reloff = uint64(Cpos())
var sym *LSym
for sym = first; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
@ -851,7 +795,9 @@ func machorelocsect(sect *Section, first *LSym) {
}
}
eaddr = int32(sect.Vaddr + sect.Length)
eaddr := int32(sect.Vaddr + sect.Length)
var r *Reloc
var ri int
for ; sym != nil; sym = sym.Next {
if !sym.Reachable {
continue
@ -876,17 +822,15 @@ func machorelocsect(sect *Section, first *LSym) {
}
func Machoemitreloc() {
var sect *Section
for Cpos()&7 != 0 {
Cput(0)
}
machorelocsect(Segtext.Sect, Ctxt.Textp)
for sect = Segtext.Sect.Next; sect != nil; sect = sect.Next {
for sect := Segtext.Sect.Next; sect != nil; sect = sect.Next {
machorelocsect(sect, datap)
}
for sect = Segdata.Sect; sect != nil; sect = sect.Next {
for sect := Segdata.Sect; sect != nil; sect = sect.Next {
machorelocsect(sect, datap)
}
}


@ -17,23 +17,19 @@ var startmagic string = "\x00\x00go13ld"
var endmagic string = "\xff\xffgo13ld"
func ldobjfile(ctxt *Link, f *Biobuf, pkg string, length int64, pn string) {
var c int
var buf [8]uint8
var start int64
var lib string
start = Boffset(f)
start := Boffset(f)
ctxt.Version++
buf = [8]uint8{}
buf := [8]uint8{}
Bread(f, buf[:])
if string(buf[:]) != startmagic {
log.Fatalf("%s: invalid file start %x %x %x %x %x %x %x %x", pn, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7])
}
c = Bgetc(f)
c := Bgetc(f)
if c != 1 {
log.Fatalf("%s: invalid file version number %d", pn, c)
}
var lib string
for {
lib = rdstring(f)
if lib == "" {
@ -65,45 +61,28 @@ func ldobjfile(ctxt *Link, f *Biobuf, pkg string, length int64, pn string) {
var readsym_ndup int
func readsym(ctxt *Link, f *Biobuf, pkg string, pn string) {
var i int
var j int
var c int
var t int
var v int
var n int
var nreloc int
var size int
var dupok int
var name string
var data []byte
var r *Reloc
var s *LSym
var dup *LSym
var typ *LSym
var pc *Pcln
var a *Auto
if Bgetc(f) != 0xfe {
log.Fatalf("readsym out of sync")
}
t = int(rdint(f))
name = expandpkg(rdstring(f), pkg)
v = int(rdint(f))
t := int(rdint(f))
name := expandpkg(rdstring(f), pkg)
v := int(rdint(f))
if v != 0 && v != 1 {
log.Fatalf("invalid symbol version %d", v)
}
dupok = int(rdint(f))
dupok := int(rdint(f))
dupok &= 1
size = int(rdint(f))
typ = rdsym(ctxt, f, pkg)
size := int(rdint(f))
typ := rdsym(ctxt, f, pkg)
var data []byte
rddata(f, &data)
nreloc = int(rdint(f))
nreloc := int(rdint(f))
if v != 0 {
v = ctxt.Version
}
s = Linklookup(ctxt, name, v)
dup = nil
s := Linklookup(ctxt, name, v)
dup := (*LSym)(nil)
if s.Type != 0 && s.Type != SXREF {
if (t == SDATA || t == SBSS || t == SNOPTRBSS) && len(data) == 0 && nreloc == 0 {
if s.Size < int64(size) {
@ -155,7 +134,8 @@ overwrite:
if nreloc > 0 {
s.R = make([]Reloc, nreloc)
s.R = s.R[:nreloc]
for i = 0; i < nreloc; i++ {
var r *Reloc
for i := 0; i < nreloc; i++ {
r = &s.R[i]
r.Off = int32(rdint(f))
r.Siz = uint8(rdint(f))
@ -179,11 +159,12 @@ overwrite:
s.Args = int32(rdint(f))
s.Locals = int32(rdint(f))
s.Nosplit = uint8(rdint(f))
v = int(rdint(f))
v := int(rdint(f))
s.Leaf = uint8(v & 1)
s.Cfunc = uint8(v & 2)
n = int(rdint(f))
for i = 0; i < n; i++ {
n := int(rdint(f))
var a *Auto
for i := 0; i < n; i++ {
a = new(Auto)
a.Asym = rdsym(ctxt, f, pkg)
a.Aoffset = int32(rdint(f))
@ -194,30 +175,30 @@ overwrite:
}
s.Pcln = new(Pcln)
pc = s.Pcln
pc := s.Pcln
rddata(f, &pc.Pcsp.P)
rddata(f, &pc.Pcfile.P)
rddata(f, &pc.Pcline.P)
n = int(rdint(f))
pc.Pcdata = make([]Pcdata, n)
pc.Npcdata = n
for i = 0; i < n; i++ {
for i := 0; i < n; i++ {
rddata(f, &pc.Pcdata[i].P)
}
n = int(rdint(f))
pc.Funcdata = make([]*LSym, n)
pc.Funcdataoff = make([]int64, n)
pc.Nfuncdata = n
for i = 0; i < n; i++ {
for i := 0; i < n; i++ {
pc.Funcdata[i] = rdsym(ctxt, f, pkg)
}
for i = 0; i < n; i++ {
for i := 0; i < n; i++ {
pc.Funcdataoff[i] = rdint(f)
}
n = int(rdint(f))
pc.File = make([]*LSym, n)
pc.Nfile = n
for i = 0; i < n; i++ {
for i := 0; i < n; i++ {
pc.File[i] = rdsym(ctxt, f, pkg)
}
@ -257,7 +238,9 @@ overwrite:
fmt.Fprintf(ctxt.Bso, " args=%#x locals=%#x", uint64(s.Args), uint64(s.Locals))
}
fmt.Fprintf(ctxt.Bso, "\n")
for i = 0; i < len(s.P); {
var c int
var j int
for i := 0; i < len(s.P); {
fmt.Fprintf(ctxt.Bso, "\t%#04x", uint(i))
for j = i; j < i+16 && j < len(s.P); j++ {
fmt.Fprintf(ctxt.Bso, " %02x", s.P[j])
@ -279,7 +262,8 @@ overwrite:
i += 16
}
for i = 0; i < len(s.R); i++ {
var r *Reloc
for i := 0; i < len(s.R); i++ {
r = &s.R[i]
fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%d %s+%d\n", int(r.Off), r.Siz, r.Type, r.Sym.Name, int64(r.Add))
}
@ -288,11 +272,9 @@ overwrite:
func rdint(f *Biobuf) int64 {
var c int
var uv uint64
var shift int
uv = 0
for shift = 0; ; shift += 7 {
uv := uint64(0)
for shift := 0; ; shift += 7 {
if shift >= 64 {
log.Fatalf("corrupt input")
}
@ -322,12 +304,7 @@ func rddata(f *Biobuf, pp *[]byte) {
var symbuf []byte
func rdsym(ctxt *Link, f *Biobuf, pkg string) *LSym {
var n int
var v int
var p string
var s *LSym
n = int(rdint(f))
n := int(rdint(f))
if n == 0 {
rdint(f)
return nil
@ -337,25 +314,23 @@ func rdsym(ctxt *Link, f *Biobuf, pkg string) *LSym {
symbuf = make([]byte, n)
}
Bread(f, symbuf[:n])
p = string(symbuf[:n])
v = int(rdint(f))
p := string(symbuf[:n])
v := int(rdint(f))
if v != 0 {
v = ctxt.Version
}
s = Linklookup(ctxt, expandpkg(p, pkg), v)
s := Linklookup(ctxt, expandpkg(p, pkg), v)
if v == 0 && s.Name[0] == '$' && s.Type == 0 {
if strings.HasPrefix(s.Name, "$f32.") {
var i32 int32
x, _ := strconv.ParseUint(s.Name[5:], 16, 32)
i32 = int32(x)
i32 := int32(x)
s.Type = SRODATA
Adduint32(ctxt, s, uint32(i32))
s.Reachable = false
} else if strings.HasPrefix(s.Name, "$f64.") || strings.HasPrefix(s.Name, "$i64.") {
var i64 int64
x, _ := strconv.ParseUint(s.Name[5:], 16, 64)
i64 = int64(x)
i64 := int64(x)
s.Type = SRODATA
Adduint64(ctxt, s, uint64(i64))
s.Reachable = false

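For readers puzzling over the $f32./$f64. branches in rdsym above: the part of the symbol name after the prefix appears to be the hexadecimal image of the floating-point bit pattern, which rdsym parses with strconv.ParseUint and re-emits as raw data. A small hedged sketch of that naming scheme follows; it only demonstrates the round trip and is not the linker's API.

package main

import (
    "fmt"
    "math"
    "strconv"
)

func main() {
    // Build the kind of name rdsym expects: prefix plus hex float bits.
    name := fmt.Sprintf("$f32.%08x", math.Float32bits(1.5))
    fmt.Println(name) // $f32.3fc00000

    // Recover the constant the way rdsym does: parse the hex suffix
    // back into the 32-bit pattern.
    x, _ := strconv.ParseUint(name[5:], 16, 32)
    fmt.Println(math.Float32frombits(uint32(x))) // 1.5
}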

@ -40,13 +40,9 @@ import (
// iteration over encoded pcdata tables.
func getvarint(pp *[]byte) uint32 {
var p []byte
var shift int
var v uint32
v = 0
p = *pp
for shift = 0; ; shift += 7 {
v := uint32(0)
p := *pp
for shift := 0; ; shift += 7 {
v |= uint32(p[0]&0x7F) << uint(shift)
tmp4 := p
p = p[1:]
@ -60,9 +56,6 @@ func getvarint(pp *[]byte) uint32 {
}
func pciternext(it *Pciter) {
var v uint32
var dv int32
it.pc = it.nextpc
if it.done != 0 {
return
@ -73,7 +66,7 @@ func pciternext(it *Pciter) {
}
// value delta
v = getvarint(&it.p)
v := getvarint(&it.p)
if v == 0 && it.start == 0 {
it.done = 1
@ -81,7 +74,7 @@ func pciternext(it *Pciter) {
}
it.start = 0
dv = int32(v>>1) ^ (int32(v<<31) >> 31)
dv := int32(v>>1) ^ (int32(v<<31) >> 31)
it.value += dv
// pc delta
@ -107,12 +100,8 @@ func pciterinit(ctxt *Link, it *Pciter, d *Pcdata) {
// license that can be found in the LICENSE file.
func addvarint(d *Pcdata, val uint32) {
var n int32
var v uint32
var p []byte
n = 0
for v = val; v >= 0x80; v >>= 7 {
n := int32(0)
for v := val; v >= 0x80; v >>= 7 {
n++
}
n++
@ -123,7 +112,8 @@ func addvarint(d *Pcdata, val uint32) {
}
d.P = d.P[:old+int(n)]
p = d.P[old:]
p := d.P[old:]
var v uint32
for v = val; v >= 0x80; v >>= 7 {
p[0] = byte(v | 0x80)
p = p[1:]
@ -132,9 +122,7 @@ func addvarint(d *Pcdata, val uint32) {
}
func addpctab(ftab *LSym, off int32, d *Pcdata) int32 {
var start int32
start = int32(len(ftab.P))
start := int32(len(ftab.P))
Symgrow(Ctxt, ftab, int64(start)+int64(len(d.P)))
copy(ftab.P[start:], d.P)
@ -142,29 +130,18 @@ func addpctab(ftab *LSym, off int32, d *Pcdata) int32 {
}
func ftabaddstring(ftab *LSym, s string) int32 {
var n int32
var start int32
n = int32(len(s)) + 1
start = int32(len(ftab.P))
n := int32(len(s)) + 1
start := int32(len(ftab.P))
Symgrow(Ctxt, ftab, int64(start)+int64(n)+1)
copy(ftab.P[start:], s)
return start
}
func renumberfiles(ctxt *Link, files []*LSym, d *Pcdata) {
var i int
var f *LSym
var out Pcdata
var it Pciter
var v uint32
var oldval int32
var newval int32
var val int32
var dv int32
// Give files numbers.
for i = 0; i < len(files); i++ {
for i := 0; i < len(files); i++ {
f = files[i]
if f.Type != SFILEPATH {
ctxt.Nhistfile++
@ -175,9 +152,14 @@ func renumberfiles(ctxt *Link, files []*LSym, d *Pcdata) {
}
}
newval = -1
out = Pcdata{}
newval := int32(-1)
out := Pcdata{}
var dv int32
var it Pciter
var oldval int32
var v uint32
var val int32
for pciterinit(ctxt, &it, d); it.done == 0; pciternext(&it) {
// value delta
oldval = it.value
@ -221,22 +203,8 @@ func container(s *LSym) int {
var pclntab_zpcln Pcln
func pclntab() {
var i int32
var nfunc int32
var start int32
var funcstart int32
var ftab *LSym
var s *LSym
var last *LSym
var off int32
var end int32
var frameptrsize int32
var funcdata_bytes int64
var pcln *Pcln
var it Pciter
funcdata_bytes = 0
ftab = Linklookup(Ctxt, "runtime.pclntab", 0)
funcdata_bytes := int64(0)
ftab := Linklookup(Ctxt, "runtime.pclntab", 0)
ftab.Type = SPCLNTAB
ftab.Reachable = true
@ -246,7 +214,7 @@ func pclntab() {
// function table, alternating PC and offset to func struct [each entry thearch.ptrsize bytes]
// end PC [thearch.ptrsize bytes]
// offset to file table [4 bytes]
nfunc = 0
nfunc := int32(0)
for Ctxt.Cursym = Ctxt.Textp; Ctxt.Cursym != nil; Ctxt.Cursym = Ctxt.Cursym.Next {
if container(Ctxt.Cursym) == 0 {
@ -261,7 +229,14 @@ func pclntab() {
setuintxx(Ctxt, ftab, 8, uint64(nfunc), int64(Thearch.Ptrsize))
nfunc = 0
last = nil
last := (*LSym)(nil)
var end int32
var frameptrsize int32
var funcstart int32
var i int32
var it Pciter
var off int32
var pcln *Pcln
for Ctxt.Cursym = Ctxt.Textp; Ctxt.Cursym != nil; Ctxt.Cursym = Ctxt.Cursym.Next {
last = Ctxt.Cursym
if container(Ctxt.Cursym) != 0 {
@ -366,14 +341,14 @@ func pclntab() {
setaddrplus(Ctxt, ftab, 8+int64(Thearch.Ptrsize)+int64(nfunc)*2*int64(Thearch.Ptrsize), last, last.Size)
// Start file table.
start = int32(len(ftab.P))
start := int32(len(ftab.P))
start += int32(-len(ftab.P)) & (int32(Thearch.Ptrsize) - 1)
setuint32(Ctxt, ftab, 8+int64(Thearch.Ptrsize)+int64(nfunc)*2*int64(Thearch.Ptrsize)+int64(Thearch.Ptrsize), uint32(start))
Symgrow(Ctxt, ftab, int64(start)+(int64(Ctxt.Nhistfile)+1)*4)
setuint32(Ctxt, ftab, int64(start), uint32(Ctxt.Nhistfile))
for s = Ctxt.Filesyms; s != nil; s = s.Next {
for s := Ctxt.Filesyms; s != nil; s = s.Next {
setuint32(Ctxt, ftab, int64(start)+s.Value*4, uint32(ftabaddstring(ftab, s.Name)))
}
@ -394,43 +369,32 @@ const (
// findfunctab generates a lookup table to quickly find the containing
// function for a pc. See src/runtime/symtab.go:findfunc for details.
func findfunctab() {
var t *LSym
var s *LSym
var e *LSym
var idx int32
var i int32
var j int32
var nbuckets int32
var n int32
var base int32
var min int64
var max int64
var p int64
var q int64
var indexes []int32
t = Linklookup(Ctxt, "runtime.findfunctab", 0)
t := Linklookup(Ctxt, "runtime.findfunctab", 0)
t.Type = SRODATA
t.Reachable = true
// find min and max address
min = Ctxt.Textp.Value
min := Ctxt.Textp.Value
max = 0
for s = Ctxt.Textp; s != nil; s = s.Next {
max := int64(0)
for s := Ctxt.Textp; s != nil; s = s.Next {
max = s.Value + s.Size
}
// for each subbucket, compute the minimum of all symbol indexes
// that map to that subbucket.
n = int32((max - min + SUBBUCKETSIZE - 1) / SUBBUCKETSIZE)
n := int32((max - min + SUBBUCKETSIZE - 1) / SUBBUCKETSIZE)
indexes = make([]int32, n)
for i = 0; i < n; i++ {
indexes := make([]int32, n)
for i := int32(0); i < n; i++ {
indexes[i] = NOIDX
}
idx = 0
for s = Ctxt.Textp; s != nil; s = s.Next {
idx := int32(0)
var e *LSym
var i int32
var p int64
var q int64
for s := Ctxt.Textp; s != nil; s = s.Next {
if container(s) != 0 {
continue
}
@ -461,12 +425,14 @@ func findfunctab() {
}
// allocate table
nbuckets = int32((max - min + BUCKETSIZE - 1) / BUCKETSIZE)
nbuckets := int32((max - min + BUCKETSIZE - 1) / BUCKETSIZE)
Symgrow(Ctxt, t, 4*int64(nbuckets)+int64(n))
// fill in table
for i = 0; i < nbuckets; i++ {
var base int32
var j int32
for i := int32(0); i < nbuckets; i++ {
base = indexes[i*SUBBUCKETS]
if base == NOIDX {
Diag("hole in findfunctab")

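The pcln excerpt above leans on two encodings: a base-128 varint with low-order groups first (getvarint/addvarint) and a zig-zag transform for signed value deltas (the v>>1 ^ (v<<31)>>31 line in pciternext). Here is a standalone sketch of both, using illustrative names that are not part of the linker; it simply mirrors the byte layout shown in the diff.

package main

import "fmt"

// putvarint appends v in 7-bit groups, low bits first; the top bit of
// each byte means "more bytes follow" (the layout addvarint writes).
func putvarint(buf []byte, v uint32) []byte {
    for v >= 0x80 {
        buf = append(buf, byte(v)|0x80)
        v >>= 7
    }
    return append(buf, byte(v))
}

// readvarint decodes one varint and returns the remaining bytes
// (the layout getvarint reads).
func readvarint(p []byte) (uint32, []byte) {
    var v uint32
    for shift := uint(0); ; shift += 7 {
        b := p[0]
        p = p[1:]
        v |= uint32(b&0x7F) << shift
        if b&0x80 == 0 {
            break
        }
    }
    return v, p
}

// zigzag folds signed deltas into small unsigned values so that -1 and +1
// both fit in one byte; unzigzag is the inverse used in pciternext.
func zigzag(dv int32) uint32  { return uint32(dv<<1) ^ uint32(dv>>31) }
func unzigzag(v uint32) int32 { return int32(v>>1) ^ (int32(v<<31) >> 31) }

func main() {
    buf := putvarint(nil, zigzag(-3))
    v, _ := readvarint(buf)
    fmt.Println(buf, unzigzag(v)) // [5] -3
}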

@ -371,14 +371,12 @@ var coffsym []COFFSym
var ncoffsym int
func addpesection(name string, sectsize int, filesize int) *IMAGE_SECTION_HEADER {
var h *IMAGE_SECTION_HEADER
if pensect == 16 {
Diag("too many sections")
Errorexit()
}
h = &sh[pensect]
h := &sh[pensect]
pensect++
copy(h.Name[:], name)
h.VirtualSize = uint32(sectsize)
@ -466,14 +464,11 @@ func strput(s string) {
}
func initdynimport() *Dll {
var m *Imp
var d *Dll
var s *LSym
var dynamic *LSym
dr = nil
m = nil
for s = Ctxt.Allsym; s != nil; s = s.Allsym {
m := (*Imp)(nil)
for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if !s.Reachable || s.Type != SDYNIMPORT {
continue
}
@ -497,10 +492,10 @@ func initdynimport() *Dll {
d.ms = m
}
dynamic = Linklookup(Ctxt, ".windynamic", 0)
dynamic := Linklookup(Ctxt, ".windynamic", 0)
dynamic.Reachable = true
dynamic.Type = SWINDOWS
for d = dr; d != nil; d = d.next {
for d := dr; d != nil; d = d.next {
for m = d.ms; m != nil; m = m.next {
m.s.Type = SWINDOWS | SSUB
m.s.Sub = dynamic.Sub
@ -516,35 +511,26 @@ func initdynimport() *Dll {
}
func addimports(datsect *IMAGE_SECTION_HEADER) {
var isect *IMAGE_SECTION_HEADER
var n uint64
var oftbase uint64
var ftbase uint64
var startoff int64
var endoff int64
var m *Imp
var d *Dll
var dynamic *LSym
startoff = Cpos()
dynamic = Linklookup(Ctxt, ".windynamic", 0)
startoff := Cpos()
dynamic := Linklookup(Ctxt, ".windynamic", 0)
// skip import descriptor table (will write it later)
n = 0
n := uint64(0)
for d = dr; d != nil; d = d.next {
for d := dr; d != nil; d = d.next {
n++
}
Cseek(startoff + int64(binary.Size(&IMAGE_IMPORT_DESCRIPTOR{}))*int64(n+1))
// write dll names
for d = dr; d != nil; d = d.next {
for d := dr; d != nil; d = d.next {
d.nameoff = uint64(Cpos()) - uint64(startoff)
strput(d.name)
}
// write function names
for d = dr; d != nil; d = d.next {
var m *Imp
for d := dr; d != nil; d = d.next {
for m = d.ms; m != nil; m = m.next {
m.off = uint64(nextsectoff) + uint64(Cpos()) - uint64(startoff)
Wputl(0) // hint
@ -553,10 +539,10 @@ func addimports(datsect *IMAGE_SECTION_HEADER) {
}
// write OriginalFirstThunks
oftbase = uint64(Cpos()) - uint64(startoff)
oftbase := uint64(Cpos()) - uint64(startoff)
n = uint64(Cpos())
for d = dr; d != nil; d = d.next {
for d := dr; d != nil; d = d.next {
d.thunkoff = uint64(Cpos()) - n
for m = d.ms; m != nil; m = m.next {
if pe64 != 0 {
@ -576,17 +562,17 @@ func addimports(datsect *IMAGE_SECTION_HEADER) {
// add pe section and pad it at the end
n = uint64(Cpos()) - uint64(startoff)
isect = addpesection(".idata", int(n), int(n))
isect := addpesection(".idata", int(n), int(n))
isect.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE
chksectoff(isect, startoff)
strnput("", int(uint64(isect.SizeOfRawData)-n))
endoff = Cpos()
endoff := Cpos()
// write FirstThunks (allocated in .data section)
ftbase = uint64(dynamic.Value) - uint64(datsect.VirtualAddress) - PEBASE
ftbase := uint64(dynamic.Value) - uint64(datsect.VirtualAddress) - PEBASE
Cseek(int64(uint64(datsect.PointerToRawData) + ftbase))
for d = dr; d != nil; d = d.next {
for d := dr; d != nil; d = d.next {
for m = d.ms; m != nil; m = m.next {
if pe64 != 0 {
Vputl(m.off)
@ -605,7 +591,7 @@ func addimports(datsect *IMAGE_SECTION_HEADER) {
// finally write import descriptor table
Cseek(startoff)
for d = dr; d != nil; d = d.next {
for d := dr; d != nil; d = d.next {
Lputl(uint32(uint64(isect.VirtualAddress) + oftbase + d.thunkoff))
Lputl(0)
Lputl(0)
@ -640,19 +626,14 @@ func (x pescmp) Swap(i, j int) {
}
func (x pescmp) Less(i, j int) bool {
var s1 *LSym
var s2 *LSym
s1 = x[i]
s2 = x[j]
s1 := x[i]
s2 := x[j]
return stringsCompare(s1.Extname, s2.Extname) < 0
}
func initdynexport() {
var s *LSym
nexport = 0
for s = Ctxt.Allsym; s != nil; s = s.Allsym {
for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if !s.Reachable || s.Cgoexport&CgoExportDynamic == 0 {
continue
}
@ -669,18 +650,10 @@ func initdynexport() {
}
func addexports() {
var sect *IMAGE_SECTION_HEADER
var e IMAGE_EXPORT_DIRECTORY
var size int
var i int
var va int
var va_name int
var va_addr int
var va_na int
var v int
size = binary.Size(&e) + 10*nexport + len(outfile) + 1
for i = 0; i < nexport; i++ {
size := binary.Size(&e) + 10*nexport + len(outfile) + 1
for i := 0; i < nexport; i++ {
size += len(dexport[i].Extname) + 1
}
@ -688,16 +661,16 @@ func addexports() {
return
}
sect = addpesection(".edata", size, size)
sect := addpesection(".edata", size, size)
sect.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ
chksectoff(sect, Cpos())
va = int(sect.VirtualAddress)
va := int(sect.VirtualAddress)
dd[IMAGE_DIRECTORY_ENTRY_EXPORT].VirtualAddress = uint32(va)
dd[IMAGE_DIRECTORY_ENTRY_EXPORT].Size = sect.VirtualSize
va_name = va + binary.Size(&e) + nexport*4
va_addr = va + binary.Size(&e)
va_na = va + binary.Size(&e) + nexport*8
va_name := va + binary.Size(&e) + nexport*4
va_addr := va + binary.Size(&e)
va_na := va + binary.Size(&e) + nexport*8
e.Characteristics = 0
e.MajorVersion = 0
@ -714,37 +687,35 @@ func addexports() {
binary.Write(&coutbuf, binary.LittleEndian, &e)
// put EXPORT Address Table
for i = 0; i < nexport; i++ {
for i := 0; i < nexport; i++ {
Lputl(uint32(dexport[i].Value - PEBASE))
}
// put EXPORT Name Pointer Table
v = int(e.Name + uint32(len(outfile)) + 1)
v := int(e.Name + uint32(len(outfile)) + 1)
for i = 0; i < nexport; i++ {
for i := 0; i < nexport; i++ {
Lputl(uint32(v))
v += len(dexport[i].Extname) + 1
}
// put EXPORT Ordinal Table
for i = 0; i < nexport; i++ {
for i := 0; i < nexport; i++ {
Wputl(uint16(i))
}
// put Names
strnput(outfile, len(outfile)+1)
for i = 0; i < nexport; i++ {
for i := 0; i < nexport; i++ {
strnput(dexport[i].Extname, len(dexport[i].Extname)+1)
}
strnput("", int(sect.SizeOfRawData-uint32(size)))
}
func dope() {
var rel *LSym
/* relocation table */
rel = Linklookup(Ctxt, ".rel", 0)
rel := Linklookup(Ctxt, ".rel", 0)
rel.Reachable = true
rel.Type = SELFROSECT
@ -768,25 +739,19 @@ func strtbladd(name string) int {
* <http://www.microsoft.com/whdc/system/platform/firmware/PECOFFdwn.mspx>
*/
func newPEDWARFSection(name string, size int64) *IMAGE_SECTION_HEADER {
var h *IMAGE_SECTION_HEADER
var s string
var off int
if size == 0 {
return nil
}
off = strtbladd(name)
s = fmt.Sprintf("/%d", off)
h = addpesection(s, int(size), int(size))
off := strtbladd(name)
s := fmt.Sprintf("/%d", off)
h := addpesection(s, int(size), int(size))
h.Characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE
return h
}
func addpesym(s *LSym, name string, type_ int, addr int64, size int64, ver int, gotype *LSym) {
var cs *COFFSym
if s == nil {
return
}
@ -806,7 +771,7 @@ func addpesym(s *LSym, name string, type_ int, addr int64, size int64, ver int,
}
if coffsym != nil {
cs = &coffsym[ncoffsym]
cs := &coffsym[ncoffsym]
cs.sym = s
if len(s.Name) > 8 {
cs.strtbloff = strtbladd(s.Name)
@ -828,11 +793,6 @@ func addpesym(s *LSym, name string, type_ int, addr int64, size int64, ver int,
}
func addpesymtable() {
var h *IMAGE_SECTION_HEADER
var i int
var size int
var s *COFFSym
if Debug['s'] == 0 {
genasmsym(addpesym)
coffsym = make([]COFFSym, ncoffsym)
@ -840,15 +800,16 @@ func addpesymtable() {
genasmsym(addpesym)
}
size = len(strtbl) + 4 + 18*ncoffsym
h = addpesection(".symtab", size, size)
size := len(strtbl) + 4 + 18*ncoffsym
h := addpesection(".symtab", size, size)
h.Characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_DISCARDABLE
chksectoff(h, Cpos())
fh.PointerToSymbolTable = uint32(Cpos())
fh.NumberOfSymbols = uint32(ncoffsym)
// put COFF symbol table
for i = 0; i < ncoffsym; i++ {
var s *COFFSym
for i := 0; i < ncoffsym; i++ {
s = &coffsym[i]
if s.strtbloff == 0 {
strnput(s.sym.Name, 8)
@ -867,7 +828,7 @@ func addpesymtable() {
// put COFF string table
Lputl(uint32(len(strtbl)) + 4)
for i = 0; i < len(strtbl); i++ {
for i := 0; i < len(strtbl); i++ {
Cput(uint8(strtbl[i]))
}
strnput("", int(h.SizeOfRawData-uint32(size)))
@ -882,22 +843,19 @@ func setpersrc(sym *LSym) {
}
func addpersrc() {
var h *IMAGE_SECTION_HEADER
var p []byte
var val uint32
var r *Reloc
var ri int
if rsrcsym == nil {
return
}
h = addpesection(".rsrc", int(rsrcsym.Size), int(rsrcsym.Size))
h := addpesection(".rsrc", int(rsrcsym.Size), int(rsrcsym.Size))
h.Characteristics = IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE | IMAGE_SCN_CNT_INITIALIZED_DATA
chksectoff(h, Cpos())
// relocation
for ri = 0; ri < len(rsrcsym.R); ri++ {
var p []byte
var r *Reloc
var val uint32
for ri := 0; ri < len(rsrcsym.R); ri++ {
r = &rsrcsym.R[ri]
p = rsrcsym.P[r.Off:]
val = uint32(int64(h.VirtualAddress) + r.Add)
@ -920,9 +878,6 @@ func addpersrc() {
}
func Asmbpe() {
var t *IMAGE_SECTION_HEADER
var d *IMAGE_SECTION_HEADER
switch Thearch.Thechar {
default:
Diag("unknown PE architecture")
@ -936,12 +891,12 @@ func Asmbpe() {
fh.Machine = IMAGE_FILE_MACHINE_I386
}
t = addpesection(".text", int(Segtext.Length), int(Segtext.Length))
t := addpesection(".text", int(Segtext.Length), int(Segtext.Length))
t.Characteristics = IMAGE_SCN_CNT_CODE | IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_EXECUTE | IMAGE_SCN_MEM_READ
chksectseg(t, &Segtext)
textsect = pensect
d = addpesection(".data", int(Segdata.Length), int(Segdata.Filelen))
d := addpesection(".data", int(Segdata.Length), int(Segdata.Filelen))
d.Characteristics = IMAGE_SCN_CNT_INITIALIZED_DATA | IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE
chksectseg(d, &Segdata)
datasect = pensect


@ -63,21 +63,18 @@ var headers = []struct {
}
func linknew(arch *LinkArch) *Link {
var ctxt *Link
var p string
var buf string
ctxt = new(Link)
ctxt := new(Link)
ctxt.Hash = make(map[symVer]*LSym)
ctxt.Arch = arch
ctxt.Version = HistVersion
ctxt.Goroot = obj.Getgoroot()
p = obj.Getgoarch()
p := obj.Getgoarch()
if p != arch.Name {
log.Fatalf("invalid goarch %s (want %s)", p, arch.Name)
}
var buf string
buf, _ = os.Getwd()
if buf == "" {
buf = "/???"
@ -149,7 +146,7 @@ func linknew(arch *LinkArch) *Link {
// On arm, record goarm.
if ctxt.Arch.Thechar == '5' {
p = obj.Getgoarm()
p := obj.Getgoarm()
if p != "" {
ctxt.Goarm = int32(obj.Atoi(p))
} else {
@ -161,9 +158,7 @@ func linknew(arch *LinkArch) *Link {
}
func linknewsym(ctxt *Link, symb string, v int) *LSym {
var s *LSym
s = new(LSym)
s := new(LSym)
*s = LSym{}
s.Dynid = -1
@ -215,9 +210,7 @@ func Linkrlookup(ctxt *Link, name string, v int) *LSym {
var headstr_buf string
func Headstr(v int) string {
var i int
for i = 0; i < len(headers); i++ {
for i := 0; i < len(headers); i++ {
if v == headers[i].val {
return headers[i].name
}
@ -227,9 +220,7 @@ func Headstr(v int) string {
}
func headtype(name string) int {
var i int
for i = 0; i < len(headers); i++ {
for i := 0; i < len(headers); i++ {
if name == headers[i].name {
return headers[i].val
}

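The linknew excerpt above allocates ctxt.Hash as map[symVer]*LSym, which is what makes symbol lookup version-aware: the same name can exist once per version (0 for exported symbols, the per-file version otherwise, as the rdsym/readsym code earlier suggests). Below is a rough sketch of that lookup shape; the struct fields and the symVer key are simplified assumptions, not the linker's exact types.

package main

import "fmt"

type LSym struct {
    Name    string
    Version int
}

// symVer is assumed to pair a name with a version, as the map key above suggests.
type symVer struct {
    name string
    ver  int
}

type Link struct {
    Hash map[symVer]*LSym
}

// lookup creates the symbol on first use, roughly what Linklookup does;
// a non-creating variant would be the analogue of Linkrlookup.
func (ctxt *Link) lookup(name string, v int) *LSym {
    if s := ctxt.Hash[symVer{name, v}]; s != nil {
        return s
    }
    s := &LSym{Name: name, Version: v}
    ctxt.Hash[symVer{name, v}] = s
    return s
}

func main() {
    ctxt := &Link{Hash: make(map[symVer]*LSym)}
    a := ctxt.lookup("runtime.tlsg", 0)
    b := ctxt.lookup("runtime.tlsg", 0)
    fmt.Println(a == b) // true: one symbol per (name, version)
}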

@ -37,9 +37,6 @@ import "strings"
var maxelfstr int
func putelfstr(s string) int {
var off int
var n int
if len(Elfstrdat) == 0 && s != "" {
// first entry must be empty string
putelfstr("")
@ -48,12 +45,12 @@ func putelfstr(s string) int {
// Rewrite · to . for ASCII-only tools like DTrace (sigh)
s = strings.Replace(s, "·", ".", -1)
n = len(s) + 1
n := len(s) + 1
for len(Elfstrdat)+n > cap(Elfstrdat) {
Elfstrdat = append(Elfstrdat[:cap(Elfstrdat)], 0)[:len(Elfstrdat)]
}
off = len(Elfstrdat)
off := len(Elfstrdat)
Elfstrdat = Elfstrdat[:off+n]
copy(Elfstrdat[off:], s)
@ -88,11 +85,7 @@ var numelfsym int = 1 // 0 is reserved
var elfbind int
func putelfsym(x *LSym, s string, t int, addr int64, size int64, ver int, go_ *LSym) {
var bind int
var type_ int
var off int
var other int
var xo *LSym
switch t {
default:
@ -108,7 +101,7 @@ func putelfsym(x *LSym, s string, t int, addr int64, size int64, ver int, go_ *L
type_ = STT_OBJECT
}
xo = x
xo := x
for xo.Outer != nil {
xo = xo.Outer
}
@ -126,7 +119,7 @@ func putelfsym(x *LSym, s string, t int, addr int64, size int64, ver int, go_ *L
// One pass for each binding: STB_LOCAL, STB_GLOBAL,
// maybe one day STB_WEAK.
bind = STB_GLOBAL
bind := STB_GLOBAL
if ver != 0 || (x.Type&SHIDDEN != 0) {
bind = STB_LOCAL
@ -144,11 +137,11 @@ func putelfsym(x *LSym, s string, t int, addr int64, size int64, ver int, go_ *L
return
}
off = putelfstr(s)
off := putelfstr(s)
if Linkmode == LinkExternal {
addr -= int64((xo.Sect.(*Section)).Vaddr)
}
other = 2
other := 2
if x.Type&SHIDDEN != 0 {
other = 0
}
@ -164,9 +157,7 @@ func putelfsectionsym(s *LSym, shndx int) {
}
func putelfsymshndx(sympos int64, shndx int) {
var here int64
here = Cpos()
here := Cpos()
switch Thearch.Thechar {
case '6':
Cseek(sympos + 6)
@ -180,9 +171,6 @@ func putelfsymshndx(sympos int64, shndx int) {
}
func Asmelfsym() {
var s *LSym
var name string
// the first symbol entry is reserved
putelfsyment(0, 0, 0, STB_LOCAL<<4|STT_NOTYPE, 0, 0)
@ -192,7 +180,7 @@ func Asmelfsym() {
genasmsym(putelfsym)
if Linkmode == LinkExternal && HEADTYPE != Hopenbsd {
s = Linklookup(Ctxt, "runtime.tlsg", 0)
s := Linklookup(Ctxt, "runtime.tlsg", 0)
if s.Sect == nil {
Ctxt.Cursym = nil
Diag("missing section for %s", s.Name)
@ -214,7 +202,8 @@ func Asmelfsym() {
elfglobalsymndx = numelfsym
genasmsym(putelfsym)
for s = Ctxt.Allsym; s != nil; s = s.Allsym {
var name string
for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if s.Type != SHOSTOBJ && (s.Type != SDYNIMPORT || !s.Reachable) {
continue
}
@ -230,9 +219,6 @@ func Asmelfsym() {
}
func putplan9sym(x *LSym, s string, t int, addr int64, size int64, ver int, go_ *LSym) {
var i int
var l int
switch t {
case 'T',
'L',
@ -249,7 +235,7 @@ func putplan9sym(x *LSym, s string, t int, addr int64, size int64, ver int, go_
'z',
'Z',
'm':
l = 4
l := 4
if HEADTYPE == Hplan9 && Thearch.Thechar == '6' && Debug['8'] == 0 {
Lputb(uint32(addr >> 32))
l = 8
@ -258,6 +244,7 @@ func putplan9sym(x *LSym, s string, t int, addr int64, size int64, ver int, go_
Lputb(uint32(addr))
Cput(uint8(t + 0x80)) /* 0x80 is variable length */
var i int
if t == 'z' || t == 'Z' {
Cput(uint8(s[0]))
for i = 1; s[i] != 0 || s[i+1] != 0; i += 2 {
@ -327,12 +314,6 @@ func Vputl(v uint64) {
}
func symtab() {
var s *LSym
var symtype *LSym
var symtypelink *LSym
var symgostring *LSym
var symgofunc *LSym
dosymtype()
// Define these so that they'll get put into the symbol table.
@ -357,7 +338,7 @@ func symtab() {
xdefine("runtime.esymtab", SRODATA, 0)
// garbage collection symbols
s = Linklookup(Ctxt, "runtime.gcdata", 0)
s := Linklookup(Ctxt, "runtime.gcdata", 0)
s.Type = SRODATA
s.Size = 0
@ -376,21 +357,21 @@ func symtab() {
s.Type = STYPE
s.Size = 0
s.Reachable = true
symtype = s
symtype := s
s = Linklookup(Ctxt, "go.string.*", 0)
s.Type = SGOSTRING
s.Size = 0
s.Reachable = true
symgostring = s
symgostring := s
s = Linklookup(Ctxt, "go.func.*", 0)
s.Type = SGOFUNC
s.Size = 0
s.Reachable = true
symgofunc = s
symgofunc := s
symtypelink = Linklookup(Ctxt, "runtime.typelink", 0)
symtypelink := Linklookup(Ctxt, "runtime.typelink", 0)
symt = Linklookup(Ctxt, "runtime.symtab", 0)
symt.Type = SSYMTAB
@ -401,7 +382,7 @@ func symtab() {
// within a type they sort by size, so the .* symbols
// just defined above will be first.
// hide the specific symbols.
for s = Ctxt.Allsym; s != nil; s = s.Allsym {
for s := Ctxt.Allsym; s != nil; s = s.Allsym {
if !s.Reachable || s.Special != 0 || s.Type != SRODATA {
continue
}


@ -26,17 +26,14 @@ func cstring(x []byte) string {
func plan9quote(s string) string {
if s == "" {
goto needquote
return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
for i := 0; i < len(s); i++ {
if s[i] <= ' ' || s[i] == '\'' {
goto needquote
return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
}
return s
needquote:
return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
func tokenize(s string) []string {

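To see what plan9quote does once the jumps to needquote are replaced by direct returns, as in the excerpt just above, here is the resulting function exercised standalone: unchanged strings pass through, anything containing a space, control character, or single quote gets wrapped in single quotes with embedded quotes doubled.

package main

import (
    "fmt"
    "strings"
)

// plan9quote as it reads after the rewrite above.
func plan9quote(s string) string {
    if s == "" {
        return "'" + strings.Replace(s, "'", "''", -1) + "'"
    }
    for i := 0; i < len(s); i++ {
        if s[i] <= ' ' || s[i] == '\'' {
            return "'" + strings.Replace(s, "'", "''", -1) + "'"
        }
    }
    return s
}

func main() {
    fmt.Println(plan9quote("simple"))    // simple
    fmt.Println(plan9quote("two words")) // 'two words'
    fmt.Println(plan9quote("it's"))      // 'it''s'
}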

@ -304,13 +304,7 @@ func casesz(ctxt *obj.Link, p *obj.Prog) int32 {
// In rare cases, asmoutnacl might split p into two instructions.
// origPC is the PC for this Prog (no padding is taken into account).
func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint32) int {
var size int
var reg int
var q *obj.Prog
var a *obj.Addr
var a2 *obj.Addr
size = int(o.size)
size := int(o.size)
// instruction specific
switch p.As {
@ -437,11 +431,12 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
// split it into two instructions:
// ADD $-100004, R13
// MOVW R14, 0(R13)
q = ctxt.NewProg()
q := ctxt.NewProg()
p.Scond &^= C_WBIT
*q = *p
a = &p.To
a := &p.To
var a2 *obj.Addr
if p.To.Type == obj.TYPE_MEM {
a2 = &q.To
} else {
@ -479,12 +474,13 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
}
if (p.To.Type == obj.TYPE_MEM && p.To.Reg != REG_R13 && p.To.Reg != REG_R9) || (p.From.Type == obj.TYPE_MEM && p.From.Reg != REG_R13 && p.From.Reg != REG_R9) { // MOVW Rx, X(Ry), y != 13 && y != 9 // MOVW X(Rx), Ry, x != 13 && x != 9
var a *obj.Addr
if p.To.Type == obj.TYPE_MEM {
a = &p.To
} else {
a = &p.From
}
reg = int(a.Reg)
reg := int(a.Reg)
if size == 4 {
// if addr.reg == 0, then it is probably load from x(FP) with small x, no need to modify.
if reg == 0 {
@ -514,8 +510,9 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
if p.Scond&(C_PBIT|C_WBIT) != 0 {
ctxt.Diag("unsupported instruction (.P/.W): %v", p)
}
q = ctxt.NewProg()
q := ctxt.NewProg()
*q = *p
var a2 *obj.Addr
if p.To.Type == obj.TYPE_MEM {
a2 = &q.To
} else {
@ -577,16 +574,6 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
func span5(ctxt *obj.Link, cursym *obj.LSym) {
var p *obj.Prog
var op *obj.Prog
var o *Optab
var m int
var bflag int
var i int
var v int
var times int
var c int32
var opc int32
var out [6 + 3]uint32
var bp []byte
p = cursym.Text
if p == nil || p.Link == nil { // handle external functions and ELF section symbols
@ -600,10 +587,13 @@ func span5(ctxt *obj.Link, cursym *obj.LSym) {
ctxt.Cursym = cursym
ctxt.Autosize = int32(p.To.Offset + 4)
c = 0
c := int32(0)
op = p
p = p.Link
var i int
var m int
var o *Optab
for ; p != nil || ctxt.Blitrl != nil; (func() { op = p; p = p.Link })() {
if p == nil {
if checkpool(ctxt, op, 0) {
@ -676,8 +666,11 @@ func span5(ctxt *obj.Link, cursym *obj.LSym) {
* generate extra passes putting branches
* around jmps to fix. this is rare.
*/
times = 0
times := 0
var bflag int
var opc int32
var out [6 + 3]uint32
for {
if ctxt.Debugvlog != 0 {
fmt.Fprintf(ctxt.Bso, "%5.2f span1\n", obj.Cputime())
@ -774,8 +767,9 @@ func span5(ctxt *obj.Link, cursym *obj.LSym) {
ctxt.Autosize = int32(p.To.Offset + 4)
obj.Symgrow(ctxt, cursym, cursym.Size)
bp = cursym.P
bp := cursym.P
c = int32(p.Pc) // even p->link might need extra padding
var v int
for p = p.Link; p != nil; p = p.Link {
ctxt.Pc = p.Pc
ctxt.Curp = p
@ -844,14 +838,12 @@ func checkpool(ctxt *obj.Link, p *obj.Prog, sz int) bool {
}
func flushpool(ctxt *obj.Link, p *obj.Prog, skip int, force int) bool {
var q *obj.Prog
if ctxt.Blitrl != nil {
if skip != 0 {
if false && skip == 1 {
fmt.Printf("note: flush literal pool at %x: len=%d ref=%x\n", uint64(p.Pc+4), pool.size, pool.start)
}
q = ctxt.NewProg()
q := ctxt.NewProg()
q.As = AB
q.To.Type = obj.TYPE_BRANCH
q.Pcond = p.Link
@ -863,7 +855,7 @@ func flushpool(ctxt *obj.Link, p *obj.Prog, skip int, force int) bool {
}
if ctxt.Headtype == obj.Hnacl && pool.size%16 != 0 {
// if pool is not multiple of 16 bytes, add an alignment marker
q = ctxt.NewProg()
q := ctxt.NewProg()
q.As = ADATABUNDLEEND
ctxt.Elitrl.Link = q
@ -893,11 +885,9 @@ func flushpool(ctxt *obj.Link, p *obj.Prog, skip int, force int) bool {
}
func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
var q *obj.Prog
var t obj.Prog
var c int
c = aclass(ctxt, a)
c := aclass(ctxt, a)
t.Ctxt = ctxt
t.As = AWORD
@ -928,7 +918,7 @@ func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
}
if t.Pcrel == nil {
for q = ctxt.Blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */
for q := ctxt.Blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */
if q.Pcrel == nil && q.To == t.To {
p.Pcond = q
return
@ -938,7 +928,7 @@ func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
if ctxt.Headtype == obj.Hnacl && pool.size%16 == 0 {
// start a new data bundle
q = ctxt.NewProg()
q := ctxt.NewProg()
q.As = ADATABUNDLE
q.Pc = int64(pool.size)
pool.size += 4
@ -952,7 +942,7 @@ func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
ctxt.Elitrl = q
}
q = ctxt.NewProg()
q := ctxt.NewProg()
*q = t
q.Pc = int64(pool.size)
@ -975,9 +965,7 @@ func regoff(ctxt *obj.Link, a *obj.Addr) int32 {
}
func immrot(v uint32) int32 {
var i int
for i = 0; i < 16; i++ {
for i := 0; i < 16; i++ {
if v&^0xff == 0 {
return int32(uint32(int32(i)<<8) | v | 1<<25)
}
@ -1012,9 +1000,6 @@ func immhalf(v int32) bool {
}
func aclass(ctxt *obj.Link, a *obj.Addr) int {
var s *obj.LSym
var t int
switch a.Type {
case obj.TYPE_NONE:
return C_NONE
@ -1060,7 +1045,7 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
case obj.NAME_AUTO:
ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
t = int(immaddr(int32(ctxt.Instoffset)))
t := int(immaddr(int32(ctxt.Instoffset)))
if t != 0 {
if immhalf(int32(ctxt.Instoffset)) {
if immfloat(int32(t)) {
@ -1079,7 +1064,7 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
case obj.NAME_PARAM:
ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 4
t = int(immaddr(int32(ctxt.Instoffset)))
t := int(immaddr(int32(ctxt.Instoffset)))
if t != 0 {
if immhalf(int32(ctxt.Instoffset)) {
if immfloat(int32(t)) {
@ -1098,7 +1083,7 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
case obj.TYPE_NONE:
ctxt.Instoffset = a.Offset
t = int(immaddr(int32(ctxt.Instoffset)))
t := int(immaddr(int32(ctxt.Instoffset)))
if t != 0 {
if immhalf(int32(ctxt.Instoffset)) { /* n.b. that it will also satisfy immrot */
if immfloat(int32(t)) {
@ -1110,7 +1095,7 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
if immfloat(int32(t)) {
return C_FOREG /* n.b. that it will also satisfy immrot */
}
t = int(immrot(uint32(ctxt.Instoffset)))
t := int(immrot(uint32(ctxt.Instoffset)))
if t != 0 {
return C_SROREG
}
@ -1150,7 +1135,7 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
return aconsize(ctxt)
}
t = int(immrot(uint32(ctxt.Instoffset)))
t := int(immrot(uint32(ctxt.Instoffset)))
if t != 0 {
return C_RCON
}
@ -1162,7 +1147,7 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
case obj.NAME_EXTERN,
obj.NAME_STATIC:
s = a.Sym
s := a.Sym
if s == nil {
break
}
@ -1188,9 +1173,7 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
}
func aconsize(ctxt *obj.Link) int {
var t int
t = int(immrot(uint32(ctxt.Instoffset)))
t := int(immrot(uint32(ctxt.Instoffset)))
if t != 0 {
return C_RACON
}
@ -1202,16 +1185,7 @@ func prasm(p *obj.Prog) {
}
func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
var a1 int
var a2 int
var a3 int
var r int
var c1 []byte
var c3 []byte
var o []Optab
var e []Optab
a1 = int(p.Optab)
a1 := int(p.Optab)
if a1 != 0 {
return &optab[a1-1:][0]
}
@ -1222,19 +1196,19 @@ func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
}
a1--
a3 = int(p.To.Class)
a3 := int(p.To.Class)
if a3 == 0 {
a3 = aclass(ctxt, &p.To) + 1
p.To.Class = int8(a3)
}
a3--
a2 = C_NONE
a2 := C_NONE
if p.Reg != 0 {
a2 = C_REG
}
r = int(p.As)
o = oprange[r].start
r := int(p.As)
o := oprange[r].start
if o == nil {
o = oprange[r].stop /* just generate an error */
}
@ -1244,9 +1218,9 @@ func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
fmt.Printf("\t\t%d %d\n", p.From.Type, p.To.Type)
}
e = oprange[r].stop
c1 = xcmp[a1][:]
c3 = xcmp[a3][:]
e := oprange[r].stop
c1 := xcmp[a1][:]
c3 := xcmp[a3][:]
for ; -cap(o) < -cap(e); o = o[1:] {
if int(o[0].a2) == a2 {
if c1[o[0].a1] != 0 {
@ -1340,13 +1314,9 @@ func (x ocmp) Swap(i, j int) {
}
func (x ocmp) Less(i, j int) bool {
var p1 *Optab
var p2 *Optab
var n int
p1 = &x[i]
p2 = &x[j]
n = int(p1.as) - int(p2.as)
p1 := &x[i]
p2 := &x[j]
n := int(p1.as) - int(p2.as)
if n != 0 {
return n < 0
}
@ -1366,11 +1336,9 @@ func (x ocmp) Less(i, j int) bool {
}
func buildop(ctxt *obj.Link) {
var i int
var n int
var r int
for i = 0; i < C_GOK; i++ {
for i := 0; i < C_GOK; i++ {
for n = 0; n < C_GOK; n++ {
if cmp(n, i) {
xcmp[i][n] = 1
@ -1388,7 +1356,8 @@ func buildop(ctxt *obj.Link) {
}
sort.Sort(ocmp(optab[:n]))
for i = 0; i < n; i++ {
var r int
for i := 0; i < n; i++ {
r = int(optab[i].as)
oprange[r].start = optab[i:]
for int(optab[i].as) == r {
@ -1536,26 +1505,13 @@ func buildop(ctxt *obj.Link) {
}
func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
var o1 uint32
var o2 uint32
var o3 uint32
var o4 uint32
var o5 uint32
var o6 uint32
var v int32
var r int
var rf int
var rt int
var rt2 int
var rel *obj.Reloc
ctxt.Printp = p
o1 = 0
o2 = 0
o3 = 0
o4 = 0
o5 = 0
o6 = 0
o1 := uint32(0)
o2 := uint32(0)
o3 := uint32(0)
o4 := uint32(0)
o5 := uint32(0)
o6 := uint32(0)
ctxt.Armsize += int32(o.size)
if false { /*debug['P']*/
fmt.Printf("%x: %v\ttype %d\n", uint32(p.Pc), p, o.type_)
@ -1573,9 +1529,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 1: /* op R,[R],R */
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
rf = int(p.From.Reg)
rt = int(p.To.Reg)
r = int(p.Reg)
rf := int(p.From.Reg)
rt := int(p.To.Reg)
r := int(p.Reg)
if p.To.Type == obj.TYPE_NONE {
rt = 0
}
@ -1591,8 +1547,8 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
rt = int(p.To.Reg)
r = int(p.Reg)
rt := int(p.To.Reg)
r := int(p.Reg)
if p.To.Type == obj.TYPE_NONE {
rt = 0
}
@ -1611,7 +1567,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = oprrr(ctxt, AADD, int(p.Scond))
o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
r = int(p.From.Reg)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@ -1621,9 +1577,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 5: /* bra s */
o1 = opbra(ctxt, int(p.As), int(p.Scond))
v = -8
v := int32(-8)
if p.To.Sym != nil {
rel = obj.Addrel(ctxt.Cursym)
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 4
rel.Sym = p.To.Sym
@ -1654,7 +1610,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
o1 = oprrr(ctxt, ABL, int(p.Scond))
o1 |= (uint32(p.To.Reg) & 15) << 0
rel = obj.Addrel(ctxt.Cursym)
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 0
rel.Type = obj.R_CALLIND
@ -1663,7 +1619,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
aclass(ctxt, &p.From)
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
@ -1674,7 +1630,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 9: /* sll R,[R],R -> mov (R<<R),R */
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
@ -1697,7 +1653,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if p.To.Sym != nil {
// This case happens with words generated
// in the PC stream as part of the literal pool.
rel = obj.Addrel(ctxt.Cursym)
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 4
@ -1742,7 +1698,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
o2 = oprrr(ctxt, int(p.As), int(p.Scond))
o2 |= REGTMP & 15
r = int(p.Reg)
r := int(p.Reg)
if p.As == AMOVW || p.As == AMVN {
r = 0
} else if r == 0 {
@ -1762,7 +1718,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o2 = oprrr(ctxt, ASRA, int(p.Scond))
}
r = int(p.To.Reg)
r := int(p.To.Reg)
o1 |= (uint32(p.From.Reg)&15)<<0 | (uint32(r)&15)<<12
o2 |= uint32(r)&15 | (uint32(r)&15)<<12
if p.As == AMOVB || p.As == AMOVBS || p.As == AMOVBU {
@ -1776,9 +1732,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 15: /* mul r,[r,]r */
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
rf = int(p.From.Reg)
rt = int(p.To.Reg)
r = int(p.Reg)
rf := int(p.From.Reg)
rt := int(p.To.Reg)
r := int(p.Reg)
if r == 0 {
r = rt
}
@ -1803,16 +1759,16 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 17:
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
rf = int(p.From.Reg)
rt = int(p.To.Reg)
rt2 = int(p.To.Offset)
r = int(p.Reg)
rf := int(p.From.Reg)
rt := int(p.To.Reg)
rt2 := int(p.To.Offset)
r := int(p.Reg)
o1 |= (uint32(rf)&15)<<8 | (uint32(r)&15)<<0 | (uint32(rt)&15)<<16 | (uint32(rt2)&15)<<12
case 20: /* mov/movb/movbu R,O(R) */
aclass(ctxt, &p.To)
r = int(p.To.Reg)
r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
@ -1821,7 +1777,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 21: /* mov/movbu O(R),R -> lr */
aclass(ctxt, &p.From)
r = int(p.From.Reg)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@ -1836,7 +1792,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if o1 == 0 {
break
}
r = int(p.To.Reg)
r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
@ -1851,7 +1807,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if o1 == 0 {
break
}
r = int(p.From.Reg)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@ -1869,7 +1825,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o2 = oprrr(ctxt, AADD, int(p.Scond))
o2 |= REGTMP & 15
r = int(p.From.Reg)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@ -1961,18 +1917,18 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = 0xe8fd8000
case 50: /* floating point store */
v = regoff(ctxt, &p.To)
v := regoff(ctxt, &p.To)
r = int(p.To.Reg)
r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
o1 = ofsr(ctxt, int(p.As), int(p.From.Reg), v, r, int(p.Scond), p)
case 51: /* floating point load */
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
r = int(p.From.Reg)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@ -1984,7 +1940,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if o1 == 0 {
break
}
r = int(p.To.Reg)
r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
@ -1997,7 +1953,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if o1 == 0 {
break
}
r = int(p.From.Reg)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@ -2007,9 +1963,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 54: /* floating point arith */
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
rf = int(p.From.Reg)
rt = int(p.To.Reg)
r = int(p.Reg)
rf := int(p.From.Reg)
rt := int(p.To.Reg)
r := int(p.Reg)
if r == 0 {
r = rt
if p.As == AMOVF || p.As == AMOVD || p.As == AMOVFD || p.As == AMOVDF || p.As == ASQRTF || p.As == ASQRTD || p.As == AABSF || p.As == AABSD {
@ -2033,8 +1989,8 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = oprrr(ctxt, AAND, int(p.Scond))
o1 |= uint32(immrot(0xff))
rt = int(p.To.Reg)
r = int(p.From.Reg)
rt := int(p.To.Reg)
r := int(p.From.Reg)
if p.To.Type == obj.TYPE_NONE {
rt = 0
}
@ -2095,7 +2051,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 63: /* bcase */
if p.Pcond != nil {
rel = obj.Addrel(ctxt.Cursym)
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 4
if p.To.Sym != nil && p.To.Sym.Type != 0 {
@ -2171,7 +2127,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 70: /* movh/movhu R,O(R) -> strh */
aclass(ctxt, &p.To)
r = int(p.To.Reg)
r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
@ -2180,7 +2136,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 71: /* movb/movh/movhu O(R),R -> ldrsb/ldrsh/ldrh */
aclass(ctxt, &p.From)
r = int(p.From.Reg)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@ -2197,7 +2153,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if o1 == 0 {
break
}
r = int(p.To.Reg)
r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
@ -2209,7 +2165,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if o1 == 0 {
break
}
r = int(p.From.Reg)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@ -2278,8 +2234,8 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o2 = oprrr(ctxt, ASUBF, int(p.Scond))
}
v = 0x70 // 1.0
r = (int(p.To.Reg) & 15) << 0
v := int32(0x70) // 1.0
r := (int(p.To.Reg) & 15) << 0
// movf $1.0, r
o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
@ -2298,7 +2254,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
o1 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
o1 |= (uint32(p.To.Reg) & 15) << 12
v = int32(chipfloat5(ctxt, p.From.U.Dval))
v := int32(chipfloat5(ctxt, p.From.U.Dval))
o1 |= (uint32(v) & 0xf) << 0
o1 |= (uint32(v) & 0xf0) << 12
@ -2475,18 +2431,14 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
func mov(ctxt *obj.Link, p *obj.Prog) uint32 {
var o1 uint32
var rt int
var r int
aclass(ctxt, &p.From)
o1 = oprrr(ctxt, int(p.As), int(p.Scond))
o1 := oprrr(ctxt, int(p.As), int(p.Scond))
o1 |= uint32(p.From.Offset)
rt = int(p.To.Reg)
rt := int(p.To.Reg)
if p.To.Type == obj.TYPE_NONE {
rt = 0
}
r = int(p.Reg)
r := int(p.Reg)
if p.As == AMOVW || p.As == AMVN {
r = 0
} else if r == 0 {
@ -2497,9 +2449,7 @@ func mov(ctxt *obj.Link, p *obj.Prog) uint32 {
}
func oprrr(ctxt *obj.Link, a int, sc int) uint32 {
var o uint32
o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_SBIT != 0 {
o |= 1 << 20
}
@ -2716,12 +2666,10 @@ func opbra(ctxt *obj.Link, a int, sc int) uint32 {
}
func olr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
var o uint32
if sc&C_SBIT != 0 {
ctxt.Diag(".nil on LDR/STR instruction")
}
o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_PBIT == 0 {
o |= 1 << 24
}
@ -2750,12 +2698,10 @@ func olr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
}
func olhr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
var o uint32
if sc&C_SBIT != 0 {
ctxt.Diag(".nil on LDRH/STRH instruction")
}
o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_PBIT == 0 {
o |= 1 << 24
}
@ -2778,9 +2724,7 @@ func olhr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
}
func osr(ctxt *obj.Link, a int, r int, v int32, b int, sc int) uint32 {
var o uint32
o = olr(ctxt, v, b, r, sc) ^ (1 << 20)
o := olr(ctxt, v, b, r, sc) ^ (1 << 20)
if a != AMOVW {
o |= 1 << 22
}
@ -2788,9 +2732,7 @@ func osr(ctxt *obj.Link, a int, r int, v int32, b int, sc int) uint32 {
}
func oshr(ctxt *obj.Link, r int, v int32, b int, sc int) uint32 {
var o uint32
o = olhr(ctxt, v, b, r, sc) ^ (1 << 20)
o := olhr(ctxt, v, b, r, sc) ^ (1 << 20)
return o
}
@ -2811,12 +2753,10 @@ func olhrr(ctxt *obj.Link, i int, b int, r int, sc int) uint32 {
}
func ofsr(ctxt *obj.Link, a int, r int, v int32, b int, sc int, p *obj.Prog) uint32 {
var o uint32
if sc&C_SBIT != 0 {
ctxt.Diag(".nil on FLDR/FSTR instruction")
}
o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_PBIT == 0 {
o |= 1 << 24
}
@ -2855,11 +2795,10 @@ func ofsr(ctxt *obj.Link, a int, r int, v int32, b int, sc int, p *obj.Prog) uin
}
func omvl(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, dr int) uint32 {
var v int32
var o1 uint32
if p.Pcond == nil {
aclass(ctxt, a)
v = immrot(^uint32(ctxt.Instoffset))
v := immrot(^uint32(ctxt.Instoffset))
if v == 0 {
ctxt.Diag("missing literal")
prasm(p)
@ -2870,7 +2809,7 @@ func omvl(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, dr int) uint32 {
o1 |= uint32(v)
o1 |= (uint32(dr) & 15) << 12
} else {
v = int32(p.Pcond.Pc - p.Pc - 8)
v := int32(p.Pcond.Pc - p.Pc - 8)
o1 = olr(ctxt, v, REGPC, dr, int(p.Scond)&C_SCOND)
}
@ -2886,29 +2825,23 @@ func chipzero5(ctxt *obj.Link, e float64) int {
}
func chipfloat5(ctxt *obj.Link, e float64) int {
var n int
var h1 uint32
var l uint32
var h uint32
var ei uint64
// We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions.
if ctxt.Goarm < 7 {
goto no
return -1
}
ei = math.Float64bits(e)
l = uint32(ei)
h = uint32(ei >> 32)
ei := math.Float64bits(e)
l := uint32(ei)
h := uint32(ei >> 32)
if l != 0 || h&0xffff != 0 {
goto no
return -1
}
h1 = h & 0x7fc00000
h1 := h & 0x7fc00000
if h1 != 0x40000000 && h1 != 0x3fc00000 {
goto no
return -1
}
n = 0
n := 0
// sign bit (a)
if h&0x80000000 != 0 {
@ -2925,7 +2858,4 @@ func chipfloat5(ctxt *obj.Link, e float64) int {
//print("match %.8lux %.8lux %d\n", l, h, n);
return n
no:
return -1
}


@ -61,16 +61,9 @@ var extra = []string{
var bigP *obj.Prog
func Pconv(p *obj.Prog) string {
var str string
var sc string
var fp string
var a int
var s int
a = int(p.As)
s = int(p.Scond)
sc = extra[(s&C_SCOND)^C_SCOND_XOR]
a := int(p.As)
s := int(p.Scond)
sc := extra[(s&C_SCOND)^C_SCOND_XOR]
if s&C_SBIT != 0 {
sc += ".S"
}
@ -83,6 +76,7 @@ func Pconv(p *obj.Prog) string {
if s&C_UBIT != 0 { /* ambiguous with FBIT */
sc += ".U"
}
var str string
if a == obj.ADATA {
str = fmt.Sprintf("%.5d (%v)\t%v\t%v/%d,%v",
p.Pc, p.Line(), Aconv(a), obj.Dconv(p, &p.From), p.From3.Offset, obj.Dconv(p, &p.To))
@ -97,30 +91,23 @@ func Pconv(p *obj.Prog) string {
p.Pc, p.Line(), Aconv(a), sc, obj.Dconv(p, &p.From), Rconv(int(p.Reg)), obj.Dconv(p, &p.To))
}
var fp string
fp += str
return fp
}
func Aconv(a int) string {
var s string
var fp string
s = "???"
s := "???"
if a >= obj.AXXX && a < ALAST {
s = Anames[a]
}
var fp string
fp += s
return fp
}
func RAconv(a *obj.Addr) string {
var str string
var fp string
var i int
var v int
str = fmt.Sprintf("GOK-reglist")
str := fmt.Sprintf("GOK-reglist")
switch a.Type {
case obj.TYPE_CONST:
if a.Reg != 0 {
@ -129,9 +116,9 @@ func RAconv(a *obj.Addr) string {
if a.Sym != nil {
break
}
v = int(a.Offset)
v := int(a.Offset)
str = ""
for i = 0; i < NREG; i++ {
for i := 0; i < NREG; i++ {
if v&(1<<uint(i)) != 0 {
if str == "" {
str += "[R"
@ -145,6 +132,7 @@ func RAconv(a *obj.Addr) string {
str += "]"
}
var fp string
fp += str
return fp
}
@ -182,13 +170,11 @@ func Rconv(r int) string {
}
func DRconv(a int) string {
var s string
var fp string
s = "C_??"
s := "C_??"
if a >= C_NONE && a <= C_NCLASS {
s = cnames5[a]
}
var fp string
fp += s
return fp
}


@ -41,9 +41,6 @@ import (
var progedit_tlsfallback *obj.LSym
func progedit(ctxt *obj.Link, p *obj.Prog) {
var literal string
var s *obj.LSym
p.From.Class = 0
p.To.Class = 0
@ -111,12 +108,10 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
switch p.As {
case AMOVF:
if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.U.Dval) < 0 && (chipzero5(ctxt, p.From.U.Dval) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
var i32 uint32
var f32 float32
f32 = float32(p.From.U.Dval)
i32 = math.Float32bits(f32)
literal = fmt.Sprintf("$f32.%08x", i32)
s = obj.Linklookup(ctxt, literal, 0)
f32 := float32(p.From.U.Dval)
i32 := math.Float32bits(f32)
literal := fmt.Sprintf("$f32.%08x", i32)
s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint32(ctxt, s, i32)
@ -131,10 +126,9 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
case AMOVD:
if p.From.Type == obj.TYPE_FCONST && chipfloat5(ctxt, p.From.U.Dval) < 0 && (chipzero5(ctxt, p.From.U.Dval) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
var i64 uint64
i64 = math.Float64bits(p.From.U.Dval)
literal = fmt.Sprintf("$f64.%016x", i64)
s = obj.Linklookup(ctxt, literal, 0)
i64 := math.Float64bits(p.From.U.Dval)
literal := fmt.Sprintf("$f64.%016x", i64)
s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint64(ctxt, s, i64)
@ -175,9 +169,7 @@ const (
)
func linkcase(casep *obj.Prog) {
var p *obj.Prog
for p = casep; p != nil; p = p.Link {
for p := casep; p != nil; p = p.Link {
if p.As == ABCASE {
for ; p != nil && p.As == ABCASE; p = p.Link {
p.Pcrel = casep
@ -188,25 +180,14 @@ func linkcase(casep *obj.Prog) {
}
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
var p *obj.Prog
var pl *obj.Prog
var p1 *obj.Prog
var p2 *obj.Prog
var q *obj.Prog
var q1 *obj.Prog
var q2 *obj.Prog
var o int
var autosize int32
var autoffset int32
autosize = 0
autosize := int32(0)
if ctxt.Symmorestack[0] == nil {
ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
}
q = nil
q := (*obj.Prog)(nil)
ctxt.Cursym = cursym
@ -216,8 +197,8 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
softfloat(ctxt, cursym)
p = cursym.Text
autoffset = int32(p.To.Offset)
p := cursym.Text
autoffset := int32(p.To.Offset)
if autoffset < 0 {
autoffset = 0
}
@ -259,8 +240,8 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
// MOVW.nil R3, 0(R1) +4
// CMP R1, R2
// BNE L
pl = obj.Appendp(ctxt, p)
p = pl
pl := obj.Appendp(ctxt, p)
p := pl
p.As = AMOVW
p.From.Type = obj.TYPE_REG
@ -289,7 +270,8 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
* expand RET
* expand BECOME pseudo
*/
for p = cursym.Text; p != nil; p = p.Link {
var q1 *obj.Prog
for p := cursym.Text; p != nil; p = p.Link {
switch p.As {
case ACASE:
if ctxt.Flag_shared != 0 {
@ -358,7 +340,11 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
q = p
}
for p = cursym.Text; p != nil; p = p.Link {
var o int
var p1 *obj.Prog
var p2 *obj.Prog
var q2 *obj.Prog
for p := cursym.Text; p != nil; p = p.Link {
o = int(p.As)
switch o {
case obj.ATEXT:
@ -667,24 +653,20 @@ func isfloatreg(a *obj.Addr) bool {
}
func softfloat(ctxt *obj.Link, cursym *obj.LSym) {
var p *obj.Prog
var next *obj.Prog
var symsfloat *obj.LSym
var wasfloat int
if ctxt.Goarm > 5 {
return
}
symsfloat = obj.Linklookup(ctxt, "_sfloat", 0)
symsfloat := obj.Linklookup(ctxt, "_sfloat", 0)
wasfloat = 0
for p = cursym.Text; p != nil; p = p.Link {
wasfloat := 0
for p := cursym.Text; p != nil; p = p.Link {
if p.Pcond != nil {
p.Pcond.Mark |= LABEL
}
}
for p = cursym.Text; p != nil; p = p.Link {
var next *obj.Prog
for p := cursym.Text; p != nil; p = p.Link {
switch p.As {
case AMOVW:
if isfloatreg(&p.To) || isfloatreg(&p.From) {
@ -880,13 +862,10 @@ func initdiv(ctxt *obj.Link) {
}
func follow(ctxt *obj.Link, s *obj.LSym) {
var firstp *obj.Prog
var lastp *obj.Prog
ctxt.Cursym = s
firstp = ctxt.NewProg()
lastp = firstp
firstp := ctxt.NewProg()
lastp := firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
s.Text = firstp.Link

File diff suppressed because it is too large


@ -43,7 +43,6 @@ var bigP *obj.Prog
func Pconv(p *obj.Prog) string {
var str string
var fp string
switch p.As {
case obj.ADATA:
@ -72,6 +71,7 @@ func Pconv(p *obj.Prog) string {
}
}
var fp string
fp += str
return fp
}


@ -51,10 +51,6 @@ func canuselocaltls(ctxt *obj.Link) bool {
}
func progedit(ctxt *obj.Link, p *obj.Prog) {
var literal string
var s *obj.LSym
var q *obj.Prog
// See obj6.c for discussion of TLS.
if canuselocaltls(ctxt) {
// Reduce TLS initial exec model to TLS local exec model.
@ -92,7 +88,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
// MOVL off(BX)(TLS*1), BX
// This allows the C compilers to emit references to m and g using the direct off(TLS) form.
if p.As == AMOVL && p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_DI {
q = obj.Appendp(ctxt, p)
q := obj.Appendp(ctxt, p)
q.As = p.As
q.From.Type = obj.TYPE_MEM
q.From.Reg = p.To.Reg
@ -159,12 +155,10 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
ACOMISS,
AUCOMISS:
if p.From.Type == obj.TYPE_FCONST {
var i32 uint32
var f32 float32
f32 = float32(p.From.U.Dval)
i32 = math.Float32bits(f32)
literal = fmt.Sprintf("$f32.%08x", i32)
s = obj.Linklookup(ctxt, literal, 0)
f32 := float32(p.From.U.Dval)
i32 := math.Float32bits(f32)
literal := fmt.Sprintf("$f32.%08x", i32)
s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint32(ctxt, s, i32)
@ -208,10 +202,9 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
ACOMISD,
AUCOMISD:
if p.From.Type == obj.TYPE_FCONST {
var i64 uint64
i64 = math.Float64bits(p.From.U.Dval)
literal = fmt.Sprintf("$f64.%016x", i64)
s = obj.Linklookup(ctxt, literal, 0)
i64 := math.Float64bits(p.From.U.Dval)
literal := fmt.Sprintf("$f64.%016x", i64)
s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint64(ctxt, s, i64)
@ -227,14 +220,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
}
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
var p *obj.Prog
var q *obj.Prog
var p1 *obj.Prog
var p2 *obj.Prog
var autoffset int32
var deltasp int32
var a int
if ctxt.Symmorestack[0] == nil {
ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
@ -250,8 +235,8 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
return
}
p = cursym.Text
autoffset = int32(p.To.Offset)
p := cursym.Text
autoffset := int32(p.To.Offset)
if autoffset < 0 {
autoffset = 0
}
@ -259,7 +244,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
cursym.Locals = autoffset
cursym.Args = p.To.U.Argsize
q = nil
q := (*obj.Prog)(nil)
if p.From3.Offset&obj.NOSPLIT == 0 || (p.From3.Offset&obj.WRAPPER != 0) {
p = obj.Appendp(ctxt, p)
@ -292,7 +277,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
if q != nil {
q.Pcond = p
}
deltasp = autoffset
deltasp := autoffset
if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
@ -329,7 +314,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
p = obj.Appendp(ctxt, p)
p.As = AJEQ
p.To.Type = obj.TYPE_BRANCH
p1 = p
p1 := p
p = obj.Appendp(ctxt, p)
p.As = ALEAL
@ -350,7 +335,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
p = obj.Appendp(ctxt, p)
p.As = AJNE
p.To.Type = obj.TYPE_BRANCH
p2 = p
p2 := p
p = obj.Appendp(ctxt, p)
p.As = AMOVL
@ -400,6 +385,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
p.As = ASTOSL
}
var a int
for ; p != nil; p = p.Link {
a = int(p.From.Name)
if a == obj.NAME_AUTO {
@ -479,8 +465,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
// prologue (caller must call appendp first) and in the epilogue.
// Returns last new instruction.
func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
var next *obj.Prog
p.As = AMOVL
p.From.Type = obj.TYPE_MEM
p.From.Reg = REG_TLS
@ -488,7 +472,7 @@ func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_CX
next = p.Link
next := p.Link
progedit(ctxt, p)
for p.Link != next {
p = p.Link
@ -508,9 +492,6 @@ func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
// On return, *jmpok is the instruction that should jump
// to the stack frame allocation if no split is needed.
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool, jmpok **obj.Prog) *obj.Prog {
var q *obj.Prog
var q1 *obj.Prog
if ctxt.Debugstack != 0 {
// 8l -K means check not only for stack
// overflow but stack underflow.
@ -530,7 +511,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool, jmpok
p.As = AJCC
p.To.Type = obj.TYPE_BRANCH
p.To.Offset = 4
q1 = p
q1 := p
p = obj.Appendp(ctxt, p)
p.As = AINT
@ -542,7 +523,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool, jmpok
q1.Pcond = p
}
q1 = nil
q1 := (*obj.Prog)(nil)
if framesize <= obj.StackSmall {
// small stack: SP <= stackguard
@ -651,7 +632,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool, jmpok
p.As = AJHI
p.To.Type = obj.TYPE_BRANCH
p.To.Offset = 4
q = p
q := p
p = obj.Appendp(ctxt, p)
p.As = obj.ACALL
@ -679,13 +660,10 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool, jmpok
}
func follow(ctxt *obj.Link, s *obj.LSym) {
var firstp *obj.Prog
var lastp *obj.Prog
ctxt.Cursym = s
firstp = ctxt.NewProg()
lastp = firstp
firstp := ctxt.NewProg()
lastp := firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
s.Text = firstp.Link


@ -407,18 +407,7 @@ var oprange [ALAST]Oprang
var xcmp [C_NCLASS][C_NCLASS]uint8
func span9(ctxt *obj.Link, cursym *obj.LSym) {
var p *obj.Prog
var q *obj.Prog
var o *Optab
var m int
var bflag int
var c int64
var otxt int64
var out [6]uint32
var i int32
var bp []byte
p = cursym.Text
p := cursym.Text
if p == nil || p.Link == nil { // handle external functions and ELF section symbols
return
}
@ -429,9 +418,11 @@ func span9(ctxt *obj.Link, cursym *obj.LSym) {
buildop(ctxt)
}
c = 0
c := int64(0)
p.Pc = c
var m int
var o *Optab
for p = p.Link; p != nil; p = p.Link {
ctxt.Curp = p
p.Pc = c
@ -455,8 +446,10 @@ func span9(ctxt *obj.Link, cursym *obj.LSym) {
* generate extra passes putting branches
* around jmps to fix. this is rare.
*/
bflag = 1
bflag := 1
var otxt int64
var q *obj.Prog
for bflag != 0 {
if ctxt.Debugvlog != 0 {
fmt.Fprintf(ctxt.Bso, "%5.2f span1\n", obj.Cputime())
@ -517,8 +510,10 @@ func span9(ctxt *obj.Link, cursym *obj.LSym) {
obj.Symgrow(ctxt, cursym, cursym.Size)
bp = cursym.P
for p = cursym.Text.Link; p != nil; p = p.Link {
bp := cursym.P
var i int32
var out [6]uint32
for p := cursym.Text.Link; p != nil; p = p.Link {
ctxt.Pc = p.Pc
ctxt.Curp = p
o = oplook(ctxt, p)
@ -542,8 +537,6 @@ func isuint32(v uint64) bool {
}
func aclass(ctxt *obj.Link, a *obj.Addr) int {
var s *obj.LSym
switch a.Type {
case obj.TYPE_NONE:
return C_NONE
@ -646,7 +639,7 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
case obj.NAME_EXTERN,
obj.NAME_STATIC:
s = a.Sym
s := a.Sym
if s == nil {
break
}
@ -720,18 +713,7 @@ func prasm(p *obj.Prog) {
}
func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
var a1 int
var a2 int
var a3 int
var a4 int
var r int
var c1 []byte
var c3 []byte
var c4 []byte
var o []Optab
var e []Optab
a1 = int(p.Optab)
a1 := int(p.Optab)
if a1 != 0 {
return &optab[a1-1:][0]
}
@ -742,36 +724,36 @@ func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
}
a1--
a3 = int(p.From3.Class)
a3 := int(p.From3.Class)
if a3 == 0 {
a3 = aclass(ctxt, &p.From3) + 1
p.From3.Class = int8(a3)
}
a3--
a4 = int(p.To.Class)
a4 := int(p.To.Class)
if a4 == 0 {
a4 = aclass(ctxt, &p.To) + 1
p.To.Class = int8(a4)
}
a4--
a2 = C_NONE
a2 := C_NONE
if p.Reg != 0 {
a2 = C_REG
}
//print("oplook %P %d %d %d %d\n", p, a1, a2, a3, a4);
r = int(p.As)
r := int(p.As)
o = oprange[r].start
o := oprange[r].start
if o == nil {
o = oprange[r].stop /* just generate an error */
}
e = oprange[r].stop
c1 = xcmp[a1][:]
c3 = xcmp[a3][:]
c4 = xcmp[a4][:]
e := oprange[r].stop
c1 := xcmp[a1][:]
c3 := xcmp[a3][:]
c4 := xcmp[a4][:]
for ; -cap(o) < -cap(e); o = o[1:] {
if int(o[0].a2) == a2 {
if c1[o[0].a1] != 0 {
@ -881,13 +863,9 @@ func (x ocmp) Swap(i, j int) {
}
func (x ocmp) Less(i, j int) bool {
var p1 *Optab
var p2 *Optab
var n int
p1 = &x[i]
p2 = &x[j]
n = int(p1.as) - int(p2.as)
p1 := &x[i]
p2 := &x[j]
n := int(p1.as) - int(p2.as)
if n != 0 {
return n < 0
}
@ -911,11 +889,9 @@ func (x ocmp) Less(i, j int) bool {
}
func buildop(ctxt *obj.Link) {
var i int
var n int
var r int
for i = 0; i < C_NCLASS; i++ {
for i := 0; i < C_NCLASS; i++ {
for n = 0; n < C_NCLASS; n++ {
if cmp(n, i) {
xcmp[i][n] = 1
@ -925,7 +901,8 @@ func buildop(ctxt *obj.Link) {
for n = 0; optab[n].as != obj.AXXX; n++ {
}
sort.Sort(ocmp(optab[:n]))
for i = 0; i < n; i++ {
var r int
for i := 0; i < n; i++ {
r = int(optab[i].as)
oprange[r].start = optab[i:]
for int(optab[i].as) == r {
@ -1393,9 +1370,7 @@ func oclass(a *obj.Addr) int {
// add R_ADDRPOWER relocation to symbol s for the two instructions o1 and o2.
func addaddrreloc(ctxt *obj.Link, s *obj.LSym, o1 *uint32, o2 *uint32) {
var rel *obj.Reloc
rel = obj.Addrel(ctxt.Cursym)
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 8
rel.Sym = s
@ -1407,13 +1382,11 @@ func addaddrreloc(ctxt *obj.Link, s *obj.LSym, o1 *uint32, o2 *uint32) {
* 32-bit masks
*/
func getmask(m []byte, v uint32) bool {
var i int
m[1] = 0
m[0] = m[1]
if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
if getmask(m, ^v) {
i = int(m[0])
i := int(m[0])
m[0] = m[1] + 1
m[1] = byte(i - 1)
return true
@ -1422,7 +1395,7 @@ func getmask(m []byte, v uint32) bool {
return false
}
for i = 0; i < 32; i++ {
for i := 0; i < 32; i++ {
if v&(1<<uint(31-i)) != 0 {
m[0] = byte(i)
for {
@ -1455,11 +1428,9 @@ func maskgen(ctxt *obj.Link, p *obj.Prog, m []byte, v uint32) {
* 64-bit masks (rldic etc)
*/
func getmask64(m []byte, v uint64) bool {
var i int
m[1] = 0
m[0] = m[1]
for i = 0; i < 64; i++ {
for i := 0; i < 64; i++ {
if v&(uint64(1)<<uint(63-i)) != 0 {
m[0] = byte(i)
for {
@ -1489,9 +1460,7 @@ func maskgen64(ctxt *obj.Link, p *obj.Prog, m []byte, v uint64) {
}
func loadu32(r int, d int64) uint32 {
var v int32
v = int32(d >> 16)
v := int32(d >> 16)
if isuint32(uint64(d)) {
return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
}
@ -1506,24 +1475,11 @@ func high16adjusted(d int32) uint16 {
}
func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
var o1 uint32
var o2 uint32
var o3 uint32
var o4 uint32
var o5 uint32
var v int32
var t int32
var d int64
var r int
var a int
var mask [2]uint8
var rel *obj.Reloc
o1 = 0
o2 = 0
o3 = 0
o4 = 0
o5 = 0
o1 := uint32(0)
o2 := uint32(0)
o3 := uint32(0)
o4 := uint32(0)
o5 := uint32(0)
//print("%P => case %d\n", p, o->type);
switch o.type_ {
@ -1536,7 +1492,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */
if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
//nerrors--;
ctxt.Diag("literal operation on R0\n%v", p)
@ -1549,7 +1505,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
case 2: /* int/cr/fp op Rb,[Ra],Rd */
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
@ -1557,17 +1513,17 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
case 3: /* mov $soreg/addcon/ucon, r ==> addis/addi $i,reg',r */
d = vregoff(ctxt, &p.From)
d := vregoff(ctxt, &p.From)
v = int32(d)
r = int(p.From.Reg)
v := int32(d)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
ctxt.Diag("literal operation on R0\n%v", p)
}
a = OP_ADDI
a := OP_ADDI
if o.a1 == C_UCON {
if d&0xffff != 0 {
log.Fatalf("invalid handling of %v", p)
@ -1588,9 +1544,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
case 4: /* add/mul $scon,[r1],r2 */
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
@ -1606,7 +1562,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = uint32(oprrr(ctxt, int(p.As)))
case 6: /* logical op Rb,[Rs,]Ra; no literal */
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
@ -1614,12 +1570,12 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = LOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
case 7: /* mov r, soreg ==> stw o(r) */
r = int(p.To.Reg)
r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
v = regoff(ctxt, &p.To)
v := regoff(ctxt, &p.To)
if p.To.Type == obj.TYPE_MEM && p.Reg != 0 {
if v != 0 {
ctxt.Diag("illegal indexed instruction\n%v", p)
@ -1633,12 +1589,12 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
r = int(p.From.Reg)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
if p.From.Type == obj.TYPE_MEM && p.Reg != 0 {
if v != 0 {
ctxt.Diag("illegal indexed instruction\n%v", p)
@ -1652,12 +1608,12 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
r = int(p.From.Reg)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
if p.From.Type == obj.TYPE_MEM && p.Reg != 0 {
if v != 0 {
ctxt.Diag("illegal indexed instruction\n%v", p)
@ -1669,7 +1625,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
@ -1677,7 +1633,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
case 11: /* br/bl lbra */
v = 0
v := int32(0)
if p.Pcond != nil {
v = int32(p.Pcond.Pc - p.Pc)
@ -1693,7 +1649,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = OP_BR(uint32(opirr(ctxt, int(p.As))), uint32(v), 0)
if p.To.Sym != nil {
rel = obj.Addrel(ctxt.Cursym)
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 4
rel.Sym = p.To.Sym
@ -1709,7 +1665,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 12: /* movb r,r (extsb); movw r,r (extsw) */
if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST {
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
ctxt.Diag("literal operation on R0\n%v", p)
}
@ -1738,13 +1694,15 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
d = vregoff(ctxt, &p.From3)
d := vregoff(ctxt, &p.From3)
var mask [2]uint8
maskgen64(ctxt, p, mask[:], uint64(d))
var a int
switch p.As {
case ARLDCL,
ARLDCLCC:
@ -1773,16 +1731,16 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 17, /* bc bo,bi,lbra (same for now) */
16: /* bc bo,bi,sbra */
a = 0
a := 0
if p.From.Type == obj.TYPE_CONST {
a = int(regoff(ctxt, &p.From))
}
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = 0
}
v = 0
v := int32(0)
if p.Pcond != nil {
v = int32(p.Pcond.Pc - p.Pc)
}
@ -1797,12 +1755,13 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = OP_BC(uint32(opirr(ctxt, int(p.As))), uint32(a), uint32(r), uint32(v), 0)
case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
var v int32
if p.As == ABC || p.As == ABCL {
v = regoff(ctxt, &p.To) & 31
} else {
v = 20 /* unconditional */
}
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = 0
}
@ -1814,12 +1773,13 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o2 = OP_BCR(o2, uint32(v), uint32(r))
case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
var v int32
if p.As == ABC || p.As == ABCL {
v = regoff(ctxt, &p.From) & 31
} else {
v = 20 /* unconditional */
}
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = 0
}
@ -1841,7 +1801,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = OP_BCR(o1, uint32(v), uint32(r))
case 19: /* mov $lcon,r ==> cau+or */
d = vregoff(ctxt, &p.From)
d := vregoff(ctxt, &p.From)
if p.From.Sym == nil {
o1 = loadu32(int(p.To.Reg), d)
@ -1855,9 +1815,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
//if(dlm) reloc(&p->from, p->pc, 0);
case 20: /* add $ucon,,r */
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
@ -1870,10 +1830,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if p.To.Reg == REGTMP || p.Reg == REGTMP {
ctxt.Diag("cant synthesize large constant\n%v", p)
}
d = vregoff(ctxt, &p.From)
d := vregoff(ctxt, &p.From)
o1 = loadu32(REGTMP, d)
o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
@ -1888,10 +1848,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if p.To.Reg == REGTMP || p.Reg == REGTMP {
ctxt.Diag("cant synthesize large constant\n%v", p)
}
d = vregoff(ctxt, &p.From)
d := vregoff(ctxt, &p.From)
o1 = loadu32(REGTMP, d)
o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
@ -1905,17 +1865,18 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
/*24*/
case 25:
/* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
if v < 0 {
v = 0
} else if v > 63 {
v = 63
}
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
var a int
switch p.As {
case ASLD,
ASLDCC:
@ -1950,8 +1911,8 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if p.To.Reg == REGTMP {
ctxt.Diag("can't synthesize large constant\n%v", p)
}
v = regoff(ctxt, &p.From)
r = int(p.From.Reg)
v := regoff(ctxt, &p.From)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@ -1959,16 +1920,16 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v))
case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
v = regoff(ctxt, &p.From3)
v := regoff(ctxt, &p.From3)
r = int(p.From.Reg)
r := int(p.From.Reg)
o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
ctxt.Diag("can't synthesize large constant\n%v", p)
}
v = regoff(ctxt, &p.From3)
v := regoff(ctxt, &p.From3)
o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
o3 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
@ -1979,10 +1940,12 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
//if(dlm) reloc(&p->from3, p->pc, 0);
case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
d = vregoff(ctxt, &p.From3)
d := vregoff(ctxt, &p.From3)
var mask [2]uint8
maskgen64(ctxt, p, mask[:], uint64(d))
var a int
switch p.As {
case ARLDC,
ARLDCCC:
@ -2020,9 +1983,10 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
case 30: /* rldimi $sh,s,$mask,a */
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
d = vregoff(ctxt, &p.From3)
d := vregoff(ctxt, &p.From3)
var mask [2]uint8
maskgen64(ctxt, p, mask[:], uint64(d))
if int32(mask[1]) != (63 - v) {
ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
@ -2037,7 +2001,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
case 31: /* dword */
d = vregoff(ctxt, &p.From)
d := vregoff(ctxt, &p.From)
if ctxt.Arch.ByteOrder == binary.BigEndian {
o1 = uint32(d >> 32)
@ -2048,7 +2012,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
if p.From.Sym != nil {
rel = obj.Addrel(ctxt.Cursym)
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 8
rel.Sym = p.From.Sym
@ -2059,7 +2023,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
case 32: /* fmul frc,fra,frd */
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
@ -2067,7 +2031,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
case 33: /* fabs [frb,]frd; fmr. frb,frd */
r = int(p.From.Reg)
r := int(p.From.Reg)
if oclass(&p.From) == C_NONE {
r = int(p.To.Reg)
@ -2078,9 +2042,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.From3.Reg)&31)<<6
case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
v = regoff(ctxt, &p.To)
v := regoff(ctxt, &p.To)
r = int(p.To.Reg)
r := int(p.To.Reg)
if r == 0 {
r = int(o.param)
}
@ -2088,9 +2052,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o2 = AOP_IRR(uint32(opstore(ctxt, int(p.As))), uint32(p.From.Reg), REGTMP, uint32(v))
case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
r = int(p.From.Reg)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@ -2098,9 +2062,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
r = int(p.From.Reg)
r := int(p.From.Reg)
if r == 0 {
r = int(o.param)
}
@ -2118,7 +2082,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(regoff(ctxt, &p.From3))&0x7F)<<11
case 43: /* unary indexed source: dcbf (b); dcbf (a+b) */
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = 0
@ -2126,7 +2090,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, uint32(r), uint32(p.From.Reg))
case 44: /* indexed store */
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = 0
@ -2134,7 +2098,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = AOP_RRR(uint32(opstorex(ctxt, int(p.As))), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
case 45: /* indexed load */
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = 0
@ -2145,7 +2109,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = uint32(oprrr(ctxt, int(p.As)))
case 47: /* op Ra, Rd; also op [Ra,] Rd */
r = int(p.From.Reg)
r := int(p.From.Reg)
if r == 0 {
r = int(p.To.Reg)
@ -2153,7 +2117,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), 0)
case 48: /* op Rs, Ra */
r = int(p.From.Reg)
r := int(p.From.Reg)
if r == 0 {
r = int(p.To.Reg)
@ -2162,20 +2126,20 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 49: /* op Rb; op $n, Rb */
if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
v = regoff(ctxt, &p.From) & 1
v := regoff(ctxt, &p.From) & 1
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
} else {
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, 0, uint32(p.From.Reg))
}
case 50: /* rem[u] r1[,r2],r3 */
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
v = oprrr(ctxt, int(p.As))
t = v & (1<<10 | 1) /* OE|Rc */
v := oprrr(ctxt, int(p.As))
t := v & (1<<10 | 1) /* OE|Rc */
o1 = AOP_RRR(uint32(v)&^uint32(t), REGTMP, uint32(r), uint32(p.From.Reg))
o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
o3 = AOP_RRR(OP_SUBF|uint32(t), uint32(p.To.Reg), REGTMP, uint32(r))
@ -2187,19 +2151,19 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
case 51: /* remd[u] r1[,r2],r3 */
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
v = oprrr(ctxt, int(p.As))
t = v & (1<<10 | 1) /* OE|Rc */
v := oprrr(ctxt, int(p.As))
t := v & (1<<10 | 1) /* OE|Rc */
o1 = AOP_RRR(uint32(v)&^uint32(t), REGTMP, uint32(r), uint32(p.From.Reg))
o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
o3 = AOP_RRR(OP_SUBF|uint32(t), uint32(p.To.Reg), REGTMP, uint32(r))
case 52: /* mtfsbNx cr(n) */
v = regoff(ctxt, &p.From) & 31
v := regoff(ctxt, &p.From) & 31
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(v), 0, 0)
@ -2221,9 +2185,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), 0, uint32(p.From.Reg))
case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
@ -2233,9 +2197,9 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
case 57: /* slw $sh,[s,]a -> rlwinm ... */
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
@ -2252,6 +2216,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
} else if v > 32 {
v = 32
}
var mask [2]uint8
if p.As == ASRW || p.As == ASRWCC { /* shift right */
mask[0] = uint8(v)
mask[1] = 31
@ -2267,48 +2232,51 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
}
case 58: /* logical $andcon,[s],a */
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
o1 = LOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
case 59: /* or/and $ucon,,r */
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
r = int(p.Reg)
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
o1 = LOP_IRR(uint32(opirr(ctxt, int(p.As)+ALAST)), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis */
case 60: /* tw to,a,b */
r = int(regoff(ctxt, &p.From) & 31)
r := int(regoff(ctxt, &p.From) & 31)
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
case 61: /* tw to,a,$simm */
r = int(regoff(ctxt, &p.From) & 31)
r := int(regoff(ctxt, &p.From) & 31)
v = regoff(ctxt, &p.To)
v := regoff(ctxt, &p.To)
o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(r), uint32(p.Reg), uint32(v))
case 62: /* rlwmi $sh,s,$mask,a */
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
var mask [2]uint8
maskgen(ctxt, p, mask[:], uint32(regoff(ctxt, &p.From3)))
o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
case 63: /* rlwmi b,s,$mask,a */
var mask [2]uint8
maskgen(ctxt, p, mask[:], uint32(regoff(ctxt, &p.From3)))
o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
case 64: /* mtfsf fr[, $m] {,fpcsr} */
var v int32
if p.From3.Type != obj.TYPE_NONE {
v = regoff(ctxt, &p.From3) & 255
} else {
@ -2323,6 +2291,8 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(regoff(ctxt, &p.From))&31)<<12
case 66: /* mov spr,r1; mov r1,spr, also dcr */
var r int
var v int32
if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
r = int(p.From.Reg)
v = int32(p.To.Reg)
@ -2351,13 +2321,14 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 68: /* mfcr rD; mfocrf CRM,rD */
if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 {
v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
v := int32(1 << uint(7-(p.To.Reg&7))) /* CR(n) */
o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
} else {
o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
}
case 69: /* mtcrf CRM,rS */
var v int32
if p.From3.Type != obj.TYPE_NONE {
if p.To.Reg != 0 {
ctxt.Diag("can't use both mask and CR(n)\n%v", p)
@ -2374,6 +2345,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
case 70: /* [f]cmp r,r,cr*/
var r int
if p.Reg == 0 {
r = 0
} else {
@ -2382,6 +2354,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
case 71: /* cmp[l] r,i,cr*/
var r int
if p.Reg == 0 {
r = 0
} else {
@ -2420,7 +2393,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
/* relocation operations */
case 74:
v = regoff(ctxt, &p.To)
v := regoff(ctxt, &p.To)
o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(high16adjusted(v)))
o2 = AOP_IRR(uint32(opstore(ctxt, int(p.As))), uint32(p.From.Reg), REGTMP, uint32(v))
@ -2429,7 +2402,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
//if(dlm) reloc(&p->to, p->pc, 1);
case 75:
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(high16adjusted(v)))
o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
addaddrreloc(ctxt, p.From.Sym, &o1, &o2)
@ -2437,7 +2410,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
//if(dlm) reloc(&p->from, p->pc, 1);
case 76:
v = regoff(ctxt, &p.From)
v := regoff(ctxt, &p.From)
o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(high16adjusted(v)))
o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
addaddrreloc(ctxt, p.From.Sym, &o1, &o2)


@ -54,14 +54,9 @@ const (
var bigP *obj.Prog
func Pconv(p *obj.Prog) string {
var str string
var fp string
a := int(p.As)
var a int
a = int(p.As)
str = ""
str := ""
if a == obj.ADATA {
str = fmt.Sprintf("%.5d (%v)\t%v\t%v/%d,%v",
p.Pc, p.Line(), Aconv(a), obj.Dconv(p, &p.From), p.From3.Offset, obj.Dconv(p, &p.To))
@ -107,23 +102,23 @@ func Pconv(p *obj.Prog) string {
}
if p.Spadj != 0 {
var fp string
fp += fmt.Sprintf("%s # spadj=%d", str, p.Spadj)
return fp
}
}
var fp string
fp += str
return fp
}
func Aconv(a int) string {
var s string
var fp string
s = "???"
s := "???"
if a >= obj.AXXX && a < ALAST {
s = Anames[a]
}
var fp string
fp += s
return fp
}
@ -177,13 +172,11 @@ func Rconv(r int) string {
}
func DRconv(a int) string {
var s string
var fp string
s = "C_??"
s := "C_??"
if a >= C_NONE && a <= C_NCLASS {
s = cnames9[a]
}
var fp string
fp += s
return fp
}


@ -37,9 +37,6 @@ import (
)
func progedit(ctxt *obj.Link, p *obj.Prog) {
var literal string
var s *obj.LSym
p.From.Class = 0
p.To.Class = 0
@ -59,12 +56,10 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
switch p.As {
case AFMOVS:
if p.From.Type == obj.TYPE_FCONST {
var i32 uint32
var f32 float32
f32 = float32(p.From.U.Dval)
i32 = math.Float32bits(f32)
literal = fmt.Sprintf("$f32.%08x", i32)
s = obj.Linklookup(ctxt, literal, 0)
f32 := float32(p.From.U.Dval)
i32 := math.Float32bits(f32)
literal := fmt.Sprintf("$f32.%08x", i32)
s := obj.Linklookup(ctxt, literal, 0)
s.Size = 4
p.From.Type = obj.TYPE_MEM
p.From.Sym = s
@ -74,10 +69,9 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
case AFMOVD:
if p.From.Type == obj.TYPE_FCONST {
var i64 uint64
i64 = math.Float64bits(p.From.U.Dval)
literal = fmt.Sprintf("$f64.%016x", i64)
s = obj.Linklookup(ctxt, literal, 0)
i64 := math.Float64bits(p.From.U.Dval)
literal := fmt.Sprintf("$f64.%016x", i64)
s := obj.Linklookup(ctxt, literal, 0)
s.Size = 8
p.From.Type = obj.TYPE_MEM
p.From.Sym = s
@ -88,8 +82,8 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
// Put >32-bit constants in memory and load them
case AMOVD:
if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE && p.From.Reg == 0 && int64(int32(p.From.Offset)) != p.From.Offset {
literal = fmt.Sprintf("$i64.%016x", uint64(p.From.Offset))
s = obj.Linklookup(ctxt, literal, 0)
literal := fmt.Sprintf("$i64.%016x", uint64(p.From.Offset))
s := obj.Linklookup(ctxt, literal, 0)
s.Size = 8
p.From.Type = obj.TYPE_MEM
p.From.Sym = s
@ -121,17 +115,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
}
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
var p *obj.Prog
var q *obj.Prog
var p1 *obj.Prog
var p2 *obj.Prog
var q1 *obj.Prog
var o int
var mov int
var aoffset int
var textstksiz int64
var autosize int32
if ctxt.Symmorestack[0] == nil {
ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
@ -144,8 +127,8 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
return
}
p = cursym.Text
textstksiz = p.To.Offset
p := cursym.Text
textstksiz := p.To.Offset
cursym.Args = p.To.U.Argsize
cursym.Locals = int32(textstksiz)
@ -161,8 +144,9 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
}
obj.Bflush(ctxt.Bso)
q = nil
for p = cursym.Text; p != nil; p = p.Link {
q := (*obj.Prog)(nil)
var q1 *obj.Prog
for p := cursym.Text; p != nil; p = p.Link {
switch p.As {
/* too hard, just leave alone */
case obj.ATEXT:
@ -326,8 +310,13 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
}
}
autosize = 0
for p = cursym.Text; p != nil; p = p.Link {
autosize := int32(0)
var aoffset int
var mov int
var o int
var p1 *obj.Prog
var p2 *obj.Prog
for p := cursym.Text; p != nil; p = p.Link {
o = int(p.As)
switch o {
case obj.ATEXT:
@ -643,9 +632,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
}
*/
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool) *obj.Prog {
var q *obj.Prog
var q1 *obj.Prog
// MOVD g_stackguard(g), R3
p = obj.Appendp(ctxt, p)
@ -659,7 +645,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool) *obj.
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R3
q = nil
q := (*obj.Prog)(nil)
if framesize <= obj.StackSmall {
// small stack: SP < stackguard
// CMP stackguard, SP
@ -750,7 +736,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool) *obj.
// q1: BLT done
p = obj.Appendp(ctxt, p)
q1 = p
q1 := p
p.As = ABLT
p.To.Type = obj.TYPE_BRANCH
@ -795,13 +781,10 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool) *obj.
}
func follow(ctxt *obj.Link, s *obj.LSym) {
var firstp *obj.Prog
var lastp *obj.Prog
ctxt.Cursym = s
firstp = ctxt.NewProg()
lastp = firstp
firstp := ctxt.NewProg()
lastp := firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
s.Text = firstp.Link

@ -123,8 +123,6 @@ func Headstr(v int) string {
}
func Linknew(arch *LinkArch) *Link {
var buf string
linksetexp()
ctxt := new(Link)
@ -137,6 +135,7 @@ func Linknew(arch *LinkArch) *Link {
ctxt.Windows = 1
}
var buf string
buf, _ = os.Getwd()
if buf == "" {
buf = "/???"

@ -55,7 +55,6 @@ var bigP *obj.Prog
func Pconv(p *obj.Prog) string {
var str string
var fp string
switch p.As {
case obj.ADATA:
@ -84,6 +83,7 @@ func Pconv(p *obj.Prog) string {
}
}
var fp string
fp += str
return fp
}

@ -49,10 +49,6 @@ func canuselocaltls(ctxt *obj.Link) bool {
}
func progedit(ctxt *obj.Link, p *obj.Prog) {
var literal string
var s *obj.LSym
var q *obj.Prog
// Thread-local storage references use the TLS pseudo-register.
// As a register, TLS refers to the thread-local storage base, and it
// can only be loaded into another register:
@ -121,7 +117,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
// MOVQ off(BX)(TLS*1), BX
// This allows the C compilers to emit references to m and g using the direct off(TLS) form.
if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
q = obj.Appendp(ctxt, p)
q := obj.Appendp(ctxt, p)
q.As = p.As
q.From = p.From
q.From.Type = obj.TYPE_MEM
@ -214,12 +210,10 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
ACOMISS,
AUCOMISS:
if p.From.Type == obj.TYPE_FCONST {
var i32 uint32
var f32 float32
f32 = float32(p.From.U.Dval)
i32 = math.Float32bits(f32)
literal = fmt.Sprintf("$f32.%08x", i32)
s = obj.Linklookup(ctxt, literal, 0)
f32 := float32(p.From.U.Dval)
i32 := math.Float32bits(f32)
literal := fmt.Sprintf("$f32.%08x", i32)
s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint32(ctxt, s, i32)
@ -262,10 +256,9 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
ACOMISD,
AUCOMISD:
if p.From.Type == obj.TYPE_FCONST {
var i64 uint64
i64 = math.Float64bits(p.From.U.Dval)
literal = fmt.Sprintf("$f64.%016x", i64)
s = obj.Linklookup(ctxt, literal, 0)
i64 := math.Float64bits(p.From.U.Dval)
literal := fmt.Sprintf("$f64.%016x", i64)
s := obj.Linklookup(ctxt, literal, 0)
if s.Type == 0 {
s.Type = obj.SRODATA
obj.Adduint64(ctxt, s, i64)
@ -315,17 +308,6 @@ func nacladdr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
}
func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
var p *obj.Prog
var q *obj.Prog
var p1 *obj.Prog
var p2 *obj.Prog
var autoffset int32
var deltasp int32
var a int
var pcsize int
var bpsize int
var textarg int64
if ctxt.Tlsg == nil {
ctxt.Tlsg = obj.Linklookup(ctxt, "runtime.tlsg", 0)
}
@ -344,12 +326,13 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
return
}
p = cursym.Text
autoffset = int32(p.To.Offset)
p := cursym.Text
autoffset := int32(p.To.Offset)
if autoffset < 0 {
autoffset = 0
}
var bpsize int
if obj.Framepointer_enabled != 0 && autoffset > 0 {
// Make room for to save a base pointer. If autoffset == 0,
// this might do something special like a tail jump to
@ -362,12 +345,12 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
bpsize = 0
}
textarg = int64(p.To.U.Argsize)
textarg := int64(p.To.U.Argsize)
cursym.Args = int32(textarg)
cursym.Locals = int32(p.To.Offset)
if autoffset < obj.StackSmall && p.From3.Offset&obj.NOSPLIT == 0 {
for q = p; q != nil; q = q.Link {
for q := p; q != nil; q = q.Link {
if q.As == obj.ACALL {
goto noleaf
}
@ -380,7 +363,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
noleaf:
}
q = nil
q := (*obj.Prog)(nil)
if p.From3.Offset&obj.NOSPLIT == 0 || (p.From3.Offset&obj.WRAPPER != 0) {
p = obj.Appendp(ctxt, p)
p = load_g_cx(ctxt, p) // load g into CX
@ -415,7 +398,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
if q != nil {
q.Pcond = p
}
deltasp = autoffset
deltasp := autoffset
if bpsize > 0 {
// Save caller's BP
@ -486,7 +469,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
p = obj.Appendp(ctxt, p)
p.As = AJEQ
p.To.Type = obj.TYPE_BRANCH
p1 = p
p1 := p
p = obj.Appendp(ctxt, p)
p.As = ALEAQ
@ -517,7 +500,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
p = obj.Appendp(ctxt, p)
p.As = AJNE
p.To.Type = obj.TYPE_BRANCH
p2 = p
p2 := p
p = obj.Appendp(ctxt, p)
p.As = AMOVQ
@ -573,6 +556,8 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
p.As = ASTOSQ
}
var a int
var pcsize int
for ; p != nil; p = p.Link {
pcsize = int(p.Mode) / 8
a = int(p.From.Name)
@ -691,8 +676,6 @@ func indir_cx(ctxt *obj.Link, a *obj.Addr) {
// prologue (caller must call appendp first) and in the epilogue.
// Returns last new instruction.
func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
var next *obj.Prog
p.As = AMOVQ
if ctxt.Arch.Ptrsize == 4 {
p.As = AMOVL
@ -703,7 +686,7 @@ func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_CX
next = p.Link
next := p.Link
progedit(ctxt, p)
for p.Link != next {
p = p.Link
@ -723,17 +706,10 @@ func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
// On return, *jmpok is the instruction that should jump
// to the stack frame allocation if no split is needed.
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32, noctxt bool, jmpok **obj.Prog) *obj.Prog {
var q *obj.Prog
var q1 *obj.Prog
var cmp int
var lea int
var mov int
var sub int
cmp = ACMPQ
lea = ALEAQ
mov = AMOVQ
sub = ASUBQ
cmp := ACMPQ
lea := ALEAQ
mov := AMOVQ
sub := ASUBQ
if ctxt.Headtype == obj.Hnacl {
cmp = ACMPL
@ -742,7 +718,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32, noc
sub = ASUBL
}
q1 = nil
q1 := (*obj.Prog)(nil)
if framesize <= obj.StackSmall {
// small stack: SP <= stackguard
// CMPQ SP, stackguard
@ -845,7 +821,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32, noc
p.As = AJHI
p.To.Type = obj.TYPE_BRANCH
q = p
q := p
p = obj.Appendp(ctxt, p)
p.As = obj.ACALL
@ -873,13 +849,10 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32, noc
}
func follow(ctxt *obj.Link, s *obj.LSym) {
var firstp *obj.Prog
var lastp *obj.Prog
ctxt.Cursym = s
firstp = ctxt.NewProg()
lastp = firstp
firstp := ctxt.NewProg()
lastp := firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
s.Text = firstp.Link