cmd/internal/gc: move cgen, regalloc, et al to portable code

This CL moves the bulk of the code that has been copy-and-pasted
since the initial 386 port back into a shared place, cutting 5 copies to 1.

The motivation here is not cleanup per se but instead to reduce the
cost of introducing changes in shared concepts like regalloc or general
expression evaluation. For example, a change after this one will
implement x.(*T) without a call into the runtime. This CL makes that
followup work 5x easier.

The single copy still has more special cases for architecture details
than I'd like, but having them called out explicitly like this at least
opens the door to generalizing the conditions and smoothing out
the distinctions in the future.

This is a LARGE CL. I started by trying to pull in one function at a time
in a sequence of CLs and it became clear that everything was so
interrelated that it had to be moved as a whole. Apologies for the size.

It is not clear how many more releases this code will matter for;
eventually it will be replaced by Keith's SSA work. But as noted above,
the deduplication was necessary to reduce the cost of working on
the current code while we have it.

Passes tests on amd64, 386, arm, and ppc64le.
Can build arm64 binaries but not tested there.
Being able to build binaries means it is probably very close.

Change-Id: I735977f04c0614f80215fb12966dfe9bbd1f5861
Reviewed-on: https://go-review.googlesource.com/7853
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Russ Cox 2015-03-18 17:26:36 -04:00
parent 11dba2ec2d
commit b115c35ee3
39 changed files with 3935 additions and 10636 deletions
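The mechanics of the consolidation show up in the arch main() hunks below: reading the paired old and new lines, gc.Thearch loses whole-function hooks such as Cgen, Regalloc, and Regfree, and instead gains the parameters the now-portable code needs, register ranges (REGMIN/REGMAX, FREGMIN/FREGMAX), a reserved-register list, and small emit hooks like Ginscon and Ginsnop. A hedged sketch of the resulting shape, with illustrative stub types and only a few of the fields:

package gc

// Node and Type stand in for the real compiler types.
type Node struct{}
type Type struct{}

// Arch sketches the Thearch hook table after this CL: the ranges parameterize
// the portable register allocator; the funcs cover what stays machine-specific.
type Arch struct {
	REGMIN, REGMAX   int   // general registers the allocator may hand out
	FREGMIN, FREGMAX int   // floating-point register range
	ReservedRegs     []int // never allocated (SP, g, ...)

	Ginscon func(as int, c int64, n *Node) // emit "as $c, n"
	Ginsnop func()                         // emit a no-op carrying a line number
}

var Thearch Arch

var reg [64]uint8 // simplified per-register use count

// Regalloc, now portable: scan the architecture's range for a free register.
// A sketch of the idea, not the actual implementation.
func Regalloc(n *Node, t *Type, o *Node) {
	for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
		if reg[r] == 0 {
			reg[r]++
			// ... record r in n and return it to the caller ...
			return
		}
	}
	panic("out of registers")
}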

File diff suppressed because it is too large.

@@ -26,7 +26,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
var t1 gc.Node
if l.Addable == 0 {
gc.Tempname(&t1, l.Type)
cgen(l, &t1)
gc.Cgen(l, &t1)
l = &t1
}
@@ -42,11 +42,11 @@ func cgen64(n *gc.Node, res *gc.Node) {
var hi2 gc.Node
split64(res, &lo2, &hi2)
regalloc(&t1, lo1.Type, nil)
gc.Regalloc(&t1, lo1.Type, nil)
var al gc.Node
regalloc(&al, lo1.Type, nil)
gc.Regalloc(&al, lo1.Type, nil)
var ah gc.Node
regalloc(&ah, hi1.Type, nil)
gc.Regalloc(&ah, hi1.Type, nil)
gins(arm.AMOVW, &lo1, &al)
gins(arm.AMOVW, &hi1, &ah)
@@ -60,22 +60,22 @@ func cgen64(n *gc.Node, res *gc.Node) {
gins(arm.ASBC, &ah, &t1)
gins(arm.AMOVW, &t1, &hi2)
regfree(&t1)
regfree(&al)
regfree(&ah)
gc.Regfree(&t1)
gc.Regfree(&al)
gc.Regfree(&ah)
splitclean()
splitclean()
return
case gc.OCOM:
regalloc(&t1, lo1.Type, nil)
gc.Regalloc(&t1, lo1.Type, nil)
gmove(ncon(^uint32(0)), &t1)
var lo2 gc.Node
var hi2 gc.Node
split64(res, &lo2, &hi2)
var n1 gc.Node
regalloc(&n1, lo1.Type, nil)
gc.Regalloc(&n1, lo1.Type, nil)
gins(arm.AMOVW, &lo1, &n1)
gins(arm.AEOR, &t1, &n1)
@@ -85,8 +85,8 @@ func cgen64(n *gc.Node, res *gc.Node) {
gins(arm.AEOR, &t1, &n1)
gins(arm.AMOVW, &n1, &hi2)
regfree(&t1)
regfree(&n1)
gc.Regfree(&t1)
gc.Regfree(&n1)
splitclean()
splitclean()
return
@@ -111,7 +111,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
if r != nil && r.Addable == 0 {
var t2 gc.Node
gc.Tempname(&t2, r.Type)
cgen(r, &t2)
gc.Cgen(r, &t2)
r = &t2
}
@@ -122,9 +122,9 @@ func cgen64(n *gc.Node, res *gc.Node) {
}
var al gc.Node
regalloc(&al, lo1.Type, nil)
gc.Regalloc(&al, lo1.Type, nil)
var ah gc.Node
regalloc(&ah, hi1.Type, nil)
gc.Regalloc(&ah, hi1.Type, nil)
// Do op. Leave result in ah:al.
switch n.Op {
@@ -134,10 +134,10 @@ func cgen64(n *gc.Node, res *gc.Node) {
// TODO: Constants
case gc.OADD:
var bl gc.Node
regalloc(&bl, gc.Types[gc.TPTR32], nil)
gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)
var bh gc.Node
regalloc(&bh, gc.Types[gc.TPTR32], nil)
gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
gins(arm.AMOVW, &hi1, &ah)
gins(arm.AMOVW, &lo1, &al)
gins(arm.AMOVW, &hi2, &bh)
@@ -145,16 +145,16 @@ func cgen64(n *gc.Node, res *gc.Node) {
p1 := gins(arm.AADD, &bl, &al)
p1.Scond |= arm.C_SBIT
gins(arm.AADC, &bh, &ah)
regfree(&bl)
regfree(&bh)
gc.Regfree(&bl)
gc.Regfree(&bh)
// TODO: Constants.
case gc.OSUB:
var bl gc.Node
regalloc(&bl, gc.Types[gc.TPTR32], nil)
gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)
var bh gc.Node
regalloc(&bh, gc.Types[gc.TPTR32], nil)
gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
gins(arm.AMOVW, &lo1, &al)
gins(arm.AMOVW, &hi1, &ah)
gins(arm.AMOVW, &lo2, &bl)
@@ -162,20 +162,20 @@ func cgen64(n *gc.Node, res *gc.Node) {
p1 := gins(arm.ASUB, &bl, &al)
p1.Scond |= arm.C_SBIT
gins(arm.ASBC, &bh, &ah)
regfree(&bl)
regfree(&bh)
gc.Regfree(&bl)
gc.Regfree(&bh)
// TODO(kaib): this can be done with 4 regs and does not need 6
case gc.OMUL:
var bl gc.Node
regalloc(&bl, gc.Types[gc.TPTR32], nil)
gc.Regalloc(&bl, gc.Types[gc.TPTR32], nil)
var bh gc.Node
regalloc(&bh, gc.Types[gc.TPTR32], nil)
gc.Regalloc(&bh, gc.Types[gc.TPTR32], nil)
var cl gc.Node
regalloc(&cl, gc.Types[gc.TPTR32], nil)
gc.Regalloc(&cl, gc.Types[gc.TPTR32], nil)
var ch gc.Node
regalloc(&ch, gc.Types[gc.TPTR32], nil)
gc.Regalloc(&ch, gc.Types[gc.TPTR32], nil)
// load args into bh:bl and ch:cl.
gins(arm.AMOVW, &hi1, &bh)
@@ -220,11 +220,11 @@ func cgen64(n *gc.Node, res *gc.Node) {
//print("%P\n", p1);
regfree(&bh)
gc.Regfree(&bh)
regfree(&bl)
regfree(&ch)
regfree(&cl)
gc.Regfree(&bl)
gc.Regfree(&ch)
gc.Regfree(&cl)
// We only rotate by a constant c in [0,64).
// if c >= 32:
@@ -240,9 +240,9 @@ func cgen64(n *gc.Node, res *gc.Node) {
v := uint64(gc.Mpgetfix(r.Val.U.Xval))
var bl gc.Node
regalloc(&bl, lo1.Type, nil)
gc.Regalloc(&bl, lo1.Type, nil)
var bh gc.Node
regalloc(&bh, hi1.Type, nil)
gc.Regalloc(&bh, hi1.Type, nil)
if v >= 32 {
// reverse during load to do the first 32 bits of rotate
v -= 32
@@ -270,14 +270,14 @@ func cgen64(n *gc.Node, res *gc.Node) {
gshift(arm.AORR, &bh, arm.SHIFT_LR, int32(32-v), &al)
}
regfree(&bl)
regfree(&bh)
gc.Regfree(&bl)
gc.Regfree(&bh)
case gc.OLSH:
var bl gc.Node
regalloc(&bl, lo1.Type, nil)
gc.Regalloc(&bl, lo1.Type, nil)
var bh gc.Node
regalloc(&bh, hi1.Type, nil)
gc.Regalloc(&bh, hi1.Type, nil)
gins(arm.AMOVW, &hi1, &bh)
gins(arm.AMOVW, &lo1, &bl)
@@ -323,8 +323,8 @@ func cgen64(n *gc.Node, res *gc.Node) {
goto olsh_break
}
regalloc(&s, gc.Types[gc.TUINT32], nil)
regalloc(&creg, gc.Types[gc.TUINT32], nil)
gc.Regalloc(&s, gc.Types[gc.TUINT32], nil)
gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil)
if gc.Is64(r.Type) {
// shift is >= 1<<32
var cl gc.Node
@@ -355,7 +355,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)
gmove(&n1, &creg)
gcmp(arm.ACMP, &s, &creg)
gins(arm.ACMP, &s, &creg)
// MOVW.LO bl<<s, al
p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &al)
@@ -392,7 +392,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)
gmove(&n1, &creg)
gcmp(arm.ACMP, &s, &creg)
gins(arm.ACMP, &s, &creg)
// EOR.LO al, al
p1 = gins(arm.AEOR, &al, &al)
@@ -427,18 +427,18 @@ func cgen64(n *gc.Node, res *gc.Node) {
gc.Patch(p3, gc.Pc)
gc.Patch(p4, gc.Pc)
gc.Patch(p5, gc.Pc)
regfree(&s)
regfree(&creg)
gc.Regfree(&s)
gc.Regfree(&creg)
olsh_break:
regfree(&bl)
regfree(&bh)
gc.Regfree(&bl)
gc.Regfree(&bh)
case gc.ORSH:
var bl gc.Node
regalloc(&bl, lo1.Type, nil)
gc.Regalloc(&bl, lo1.Type, nil)
var bh gc.Node
regalloc(&bh, hi1.Type, nil)
gc.Regalloc(&bh, hi1.Type, nil)
gins(arm.AMOVW, &hi1, &bh)
gins(arm.AMOVW, &lo1, &bl)
@@ -507,8 +507,8 @@ func cgen64(n *gc.Node, res *gc.Node) {
goto orsh_break
}
regalloc(&s, gc.Types[gc.TUINT32], nil)
regalloc(&creg, gc.Types[gc.TUINT32], nil)
gc.Regalloc(&s, gc.Types[gc.TUINT32], nil)
gc.Regalloc(&creg, gc.Types[gc.TUINT32], nil)
if gc.Is64(r.Type) {
// shift is >= 1<<32
var ch gc.Node
@@ -546,7 +546,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)
gmove(&n1, &creg)
gcmp(arm.ACMP, &s, &creg)
gins(arm.ACMP, &s, &creg)
// MOVW.LO bl>>s, al
p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LR, &s, &al)
@@ -591,7 +591,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)
gmove(&n1, &creg)
gcmp(arm.ACMP, &s, &creg)
gins(arm.ACMP, &s, &creg)
// MOVW.LO creg>>1, creg
p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)
@@ -633,12 +633,12 @@ func cgen64(n *gc.Node, res *gc.Node) {
gc.Patch(p3, gc.Pc)
gc.Patch(p4, gc.Pc)
gc.Patch(p5, gc.Pc)
regfree(&s)
regfree(&creg)
gc.Regfree(&s)
gc.Regfree(&creg)
orsh_break:
regfree(&bl)
regfree(&bh)
gc.Regfree(&bl)
gc.Regfree(&bh)
// TODO(kaib): literal optimizations
// make constant the right side (it usually is anyway).
@@ -736,7 +736,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
gc.OAND,
gc.OOR:
var n1 gc.Node
regalloc(&n1, lo1.Type, nil)
gc.Regalloc(&n1, lo1.Type, nil)
gins(arm.AMOVW, &lo1, &al)
gins(arm.AMOVW, &hi1, &ah)
@@ -744,7 +744,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
gins(optoas(int(n.Op), lo1.Type), &n1, &al)
gins(arm.AMOVW, &hi2, &n1)
gins(optoas(int(n.Op), lo1.Type), &n1, &ah)
regfree(&n1)
gc.Regfree(&n1)
}
if gc.Is64(r.Type) {
@@ -758,9 +758,9 @@ func cgen64(n *gc.Node, res *gc.Node) {
splitclean()
//out:
regfree(&al)
gc.Regfree(&al)
regfree(&ah)
gc.Regfree(&ah)
}
/*
@@ -782,13 +782,13 @@ func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
// if they differ, we're done.
t := hi1.Type
regalloc(&r1, gc.Types[gc.TINT32], nil)
regalloc(&r2, gc.Types[gc.TINT32], nil)
gc.Regalloc(&r1, gc.Types[gc.TINT32], nil)
gc.Regalloc(&r2, gc.Types[gc.TINT32], nil)
gins(arm.AMOVW, &hi1, &r1)
gins(arm.AMOVW, &hi2, &r2)
gcmp(arm.ACMP, &r1, &r2)
regfree(&r1)
regfree(&r2)
gins(arm.ACMP, &r1, &r2)
gc.Regfree(&r1)
gc.Regfree(&r2)
var br *obj.Prog
switch op {
@@ -838,13 +838,13 @@ func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
// compare least significant word
t = lo1.Type
regalloc(&r1, gc.Types[gc.TINT32], nil)
regalloc(&r2, gc.Types[gc.TINT32], nil)
gc.Regalloc(&r1, gc.Types[gc.TINT32], nil)
gc.Regalloc(&r2, gc.Types[gc.TINT32], nil)
gins(arm.AMOVW, &lo1, &r1)
gins(arm.AMOVW, &lo2, &r2)
gcmp(arm.ACMP, &r1, &r2)
regfree(&r1)
regfree(&r2)
gins(arm.ACMP, &r1, &r2)
gc.Regfree(&r1)
gc.Regfree(&r2)
// jump again
gc.Patch(gc.Gbranch(optoas(op, t), nil, likely), to)
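The cmp64 hunks above keep the classic two-word comparison: compare the high words first and let them decide when they differ, falling through to an unsigned comparison of the low words only on equality. As ordinary Go, for the signed less-than case (a sketch):

// cmp64Less models the branch structure cmp64 emits on a 32-bit machine.
// The high words carry the sign; the low words always compare unsigned.
func cmp64Less(xhi, yhi int32, xlo, ylo uint32) bool {
	if xhi != yhi {
		return xhi < yhi
	}
	return xlo < ylo
}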


@@ -45,33 +45,40 @@ func main() {
gc.Thearch.Typedefs = typedefs
gc.Thearch.REGSP = arm.REGSP
gc.Thearch.REGCTXT = arm.REGCTXT
gc.Thearch.REGCALLX = arm.REG_R1
gc.Thearch.REGCALLX2 = arm.REG_R2
gc.Thearch.REGRETURN = arm.REG_R0
gc.Thearch.REGMIN = arm.REG_R0
gc.Thearch.REGMAX = arm.REGEXT
gc.Thearch.FREGMIN = arm.REG_F0
gc.Thearch.FREGMAX = arm.FREGEXT
gc.Thearch.MAXWIDTH = MAXWIDTH
gc.Thearch.Anyregalloc = anyregalloc
gc.Thearch.ReservedRegs = resvd
gc.Thearch.Betypeinit = betypeinit
gc.Thearch.Bgen = bgen
gc.Thearch.Cgen = cgen
gc.Thearch.Cgen_call = cgen_call
gc.Thearch.Cgen_callinter = cgen_callinter
gc.Thearch.Cgen_ret = cgen_ret
gc.Thearch.Cgen64 = cgen64
gc.Thearch.Cgen_hmul = cgen_hmul
gc.Thearch.Cgen_shift = cgen_shift
gc.Thearch.Clearfat = clearfat
gc.Thearch.Cmp64 = cmp64
gc.Thearch.Defframe = defframe
gc.Thearch.Excise = excise
gc.Thearch.Expandchecks = expandchecks
gc.Thearch.Gclean = gclean
gc.Thearch.Ginit = ginit
gc.Thearch.Gins = gins
gc.Thearch.Ginscall = ginscall
gc.Thearch.Ginscon = ginscon
gc.Thearch.Ginsnop = ginsnop
gc.Thearch.Gmove = gmove
gc.Thearch.Igen = igen
gc.Thearch.Cgenindex = cgenindex
gc.Thearch.Linkarchinit = linkarchinit
gc.Thearch.Peep = peep
gc.Thearch.Proginfo = proginfo
gc.Thearch.Regalloc = regalloc
gc.Thearch.Regfree = regfree
gc.Thearch.Regtyp = regtyp
gc.Thearch.Sameaddr = sameaddr
gc.Thearch.Smallindir = smallindir
gc.Thearch.Stackaddr = stackaddr
gc.Thearch.Stackcopy = stackcopy
gc.Thearch.Sudoaddable = sudoaddable
gc.Thearch.Sudoclean = sudoclean
gc.Thearch.Excludedregs = excludedregs
gc.Thearch.RtoB = RtoB
gc.Thearch.FtoB = RtoB


@@ -1,32 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "cmd/internal/obj/arm"
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
const (
REGALLOC_R0 = arm.REG_R0
REGALLOC_RMAX = arm.REGEXT
REGALLOC_F0 = arm.REG_F0
REGALLOC_FMAX = arm.FREGEXT
)
var reg [REGALLOC_FMAX + 1]uint8
/*
* cgen
*/
/*
* list.c
*/
/*
* reg.c
*/


@@ -114,324 +114,6 @@ func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int32, ttype int
return q
}
/*
* generate:
* call f
* proc=-1 normal call but no return
* proc=0 normal call
* proc=1 goroutine run in new proc
* proc=2 defer call save away stack
* proc=3 normal call to C pointer (not Go func value)
*/
func ginscall(f *gc.Node, proc int) {
if f.Type != nil {
extra := int32(0)
if proc == 1 || proc == 2 {
extra = 2 * int32(gc.Widthptr)
}
gc.Setmaxarg(f.Type, extra)
}
switch proc {
default:
gc.Fatal("ginscall: bad proc %d", proc)
case 0, // normal call
-1: // normal call but no return
if f.Op == gc.ONAME && f.Class == gc.PFUNC {
if f == gc.Deferreturn {
// Deferred calls will appear to be returning to
// the BL deferreturn(SB) that we are about to emit.
// However, the stack trace code will show the line
// of the instruction before that return PC.
// To avoid that instruction being an unrelated instruction,
// insert a NOP so that we will have the right line number.
// ARM NOP 0x00000000 is really AND.EQ R0, R0, R0.
// Use the latter form because the NOP pseudo-instruction
// would be removed by the linker.
var r gc.Node
gc.Nodreg(&r, gc.Types[gc.TINT], arm.REG_R0)
p := gins(arm.AAND, &r, &r)
p.Scond = arm.C_SCOND_EQ
}
p := gins(arm.ABL, nil, f)
gc.Afunclit(&p.To, f)
if proc == -1 || gc.Noreturn(p) {
gins(obj.AUNDEF, nil, nil)
}
break
}
var r gc.Node
gc.Nodreg(&r, gc.Types[gc.Tptr], arm.REG_R7)
var r1 gc.Node
gc.Nodreg(&r1, gc.Types[gc.Tptr], arm.REG_R1)
gmove(f, &r)
r.Op = gc.OINDREG
gmove(&r, &r1)
r.Op = gc.OREGISTER
r1.Op = gc.OINDREG
gins(arm.ABL, &r, &r1)
case 3: // normal call of c function pointer
gins(arm.ABL, nil, f)
case 1, // call in new proc (go)
2: // deferred call (defer)
var r gc.Node
regalloc(&r, gc.Types[gc.Tptr], nil)
var con gc.Node
gc.Nodconst(&con, gc.Types[gc.TINT32], int64(gc.Argsize(f.Type)))
gins(arm.AMOVW, &con, &r)
p := gins(arm.AMOVW, &r, nil)
p.To.Type = obj.TYPE_MEM
p.To.Reg = arm.REGSP
p.To.Offset = 4
gins(arm.AMOVW, f, &r)
p = gins(arm.AMOVW, &r, nil)
p.To.Type = obj.TYPE_MEM
p.To.Reg = arm.REGSP
p.To.Offset = 8
regfree(&r)
if proc == 1 {
ginscall(gc.Newproc, 0)
} else {
ginscall(gc.Deferproc, 0)
}
if proc == 2 {
gc.Nodconst(&con, gc.Types[gc.TINT32], 0)
p := gins(arm.ACMP, &con, nil)
p.Reg = arm.REG_R0
p = gc.Gbranch(arm.ABEQ, nil, +1)
cgen_ret(nil)
gc.Patch(p, gc.Pc)
}
}
}
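The proc parameter that ginscall and its callers thread around encodes which construct produced the call, per the comment at the top of the function. Named constants make the table concrete (hypothetical names; the compiler passes the raw integers):

const (
	callNoReturn = -1 // normal call, callee never returns
	callNormal   = 0  // f(x)
	callGo       = 1  // go f(x): routed through runtime.newproc
	callDefer    = 2  // defer f(x): routed through runtime.deferproc
	callCPtr     = 3  // call of a raw code pointer, not a Go func value
)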
/*
* n is call to interface method.
* generate res = n.
*/
func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
i := n.Left
if i.Op != gc.ODOTINTER {
gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
}
f := i.Right // field
if f.Op != gc.ONAME {
gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
}
i = i.Left // interface
// Release res register during genlist and cgen,
// which might have their own function calls.
r := -1
if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
r = int(res.Val.U.Reg)
reg[r]--
}
if i.Addable == 0 {
var tmpi gc.Node
gc.Tempname(&tmpi, i.Type)
cgen(i, &tmpi)
i = &tmpi
}
gc.Genlist(n.List) // args
if r >= 0 {
reg[r]++
}
var nodr gc.Node
regalloc(&nodr, gc.Types[gc.Tptr], res)
var nodo gc.Node
regalloc(&nodo, gc.Types[gc.Tptr], &nodr)
nodo.Op = gc.OINDREG
agen(i, &nodr) // REG = &inter
var nodsp gc.Node
gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], arm.REGSP)
nodsp.Xoffset = int64(gc.Widthptr)
if proc != 0 {
nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
}
nodo.Xoffset += int64(gc.Widthptr)
cgen(&nodo, &nodsp) // {4 or 12}(SP) = 4(REG) -- i.data
nodo.Xoffset -= int64(gc.Widthptr)
cgen(&nodo, &nodr) // REG = 0(REG) -- i.tab
gc.Cgen_checknil(&nodr) // in case offset is huge
nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
if proc == 0 {
// plain call: use direct c function pointer - more efficient
cgen(&nodo, &nodr) // REG = 20+offset(REG) -- i.tab->fun[f]
nodr.Op = gc.OINDREG
proc = 3
} else {
// go/defer. generate go func value.
p := gins(arm.AMOVW, &nodo, &nodr)
p.From.Type = obj.TYPE_ADDR // REG = &(20+offset(REG)) -- i.tab->fun[f]
}
nodr.Type = n.Left.Type
ginscall(&nodr, proc)
regfree(&nodr)
regfree(&nodo)
}
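The offset n.Left.Xoffset + 3*Widthptr + 8 in cgen_callinter reaches the method table of the runtime's itab: three pointer-sized fields followed by two int32s, so fun starts at byte 20 on ARM (hence the "REG = 20+offset(REG) -- i.tab->fun[f]" comment) and at byte 32 on amd64. The runtime layout of that era was roughly this (a hedged reconstruction; field names from memory and may differ):

type interfacetype struct{} // stubs standing in for the runtime's types
type _type struct{}

type itab struct {
	inter  *interfacetype // +0
	_type  *_type         // +1 pointer
	link   *itab          // +2 pointers
	bad    int32          // +3 pointers
	unused int32          // +3 pointers + 4
	fun    [1]uintptr     // +3 pointers + 8: method pointers, indexed by f
}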
/*
* generate function call;
* proc=0 normal call
* proc=1 goroutine run in new proc
* proc=2 defer call save away stack
*/
func cgen_call(n *gc.Node, proc int) {
if n == nil {
return
}
var afun gc.Node
if n.Left.Ullman >= gc.UINF {
// if name involves a fn call
// precompute the address of the fn
gc.Tempname(&afun, gc.Types[gc.Tptr])
cgen(n.Left, &afun)
}
gc.Genlist(n.List) // assign the args
t := n.Left.Type
// call tempname pointer
if n.Left.Ullman >= gc.UINF {
var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, &afun)
nod.Type = t
ginscall(&nod, proc)
regfree(&nod)
return
}
// call pointer
if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, n.Left)
nod.Type = t
ginscall(&nod, proc)
regfree(&nod)
return
}
// call direct
n.Left.Method = 1
ginscall(n.Left, proc)
}
/*
* call to n has already been generated.
* generate:
* res = return value from call.
*/
func cgen_callret(n *gc.Node, res *gc.Node) {
t := n.Left.Type
if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
t = t.Type
}
var flist gc.Iter
fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_callret: nil")
}
var nod gc.Node
nod.Op = gc.OINDREG
nod.Val.U.Reg = arm.REGSP
nod.Addable = 1
nod.Xoffset = fp.Width + 4 // +4: saved lr at 0(SP)
nod.Type = fp.Type
gc.Cgen_as(res, &nod)
}
/*
* call to n has already been generated.
* generate:
* res = &return value from call.
*/
func cgen_aret(n *gc.Node, res *gc.Node) {
t := n.Left.Type
if gc.Isptr[t.Etype] {
t = t.Type
}
var flist gc.Iter
fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_aret: nil")
}
var nod1 gc.Node
nod1.Op = gc.OINDREG
nod1.Val.U.Reg = arm.REGSP
nod1.Addable = 1
nod1.Xoffset = fp.Width + 4 // +4: saved lr at 0(SP)
nod1.Type = fp.Type
if res.Op != gc.OREGISTER {
var nod2 gc.Node
regalloc(&nod2, gc.Types[gc.Tptr], res)
agen(&nod1, &nod2)
gins(arm.AMOVW, &nod2, res)
regfree(&nod2)
} else {
agen(&nod1, res)
}
}
/*
* generate return.
* n->left is assignments to return values.
*/
func cgen_ret(n *gc.Node) {
if n != nil {
gc.Genlist(n.List) // copy out args
}
if gc.Hasdefer != 0 {
ginscall(gc.Deferreturn, 0)
}
gc.Genlist(gc.Curfn.Exit)
p := gins(obj.ARET, nil, nil)
if n != nil && n.Op == gc.ORETJMP {
p.To.Name = obj.NAME_EXTERN
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(n.Left.Sym)
}
}
/*
* generate high multiply
* res = (nl * nr) >> wordsize
@@ -446,11 +128,11 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
t := nl.Type
w := int(t.Width * 8)
var n1 gc.Node
regalloc(&n1, t, res)
cgen(nl, &n1)
gc.Regalloc(&n1, t, res)
gc.Cgen(nl, &n1)
var n2 gc.Node
regalloc(&n2, t, nil)
cgen(nr, &n2)
gc.Regalloc(&n2, t, nil)
gc.Cgen(nr, &n2)
switch gc.Simtype[t.Etype] {
case gc.TINT8,
gc.TINT16:
@@ -483,9 +165,9 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
gc.Fatal("cgen_hmul %v", gc.Tconv(t, 0))
}
cgen(&n1, res)
regfree(&n1)
regfree(&n2)
gc.Cgen(&n1, res)
gc.Regfree(&n1)
gc.Regfree(&n2)
}
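cgen_hmul's contract is res = (nl*nr) >> wordsize: form the full double-width product and keep only the high half, which is what the MULL-family instructions deliver in one step. In plain Go, for the signed 32-bit case (a sketch):

// hmul32 returns the high 32 bits of the 64-bit product of x and y.
func hmul32(x, y int32) int32 {
	return int32((int64(x) * int64(y)) >> 32)
}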
/*
@@ -503,31 +185,31 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if op == gc.OLROT {
v := int(gc.Mpgetfix(nr.Val.U.Xval))
var n1 gc.Node
regalloc(&n1, nl.Type, res)
gc.Regalloc(&n1, nl.Type, res)
if w == 32 {
cgen(nl, &n1)
gc.Cgen(nl, &n1)
gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
} else {
var n2 gc.Node
regalloc(&n2, nl.Type, nil)
cgen(nl, &n2)
gc.Regalloc(&n2, nl.Type, nil)
gc.Cgen(nl, &n2)
gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
regfree(&n2)
gc.Regfree(&n2)
// Ensure sign/zero-extended result.
gins(optoas(gc.OAS, nl.Type), &n1, &n1)
}
gmove(&n1, res)
regfree(&n1)
gc.Regfree(&n1)
return
}
if nr.Op == gc.OLITERAL {
var n1 gc.Node
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
gc.Regalloc(&n1, nl.Type, res)
gc.Cgen(nl, &n1)
sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if sc == 0 {
} else // nothing to do
@@ -551,7 +233,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
gins(optoas(gc.OAS, nl.Type), &n1, &n1)
}
gmove(&n1, res)
regfree(&n1)
gc.Regfree(&n1)
return
}
@@ -564,21 +246,21 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var nt gc.Node
gc.Tempname(&nt, nr.Type)
if nl.Ullman >= nr.Ullman {
regalloc(&n2, nl.Type, res)
cgen(nl, &n2)
cgen(nr, &nt)
gc.Regalloc(&n2, nl.Type, res)
gc.Cgen(nl, &n2)
gc.Cgen(nr, &nt)
n1 = nt
} else {
cgen(nr, &nt)
regalloc(&n2, nl.Type, res)
cgen(nl, &n2)
gc.Cgen(nr, &nt)
gc.Regalloc(&n2, nl.Type, res)
gc.Cgen(nl, &n2)
}
var hi gc.Node
var lo gc.Node
split64(&nt, &lo, &hi)
regalloc(&n1, gc.Types[gc.TUINT32], nil)
regalloc(&n3, gc.Types[gc.TUINT32], nil)
gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
gmove(&lo, &n1)
gmove(&hi, &n3)
splitclean()
@@ -587,18 +269,18 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
p1 := gins(arm.AMOVW, &t, &n1)
p1.Scond = arm.C_SCOND_NE
tr = gc.Types[gc.TUINT32]
regfree(&n3)
gc.Regfree(&n3)
} else {
if nl.Ullman >= nr.Ullman {
regalloc(&n2, nl.Type, res)
cgen(nl, &n2)
regalloc(&n1, nr.Type, nil)
cgen(nr, &n1)
gc.Regalloc(&n2, nl.Type, res)
gc.Cgen(nl, &n2)
gc.Regalloc(&n1, nr.Type, nil)
gc.Cgen(nr, &n1)
} else {
regalloc(&n1, nr.Type, nil)
cgen(nr, &n1)
regalloc(&n2, nl.Type, res)
cgen(nl, &n2)
gc.Regalloc(&n1, nr.Type, nil)
gc.Cgen(nr, &n1)
gc.Regalloc(&n2, nl.Type, res)
gc.Cgen(nl, &n2)
}
}
@@ -609,11 +291,11 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// test and fix up large shifts
// TODO: if(!bounded), don't emit some of this.
regalloc(&n3, tr, nil)
gc.Regalloc(&n3, tr, nil)
gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
gmove(&t, &n3)
gcmp(arm.ACMP, &n1, &n3)
gins(arm.ACMP, &n1, &n3)
if op == gc.ORSH {
var p1 *obj.Prog
var p2 *obj.Prog
@@ -634,7 +316,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
p2.Scond = arm.C_SCOND_LO
}
regfree(&n3)
gc.Regfree(&n3)
gc.Patch(p3, gc.Pc)
@@ -644,8 +326,8 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
}
gmove(&n2, res)
regfree(&n1)
regfree(&n2)
gc.Regfree(&n1)
gc.Regfree(&n2)
}
func clearfat(nl *gc.Node) {
@@ -667,22 +349,22 @@ func clearfat(nl *gc.Node) {
var r0 gc.Node
r0.Op = gc.OREGISTER
r0.Val.U.Reg = REGALLOC_R0
r0.Val.U.Reg = arm.REG_R0
var r1 gc.Node
r1.Op = gc.OREGISTER
r1.Val.U.Reg = REGALLOC_R0 + 1
r1.Val.U.Reg = arm.REG_R1
var dst gc.Node
regalloc(&dst, gc.Types[gc.Tptr], &r1)
agen(nl, &dst)
gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
gc.Agen(nl, &dst)
var nc gc.Node
gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
var nz gc.Node
regalloc(&nz, gc.Types[gc.TUINT32], &r0)
cgen(&nc, &nz)
gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
gc.Cgen(&nc, &nz)
if q > 128 {
var end gc.Node
regalloc(&end, gc.Types[gc.Tptr], nil)
gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
p := gins(arm.AMOVW, &dst, &end)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = int64(q) * 4
@@ -697,7 +379,7 @@ func clearfat(nl *gc.Node) {
raddr(&end, p)
gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)
regfree(&end)
gc.Regfree(&end)
} else if q >= 4 && !gc.Nacl {
f := gc.Sysfunc("duffzero")
p := gins(obj.ADUFFZERO, nil, f)
@@ -729,8 +411,8 @@ func clearfat(nl *gc.Node) {
c--
}
regfree(&dst)
regfree(&nz)
gc.Regfree(&dst)
gc.Regfree(&nz)
}
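clearfat picks a strategy by size: an explicit store loop with a computed end pointer when q > 128 words, a jump into duffzero for mid-sized blocks, and straight-line stores for the remainder. duffzero itself is Duff's device, one long unrolled run of stores that the generated code enters partway through so that exactly the right number execute. A behavioral model, not the real assembly:

// duffzeroModel clears q 4-byte words starting at p. The real routine is
// fully unrolled; the entry offset chosen by the compiler selects q.
func duffzeroModel(p []uint32, q int) {
	for i := 0; i < q; i++ {
		p[i] = 0
	}
}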
// Called after regopt and peep have run.
@@ -775,3 +457,40 @@ func expandchecks(firstp *obj.Prog) {
p.Reg = int16(reg)
}
}
func ginsnop() {
var r gc.Node
gc.Nodreg(&r, gc.Types[gc.TINT], arm.REG_R0)
p := gins(arm.AAND, &r, &r)
p.Scond = arm.C_SCOND_EQ
}
/*
* generate
* as $c, n
*/
func ginscon(as int, c int64, n *gc.Node) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
var n2 gc.Node
gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
gmove(&n1, &n2)
gins(as, &n2, n)
gc.Regfree(&n2)
}
// addr += index*width if possible.
func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
switch width {
case 2:
gshift(arm.AADD, index, arm.SHIFT_LL, 1, addr)
return true
case 4:
gshift(arm.AADD, index, arm.SHIFT_LL, 2, addr)
return true
case 8:
gshift(arm.AADD, index, arm.SHIFT_LL, 3, addr)
return true
}
return false
}
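addindex on ARM folds the scaling into the add via the barrel shifter: a single ADD with a left-shifted index register covers widths 2, 4, and 8 (shifts 1, 2, 3); other widths report false and presumably fall back to a separate multiply. The computation, as a model:

// addIndexModel: what ADD index<<shift, addr computes in one ARM instruction,
// with shift = log2(width).
func addIndexModel(addr, index uint32, shift uint) uint32 {
	return addr + index<<shift
}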


@@ -43,185 +43,8 @@ import (
var unmappedzero int = 4096
var resvd = []int{
9, // reserved for m
10, // reserved for g
arm.REGSP, // reserved for SP
}
func ginit() {
for i := 0; i < len(reg); i++ {
reg[i] = 0
}
for i := 0; i < len(resvd); i++ {
reg[resvd[i]]++
}
}
func gclean() {
for i := 0; i < len(resvd); i++ {
reg[resvd[i]]--
}
for i := 0; i < len(reg); i++ {
if reg[i] != 0 {
gc.Yyerror("reg %v left allocated\n", obj.Rconv(i))
}
}
}
func anyregalloc() bool {
var j int
for i := 0; i < len(reg); i++ {
if reg[i] == 0 {
goto ok
}
for j = 0; j < len(resvd); j++ {
if resvd[j] == i {
goto ok
}
}
return true
ok:
}
return false
}
var regpc [REGALLOC_FMAX + 1]uint32
/*
* allocate register of type t, leave in n.
* if o != N, o is desired fixed register.
* caller must regfree(n).
*/
func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
if false && gc.Debug['r'] != 0 {
fixfree := 0
for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
if reg[i] == 0 {
fixfree++
}
}
floatfree := 0
for i := REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
if reg[i] == 0 {
floatfree++
}
}
fmt.Printf("regalloc fix %d float %d\n", fixfree, floatfree)
}
if t == nil {
gc.Fatal("regalloc: t nil")
}
et := int(gc.Simtype[t.Etype])
if gc.Is64(t) {
gc.Fatal("regalloc: 64 bit type %v", gc.Tconv(t, 0))
}
var i int
switch et {
case gc.TINT8,
gc.TUINT8,
gc.TINT16,
gc.TUINT16,
gc.TINT32,
gc.TUINT32,
gc.TPTR32,
gc.TBOOL:
if o != nil && o.Op == gc.OREGISTER {
i = int(o.Val.U.Reg)
if i >= REGALLOC_R0 && i <= REGALLOC_RMAX {
goto out
}
}
for i = REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
if reg[i] == 0 {
regpc[i] = uint32(obj.Getcallerpc(&n))
goto out
}
}
fmt.Printf("registers allocated at\n")
for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
fmt.Printf("%d %p\n", i, regpc[i])
}
gc.Fatal("out of fixed registers")
goto err
case gc.TFLOAT32,
gc.TFLOAT64:
if o != nil && o.Op == gc.OREGISTER {
i = int(o.Val.U.Reg)
if i >= REGALLOC_F0 && i <= REGALLOC_FMAX {
goto out
}
}
for i = REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
if reg[i] == 0 {
goto out
}
}
gc.Fatal("out of floating point registers")
goto err
case gc.TCOMPLEX64,
gc.TCOMPLEX128:
gc.Tempname(n, t)
return
}
gc.Yyerror("regalloc: unknown type %v", gc.Tconv(t, 0))
err:
gc.Nodreg(n, t, arm.REG_R0)
return
out:
reg[i]++
gc.Nodreg(n, t, i)
}
func regfree(n *gc.Node) {
if false && gc.Debug['r'] != 0 {
fixfree := 0
for i := REGALLOC_R0; i <= REGALLOC_RMAX; i++ {
if reg[i] == 0 {
fixfree++
}
}
floatfree := 0
for i := REGALLOC_F0; i <= REGALLOC_FMAX; i++ {
if reg[i] == 0 {
floatfree++
}
}
fmt.Printf("regalloc fix %d float %d\n", fixfree, floatfree)
}
if n.Op == gc.ONAME {
return
}
if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
gc.Fatal("regfree: not a register")
}
i := int(n.Val.U.Reg)
if i == arm.REGSP {
return
}
if i < 0 || i >= len(reg) || i >= len(regpc) {
gc.Fatal("regfree: reg out of range")
}
if reg[i] <= 0 {
gc.Fatal("regfree: reg %v not allocated", obj.Rconv(i))
}
reg[i]--
if reg[i] == 0 {
regpc[i] = 0
}
arm.REG_R9, // formerly reserved for m; might be okay to reuse now; not sure about NaCl
arm.REG_R10, // reserved for g
}
/*
@@ -262,7 +85,7 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
default:
var n1 gc.Node
if !dotaddable(n, &n1) {
igen(n, &n1, nil)
gc.Igen(n, &n1, nil)
sclean[nsclean-1] = n1
}
@@ -271,7 +94,7 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
case gc.ONAME:
if n.Class == gc.PPARAMREF {
var n1 gc.Node
cgen(n.Heapaddr, &n1)
gc.Cgen(n.Heapaddr, &n1)
sclean[nsclean-1] = n1
n = &n1
}
@@ -311,7 +134,7 @@ func splitclean() {
}
nsclean--
if sclean[nsclean].Op != gc.OEMPTY {
regfree(&sclean[nsclean])
gc.Regfree(&sclean[nsclean])
}
}
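split64 and splitclean bracket access to a 64-bit operand as two 32-bit halves; on little-endian ARM the low word lives at offset 0 and the high word at offset 4 of the same slot. In value terms (a sketch):

// split64Model shows the decomposition split64 arranges in memory.
func split64Model(v uint64) (lo, hi uint32) {
	return uint32(v), uint32(v >> 32)
}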
@@ -349,10 +172,10 @@ func gmove(f *gc.Node, t *gc.Node) {
var con gc.Node
gc.Convconst(&con, gc.Types[gc.TINT32], &f.Val)
var r1 gc.Node
regalloc(&r1, con.Type, t)
gc.Regalloc(&r1, con.Type, t)
gins(arm.AMOVW, &con, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
case gc.TUINT16,
@@ -360,10 +183,10 @@ func gmove(f *gc.Node, t *gc.Node) {
var con gc.Node
gc.Convconst(&con, gc.Types[gc.TUINT32], &f.Val)
var r1 gc.Node
regalloc(&r1, con.Type, t)
gc.Regalloc(&r1, con.Type, t)
gins(arm.AMOVW, &con, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
}
@@ -481,10 +304,10 @@ func gmove(f *gc.Node, t *gc.Node) {
split64(f, &flo, &fhi)
var r1 gc.Node
regalloc(&r1, t.Type, nil)
gc.Regalloc(&r1, t.Type, nil)
gins(arm.AMOVW, &flo, &r1)
gins(arm.AMOVW, &r1, t)
regfree(&r1)
gc.Regfree(&r1)
splitclean()
return
@@ -500,15 +323,15 @@ func gmove(f *gc.Node, t *gc.Node) {
var thi gc.Node
split64(t, &tlo, &thi)
var r1 gc.Node
regalloc(&r1, flo.Type, nil)
gc.Regalloc(&r1, flo.Type, nil)
var r2 gc.Node
regalloc(&r2, fhi.Type, nil)
gc.Regalloc(&r2, fhi.Type, nil)
gins(arm.AMOVW, &flo, &r1)
gins(arm.AMOVW, &fhi, &r2)
gins(arm.AMOVW, &r1, &tlo)
gins(arm.AMOVW, &r2, &thi)
regfree(&r1)
regfree(&r2)
gc.Regfree(&r1)
gc.Regfree(&r2)
splitclean()
splitclean()
return
@@ -575,9 +398,9 @@ func gmove(f *gc.Node, t *gc.Node) {
split64(t, &tlo, &thi)
var r1 gc.Node
regalloc(&r1, tlo.Type, nil)
gc.Regalloc(&r1, tlo.Type, nil)
var r2 gc.Node
regalloc(&r2, thi.Type, nil)
gc.Regalloc(&r2, thi.Type, nil)
gmove(f, &r1)
p1 := gins(arm.AMOVW, &r1, &r2)
p1.From.Type = obj.TYPE_SHIFT
@@ -588,8 +411,8 @@ func gmove(f *gc.Node, t *gc.Node) {
gins(arm.AMOVW, &r1, &tlo)
gins(arm.AMOVW, &r2, &thi)
regfree(&r1)
regfree(&r2)
gc.Regfree(&r1)
gc.Regfree(&r2)
splitclean()
return
@@ -601,10 +424,10 @@ func gmove(f *gc.Node, t *gc.Node) {
gmove(f, &tlo)
var r1 gc.Node
regalloc(&r1, thi.Type, nil)
gc.Regalloc(&r1, thi.Type, nil)
gins(arm.AMOVW, ncon(0), &r1)
gins(arm.AMOVW, &r1, &thi)
regfree(&r1)
gc.Regfree(&r1)
splitclean()
return
@@ -651,9 +474,9 @@ func gmove(f *gc.Node, t *gc.Node) {
}
var r1 gc.Node
regalloc(&r1, gc.Types[ft], f)
gc.Regalloc(&r1, gc.Types[ft], f)
var r2 gc.Node
regalloc(&r2, gc.Types[tt], t)
gc.Regalloc(&r2, gc.Types[tt], t)
gins(fa, f, &r1) // load to fpu
p1 := gins(a, &r1, &r1) // convert to w
switch tt {
@ -665,8 +488,8 @@ func gmove(f *gc.Node, t *gc.Node) {
gins(arm.AMOVW, &r1, &r2) // copy to cpu
gins(ta, &r2, t) // store
regfree(&r1)
regfree(&r2)
gc.Regfree(&r1)
gc.Regfree(&r2)
return
/*
@@ -708,9 +531,9 @@ func gmove(f *gc.Node, t *gc.Node) {
}
var r1 gc.Node
regalloc(&r1, gc.Types[ft], f)
gc.Regalloc(&r1, gc.Types[ft], f)
var r2 gc.Node
regalloc(&r2, gc.Types[tt], t)
gc.Regalloc(&r2, gc.Types[tt], t)
gins(fa, f, &r1) // load to cpu
gins(arm.AMOVW, &r1, &r2) // copy to fpu
p1 := gins(a, &r2, &r2) // convert
@@ -722,8 +545,8 @@ func gmove(f *gc.Node, t *gc.Node) {
}
gins(ta, &r2, t) // store
regfree(&r1)
regfree(&r2)
gc.Regfree(&r1)
gc.Regfree(&r2)
return
case gc.TUINT64<<16 | gc.TFLOAT32,
@ -742,20 +565,20 @@ func gmove(f *gc.Node, t *gc.Node) {
case gc.TFLOAT32<<16 | gc.TFLOAT64:
var r1 gc.Node
regalloc(&r1, gc.Types[gc.TFLOAT64], t)
gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
gins(arm.AMOVF, f, &r1)
gins(arm.AMOVFD, &r1, &r1)
gins(arm.AMOVD, &r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
case gc.TFLOAT64<<16 | gc.TFLOAT32:
var r1 gc.Node
regalloc(&r1, gc.Types[gc.TFLOAT64], t)
gc.Regalloc(&r1, gc.Types[gc.TFLOAT64], t)
gins(arm.AMOVD, f, &r1)
gins(arm.AMOVDF, &r1, &r1)
gins(arm.AMOVF, &r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
}
@@ -767,21 +590,21 @@ func gmove(f *gc.Node, t *gc.Node) {
// requires register destination
rdst:
{
regalloc(&r1, t.Type, t)
gc.Regalloc(&r1, t.Type, t)
gins(a, f, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
}
// requires register intermediate
hard:
regalloc(&r1, cvt, t)
gc.Regalloc(&r1, cvt, t)
gmove(f, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
// truncate 64 bit integer
@@ -790,10 +613,10 @@ trunc64:
var flo gc.Node
split64(f, &flo, &fhi)
regalloc(&r1, t.Type, nil)
gc.Regalloc(&r1, t.Type, nil)
gins(a, &flo, &r1)
gins(a, &r1, t)
regfree(&r1)
gc.Regfree(&r1)
splitclean()
return
}
@@ -826,27 +649,67 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
gc.Fatal("gins OINDEX not implemented")
}
// regalloc(&nod, &regnode, Z);
// gc.Regalloc(&nod, &regnode, Z);
// v = constnode.vconst;
// cgen(f->right, &nod);
// gc.Cgen(f->right, &nod);
// constnode.vconst = v;
// idx.reg = nod.reg;
// regfree(&nod);
// gc.Regfree(&nod);
if t != nil && t.Op == gc.OINDEX {
gc.Fatal("gins OINDEX not implemented")
}
// regalloc(&nod, &regnode, Z);
// gc.Regalloc(&nod, &regnode, Z);
// v = constnode.vconst;
// cgen(t->right, &nod);
// gc.Cgen(t->right, &nod);
// constnode.vconst = v;
// idx.reg = nod.reg;
// regfree(&nod);
// gc.Regfree(&nod);
p := gc.Prog(as)
gc.Naddr(&p.From, f)
gc.Naddr(&p.To, t)
switch as {
case arm.ABL:
if p.To.Type == obj.TYPE_REG {
p.To.Type = obj.TYPE_MEM
}
case arm.ACMP, arm.ACMPF, arm.ACMPD:
if t != nil {
if f.Op != gc.OREGISTER {
/* generate a comparison
TODO(kaib): one of the args can actually be a small constant. relax the constraint and fix call sites.
*/
gc.Fatal("bad operands to gcmp")
}
p.From = p.To
p.To = obj.Addr{}
raddr(f, p)
}
case arm.AMULU:
if f != nil && f.Op != gc.OREGISTER {
gc.Fatal("bad operands to mul")
}
case arm.AMOVW:
if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR || p.From.Type == obj.TYPE_CONST) && (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) {
gc.Fatal("gins double memory")
}
case arm.AADD:
if p.To.Type == obj.TYPE_MEM {
gc.Fatal("gins arith to mem")
}
case arm.ARSB:
if p.From.Type == obj.TYPE_NONE {
gc.Fatal("rsb with no from")
}
}
if gc.Debug['g'] != 0 {
fmt.Printf("%v\n", p)
}
@@ -871,19 +734,6 @@ func raddr(n *gc.Node, p *obj.Prog) {
}
}
/* generate a comparison
TODO(kaib): one of the args can actually be a small constant. relax the constraint and fix call sites.
*/
func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
if lhs.Op != gc.OREGISTER {
gc.Fatal("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
}
p := gins(as, rhs, nil)
raddr(lhs, p)
return p
}
/* generate a constant shift
* arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal.
*/
@@ -1033,6 +883,10 @@ func optoas(op int, t *gc.Type) int {
case gc.OCMP<<16 | gc.TFLOAT64:
a = arm.ACMPD
case gc.OPS<<16 | gc.TFLOAT32,
gc.OPS<<16 | gc.TFLOAT64:
a = arm.ABVS
case gc.OAS<<16 | gc.TBOOL:
a = arm.AMOVB
@@ -1217,10 +1071,10 @@ var cleani int = 0
func sudoclean() {
if clean[cleani-1].Op != gc.OEMPTY {
regfree(&clean[cleani-1])
gc.Regfree(&clean[cleani-1])
}
if clean[cleani-2].Op != gc.OEMPTY {
regfree(&clean[cleani-2])
gc.Regfree(&clean[cleani-2])
}
cleani -= 2
}
@@ -1254,7 +1108,7 @@ func dotaddable(n *gc.Node, n1 *gc.Node) bool {
* after successful sudoaddable,
* to release the register used for a.
*/
func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) bool {
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
if n.Type == nil {
return false
}
@@ -1322,14 +1176,14 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) bool {
return true
}
regalloc(reg, gc.Types[gc.Tptr], nil)
gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
n1 := *reg
n1.Op = gc.OINDREG
if oary[0] >= 0 {
agen(nn, reg)
gc.Agen(nn, reg)
n1.Xoffset = oary[0]
} else {
cgen(nn, reg)
gc.Cgen(nn, reg)
gc.Cgen_checknil(reg)
n1.Xoffset = -(oary[0] + 1)
}


@@ -1330,10 +1330,10 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
// R1 is ptr to memory, used and set, cannot be substituted.
case obj.ADUFFZERO:
if v.Type == obj.TYPE_REG {
if v.Reg == REGALLOC_R0 {
if v.Reg == arm.REG_R0 {
return 1
}
if v.Reg == REGALLOC_R0+1 {
if v.Reg == arm.REG_R0+1 {
return 2
}
}
@@ -1344,10 +1344,10 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
// R1, R2 are ptr to src, dst, used and set, cannot be substituted.
case obj.ADUFFCOPY:
if v.Type == obj.TYPE_REG {
if v.Reg == REGALLOC_R0 {
if v.Reg == arm.REG_R0 {
return 3
}
if v.Reg == REGALLOC_R0+1 || v.Reg == REGALLOC_R0+2 {
if v.Reg == arm.REG_R0+1 || v.Reg == arm.REG_R0+2 {
return 2
}
}

File diff suppressed because it is too large.

@@ -64,39 +64,52 @@ func betypeinit() {
}
func main() {
if obj.Getgoos() == "nacl" {
resvd = append(resvd, x86.REG_BP, x86.REG_SI)
} else if obj.Framepointer_enabled != 0 {
resvd = append(resvd, x86.REG_BP)
}
gc.Thearch.Thechar = thechar
gc.Thearch.Thestring = thestring
gc.Thearch.Thelinkarch = thelinkarch
gc.Thearch.Typedefs = typedefs
gc.Thearch.REGSP = x86.REGSP
gc.Thearch.REGCTXT = x86.REGCTXT
gc.Thearch.REGCALLX = x86.REG_BX
gc.Thearch.REGCALLX2 = x86.REG_AX
gc.Thearch.REGRETURN = x86.REG_AX
gc.Thearch.REGMIN = x86.REG_AX
gc.Thearch.REGMAX = x86.REG_R15
gc.Thearch.FREGMIN = x86.REG_X0
gc.Thearch.FREGMAX = x86.REG_X15
gc.Thearch.MAXWIDTH = MAXWIDTH
gc.Thearch.Anyregalloc = anyregalloc
gc.Thearch.ReservedRegs = resvd
gc.Thearch.AddIndex = addindex
gc.Thearch.Betypeinit = betypeinit
gc.Thearch.Bgen = bgen
gc.Thearch.Cgen = cgen
gc.Thearch.Cgen_call = cgen_call
gc.Thearch.Cgen_callinter = cgen_callinter
gc.Thearch.Cgen_ret = cgen_ret
gc.Thearch.Cgen_bmul = cgen_bmul
gc.Thearch.Cgen_hmul = cgen_hmul
gc.Thearch.Cgen_shift = cgen_shift
gc.Thearch.Clearfat = clearfat
gc.Thearch.Defframe = defframe
gc.Thearch.Dodiv = dodiv
gc.Thearch.Excise = excise
gc.Thearch.Expandchecks = expandchecks
gc.Thearch.Gclean = gclean
gc.Thearch.Ginit = ginit
gc.Thearch.Gins = gins
gc.Thearch.Ginscall = ginscall
gc.Thearch.Ginscon = ginscon
gc.Thearch.Ginsnop = ginsnop
gc.Thearch.Gmove = gmove
gc.Thearch.Igen = igen
gc.Thearch.Linkarchinit = linkarchinit
gc.Thearch.Peep = peep
gc.Thearch.Proginfo = proginfo
gc.Thearch.Regalloc = regalloc
gc.Thearch.Regfree = regfree
gc.Thearch.Regtyp = regtyp
gc.Thearch.Sameaddr = sameaddr
gc.Thearch.Smallindir = smallindir
gc.Thearch.Stackaddr = stackaddr
gc.Thearch.Stackcopy = stackcopy
gc.Thearch.Sudoaddable = sudoaddable
gc.Thearch.Sudoclean = sudoclean
gc.Thearch.Excludedregs = excludedregs
gc.Thearch.RtoB = RtoB
gc.Thearch.FtoB = FtoB


@@ -116,326 +116,6 @@ func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int
return q
}
/*
* generate:
* call f
* proc=-1 normal call but no return
* proc=0 normal call
* proc=1 goroutine run in new proc
* proc=2 defer call save away stack
* proc=3 normal call to C pointer (not Go func value)
*/
func ginscall(f *gc.Node, proc int) {
if f.Type != nil {
extra := int32(0)
if proc == 1 || proc == 2 {
extra = 2 * int32(gc.Widthptr)
}
gc.Setmaxarg(f.Type, extra)
}
switch proc {
default:
gc.Fatal("ginscall: bad proc %d", proc)
case 0, // normal call
-1: // normal call but no return
if f.Op == gc.ONAME && f.Class == gc.PFUNC {
if f == gc.Deferreturn {
// Deferred calls will appear to be returning to
// the CALL deferreturn(SB) that we are about to emit.
// However, the stack trace code will show the line
// of the instruction byte before the return PC.
// To avoid that being an unrelated instruction,
// insert an x86 NOP so that we will have the right line number.
// x86 NOP 0x90 is really XCHG AX, AX; use that description
// because the NOP pseudo-instruction would be removed by
// the linker.
var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
gins(x86.AXCHGL, &reg, &reg)
}
p := gins(obj.ACALL, nil, f)
gc.Afunclit(&p.To, f)
if proc == -1 || gc.Noreturn(p) {
gins(obj.AUNDEF, nil, nil)
}
break
}
var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.Tptr], x86.REG_DX)
var r1 gc.Node
gc.Nodreg(&r1, gc.Types[gc.Tptr], x86.REG_BX)
gmove(f, &reg)
reg.Op = gc.OINDREG
gmove(&reg, &r1)
reg.Op = gc.OREGISTER
gins(obj.ACALL, &reg, &r1)
case 3: // normal call of c function pointer
gins(obj.ACALL, nil, f)
case 1, // call in new proc (go)
2: // deferred call (defer)
var stk gc.Node
stk.Op = gc.OINDREG
stk.Val.U.Reg = x86.REG_SP
stk.Xoffset = 0
var reg gc.Node
if gc.Widthptr == 8 {
// size of arguments at 0(SP)
ginscon(x86.AMOVQ, int64(gc.Argsize(f.Type)), &stk)
// FuncVal* at 8(SP)
stk.Xoffset = int64(gc.Widthptr)
gc.Nodreg(&reg, gc.Types[gc.TINT64], x86.REG_AX)
gmove(f, &reg)
gins(x86.AMOVQ, &reg, &stk)
} else {
// size of arguments at 0(SP)
ginscon(x86.AMOVL, int64(gc.Argsize(f.Type)), &stk)
// FuncVal* at 4(SP)
stk.Xoffset = int64(gc.Widthptr)
gc.Nodreg(&reg, gc.Types[gc.TINT32], x86.REG_AX)
gmove(f, &reg)
gins(x86.AMOVL, &reg, &stk)
}
if proc == 1 {
ginscall(gc.Newproc, 0)
} else {
if gc.Hasdefer == 0 {
gc.Fatal("hasdefer=0 but has defer")
}
ginscall(gc.Deferproc, 0)
}
if proc == 2 {
gc.Nodreg(&reg, gc.Types[gc.TINT32], x86.REG_AX)
gins(x86.ATESTL, &reg, &reg)
p := gc.Gbranch(x86.AJEQ, nil, +1)
cgen_ret(nil)
gc.Patch(p, gc.Pc)
}
}
}
/*
* n is call to interface method.
* generate res = n.
*/
func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
i := n.Left
if i.Op != gc.ODOTINTER {
gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
}
f := i.Right // field
if f.Op != gc.ONAME {
gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
}
i = i.Left // interface
if i.Addable == 0 {
var tmpi gc.Node
gc.Tempname(&tmpi, i.Type)
cgen(i, &tmpi)
i = &tmpi
}
gc.Genlist(n.List) // assign the args
// i is now addable, prepare an indirected
// register to hold its address.
var nodi gc.Node
igen(i, &nodi, res) // REG = &inter
var nodsp gc.Node
gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], x86.REG_SP)
nodsp.Xoffset = 0
if proc != 0 {
nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
}
nodi.Type = gc.Types[gc.Tptr]
nodi.Xoffset += int64(gc.Widthptr)
cgen(&nodi, &nodsp) // {0, 8(nacl), or 16}(SP) = 8(REG) -- i.data
var nodo gc.Node
regalloc(&nodo, gc.Types[gc.Tptr], res)
nodi.Type = gc.Types[gc.Tptr]
nodi.Xoffset -= int64(gc.Widthptr)
cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
regfree(&nodi)
var nodr gc.Node
regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
if n.Left.Xoffset == gc.BADWIDTH {
gc.Fatal("cgen_callinter: badwidth")
}
gc.Cgen_checknil(&nodo) // in case offset is huge
nodo.Op = gc.OINDREG
nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
if proc == 0 {
// plain call: use direct c function pointer - more efficient
cgen(&nodo, &nodr) // REG = 32+offset(REG) -- i.tab->fun[f]
proc = 3
} else {
// go/defer. generate go func value.
gins(x86.ALEAQ, &nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
}
nodr.Type = n.Left.Type
ginscall(&nodr, proc)
regfree(&nodr)
regfree(&nodo)
}
/*
* generate function call;
* proc=0 normal call
* proc=1 goroutine run in new proc
* proc=2 defer call save away stack
*/
func cgen_call(n *gc.Node, proc int) {
if n == nil {
return
}
var afun gc.Node
if n.Left.Ullman >= gc.UINF {
// if name involves a fn call
// precompute the address of the fn
gc.Tempname(&afun, gc.Types[gc.Tptr])
cgen(n.Left, &afun)
}
gc.Genlist(n.List) // assign the args
t := n.Left.Type
// call tempname pointer
if n.Left.Ullman >= gc.UINF {
var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, &afun)
nod.Type = t
ginscall(&nod, proc)
regfree(&nod)
return
}
// call pointer
if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, n.Left)
nod.Type = t
ginscall(&nod, proc)
regfree(&nod)
return
}
// call direct
n.Left.Method = 1
ginscall(n.Left, proc)
}
/*
* call to n has already been generated.
* generate:
* res = return value from call.
*/
func cgen_callret(n *gc.Node, res *gc.Node) {
t := n.Left.Type
if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
t = t.Type
}
var flist gc.Iter
fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_callret: nil")
}
var nod gc.Node
nod.Op = gc.OINDREG
nod.Val.U.Reg = x86.REG_SP
nod.Addable = 1
nod.Xoffset = fp.Width
nod.Type = fp.Type
gc.Cgen_as(res, &nod)
}
/*
* call to n has already been generated.
* generate:
* res = &return value from call.
*/
func cgen_aret(n *gc.Node, res *gc.Node) {
t := n.Left.Type
if gc.Isptr[t.Etype] {
t = t.Type
}
var flist gc.Iter
fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_aret: nil")
}
var nod1 gc.Node
nod1.Op = gc.OINDREG
nod1.Val.U.Reg = x86.REG_SP
nod1.Addable = 1
nod1.Xoffset = fp.Width
nod1.Type = fp.Type
if res.Op != gc.OREGISTER {
var nod2 gc.Node
regalloc(&nod2, gc.Types[gc.Tptr], res)
gins(leaptr, &nod1, &nod2)
gins(movptr, &nod2, res)
regfree(&nod2)
} else {
gins(leaptr, &nod1, res)
}
}
/*
* generate return.
* n->left is assignments to return values.
*/
func cgen_ret(n *gc.Node) {
if n != nil {
gc.Genlist(n.List) // copy out args
}
if gc.Hasdefer != 0 {
ginscall(gc.Deferreturn, 0)
}
gc.Genlist(gc.Curfn.Exit)
p := gins(obj.ARET, nil, nil)
if n != nil && n.Op == gc.ORETJMP {
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Linksym(n.Left.Sym)
}
}
/*
* generate division.
* generates one of:
@@ -477,19 +157,19 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
a := optoas(op, t)
var n3 gc.Node
regalloc(&n3, t0, nil)
gc.Regalloc(&n3, t0, nil)
var ax gc.Node
var oldax gc.Node
if nl.Ullman >= nr.Ullman {
savex(x86.REG_AX, &ax, &oldax, res, t0)
cgen(nl, &ax)
regalloc(&ax, t0, &ax) // mark ax live during cgen
cgen(nr, &n3)
regfree(&ax)
gc.Cgen(nl, &ax)
gc.Regalloc(&ax, t0, &ax) // mark ax live during cgen
gc.Cgen(nr, &n3)
gc.Regfree(&ax)
} else {
cgen(nr, &n3)
gc.Cgen(nr, &n3)
savex(x86.REG_AX, &ax, &oldax, res, t0)
cgen(nl, &ax)
gc.Cgen(nl, &ax)
}
if t != t0 {
@@ -515,7 +195,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if panicdiv == nil {
panicdiv = gc.Sysfunc("panicdivide")
}
ginscall(panicdiv, -1)
gc.Ginscall(panicdiv, -1)
gc.Patch(p1, gc.Pc)
}
@@ -550,7 +230,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
gins(optoas(gc.OEXTEND, t), nil, nil)
}
gins(a, &n3, nil)
regfree(&n3)
gc.Regfree(&n3)
if op == gc.ODIV {
gmove(&ax, res)
} else {
@@ -582,7 +262,7 @@ func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
gc.Nodreg(x, t, dr)
if r > 1 && !gc.Samereg(x, res) {
regalloc(oldx, gc.Types[gc.TINT64], nil)
gc.Regalloc(oldx, gc.Types[gc.TINT64], nil)
x.Type = gc.Types[gc.TINT64]
gmove(x, oldx)
x.Type = t
@@ -596,153 +276,10 @@ func restx(x *gc.Node, oldx *gc.Node) {
x.Type = gc.Types[gc.TINT64]
reg[x.Val.U.Reg] = uint8(oldx.Ostk)
gmove(oldx, x)
regfree(oldx)
gc.Regfree(oldx)
}
}
/*
* generate division according to op, one of:
* res = nl / nr
* res = nl % nr
*/
func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var w int
if nr.Op != gc.OLITERAL {
goto longdiv
}
w = int(nl.Type.Width * 8)
// Front end handled 32-bit division. We only need to handle 64-bit.
// try to do division by multiply by (2^w)/d
// see hacker's delight chapter 10
switch gc.Simtype[nl.Type.Etype] {
default:
goto longdiv
case gc.TUINT64:
var m gc.Magic
m.W = w
m.Ud = uint64(gc.Mpgetfix(nr.Val.U.Xval))
gc.Umagic(&m)
if m.Bad != 0 {
break
}
if op == gc.OMOD {
goto longmod
}
var n1 gc.Node
cgenr(nl, &n1, nil)
var n2 gc.Node
gc.Nodconst(&n2, nl.Type, int64(m.Um))
var n3 gc.Node
regalloc(&n3, nl.Type, res)
cgen_hmul(&n1, &n2, &n3)
if m.Ua != 0 {
// need to add numerator accounting for overflow
gins(optoas(gc.OADD, nl.Type), &n1, &n3)
gc.Nodconst(&n2, nl.Type, 1)
gins(optoas(gc.ORROTC, nl.Type), &n2, &n3)
gc.Nodconst(&n2, nl.Type, int64(m.S)-1)
gins(optoas(gc.ORSH, nl.Type), &n2, &n3)
} else {
gc.Nodconst(&n2, nl.Type, int64(m.S))
gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift dx
}
gmove(&n3, res)
regfree(&n1)
regfree(&n3)
return
case gc.TINT64:
var m gc.Magic
m.W = w
m.Sd = gc.Mpgetfix(nr.Val.U.Xval)
gc.Smagic(&m)
if m.Bad != 0 {
break
}
if op == gc.OMOD {
goto longmod
}
var n1 gc.Node
cgenr(nl, &n1, res)
var n2 gc.Node
gc.Nodconst(&n2, nl.Type, m.Sm)
var n3 gc.Node
regalloc(&n3, nl.Type, nil)
cgen_hmul(&n1, &n2, &n3)
if m.Sm < 0 {
// need to add numerator
gins(optoas(gc.OADD, nl.Type), &n1, &n3)
}
gc.Nodconst(&n2, nl.Type, int64(m.S))
gins(optoas(gc.ORSH, nl.Type), &n2, &n3) // shift n3
gc.Nodconst(&n2, nl.Type, int64(w)-1)
gins(optoas(gc.ORSH, nl.Type), &n2, &n1) // -1 iff num is neg
gins(optoas(gc.OSUB, nl.Type), &n1, &n3) // added
if m.Sd < 0 {
// this could probably be removed
// by factoring it into the multiplier
gins(optoas(gc.OMINUS, nl.Type), nil, &n3)
}
gmove(&n3, res)
regfree(&n1)
regfree(&n3)
return
}
goto longdiv
// division and mod using (slow) hardware instruction
longdiv:
dodiv(op, nl, nr, res)
return
// mod using formula A%B = A-(A/B*B) but
// we know that there is a fast algorithm for A/B
longmod:
var n1 gc.Node
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
var n2 gc.Node
regalloc(&n2, nl.Type, nil)
cgen_div(gc.ODIV, &n1, nr, &n2)
a := optoas(gc.OMUL, nl.Type)
if w == 8 {
// use 2-operand 16-bit multiply
// because there is no 2-operand 8-bit multiply
a = x86.AIMULW
}
if !gc.Smallintconst(nr) {
var n3 gc.Node
regalloc(&n3, nl.Type, nil)
cgen(nr, &n3)
gins(a, &n3, &n2)
regfree(&n3)
} else {
gins(a, nr, &n2)
}
gins(optoas(gc.OSUB, nl.Type), &n2, &n1)
gmove(&n1, res)
regfree(&n1)
regfree(&n2)
}
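The deleted cgen_div is the reciprocal-multiplication trick from Hacker's Delight chapter 10: for a constant divisor d, precompute a magic M and shift s with x/d == (x*M)>>s for all x, replacing the slow DIV with a multiply-high and a shift; the longmod path then derives the remainder from A%B = A - (A/B)*B. A concrete instance for uint32 division by 3, where M = (1<<33 + 1)/3 = 0xAAAAAAAB and s = 33:

// div3 computes x/3 exactly for every uint32 via multiply-high.
func div3(x uint32) uint32 {
	return uint32((uint64(x) * 0xAAAAAAAB) >> 33)
}

// mod3 uses the same identity the longmod path relies on.
func mod3(x uint32) uint32 {
	return x - div3(x)*3
}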
/*
* generate high multiply:
* res = (nl*nr) >> width
@@ -757,15 +294,15 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
}
var n1 gc.Node
cgenr(nl, &n1, res)
gc.Cgenr(nl, &n1, res)
var n2 gc.Node
cgenr(nr, &n2, nil)
gc.Cgenr(nr, &n2, nil)
var ax gc.Node
gc.Nodreg(&ax, t, x86.REG_AX)
gmove(&n1, &ax)
gins(a, &n2, nil)
regfree(&n2)
regfree(&n1)
gc.Regfree(&n2)
gc.Regfree(&n1)
var dx gc.Node
if t.Width == 1 {
@@ -790,8 +327,8 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nr.Op == gc.OLITERAL {
var n1 gc.Node
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
gc.Regalloc(&n1, nl.Type, res)
gc.Cgen(nl, &n1)
sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if sc >= uint64(nl.Type.Width*8) {
// large shift gets 2 shifts by width-1
@@ -804,21 +341,21 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
gins(a, nr, &n1)
}
gmove(&n1, res)
regfree(&n1)
gc.Regfree(&n1)
return
}
if nl.Ullman >= gc.UINF {
var n4 gc.Node
gc.Tempname(&n4, nl.Type)
cgen(nl, &n4)
gc.Cgen(nl, &n4)
nl = &n4
}
if nr.Ullman >= gc.UINF {
var n5 gc.Node
gc.Tempname(&n5, nr.Type)
cgen(nr, &n5)
gc.Cgen(nr, &n5)
nr = &n5
}
@@ -835,16 +372,16 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
tcount = gc.Types[gc.TUINT32]
}
regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
var n3 gc.Node
regalloc(&n3, tcount, &n1) // to clear high bits of CX
gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX
var cx gc.Node
gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)
var oldcx gc.Node
if rcx > 0 && !gc.Samereg(&cx, res) {
regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
gc.Regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
gmove(&cx, &oldcx)
}
@@ -852,21 +389,21 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var n2 gc.Node
if gc.Samereg(&cx, res) {
regalloc(&n2, nl.Type, nil)
gc.Regalloc(&n2, nl.Type, nil)
} else {
regalloc(&n2, nl.Type, res)
gc.Regalloc(&n2, nl.Type, res)
}
if nl.Ullman >= nr.Ullman {
cgen(nl, &n2)
cgen(nr, &n1)
gc.Cgen(nl, &n2)
gc.Cgen(nr, &n1)
gmove(&n1, &n3)
} else {
cgen(nr, &n1)
gc.Cgen(nr, &n1)
gmove(&n1, &n3)
cgen(nl, &n2)
gc.Cgen(nl, &n2)
}
regfree(&n3)
gc.Regfree(&n3)
// test and fix up large shifts
if !bounded {
@@ -889,13 +426,13 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if oldcx.Op != 0 {
cx.Type = gc.Types[gc.TUINT64]
gmove(&oldcx, &cx)
regfree(&oldcx)
gc.Regfree(&oldcx)
}
gmove(&n2, res)
regfree(&n1)
regfree(&n2)
gc.Regfree(&n1)
gc.Regfree(&n2)
}
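The "test and fix up large shifts" block compensates for x86 semantics: the hardware masks the shift count (mod 64 for 64-bit operands, mod 32 otherwise), while Go demands saturation, zero for an over-wide left or unsigned right shift and all sign bits for a signed right shift. What the fixup enforces, in Go terms (a sketch):

// shlGo: hardware alone would compute x << (c & 63).
func shlGo(x uint64, c uint) uint64 {
	if c >= 64 {
		return 0
	}
	return x << c
}

// sarGo: over-wide signed right shifts saturate to the sign bit.
func sarGo(x int64, c uint) int64 {
	if c >= 64 {
		c = 63
	}
	return x >> c
}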
/*
@@ -904,7 +441,11 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
* there is no 2-operand byte multiply instruction so
* we do a full-width multiplication and truncate afterwards.
*/
func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
if optoas(op, nl.Type) != x86.AIMULB {
return false
}
// largest ullman on left.
if nl.Ullman < nr.Ullman {
tmp := nl
@@ -914,12 +455,12 @@ func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// generate operands in "8-bit" registers.
var n1b gc.Node
regalloc(&n1b, nl.Type, res)
gc.Regalloc(&n1b, nl.Type, res)
cgen(nl, &n1b)
gc.Cgen(nl, &n1b)
var n2b gc.Node
regalloc(&n2b, nr.Type, nil)
cgen(nr, &n2b)
gc.Regalloc(&n2b, nr.Type, nil)
gc.Cgen(nr, &n2b)
// perform full-width multiplication.
t := gc.Types[gc.TUINT64]
@@ -937,8 +478,9 @@ func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// truncate.
gmove(&n1, res)
regfree(&n1b)
regfree(&n2b)
gc.Regfree(&n1b)
gc.Regfree(&n2b)
return true
}
func clearfat(nl *gc.Node) {
@@ -965,7 +507,7 @@ func clearfat(nl *gc.Node) {
// NOTE: Must use agen, not igen, so that optimizer sees address
// being taken. We are not writing on field boundaries.
var n1 gc.Node
agenr(nl, &n1, nil)
gc.Agenr(nl, &n1, nil)
n1.Op = gc.OINDREG
var z gc.Node
@@ -1001,14 +543,14 @@ func clearfat(nl *gc.Node) {
n1.Xoffset++
}
regfree(&n1)
gc.Regfree(&n1)
return
}
var oldn1 gc.Node
var n1 gc.Node
savex(x86.REG_DI, &n1, &oldn1, nil, gc.Types[gc.Tptr])
agen(nl, &n1)
gc.Agen(nl, &n1)
var ax gc.Node
var oldax gc.Node
@@ -1115,3 +657,17 @@ func expandchecks(firstp *obj.Prog) {
p2.To.Offset = 0
}
}
// addr += index*width if possible.
func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
switch width {
case 1, 2, 4, 8:
p1 := gins(x86.ALEAQ, index, addr)
p1.From.Type = obj.TYPE_MEM
p1.From.Scale = int16(width)
p1.From.Index = p1.From.Reg
p1.From.Reg = p1.To.Reg
return true
}
return false
}
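addindex on amd64 succeeds only for widths 1, 2, 4, and 8 because those are the scale factors the x86 addressing mode can encode; a single LEAQ then performs the multiply and the add together. The computation it captures (a sketch):

// leaModel: what LEAQ (addr)(index*width), addr evaluates in one instruction.
func leaModel(addr, index, width uintptr) uintptr {
	return addr + index*width // width limited to 1, 2, 4, 8 by the encoding
}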


@@ -52,171 +52,6 @@ var resvd = []int{
x86.REG_SP, // for stack
}
func ginit() {
for i := 0; i < len(reg); i++ {
reg[i] = 1
}
for i := x86.REG_AX; i <= x86.REG_R15; i++ {
reg[i] = 0
}
for i := x86.REG_X0; i <= x86.REG_X15; i++ {
reg[i] = 0
}
for i := 0; i < len(resvd); i++ {
reg[resvd[i]]++
}
if gc.Nacl {
reg[x86.REG_BP]++
reg[x86.REG_R15]++
} else if obj.Framepointer_enabled != 0 {
// BP is part of the calling convention of framepointer_enabled.
reg[x86.REG_BP]++
}
}
func gclean() {
for i := 0; i < len(resvd); i++ {
reg[resvd[i]]--
}
if gc.Nacl {
reg[x86.REG_BP]--
reg[x86.REG_R15]--
} else if obj.Framepointer_enabled != 0 {
reg[x86.REG_BP]--
}
for i := x86.REG_AX; i <= x86.REG_R15; i++ {
if reg[i] != 0 {
gc.Yyerror("reg %v left allocated\n", obj.Rconv(i))
}
}
for i := x86.REG_X0; i <= x86.REG_X15; i++ {
if reg[i] != 0 {
gc.Yyerror("reg %v left allocated\n", obj.Rconv(i))
}
}
}
func anyregalloc() bool {
var j int
for i := x86.REG_AX; i <= x86.REG_R15; i++ {
if reg[i] == 0 {
goto ok
}
for j = 0; j < len(resvd); j++ {
if resvd[j] == i {
goto ok
}
}
return true
ok:
}
return false
}
var regpc [x86.REG_R15 + 1 - x86.REG_AX]uint32
/*
* allocate register of type t, leave in n.
* if o != N, o is desired fixed register.
* caller must regfree(n).
*/
func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
if t == nil {
gc.Fatal("regalloc: t nil")
}
et := int(gc.Simtype[t.Etype])
var i int
switch et {
case gc.TINT8,
gc.TUINT8,
gc.TINT16,
gc.TUINT16,
gc.TINT32,
gc.TUINT32,
gc.TINT64,
gc.TUINT64,
gc.TPTR32,
gc.TPTR64,
gc.TBOOL:
if o != nil && o.Op == gc.OREGISTER {
i = int(o.Val.U.Reg)
if i >= x86.REG_AX && i <= x86.REG_R15 {
goto out
}
}
for i = x86.REG_AX; i <= x86.REG_R15; i++ {
if reg[i] == 0 {
regpc[i-x86.REG_AX] = uint32(obj.Getcallerpc(&n))
goto out
}
}
gc.Flusherrors()
for i := 0; i+x86.REG_AX <= x86.REG_R15; i++ {
fmt.Printf("%d %p\n", i, regpc[i])
}
gc.Fatal("out of fixed registers")
case gc.TFLOAT32,
gc.TFLOAT64:
if o != nil && o.Op == gc.OREGISTER {
i = int(o.Val.U.Reg)
if i >= x86.REG_X0 && i <= x86.REG_X15 {
goto out
}
}
for i = x86.REG_X0; i <= x86.REG_X15; i++ {
if reg[i] == 0 {
goto out
}
}
gc.Fatal("out of floating registers")
case gc.TCOMPLEX64,
gc.TCOMPLEX128:
gc.Tempname(n, t)
return
}
gc.Fatal("regalloc: unknown type %v", gc.Tconv(t, 0))
return
out:
reg[i]++
gc.Nodreg(n, t, i)
}
func regfree(n *gc.Node) {
if n.Op == gc.ONAME {
return
}
if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
gc.Fatal("regfree: not a register")
}
i := int(n.Val.U.Reg)
if i == x86.REG_SP {
return
}
if i < 0 || i >= len(reg) {
gc.Fatal("regfree: reg out of range")
}
if reg[i] <= 0 {
gc.Fatal("regfree: reg not allocated")
}
reg[i]--
if reg[i] == 0 && x86.REG_AX <= i && i <= x86.REG_R15 {
regpc[i-x86.REG_AX] = 0
}
}
/*
* generate
* as $c, reg
@@ -258,11 +93,11 @@ func ginscon(as int, c int64, n2 *gc.Node) {
// cannot have 64-bit immediate in ADD, etc.
// instead, MOV into register first.
var ntmp gc.Node
regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gins(x86.AMOVQ, &n1, &ntmp)
gins(as, &ntmp, n2)
regfree(&ntmp)
gc.Regfree(&ntmp)
return
}
@@ -536,13 +371,13 @@ func gmove(f *gc.Node, t *gc.Node) {
}
bignodes()
var r1 gc.Node
regalloc(&r1, gc.Types[ft], nil)
gc.Regalloc(&r1, gc.Types[ft], nil)
var r2 gc.Node
regalloc(&r2, gc.Types[tt], t)
gc.Regalloc(&r2, gc.Types[tt], t)
var r3 gc.Node
regalloc(&r3, gc.Types[ft], nil)
gc.Regalloc(&r3, gc.Types[ft], nil)
var r4 gc.Node
regalloc(&r4, gc.Types[tt], nil)
gc.Regalloc(&r4, gc.Types[tt], nil)
gins(optoas(gc.OAS, f.Type), f, &r1)
gins(optoas(gc.OCMP, f.Type), &bigf, &r1)
p1 := gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1)
@ -556,10 +391,10 @@ func gmove(f *gc.Node, t *gc.Node) {
gins(x86.AXORQ, &r4, &r2)
gc.Patch(p2, gc.Pc)
gmove(&r2, t)
regfree(&r4)
regfree(&r3)
regfree(&r2)
regfree(&r1)
gc.Regfree(&r4)
gc.Regfree(&r3)
gc.Regfree(&r2)
gc.Regfree(&r1)
return
/*
@ -617,13 +452,13 @@ func gmove(f *gc.Node, t *gc.Node) {
var one gc.Node
gc.Nodconst(&one, gc.Types[gc.TUINT64], 1)
var r1 gc.Node
regalloc(&r1, f.Type, f)
gc.Regalloc(&r1, f.Type, f)
var r2 gc.Node
regalloc(&r2, t.Type, t)
gc.Regalloc(&r2, t.Type, t)
var r3 gc.Node
regalloc(&r3, f.Type, nil)
gc.Regalloc(&r3, f.Type, nil)
var r4 gc.Node
regalloc(&r4, f.Type, nil)
gc.Regalloc(&r4, f.Type, nil)
gmove(f, &r1)
gins(x86.ACMPQ, &r1, &zero)
p1 := gc.Gbranch(x86.AJLT, nil, +1)
@ -639,10 +474,10 @@ func gmove(f *gc.Node, t *gc.Node) {
gins(optoas(gc.OADD, t.Type), &r2, &r2)
gc.Patch(p2, gc.Pc)
gmove(&r2, t)
regfree(&r4)
regfree(&r3)
regfree(&r2)
regfree(&r1)
gc.Regfree(&r4)
gc.Regfree(&r3)
gc.Regfree(&r2)
gc.Regfree(&r1)
return
/*
@ -670,22 +505,22 @@ func gmove(f *gc.Node, t *gc.Node) {
rdst:
{
var r1 gc.Node
regalloc(&r1, t.Type, t)
gc.Regalloc(&r1, t.Type, t)
gins(a, f, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
}
// requires register intermediate
hard:
var r1 gc.Node
regalloc(&r1, cvt, t)
gc.Regalloc(&r1, cvt, t)
gmove(f, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
}
@ -713,22 +548,35 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
// Node nod;
// if(f != N && f->op == OINDEX) {
// regalloc(&nod, &regnode, Z);
// gc.Regalloc(&nod, &regnode, Z);
// v = constnode.vconst;
// cgen(f->right, &nod);
// gc.Cgen(f->right, &nod);
// constnode.vconst = v;
// idx.reg = nod.reg;
// regfree(&nod);
// gc.Regfree(&nod);
// }
// if(t != N && t->op == OINDEX) {
// regalloc(&nod, &regnode, Z);
// gc.Regalloc(&nod, &regnode, Z);
// v = constnode.vconst;
// cgen(t->right, &nod);
// gc.Cgen(t->right, &nod);
// constnode.vconst = v;
// idx.reg = nod.reg;
// regfree(&nod);
// gc.Regfree(&nod);
// }
if f != nil && f.Op == gc.OADDR && (as == x86.AMOVL || as == x86.AMOVQ) {
// Turn MOVL $xxx into LEAL xxx.
// These should be equivalent but most of the backend
// only expects to see LEAL, because that's what we had
// historically generated. Various hidden assumptions are baked in by now.
if as == x86.AMOVL {
as = x86.ALEAL
} else {
as = x86.ALEAQ
}
f = f.Left
}
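	// Illustrative example (not part of the CL): with this rewrite,
	//	MOVQ $x-8(SP), AX
	// reaches the assembler as
	//	LEAQ x-8(SP), AX
	// so later passes only ever see the LEA form.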
switch as {
case x86.AMOVB,
x86.AMOVW,
@ -782,27 +630,13 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
return p
}
func fixlargeoffset(n *gc.Node) {
if n == nil {
return
}
if n.Op != gc.OINDREG {
return
}
if n.Val.U.Reg == x86.REG_SP { // stack offset cannot be large
return
}
if n.Xoffset != int64(int32(n.Xoffset)) {
// offset too large, add to register instead.
a := *n
a.Op = gc.OREGISTER
a.Type = gc.Types[gc.Tptr]
a.Xoffset = 0
gc.Cgen_checknil(&a)
ginscon(optoas(gc.OADD, gc.Types[gc.Tptr]), n.Xoffset, &a)
n.Xoffset = 0
	}
}
func ginsnop() {
// This is actually not the x86 NOP anymore,
// but at the point where it gets used, AX is dead
// so it's okay if we lose the high bits.
var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
gins(x86.AXCHGL, &reg, &reg)
}
/*
@ -854,6 +688,21 @@ func optoas(op int, t *gc.Type) int {
gc.ONE<<16 | gc.TFLOAT64:
a = x86.AJNE
case gc.OPS<<16 | gc.TBOOL,
gc.OPS<<16 | gc.TINT8,
gc.OPS<<16 | gc.TUINT8,
gc.OPS<<16 | gc.TINT16,
gc.OPS<<16 | gc.TUINT16,
gc.OPS<<16 | gc.TINT32,
gc.OPS<<16 | gc.TUINT32,
gc.OPS<<16 | gc.TINT64,
gc.OPS<<16 | gc.TUINT64,
gc.OPS<<16 | gc.TPTR32,
gc.OPS<<16 | gc.TPTR64,
gc.OPS<<16 | gc.TFLOAT32,
gc.OPS<<16 | gc.TFLOAT64:
a = x86.AJPS
case gc.OLT<<16 | gc.TINT8,
gc.OLT<<16 | gc.TINT16,
gc.OLT<<16 | gc.TINT32,
@ -1296,29 +1145,12 @@ var clean [20]gc.Node
var cleani int = 0
func xgen(n *gc.Node, a *gc.Node, o int) bool {
regalloc(a, gc.Types[gc.Tptr], nil)
if o&ODynam != 0 {
if n.Addable != 0 {
if n.Op != gc.OINDREG {
if n.Op != gc.OREGISTER {
return true
}
}
}
}
agen(n, a)
return false
}
func sudoclean() {
if clean[cleani-1].Op != gc.OEMPTY {
regfree(&clean[cleani-1])
gc.Regfree(&clean[cleani-1])
}
if clean[cleani-2].Op != gc.OEMPTY {
regfree(&clean[cleani-2])
gc.Regfree(&clean[cleani-2])
}
cleani -= 2
}
@ -1422,14 +1254,14 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
return true
}
regalloc(reg, gc.Types[gc.Tptr], nil)
gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
n1 := *reg
n1.Op = gc.OINDREG
if oary[0] >= 0 {
agen(nn, reg)
gc.Agen(nn, reg)
n1.Xoffset = oary[0]
} else {
cgen(nn, reg)
gc.Cgen(nn, reg)
gc.Cgen_checknil(reg)
n1.Xoffset = -(oary[0] + 1)
}
@ -1445,7 +1277,7 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
a.Type = obj.TYPE_NONE
a.Index = obj.TYPE_NONE
fixlargeoffset(&n1)
gc.Fixlargeoffset(&n1)
gc.Naddr(a, &n1)
return true

File diff suppressed because it is too large

@ -44,33 +44,38 @@ func main() {
gc.Thearch.Typedefs = typedefs
gc.Thearch.REGSP = arm64.REGSP
gc.Thearch.REGCTXT = arm64.REGCTXT
gc.Thearch.REGCALLX = arm64.REGRT1
gc.Thearch.REGCALLX2 = arm64.REGRT2
gc.Thearch.REGRETURN = arm64.REG_R0
gc.Thearch.REGMIN = arm64.REG_R0
gc.Thearch.REGMAX = arm64.REG_R31
gc.Thearch.FREGMIN = arm64.REG_F0
gc.Thearch.FREGMAX = arm64.REG_F31
gc.Thearch.MAXWIDTH = MAXWIDTH
gc.Thearch.Anyregalloc = anyregalloc
gc.Thearch.ReservedRegs = resvd
gc.Thearch.Betypeinit = betypeinit
gc.Thearch.Bgen = bgen
gc.Thearch.Cgen = cgen
gc.Thearch.Cgen_call = cgen_call
gc.Thearch.Cgen_callinter = cgen_callinter
gc.Thearch.Cgen_ret = cgen_ret
gc.Thearch.Cgen_hmul = cgen_hmul
gc.Thearch.Cgen_shift = cgen_shift
gc.Thearch.Clearfat = clearfat
gc.Thearch.Defframe = defframe
gc.Thearch.Dodiv = dodiv
gc.Thearch.Excise = excise
gc.Thearch.Expandchecks = expandchecks
gc.Thearch.Gclean = gclean
gc.Thearch.Ginit = ginit
gc.Thearch.Gins = gins
gc.Thearch.Ginscall = ginscall
gc.Thearch.Ginscon = ginscon
gc.Thearch.Ginsnop = ginsnop
gc.Thearch.Gmove = gmove
gc.Thearch.Igen = igen
gc.Thearch.Linkarchinit = linkarchinit
gc.Thearch.Peep = peep
gc.Thearch.Proginfo = proginfo
gc.Thearch.Regalloc = regalloc
gc.Thearch.Regfree = regfree
gc.Thearch.Regtyp = regtyp
gc.Thearch.Sameaddr = sameaddr
gc.Thearch.Smallindir = smallindir
gc.Thearch.Stackaddr = stackaddr
gc.Thearch.Stackcopy = stackcopy
gc.Thearch.Sudoaddable = sudoaddable
gc.Thearch.Sudoclean = sudoclean
gc.Thearch.Excludedregs = excludedregs
gc.Thearch.RtoB = RtoB
gc.Thearch.FtoB = RtoB


@ -116,316 +116,10 @@ func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int
return q
}
/*
* generate:
* call f
* proc=-1 normal call but no return
* proc=0 normal call
* proc=1 goroutine run in new proc
* proc=2 defer call save away stack
* proc=3 normal call to C pointer (not Go func value)
*/
func ginscall(f *gc.Node, proc int) {
if f.Type != nil {
extra := int32(0)
if proc == 1 || proc == 2 {
extra = 2 * int32(gc.Widthptr)
}
gc.Setmaxarg(f.Type, extra)
}
switch proc {
default:
gc.Fatal("ginscall: bad proc %d", proc)
case 0, // normal call
-1: // normal call but no return
if f.Op == gc.ONAME && f.Class == gc.PFUNC {
if f == gc.Deferreturn {
// Deferred calls will appear to be returning to
// the CALL deferreturn(SB) that we are about to emit.
// However, the stack trace code will show the line
// of the instruction byte before the return PC.
// To avoid that being an unrelated instruction,
// insert an arm64 NOP so that we will have the right line number.
// The arm64 NOP is really HINT $0; use that description
// because the NOP pseudo-instruction would be removed by
// the linker.
var con gc.Node
gc.Nodconst(&con, gc.Types[gc.TINT], 0)
gins(arm64.AHINT, &con, nil)
}
p := gins(arm64.ABL, nil, f)
gc.Afunclit(&p.To, f)
if proc == -1 || gc.Noreturn(p) {
gins(obj.AUNDEF, nil, nil)
}
break
}
var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.Tptr], arm64.REGCTXT)
var r1 gc.Node
gc.Nodreg(&r1, gc.Types[gc.Tptr], arm64.REGRT1)
gmove(f, &reg)
reg.Op = gc.OINDREG
gmove(&reg, &r1)
r1.Op = gc.OINDREG
gins(arm64.ABL, nil, &r1)
case 3: // normal call of c function pointer
gins(arm64.ABL, nil, f)
case 1, // call in new proc (go)
2: // deferred call (defer)
var con gc.Node
gc.Nodconst(&con, gc.Types[gc.TINT64], int64(gc.Argsize(f.Type)))
var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.TINT64], arm64.REGRT1)
var reg2 gc.Node
gc.Nodreg(&reg2, gc.Types[gc.TINT64], arm64.REGRT2)
gmove(f, &reg)
gmove(&con, &reg2)
p := gins(arm64.AMOVW, &reg2, nil)
p.To.Type = obj.TYPE_MEM
p.To.Reg = arm64.REGSP
p.To.Offset = 8
p = gins(arm64.AMOVD, &reg, nil)
p.To.Type = obj.TYPE_MEM
p.To.Reg = arm64.REGSP
p.To.Offset = 16
if proc == 1 {
ginscall(gc.Newproc, 0)
} else {
if gc.Hasdefer == 0 {
gc.Fatal("hasdefer=0 but has defer")
}
ginscall(gc.Deferproc, 0)
}
if proc == 2 {
gc.Nodreg(&reg, gc.Types[gc.TINT64], arm64.REG_R0) // R0 should match runtime.return0
p := gins(arm64.ACMP, &reg, nil)
p.Reg = arm64.REGZERO
p = gc.Gbranch(arm64.ABEQ, nil, +1)
cgen_ret(nil)
gc.Patch(p, gc.Pc)
}
}
	}
}
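An illustrative sketch of how the proc argument selects the call form; fn is a placeholder node for a Go function value, not part of the CL:

	// ginscall(fn, 0)  // plain call: BL fn
	// ginscall(fn, -1) // call that cannot return: BL fn; UNDEF
	// ginscall(fn, 1)  // go fn():    argsize and fn stored at 8(SP)/16(SP), BL runtime.newproc
	// ginscall(fn, 2)  // defer fn(): argsize and fn stored at 8(SP)/16(SP), BL runtime.deferproc,
	//                  // then return early if R0 (runtime.return0) is nonzero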
/*
* n is call to interface method.
* generate res = n.
*/
func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
i := n.Left
if i.Op != gc.ODOTINTER {
gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
}
f := i.Right // field
if f.Op != gc.ONAME {
gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
}
i = i.Left // interface
if i.Addable == 0 {
var tmpi gc.Node
gc.Tempname(&tmpi, i.Type)
cgen(i, &tmpi)
i = &tmpi
}
gc.Genlist(n.List) // assign the args
// i is now addable, prepare an indirected
// register to hold its address.
var nodi gc.Node
igen(i, &nodi, res) // REG = &inter
var nodsp gc.Node
gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], arm64.REGSP)
nodsp.Xoffset = int64(gc.Widthptr)
if proc != 0 {
nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
}
nodi.Type = gc.Types[gc.Tptr]
nodi.Xoffset += int64(gc.Widthptr)
cgen(&nodi, &nodsp) // {8 or 24}(SP) = 8(REG) -- i.data
var nodo gc.Node
regalloc(&nodo, gc.Types[gc.Tptr], res)
nodi.Type = gc.Types[gc.Tptr]
nodi.Xoffset -= int64(gc.Widthptr)
cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
regfree(&nodi)
var nodr gc.Node
regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
if n.Left.Xoffset == gc.BADWIDTH {
gc.Fatal("cgen_callinter: badwidth")
}
gc.Cgen_checknil(&nodo) // in case offset is huge
nodo.Op = gc.OINDREG
nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
if proc == 0 {
// plain call: use direct c function pointer - more efficient
cgen(&nodo, &nodr) // REG = 32+offset(REG) -- i.tab->fun[f]
proc = 3
} else {
// go/defer. generate go func value.
p := gins(arm64.AMOVD, &nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
p.From.Type = obj.TYPE_ADDR
}
nodr.Type = n.Left.Type
ginscall(&nodr, proc)
regfree(&nodr)
regfree(&nodo)
}
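A worked sketch of the offset arithmetic above, assuming gc.Widthptr == 8 on arm64 and the runtime's itab layout of the time (three pointer fields plus 8 bytes of scalar fields before fun):

	// &itab.fun[f] = itab + 3*8 + 8 + f.Xoffset = itab + 32 + f.Xoffset
	// matching the "REG = 32+offset(REG)" comments in the code above.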
/*
* generate function call;
* proc=0 normal call
* proc=1 goroutine run in new proc
* proc=2 defer call save away stack
*/
func cgen_call(n *gc.Node, proc int) {
if n == nil {
return
}
var afun gc.Node
if n.Left.Ullman >= gc.UINF {
// if name involves a fn call
// precompute the address of the fn
gc.Tempname(&afun, gc.Types[gc.Tptr])
cgen(n.Left, &afun)
}
gc.Genlist(n.List) // assign the args
t := n.Left.Type
// call tempname pointer
if n.Left.Ullman >= gc.UINF {
var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, &afun)
nod.Type = t
ginscall(&nod, proc)
regfree(&nod)
return
}
// call pointer
if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, n.Left)
nod.Type = t
ginscall(&nod, proc)
regfree(&nod)
return
}
// call direct
n.Left.Method = 1
ginscall(n.Left, proc)
}
/*
* call to n has already been generated.
* generate:
* res = return value from call.
*/
func cgen_callret(n *gc.Node, res *gc.Node) {
t := n.Left.Type
if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
t = t.Type
}
var flist gc.Iter
fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_callret: nil")
}
var nod gc.Node
nod.Op = gc.OINDREG
nod.Val.U.Reg = arm64.REGSP
nod.Addable = 1
nod.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved LR at 0(R1)
nod.Type = fp.Type
gc.Cgen_as(res, &nod)
}
/*
* call to n has already been generated.
* generate:
* res = &return value from call.
*/
func cgen_aret(n *gc.Node, res *gc.Node) {
t := n.Left.Type
if gc.Isptr[t.Etype] {
t = t.Type
}
var flist gc.Iter
fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_aret: nil")
}
var nod1 gc.Node
nod1.Op = gc.OINDREG
nod1.Val.U.Reg = arm64.REGSP
nod1.Addable = 1
nod1.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved lr at 0(SP)
nod1.Type = fp.Type
if res.Op != gc.OREGISTER {
var nod2 gc.Node
regalloc(&nod2, gc.Types[gc.Tptr], res)
agen(&nod1, &nod2)
gins(arm64.AMOVD, &nod2, res)
regfree(&nod2)
} else {
agen(&nod1, res)
}
}
/*
* generate return.
* n->left is assignments to return values.
*/
func cgen_ret(n *gc.Node) {
if n != nil {
gc.Genlist(n.List) // copy out args
}
if gc.Hasdefer != 0 {
ginscall(gc.Deferreturn, 0)
}
gc.Genlist(gc.Curfn.Exit)
p := gins(obj.ARET, nil, nil)
if n != nil && n.Op == gc.ORETJMP {
p.To.Name = obj.NAME_EXTERN
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(n.Left.Sym)
	}
}
func ginsnop() {
var con gc.Node
gc.Nodconst(&con, gc.Types[gc.TINT], 0)
gins(arm64.AHINT, &con, nil)
}
/*
@ -468,15 +162,15 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
a := optoas(gc.ODIV, t)
var tl gc.Node
regalloc(&tl, t0, nil)
gc.Regalloc(&tl, t0, nil)
var tr gc.Node
regalloc(&tr, t0, nil)
gc.Regalloc(&tr, t0, nil)
if nl.Ullman >= nr.Ullman {
cgen(nl, &tl)
cgen(nr, &tr)
gc.Cgen(nl, &tl)
gc.Cgen(nr, &tr)
} else {
cgen(nr, &tr)
cgen(nl, &tl)
gc.Cgen(nr, &tr)
gc.Cgen(nl, &tl)
}
if t != t0 {
@ -497,7 +191,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if panicdiv == nil {
panicdiv = gc.Sysfunc("panicdivide")
}
ginscall(panicdiv, -1)
gc.Ginscall(panicdiv, -1)
gc.Patch(p1, gc.Pc)
var p2 *obj.Prog
@ -525,12 +219,12 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
p1 = gins(a, &tr, &tl)
if op == gc.ODIV {
regfree(&tr)
gc.Regfree(&tr)
gmove(&tl, res)
} else {
// A%B = A-(A/B*B)
var tm gc.Node
regalloc(&tm, t, nil)
gc.Regalloc(&tm, t, nil)
// patch div to use the 3 register form
// TODO(minux): add gins3?
@ -538,32 +232,18 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
p1.To.Reg = tm.Val.U.Reg
gins(optoas(gc.OMUL, t), &tr, &tm)
regfree(&tr)
gc.Regfree(&tr)
gins(optoas(gc.OSUB, t), &tm, &tl)
regfree(&tm)
gc.Regfree(&tm)
gmove(&tl, res)
}
regfree(&tl)
gc.Regfree(&tl)
if check != 0 {
gc.Patch(p2, gc.Pc)
}
}
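The A%B = A-(A/B*B) identity used above, as a worked sketch in plain Go (values illustrative):

	a, b := int64(7), int64(3)
	q := a / b   // 2, from the hardware divide
	r := a - q*b // 7 - 2*3 = 1, equal to a % b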
/*
* generate division according to op, one of:
* res = nl / nr
* res = nl % nr
*/
func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// TODO(minux): enable division by magic multiply (also need to fix longmod below)
//if(nr->op != OLITERAL)
// division and mod using (slow) hardware instruction
dodiv(op, nl, nr, res)
return
}
/*
* generate high multiply:
* res = (nl*nr) >> width
@ -579,9 +259,9 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
t := (*gc.Type)(nl.Type)
w := int(int(t.Width * 8))
var n1 gc.Node
cgenr(nl, &n1, res)
gc.Cgenr(nl, &n1, res)
var n2 gc.Node
cgenr(nr, &n2, nil)
gc.Cgenr(nr, &n2, nil)
switch gc.Simtype[t.Etype] {
case gc.TINT8,
gc.TINT16,
@ -611,9 +291,9 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
gc.Fatal("cgen_hmul %v", gc.Tconv(t, 0))
}
cgen(&n1, res)
regfree(&n1)
regfree(&n2)
gc.Cgen(&n1, res)
gc.Regfree(&n1)
gc.Regfree(&n2)
}
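What cgen_hmul computes, sketched in plain Go for 32-bit operands (variable names illustrative): the high half of the double-width product.

	var x, y int32 = 123456, 654321
	hi := int32((int64(x) * int64(y)) >> 32) // res = (nl*nr) >> width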
/*
@ -626,8 +306,8 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nr.Op == gc.OLITERAL {
var n1 gc.Node
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
gc.Regalloc(&n1, nl.Type, res)
gc.Cgen(nl, &n1)
sc := uint64(uint64(gc.Mpgetfix(nr.Val.U.Xval)))
if sc >= uint64(nl.Type.Width*8) {
// large shift gets 2 shifts by width-1
@ -640,21 +320,21 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
gins(a, nr, &n1)
}
gmove(&n1, res)
regfree(&n1)
gc.Regfree(&n1)
return
}
if nl.Ullman >= gc.UINF {
var n4 gc.Node
gc.Tempname(&n4, nl.Type)
cgen(nl, &n4)
gc.Cgen(nl, &n4)
nl = &n4
}
if nr.Ullman >= gc.UINF {
var n5 gc.Node
gc.Tempname(&n5, nr.Type)
cgen(nr, &n5)
gc.Cgen(nr, &n5)
nr = &n5
}
@ -668,24 +348,24 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
}
var n1 gc.Node
regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
gc.Regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
var n3 gc.Node
regalloc(&n3, tcount, &n1) // to clear high bits of CX
gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX
var n2 gc.Node
regalloc(&n2, nl.Type, res)
gc.Regalloc(&n2, nl.Type, res)
if nl.Ullman >= nr.Ullman {
cgen(nl, &n2)
cgen(nr, &n1)
gc.Cgen(nl, &n2)
gc.Cgen(nr, &n1)
gmove(&n1, &n3)
} else {
cgen(nr, &n1)
gc.Cgen(nr, &n1)
gmove(&n1, &n3)
cgen(nl, &n2)
gc.Cgen(nl, &n2)
}
regfree(&n3)
gc.Regfree(&n3)
// test and fix up large shifts
if !bounded {
@ -707,8 +387,8 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
gmove(&n2, res)
regfree(&n1)
regfree(&n2)
gc.Regfree(&n1)
gc.Regfree(&n2)
}
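A sketch of the large-constant-shift rule above: a constant count sc >= width is emitted as two shifts by width-1, so for a 64-bit operand and sc == 70 (illustrative),

	// x >> 70  ==>  (x >> 63) >> 63
	// which yields 0 for unsigned x and the sign bit for signed x.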
func clearfat(nl *gc.Node) {
@ -736,7 +416,7 @@ func clearfat(nl *gc.Node) {
var dst gc.Node
gc.Nodreg(&dst, gc.Types[gc.Tptr], arm64.REGRT1)
reg[arm64.REGRT1-arm64.REG_R0]++
agen(nl, &dst)
gc.Agen(nl, &dst)
var boff uint64
if q > 128 {
@ -745,7 +425,7 @@ func clearfat(nl *gc.Node) {
p.From.Offset = 8
var end gc.Node
regalloc(&end, gc.Types[gc.Tptr], nil)
gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
p = gins(arm64.AMOVD, &dst, &end)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = int64(q * 8)
@ -759,7 +439,7 @@ func clearfat(nl *gc.Node) {
p = gcmp(arm64.ACMP, &dst, &end)
gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), pl)
regfree(&end)
gc.Regfree(&end)
// The loop leaves R16 on the last zeroed dword
boff = 8


@ -52,171 +52,6 @@ var resvd = []int{
arm64.FREGTWO,
}
func ginit() {
for i := 0; i < len(reg); i++ {
reg[i] = 1
}
for i := 0; i < arm64.NREG+arm64.NFREG; i++ {
reg[i] = 0
}
for i := 0; i < len(resvd); i++ {
reg[resvd[i]-arm64.REG_R0]++
}
}
var regpc [len(reg)]uint32
func gclean() {
for i := int(0); i < len(resvd); i++ {
reg[resvd[i]-arm64.REG_R0]--
}
for i := int(0); i < len(reg); i++ {
if reg[i] != 0 {
gc.Yyerror("reg %v left allocated, %p\n", obj.Rconv(i+arm64.REG_R0), regpc[i])
}
}
}
func anyregalloc() bool {
var j int
for i := int(0); i < len(reg); i++ {
if reg[i] == 0 {
goto ok
}
for j = 0; j < len(resvd); j++ {
if resvd[j] == i {
goto ok
}
}
return true
ok:
}
return false
}
/*
* allocate register of type t, leave in n.
* if o != N, o is desired fixed register.
* caller must regfree(n).
*/
func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
if t == nil {
gc.Fatal("regalloc: t nil")
}
et := int(int(gc.Simtype[t.Etype]))
if gc.Debug['r'] != 0 {
fixfree := int(0)
fltfree := int(0)
for i := int(arm64.REG_R0); i < arm64.REG_F31; i++ {
if reg[i-arm64.REG_R0] == 0 {
if i < arm64.REG_F0 {
fixfree++
} else {
fltfree++
}
}
}
fmt.Printf("regalloc fix %d flt %d free\n", fixfree, fltfree)
}
var i int
switch et {
case gc.TINT8,
gc.TUINT8,
gc.TINT16,
gc.TUINT16,
gc.TINT32,
gc.TUINT32,
gc.TINT64,
gc.TUINT64,
gc.TPTR32,
gc.TPTR64,
gc.TBOOL:
if o != nil && o.Op == gc.OREGISTER {
i = int(o.Val.U.Reg)
if i >= arm64.REGMIN && i <= arm64.REGMAX {
goto out
}
}
for i = arm64.REGMIN; i <= arm64.REGMAX; i++ {
if reg[i-arm64.REG_R0] == 0 {
regpc[i-arm64.REG_R0] = uint32(obj.Getcallerpc(&n))
goto out
}
}
gc.Flusherrors()
for i := int(arm64.REG_R0); i < arm64.REG_R0+arm64.NREG; i++ {
fmt.Printf("R%d %p\n", i, regpc[i-arm64.REG_R0])
}
gc.Fatal("out of fixed registers")
case gc.TFLOAT32,
gc.TFLOAT64:
if o != nil && o.Op == gc.OREGISTER {
i = int(o.Val.U.Reg)
if i >= arm64.FREGMIN && i <= arm64.FREGMAX {
goto out
}
}
for i = arm64.FREGMIN; i <= arm64.FREGMAX; i++ {
if reg[i-arm64.REG_R0] == 0 {
regpc[i-arm64.REG_R0] = uint32(obj.Getcallerpc(&n))
goto out
}
}
gc.Flusherrors()
for i := int(arm64.REG_F0); i < arm64.REG_F0+arm64.NREG; i++ {
fmt.Printf("F%d %p\n", i, regpc[i-arm64.REG_R0])
}
gc.Fatal("out of floating registers")
case gc.TCOMPLEX64,
gc.TCOMPLEX128:
gc.Tempname(n, t)
return
}
gc.Fatal("regalloc: unknown type %v", gc.Tconv(t, 0))
return
out:
reg[i-arm64.REG_R0]++
gc.Nodreg(n, t, i)
}
func regfree(n *gc.Node) {
if n.Op == gc.ONAME {
return
}
if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
gc.Fatal("regfree: not a register")
}
i := int(int(n.Val.U.Reg) - arm64.REG_R0)
if i == arm64.REGSP-arm64.REG_R0 {
return
}
if i < 0 || i >= len(reg) {
gc.Fatal("regfree: reg out of range")
}
if reg[i] <= 0 {
gc.Fatal("regfree: reg not allocated")
}
reg[i]--
if reg[i] == 0 {
regpc[i] = 0
}
}
/*
* generate
* as $c, n
@ -226,19 +61,19 @@ func ginscon(as int, c int64, n2 *gc.Node) {
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
if as != arm64.AMOVD && (c < -arm64.BIG || c > arm64.BIG) {
if as != arm64.AMOVD && (c < -arm64.BIG || c > arm64.BIG) || as == arm64.AMUL || n2 != nil && n2.Op != gc.OREGISTER {
// cannot have more than 16 bits of immediate in ADD, etc.
// instead, MOV into register first.
var ntmp gc.Node
regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gins(arm64.AMOVD, &n1, &ntmp)
gins(as, &ntmp, n2)
regfree(&ntmp)
gc.Regfree(&ntmp)
return
}
gins(as, &n1, n2)
rawgins(as, &n1, n2)
}
/*
@ -263,11 +98,11 @@ func ginscon2(as int, n2 *gc.Node, c int64) {
// MOV n1 into register first
var ntmp gc.Node
regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gins(arm64.AMOVD, &n1, &ntmp)
rawgins(arm64.AMOVD, &n1, &ntmp)
gcmp(as, n2, &ntmp)
regfree(&ntmp)
gc.Regfree(&ntmp)
}
/*
@ -309,10 +144,10 @@ func gmove(f *gc.Node, t *gc.Node) {
var con gc.Node
gc.Convconst(&con, gc.Types[gc.TINT64], &f.Val)
var r1 gc.Node
regalloc(&r1, con.Type, t)
gc.Regalloc(&r1, con.Type, t)
gins(arm64.AMOVD, &con, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
case gc.TUINT32,
@ -321,10 +156,10 @@ func gmove(f *gc.Node, t *gc.Node) {
var con gc.Node
gc.Convconst(&con, gc.Types[gc.TUINT64], &f.Val)
var r1 gc.Node
regalloc(&r1, con.Type, t)
gc.Regalloc(&r1, con.Type, t)
gins(arm64.AMOVD, &con, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
}
@ -590,28 +425,60 @@ func gmove(f *gc.Node, t *gc.Node) {
// requires register destination
rdst:
regalloc(&r1, t.Type, t)
gc.Regalloc(&r1, t.Type, t)
gins(a, f, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
// requires register intermediate
hard:
regalloc(&r1, cvt, t)
gc.Regalloc(&r1, cvt, t)
gmove(f, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
}
func intLiteral(n *gc.Node) (x int64, ok bool) {
if n == nil || n.Op != gc.OLITERAL {
return
}
switch n.Val.Ctype {
case gc.CTINT, gc.CTRUNE:
return gc.Mpgetfix(n.Val.U.Xval), true
case gc.CTBOOL:
return int64(n.Val.U.Bval), true
}
return
}
// gins is called by the front end.
// It synthesizes some multiple-instruction sequences
// so the front end can stay simpler.
func gins(as int, f, t *gc.Node) *obj.Prog {
if as >= obj.A_ARCHSPECIFIC {
if x, ok := intLiteral(f); ok {
ginscon(as, x, t)
return nil // caller must not use
}
}
if as == arm64.ACMP {
if x, ok := intLiteral(t); ok {
ginscon2(as, f, x)
return nil // caller must not use
}
}
return rawgins(as, f, t)
}
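A sketch of the dispatch this wrapper performs; n is a placeholder register node, not part of the CL. Constant operands are routed to the multi-instruction helpers, so the portable front end can emit them without knowing the limits on arm64 immediates:

	var c gc.Node
	gc.Nodconst(&c, gc.Types[gc.TINT64], 1<<20)
	gins(arm64.AADD, &c, &n) // too wide for an immediate: ginscon emits MOVD $c, Rtmp; ADD Rtmp, n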
/*
* generate one instruction:
* as f, t
*/
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
// TODO(austin): Add self-move test like in 6g (but be careful
// of truncation moves)
@ -619,6 +486,32 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
gc.Naddr(&p.From, f)
gc.Naddr(&p.To, t)
switch as {
case arm64.ACMP, arm64.AFCMPS, arm64.AFCMPD:
if t != nil {
if f.Op != gc.OREGISTER {
gc.Fatal("bad operands to gcmp")
}
p.From = p.To
p.To = obj.Addr{}
raddr(f, p)
}
}
// Bad things the front end has done to us. Crash to find call stack.
switch as {
case arm64.AAND, arm64.AMUL:
if p.From.Type == obj.TYPE_CONST {
gc.Debug['h'] = 1
gc.Fatal("bad inst: %v", p)
}
case arm64.ACMP:
if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
gc.Debug['h'] = 1
gc.Fatal("bad inst: %v", p)
}
}
if gc.Debug['g'] != 0 {
fmt.Printf("%v\n", p)
}
@ -696,7 +589,7 @@ func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
gc.Fatal("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
}
p := gins(as, rhs, nil)
p := rawgins(as, rhs, nil)
raddr(lhs, p)
return p
}

File diff suppressed because it is too large

@ -27,7 +27,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
case gc.OMINUS:
cgen(n.Left, res)
gc.Cgen(n.Left, res)
var hi1 gc.Node
var lo1 gc.Node
split64(res, &lo1, &hi1)
@ -38,7 +38,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
return
case gc.OCOM:
cgen(n.Left, res)
gc.Cgen(n.Left, res)
var lo1 gc.Node
var hi1 gc.Node
split64(res, &lo1, &hi1)
@ -66,14 +66,14 @@ func cgen64(n *gc.Node, res *gc.Node) {
if l.Addable == 0 {
var t1 gc.Node
gc.Tempname(&t1, l.Type)
cgen(l, &t1)
gc.Cgen(l, &t1)
l = &t1
}
if r != nil && r.Addable == 0 {
var t2 gc.Node
gc.Tempname(&t2, r.Type)
cgen(r, &t2)
gc.Cgen(r, &t2)
r = &t2
}
@ -116,10 +116,10 @@ func cgen64(n *gc.Node, res *gc.Node) {
// let's call the next two EX and FX.
case gc.OMUL:
var ex gc.Node
regalloc(&ex, gc.Types[gc.TPTR32], nil)
gc.Regalloc(&ex, gc.Types[gc.TPTR32], nil)
var fx gc.Node
regalloc(&fx, gc.Types[gc.TPTR32], nil)
gc.Regalloc(&fx, gc.Types[gc.TPTR32], nil)
// load args into DX:AX and EX:CX.
gins(x86.AMOVL, &lo1, &ax)
@ -148,8 +148,8 @@ func cgen64(n *gc.Node, res *gc.Node) {
gins(x86.AADDL, &fx, &dx)
gc.Patch(p2, gc.Pc)
regfree(&ex)
regfree(&fx)
gc.Regfree(&ex)
gc.Regfree(&fx)
// We only rotate by a constant c in [0,64).
// if c >= 32:
@ -523,10 +523,10 @@ func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
gins(x86.ACMPL, &hi1, &hi2)
} else {
regalloc(&rr, gc.Types[gc.TINT32], nil)
gc.Regalloc(&rr, gc.Types[gc.TINT32], nil)
gins(x86.AMOVL, &hi1, &rr)
gins(x86.ACMPL, &rr, &hi2)
regfree(&rr)
gc.Regfree(&rr)
}
var br *obj.Prog
@ -580,10 +580,10 @@ func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
gins(x86.ACMPL, &lo1, &lo2)
} else {
regalloc(&rr, gc.Types[gc.TINT32], nil)
gc.Regalloc(&rr, gc.Types[gc.TINT32], nil)
gins(x86.AMOVL, &lo1, &rr)
gins(x86.ACMPL, &rr, &lo2)
regfree(&rr)
gc.Regfree(&rr)
}
// jump again


@ -45,33 +45,44 @@ func main() {
gc.Thearch.Typedefs = typedefs
gc.Thearch.REGSP = x86.REGSP
gc.Thearch.REGCTXT = x86.REGCTXT
gc.Thearch.REGCALLX = x86.REG_BX
gc.Thearch.REGCALLX2 = x86.REG_AX
gc.Thearch.REGRETURN = x86.REG_AX
gc.Thearch.REGMIN = x86.REG_AX
gc.Thearch.REGMAX = x86.REG_DI
gc.Thearch.FREGMIN = x86.REG_X0
gc.Thearch.FREGMAX = x86.REG_X7
gc.Thearch.MAXWIDTH = MAXWIDTH
gc.Thearch.Anyregalloc = anyregalloc
gc.Thearch.ReservedRegs = resvd
gc.Thearch.Betypeinit = betypeinit
gc.Thearch.Bgen = bgen
gc.Thearch.Cgen = cgen
gc.Thearch.Cgen_call = cgen_call
gc.Thearch.Cgen_callinter = cgen_callinter
gc.Thearch.Cgen_ret = cgen_ret
gc.Thearch.Bgen_float = bgen_float
gc.Thearch.Cgen64 = cgen64
gc.Thearch.Cgen_bmul = cgen_bmul
gc.Thearch.Cgen_float = cgen_float
gc.Thearch.Cgen_hmul = cgen_hmul
gc.Thearch.Cgen_shift = cgen_shift
gc.Thearch.Clearfat = clearfat
gc.Thearch.Cmp64 = cmp64
gc.Thearch.Defframe = defframe
gc.Thearch.Dodiv = cgen_div
gc.Thearch.Excise = excise
gc.Thearch.Expandchecks = expandchecks
gc.Thearch.Gclean = gclean
gc.Thearch.Ginit = ginit
gc.Thearch.Gins = gins
gc.Thearch.Ginscall = ginscall
gc.Thearch.Ginscon = ginscon
gc.Thearch.Ginsnop = ginsnop
gc.Thearch.Gmove = gmove
gc.Thearch.Igen = igen
gc.Thearch.Igenindex = igenindex
gc.Thearch.Linkarchinit = linkarchinit
gc.Thearch.Peep = peep
gc.Thearch.Proginfo = proginfo
gc.Thearch.Regalloc = regalloc
gc.Thearch.Regfree = regfree
gc.Thearch.Regtyp = regtyp
gc.Thearch.Sameaddr = sameaddr
gc.Thearch.Smallindir = smallindir
gc.Thearch.Stackaddr = stackaddr
gc.Thearch.Stackcopy = stackcopy
gc.Thearch.Sudoaddable = sudoaddable
gc.Thearch.Sudoclean = sudoclean
gc.Thearch.Excludedregs = excludedregs
gc.Thearch.RtoB = RtoB
gc.Thearch.FtoB = FtoB


@ -127,9 +127,9 @@ func clearfat(nl *gc.Node) {
// NOTE: Must use agen, not igen, so that optimizer sees address
// being taken. We are not writing on field boundaries.
var n1 gc.Node
regalloc(&n1, gc.Types[gc.Tptr], nil)
gc.Regalloc(&n1, gc.Types[gc.Tptr], nil)
agen(nl, &n1)
gc.Agen(nl, &n1)
n1.Op = gc.OINDREG
var z gc.Node
gc.Nodconst(&z, gc.Types[gc.TUINT64], 0)
@ -156,13 +156,13 @@ func clearfat(nl *gc.Node) {
n1.Xoffset++
}
regfree(&n1)
gc.Regfree(&n1)
return
}
var n1 gc.Node
gc.Nodreg(&n1, gc.Types[gc.Tptr], x86.REG_DI)
agen(nl, &n1)
gc.Agen(nl, &n1)
gconreg(x86.AMOVL, 0, x86.REG_AX)
if q > 128 || (q >= 4 && gc.Nacl) {
@ -189,312 +189,6 @@ func clearfat(nl *gc.Node) {
}
}
/*
* generate:
* call f
* proc=-1 normal call but no return
* proc=0 normal call
* proc=1 goroutine run in new proc
* proc=2 defer call save away stack
* proc=3 normal call to C pointer (not Go func value)
*/
func ginscall(f *gc.Node, proc int) {
if f.Type != nil {
extra := int32(0)
if proc == 1 || proc == 2 {
extra = 2 * int32(gc.Widthptr)
}
gc.Setmaxarg(f.Type, extra)
}
switch proc {
default:
gc.Fatal("ginscall: bad proc %d", proc)
case 0, // normal call
-1: // normal call but no return
if f.Op == gc.ONAME && f.Class == gc.PFUNC {
if f == gc.Deferreturn {
// Deferred calls will appear to be returning to
// the CALL deferreturn(SB) that we are about to emit.
// However, the stack trace code will show the line
// of the instruction byte before the return PC.
// To avoid that being an unrelated instruction,
// insert an x86 NOP so that we will have the right line number.
// The x86 NOP 0x90 is really XCHG AX, AX; use that description
// because the NOP pseudo-instruction will be removed by
// the linker.
var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
gins(x86.AXCHGL, &reg, &reg)
}
p := gins(obj.ACALL, nil, f)
gc.Afunclit(&p.To, f)
if proc == -1 || gc.Noreturn(p) {
gins(obj.AUNDEF, nil, nil)
}
break
}
var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.Tptr], x86.REG_DX)
var r1 gc.Node
gc.Nodreg(&r1, gc.Types[gc.Tptr], x86.REG_BX)
gmove(f, &reg)
reg.Op = gc.OINDREG
gmove(&reg, &r1)
reg.Op = gc.OREGISTER
gins(obj.ACALL, &reg, &r1)
case 3: // normal call of c function pointer
gins(obj.ACALL, nil, f)
case 1, // call in new proc (go)
2: // deferred call (defer)
var stk gc.Node
stk.Op = gc.OINDREG
stk.Val.U.Reg = x86.REG_SP
stk.Xoffset = 0
// size of arguments at 0(SP)
var con gc.Node
gc.Nodconst(&con, gc.Types[gc.TINT32], int64(gc.Argsize(f.Type)))
gins(x86.AMOVL, &con, &stk)
// FuncVal* at 4(SP)
stk.Xoffset = int64(gc.Widthptr)
gins(x86.AMOVL, f, &stk)
if proc == 1 {
ginscall(gc.Newproc, 0)
} else {
ginscall(gc.Deferproc, 0)
}
if proc == 2 {
var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.TINT32], x86.REG_AX)
gins(x86.ATESTL, &reg, &reg)
p := gc.Gbranch(x86.AJEQ, nil, +1)
cgen_ret(nil)
gc.Patch(p, gc.Pc)
}
}
}
/*
* n is call to interface method.
* generate res = n.
*/
func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
i := n.Left
if i.Op != gc.ODOTINTER {
gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
}
f := i.Right // field
if f.Op != gc.ONAME {
gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
}
i = i.Left // interface
if i.Addable == 0 {
var tmpi gc.Node
gc.Tempname(&tmpi, i.Type)
cgen(i, &tmpi)
i = &tmpi
}
gc.Genlist(n.List) // assign the args
// i is now addable, prepare an indirected
// register to hold its address.
var nodi gc.Node
igen(i, &nodi, res) // REG = &inter
var nodsp gc.Node
gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], x86.REG_SP)
nodsp.Xoffset = 0
if proc != 0 {
nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
}
nodi.Type = gc.Types[gc.Tptr]
nodi.Xoffset += int64(gc.Widthptr)
cgen(&nodi, &nodsp) // {0 or 8}(SP) = 4(REG) -- i.data
var nodo gc.Node
regalloc(&nodo, gc.Types[gc.Tptr], res)
nodi.Type = gc.Types[gc.Tptr]
nodi.Xoffset -= int64(gc.Widthptr)
cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
regfree(&nodi)
var nodr gc.Node
regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
if n.Left.Xoffset == gc.BADWIDTH {
gc.Fatal("cgen_callinter: badwidth")
}
gc.Cgen_checknil(&nodo)
nodo.Op = gc.OINDREG
nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
if proc == 0 {
// plain call: use direct c function pointer - more efficient
cgen(&nodo, &nodr) // REG = 20+offset(REG) -- i.tab->fun[f]
proc = 3
} else {
// go/defer. generate go func value.
gins(x86.ALEAL, &nodo, &nodr) // REG = &(20+offset(REG)) -- i.tab->fun[f]
}
nodr.Type = n.Left.Type
ginscall(&nodr, proc)
regfree(&nodr)
regfree(&nodo)
}
/*
* generate function call;
* proc=0 normal call
* proc=1 goroutine run in new proc
* proc=2 defer call save away stack
*/
func cgen_call(n *gc.Node, proc int) {
if n == nil {
return
}
var afun gc.Node
if n.Left.Ullman >= gc.UINF {
// if name involves a fn call
// precompute the address of the fn
gc.Tempname(&afun, gc.Types[gc.Tptr])
cgen(n.Left, &afun)
}
gc.Genlist(n.List) // assign the args
t := n.Left.Type
// call tempname pointer
if n.Left.Ullman >= gc.UINF {
var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, &afun)
nod.Type = t
ginscall(&nod, proc)
regfree(&nod)
return
}
// call pointer
if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, n.Left)
nod.Type = t
ginscall(&nod, proc)
regfree(&nod)
return
}
// call direct
n.Left.Method = 1
ginscall(n.Left, proc)
}
/*
* call to n has already been generated.
* generate:
* res = return value from call.
*/
func cgen_callret(n *gc.Node, res *gc.Node) {
t := n.Left.Type
if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
t = t.Type
}
var flist gc.Iter
fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_callret: nil")
}
var nod gc.Node
nod.Op = gc.OINDREG
nod.Val.U.Reg = x86.REG_SP
nod.Addable = 1
nod.Xoffset = fp.Width
nod.Type = fp.Type
gc.Cgen_as(res, &nod)
}
/*
* call to n has already been generated.
* generate:
* res = &return value from call.
*/
func cgen_aret(n *gc.Node, res *gc.Node) {
t := n.Left.Type
if gc.Isptr[t.Etype] {
t = t.Type
}
var flist gc.Iter
fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_aret: nil")
}
var nod1 gc.Node
nod1.Op = gc.OINDREG
nod1.Val.U.Reg = x86.REG_SP
nod1.Addable = 1
nod1.Xoffset = fp.Width
nod1.Type = fp.Type
if res.Op != gc.OREGISTER {
var nod2 gc.Node
regalloc(&nod2, gc.Types[gc.Tptr], res)
gins(x86.ALEAL, &nod1, &nod2)
gins(x86.AMOVL, &nod2, res)
regfree(&nod2)
} else {
gins(x86.ALEAL, &nod1, res)
}
}
/*
* generate return.
* n->left is assignments to return values.
*/
func cgen_ret(n *gc.Node) {
if n != nil {
gc.Genlist(n.List) // copy out args
}
if gc.Hasdefer != 0 {
ginscall(gc.Deferreturn, 0)
}
gc.Genlist(gc.Curfn.Exit)
p := gins(obj.ARET, nil, nil)
if n != nil && n.Op == gc.ORETJMP {
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Linksym(n.Left.Sym)
}
}
/*
* generate division.
* caller must set:
@ -545,23 +239,23 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
gc.Tempname(&t3, t0)
var t4 gc.Node
gc.Tempname(&t4, t0)
cgen(nl, &t3)
cgen(nr, &t4)
gc.Cgen(nl, &t3)
gc.Cgen(nr, &t4)
// Convert.
gmove(&t3, &t1)
gmove(&t4, &t2)
} else {
cgen(nl, &t1)
cgen(nr, &t2)
gc.Cgen(nl, &t1)
gc.Cgen(nr, &t2)
}
var n1 gc.Node
if !gc.Samereg(ax, res) && !gc.Samereg(dx, res) {
regalloc(&n1, t, res)
gc.Regalloc(&n1, t, res)
} else {
regalloc(&n1, t, nil)
gc.Regalloc(&n1, t, nil)
}
gmove(&t2, &n1)
gmove(&t1, ax)
@ -578,7 +272,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
if panicdiv == nil {
panicdiv = gc.Sysfunc("panicdivide")
}
ginscall(panicdiv, -1)
gc.Ginscall(panicdiv, -1)
gc.Patch(p1, gc.Pc)
}
@ -610,7 +304,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
gins(optoas(gc.OEXTEND, t), nil, nil)
}
gins(optoas(op, t), &n1, nil)
regfree(&n1)
gc.Regfree(&n1)
if op == gc.ODIV {
gmove(ax, res)
@ -635,11 +329,11 @@ func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
gmove(x, oldx)
}
regalloc(x, t, x)
gc.Regalloc(x, t, x)
}
func restx(x *gc.Node, oldx *gc.Node) {
regfree(x)
gc.Regfree(x)
if oldx.Op != 0 {
x.Type = gc.Types[gc.TINT32]
@ -691,9 +385,9 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nr.Op == gc.OLITERAL {
var n2 gc.Node
gc.Tempname(&n2, nl.Type)
cgen(nl, &n2)
gc.Cgen(nl, &n2)
var n1 gc.Node
regalloc(&n1, nl.Type, res)
gc.Regalloc(&n1, nl.Type, res)
gmove(&n2, &n1)
sc := uint64(gc.Mpgetfix(nr.Val.U.Xval))
if sc >= uint64(nl.Type.Width*8) {
@ -705,7 +399,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
gins(a, nr, &n1)
}
gmove(&n1, res)
regfree(&n1)
gc.Regfree(&n1)
return
}
@ -724,21 +418,21 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
n1 = nt
} else {
gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
gc.Regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
}
var n2 gc.Node
if gc.Samereg(&cx, res) {
regalloc(&n2, nl.Type, nil)
gc.Regalloc(&n2, nl.Type, nil)
} else {
regalloc(&n2, nl.Type, res)
gc.Regalloc(&n2, nl.Type, res)
}
if nl.Ullman >= nr.Ullman {
cgen(nl, &n2)
cgen(nr, &n1)
gc.Cgen(nl, &n2)
gc.Cgen(nr, &n1)
} else {
cgen(nr, &n1)
cgen(nl, &n2)
gc.Cgen(nr, &n1)
gc.Cgen(nl, &n2)
}
// test and fix up large shifts
@ -747,7 +441,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// delayed reg alloc
gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
var lo gc.Node
var hi gc.Node
split64(&nt, &lo, &hi)
@ -760,7 +454,7 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// delayed reg alloc
gc.Nodreg(&n1, gc.Types[gc.TUINT32], x86.REG_CX)
regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
gc.Regalloc(&n1, gc.Types[gc.TUINT32], &n1) // to hold the shift type in CX
var lo gc.Node
var hi gc.Node
split64(&nt, &lo, &hi)
@ -793,8 +487,8 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
gmove(&n2, res)
regfree(&n1)
regfree(&n2)
gc.Regfree(&n1)
gc.Regfree(&n2)
}
/*
@ -803,7 +497,11 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
* there is no 2-operand byte multiply instruction so
* we do a full-width multiplication and truncate afterwards.
*/
func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) bool {
if optoas(op, nl.Type) != x86.AIMULB {
return false
}
// copy from byte to full registers
t := gc.Types[gc.TUINT32]
@ -820,18 +518,20 @@ func cgen_bmul(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var nt gc.Node
gc.Tempname(&nt, nl.Type)
cgen(nl, &nt)
gc.Cgen(nl, &nt)
var n1 gc.Node
regalloc(&n1, t, res)
cgen(nr, &n1)
gc.Regalloc(&n1, t, res)
gc.Cgen(nr, &n1)
var n2 gc.Node
regalloc(&n2, t, nil)
gc.Regalloc(&n2, t, nil)
gmove(&nt, &n2)
a := optoas(op, t)
gins(a, &n2, &n1)
regfree(&n2)
gc.Regfree(&n2)
gmove(&n1, res)
regfree(&n1)
gc.Regfree(&n1)
return true
}
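The full-width-then-truncate strategy above, as a worked sketch in plain Go (values illustrative):

	var x, y uint8 = 200, 3
	full := uint32(x) * uint32(y) // 600; there is no 2-operand byte multiply
	res := uint8(full)            // 600 mod 256 = 88, the correct uint8 product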
/*
@ -850,19 +550,19 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
// gen nl in n1.
gc.Tempname(&n1, t)
cgen(nl, &n1)
gc.Cgen(nl, &n1)
// gen nr in n2.
regalloc(&n2, t, res)
gc.Regalloc(&n2, t, res)
cgen(nr, &n2)
gc.Cgen(nr, &n2)
// multiply.
gc.Nodreg(&ax, t, x86.REG_AX)
gmove(&n2, &ax)
gins(a, &n1, nil)
regfree(&n2)
gc.Regfree(&n2)
if t.Width == 1 {
// byte multiply behaves differently.
@ -892,28 +592,28 @@ func cgen_float(n *gc.Node, res *gc.Node) {
gmove(gc.Nodbool(true), res)
p3 := gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n, true, 0, p2)
gc.Bgen(n, true, 0, p2)
gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
return
case gc.OPLUS:
cgen(nl, res)
gc.Cgen(nl, res)
return
case gc.OCONV:
if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
cgen(nl, res)
gc.Cgen(nl, res)
return
}
var n2 gc.Node
gc.Tempname(&n2, n.Type)
var n1 gc.Node
mgen(nl, &n1, res)
gc.Mgen(nl, &n1, res)
gmove(&n1, &n2)
gmove(&n2, res)
mfree(&n1)
gc.Mfree(&n1)
return
}
@ -936,19 +636,19 @@ func cgen_float387(n *gc.Node, res *gc.Node) {
if nr != nil {
// binary
if nl.Ullman >= nr.Ullman {
cgen(nl, &f0)
gc.Cgen(nl, &f0)
if nr.Addable != 0 {
gins(foptoas(int(n.Op), n.Type, 0), nr, &f0)
} else {
cgen(nr, &f0)
gc.Cgen(nr, &f0)
gins(foptoas(int(n.Op), n.Type, Fpop), &f0, &f1)
}
} else {
cgen(nr, &f0)
gc.Cgen(nr, &f0)
if nl.Addable != 0 {
gins(foptoas(int(n.Op), n.Type, Frev), nl, &f0)
} else {
cgen(nl, &f0)
gc.Cgen(nl, &f0)
gins(foptoas(int(n.Op), n.Type, Frev|Fpop), &f0, &f1)
}
}
@ -958,7 +658,7 @@ func cgen_float387(n *gc.Node, res *gc.Node) {
}
// unary
cgen(nl, &f0)
gc.Cgen(nl, &f0)
if n.Op != gc.OCONV && n.Op != gc.OPLUS {
gins(foptoas(int(n.Op), n.Type, 0), nil, nil)
@ -1012,27 +712,27 @@ abop: // asymmetric binary
if nl.Ullman >= nr.Ullman {
var nt gc.Node
gc.Tempname(&nt, nl.Type)
cgen(nl, &nt)
gc.Cgen(nl, &nt)
var n2 gc.Node
mgen(nr, &n2, nil)
gc.Mgen(nr, &n2, nil)
var n1 gc.Node
regalloc(&n1, nl.Type, res)
gc.Regalloc(&n1, nl.Type, res)
gmove(&nt, &n1)
gins(a, &n2, &n1)
gmove(&n1, res)
regfree(&n1)
mfree(&n2)
gc.Regfree(&n1)
gc.Mfree(&n2)
} else {
var n2 gc.Node
regalloc(&n2, nr.Type, res)
cgen(nr, &n2)
gc.Regalloc(&n2, nr.Type, res)
gc.Cgen(nr, &n2)
var n1 gc.Node
regalloc(&n1, nl.Type, nil)
cgen(nl, &n1)
gc.Regalloc(&n1, nl.Type, nil)
gc.Cgen(nl, &n1)
gins(a, &n2, &n1)
regfree(&n2)
gc.Regfree(&n2)
gmove(&n1, res)
regfree(&n1)
gc.Regfree(&n1)
}
return
@ -1065,25 +765,25 @@ func bgen_float(n *gc.Node, true_ int, likely int, to *obj.Prog) {
if nl.Addable == 0 {
var n1 gc.Node
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
gc.Cgen(nl, &n1)
nl = &n1
}
if nr.Addable == 0 {
var tmp gc.Node
gc.Tempname(&tmp, nr.Type)
cgen(nr, &tmp)
gc.Cgen(nr, &tmp)
nr = &tmp
}
var n2 gc.Node
regalloc(&n2, nr.Type, nil)
gc.Regalloc(&n2, nr.Type, nil)
gmove(nr, &n2)
nr = &n2
if nl.Op != gc.OREGISTER {
var n3 gc.Node
regalloc(&n3, nl.Type, nil)
gc.Regalloc(&n3, nl.Type, nil)
gmove(nl, &n3)
nl = &n3
}
@ -1099,9 +799,9 @@ func bgen_float(n *gc.Node, true_ int, likely int, to *obj.Prog) {
gins(foptoas(gc.OCMP, nr.Type, 0), nl, nr)
if nl.Op == gc.OREGISTER {
regfree(nl)
gc.Regfree(nl)
}
regfree(nr)
gc.Regfree(nr)
goto ret
} else {
goto x87
@ -1124,12 +824,12 @@ x87:
et = gc.Simsimtype(nr.Type)
if et == gc.TFLOAT64 {
if nl.Ullman > nr.Ullman {
cgen(nl, &tmp)
cgen(nr, &tmp)
gc.Cgen(nl, &tmp)
gc.Cgen(nr, &tmp)
gins(x86.AFXCHD, &tmp, &n2)
} else {
cgen(nr, &tmp)
cgen(nl, &tmp)
gc.Cgen(nr, &tmp)
gc.Cgen(nl, &tmp)
}
gins(x86.AFUCOMIP, &tmp, &n2)
@ -1146,8 +846,8 @@ x87:
var t2 gc.Node
gc.Tempname(&t2, gc.Types[gc.TFLOAT32])
cgen(nr, &t1)
cgen(nl, &t2)
gc.Cgen(nr, &t1)
gc.Cgen(nl, &t2)
gmove(&t2, &tmp)
gins(x86.AFCOMFP, &t1, &tmp)
gins(x86.AFSTSW, nil, &ax)
@ -1230,3 +930,17 @@ func expandchecks(firstp *obj.Prog) {
p2.To.Offset = 0
}
}
// addr += index*width if possible.
func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
switch width {
case 1, 2, 4, 8:
p1 := gins(x86.ALEAL, index, addr)
p1.From.Type = obj.TYPE_MEM
p1.From.Scale = int16(width)
p1.From.Index = p1.From.Reg
p1.From.Reg = p1.To.Reg
return true
}
return false
}


@ -550,184 +550,6 @@ var resvd = []int{
x86.REG_CX, // for shift
x86.REG_DX, // for divide
x86.REG_SP, // for stack
x86.REG_BL, // because REG_BX can be allocated
x86.REG_BH,
}
func ginit() {
for i := 0; i < len(reg); i++ {
reg[i] = 1
}
for i := x86.REG_AX; i <= x86.REG_DI; i++ {
reg[i] = 0
}
for i := x86.REG_X0; i <= x86.REG_X7; i++ {
reg[i] = 0
}
for i := 0; i < len(resvd); i++ {
reg[resvd[i]]++
}
}
var regpc [x86.MAXREG]uint32
func gclean() {
for i := 0; i < len(resvd); i++ {
reg[resvd[i]]--
}
for i := x86.REG_AX; i <= x86.REG_DI; i++ {
if reg[i] != 0 {
gc.Yyerror("reg %v left allocated at %x", obj.Rconv(i), regpc[i])
}
}
for i := x86.REG_X0; i <= x86.REG_X7; i++ {
if reg[i] != 0 {
gc.Yyerror("reg %v left allocated\n", obj.Rconv(i))
}
}
}
func anyregalloc() bool {
var j int
for i := x86.REG_AX; i <= x86.REG_DI; i++ {
if reg[i] == 0 {
goto ok
}
for j = 0; j < len(resvd); j++ {
if resvd[j] == i {
goto ok
}
}
return true
ok:
}
for i := x86.REG_X0; i <= x86.REG_X7; i++ {
if reg[i] != 0 {
return true
}
}
return false
}
/*
* allocate register of type t, leave in n.
* if o != N, o is desired fixed register.
* caller must regfree(n).
*/
func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
if t == nil {
gc.Fatal("regalloc: t nil")
}
et := int(gc.Simtype[t.Etype])
var i int
switch et {
case gc.TINT64,
gc.TUINT64:
gc.Fatal("regalloc64")
case gc.TINT8,
gc.TUINT8,
gc.TINT16,
gc.TUINT16,
gc.TINT32,
gc.TUINT32,
gc.TPTR32,
gc.TPTR64,
gc.TBOOL:
if o != nil && o.Op == gc.OREGISTER {
i = int(o.Val.U.Reg)
if i >= x86.REG_AX && i <= x86.REG_DI {
goto out
}
}
for i = x86.REG_AX; i <= x86.REG_DI; i++ {
if reg[i] == 0 {
goto out
}
}
fmt.Printf("registers allocated at\n")
for i := x86.REG_AX; i <= x86.REG_DI; i++ {
fmt.Printf("\t%v\t%#x\n", obj.Rconv(i), regpc[i])
}
gc.Fatal("out of fixed registers")
goto err
case gc.TFLOAT32,
gc.TFLOAT64:
if gc.Use_sse == 0 {
i = x86.REG_F0
goto out
}
if o != nil && o.Op == gc.OREGISTER {
i = int(o.Val.U.Reg)
if i >= x86.REG_X0 && i <= x86.REG_X7 {
goto out
}
}
for i = x86.REG_X0; i <= x86.REG_X7; i++ {
if reg[i] == 0 {
goto out
}
}
fmt.Printf("registers allocated at\n")
for i := x86.REG_X0; i <= x86.REG_X7; i++ {
fmt.Printf("\t%v\t%#x\n", obj.Rconv(i), regpc[i])
}
gc.Fatal("out of floating registers")
}
gc.Yyerror("regalloc: unknown type %v", gc.Tconv(t, 0))
err:
gc.Nodreg(n, t, 0)
return
out:
if i == x86.REG_SP {
fmt.Printf("alloc SP\n")
}
if reg[i] == 0 {
regpc[i] = uint32(obj.Getcallerpc(&n))
if i == x86.REG_AX || i == x86.REG_CX || i == x86.REG_DX || i == x86.REG_SP {
gc.Dump("regalloc-o", o)
gc.Fatal("regalloc %v", obj.Rconv(i))
}
}
reg[i]++
gc.Nodreg(n, t, i)
}
func regfree(n *gc.Node) {
if n.Op == gc.ONAME {
return
}
if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
gc.Fatal("regfree: not a register")
}
i := int(n.Val.U.Reg)
if i == x86.REG_SP {
return
}
if i < 0 || i >= len(reg) {
gc.Fatal("regfree: reg out of range")
}
if reg[i] <= 0 {
gc.Fatal("regfree: reg not allocated")
}
reg[i]--
if reg[i] == 0 && (i == x86.REG_AX || i == x86.REG_CX || i == x86.REG_DX || i == x86.REG_SP) {
gc.Fatal("regfree %v", obj.Rconv(i))
}
}
/*
@ -743,6 +565,16 @@ func gconreg(as int, c int64, reg int) {
gins(as, &n1, &n2)
}
/*
* generate
* as $c, n
*/
func ginscon(as int, c int64, n2 *gc.Node) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
gins(as, &n1, n2)
}
/*
* swap node contents
*/
@ -790,7 +622,7 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
default:
var n1 gc.Node
if !dotaddable(n, &n1) {
igen(n, &n1, nil)
gc.Igen(n, &n1, nil)
sclean[nsclean-1] = n1
}
@ -799,7 +631,7 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
case gc.ONAME:
if n.Class == gc.PPARAMREF {
var n1 gc.Node
cgen(n.Heapaddr, &n1)
gc.Cgen(n.Heapaddr, &n1)
sclean[nsclean-1] = n1
n = &n1
}
@ -839,7 +671,7 @@ func splitclean() {
}
nsclean--
if sclean[nsclean].Op != gc.OEMPTY {
regfree(&sclean[nsclean])
gc.Regfree(&sclean[nsclean])
}
}
@ -1139,31 +971,31 @@ func gmove(f *gc.Node, t *gc.Node) {
// requires register source
rsrc:
regalloc(&r1, f.Type, t)
gc.Regalloc(&r1, f.Type, t)
gmove(f, &r1)
gins(a, &r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
// requires register destination
rdst:
{
regalloc(&r1, t.Type, t)
gc.Regalloc(&r1, t.Type, t)
gins(a, f, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
}
// requires register intermediate
hard:
regalloc(&r1, cvt, t)
gc.Regalloc(&r1, cvt, t)
gmove(f, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
}
@ -1408,11 +1240,11 @@ func floatmove(f *gc.Node, t *gc.Node) {
// requires register intermediate
hard:
regalloc(&r1, cvt, t)
gc.Regalloc(&r1, cvt, t)
gmove(f, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
// requires memory intermediate
@ -1652,11 +1484,11 @@ func floatmove_387(f *gc.Node, t *gc.Node) {
// requires register intermediate
hard:
regalloc(&r1, cvt, t)
gc.Regalloc(&r1, cvt, t)
gmove(f, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
// requires memory intermediate
@ -1774,11 +1606,11 @@ func floatmove_sse(f *gc.Node, t *gc.Node) {
// requires register intermediate
hard:
regalloc(&r1, cvt, t)
gc.Regalloc(&r1, cvt, t)
gmove(f, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
// requires memory intermediate
@ -1791,11 +1623,11 @@ hardmem:
// requires register destination
rdst:
regalloc(&r1, t.Type, t)
gc.Regalloc(&r1, t.Type, t)
gins(a, f, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
}
@ -1830,6 +1662,15 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
gc.Fatal("gins MOVSD into F0")
}
if as == x86.AMOVL && f != nil && f.Op == gc.OADDR && f.Left.Op == gc.ONAME && f.Left.Class != gc.PEXTERN && f.Left.Class != gc.PFUNC {
// Turn MOVL $xxx(FP/SP) into LEAL xxx.
// These should be equivalent but most of the backend
// only expects to see LEAL, because that's what we had
// historically generated. Various hidden assumptions are baked in by now.
as = x86.ALEAL
f = f.Left
}
switch as {
case x86.AMOVB,
x86.AMOVW,
@ -1877,6 +1718,12 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
return p
}
func ginsnop() {
var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
gins(x86.AXCHGL, &reg, &reg)
}
func dotaddable(n *gc.Node, n1 *gc.Node) bool {
if n.Op != gc.ODOT {
return false


@ -672,7 +672,7 @@ func copyas(a *obj.Addr, v *obj.Addr) bool {
if regtyp(v) {
return true
}
if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
if (v.Type == obj.TYPE_MEM || v.Type == obj.TYPE_ADDR) && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
if v.Offset == a.Offset {
return true
}
@ -687,7 +687,7 @@ func sameaddr(a *obj.Addr, v *obj.Addr) bool {
if regtyp(v) {
return true
}
if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
if (v.Type == obj.TYPE_MEM || v.Type == obj.TYPE_ADDR) && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
if v.Offset == a.Offset {
return true
}
@ -703,7 +703,7 @@ func copyau(a *obj.Addr, v *obj.Addr) bool {
return true
}
if regtyp(v) {
if a.Type == obj.TYPE_MEM && a.Reg == v.Reg {
if (a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR) && a.Reg == v.Reg {
return true
}
if a.Index == v.Reg {
@ -732,7 +732,7 @@ func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
if regtyp(v) {
reg := int(v.Reg)
if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
if (a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR) && int(a.Reg) == reg {
if (s.Reg == x86.REG_BP) && a.Index != obj.TYPE_NONE {
return 1 /* can't use BP-base with index */
}
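	// Illustrative operand shapes behind the TYPE_ADDR additions above
	// (not part of the CL):
	//	MOVL x+4(SP), AX  -> From.Type == obj.TYPE_MEM,  From.Name == obj.NAME_AUTO
	//	LEAL x+4(SP), AX  -> From.Type == obj.TYPE_ADDR, From.Name == obj.NAME_AUTO
	// Since gins now rewrites MOVL $x into LEAL x, the peephole passes must
	// track TYPE_ADDR references to a register or stack slot like TYPE_MEM ones.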

File diff suppressed because it is too large

@ -53,33 +53,38 @@ func main() {
gc.Thearch.Typedefs = typedefs
gc.Thearch.REGSP = ppc64.REGSP
gc.Thearch.REGCTXT = ppc64.REGCTXT
gc.Thearch.REGCALLX = ppc64.REG_R3
gc.Thearch.REGCALLX2 = ppc64.REG_R4
gc.Thearch.REGRETURN = ppc64.REG_R3
gc.Thearch.REGMIN = ppc64.REG_R0
gc.Thearch.REGMAX = ppc64.REG_R31
gc.Thearch.FREGMIN = ppc64.REG_F0
gc.Thearch.FREGMAX = ppc64.REG_F31
gc.Thearch.MAXWIDTH = MAXWIDTH
gc.Thearch.Anyregalloc = anyregalloc
gc.Thearch.ReservedRegs = resvd
gc.Thearch.Betypeinit = betypeinit
gc.Thearch.Bgen = bgen
gc.Thearch.Cgen = cgen
gc.Thearch.Cgen_call = cgen_call
gc.Thearch.Cgen_callinter = cgen_callinter
gc.Thearch.Cgen_ret = cgen_ret
gc.Thearch.Cgen_hmul = cgen_hmul
gc.Thearch.Cgen_shift = cgen_shift
gc.Thearch.Clearfat = clearfat
gc.Thearch.Defframe = defframe
gc.Thearch.Dodiv = dodiv
gc.Thearch.Excise = excise
gc.Thearch.Expandchecks = expandchecks
gc.Thearch.Gclean = gclean
gc.Thearch.Ginit = ginit
gc.Thearch.Gins = gins
gc.Thearch.Ginscall = ginscall
gc.Thearch.Ginscon = ginscon
gc.Thearch.Ginsnop = ginsnop
gc.Thearch.Gmove = gmove
gc.Thearch.Igen = igen
gc.Thearch.Linkarchinit = linkarchinit
gc.Thearch.Peep = peep
gc.Thearch.Proginfo = proginfo
gc.Thearch.Regalloc = regalloc
gc.Thearch.Regfree = regfree
gc.Thearch.Regtyp = regtyp
gc.Thearch.Sameaddr = sameaddr
gc.Thearch.Smallindir = smallindir
gc.Thearch.Stackaddr = stackaddr
gc.Thearch.Stackcopy = stackcopy
gc.Thearch.Sudoaddable = sudoaddable
gc.Thearch.Sudoclean = sudoclean
gc.Thearch.Excludedregs = excludedregs
gc.Thearch.RtoB = RtoB
gc.Thearch.FtoB = RtoB


@ -1,28 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "cmd/internal/obj/ppc64"
import "cmd/internal/gc"
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
var reg [ppc64.NREG + ppc64.NFREG]uint8
var panicdiv *gc.Node
/*
* cgen.c
*/
/*
* list.c
*/
/*
* reg.c
*/


@ -112,333 +112,13 @@ func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int
return q
}
/*
* generate: BL reg, f
* where both reg and f are registers.
* On power, f must be moved to CTR first.
*/
func ginsBL(reg *gc.Node, f *gc.Node) {
p := gins(ppc64.AMOVD, f, nil)
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REG_CTR
p = gins(ppc64.ABL, reg, nil)
p.To.Type = obj.TYPE_REG
	p.To.Reg = ppc64.REG_CTR
}
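A sketch of the two-instruction sequence the comment above describes (illustrative): ppc64 has no direct register-indirect call, so the target first moves into the CTR special register.

	// MOVD f, CTR
	// BL   (CTR)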
func ginsnop() {
var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
gins(ppc64.AOR, &reg, &reg)
}
/*
* generate:
* call f
* proc=-1 normal call but no return
* proc=0 normal call
* proc=1 goroutine run in new proc
* proc=2 defer call save away stack
* proc=3 normal call to C pointer (not Go func value)
*/
func ginscall(f *gc.Node, proc int) {
if f.Type != nil {
extra := int32(0)
if proc == 1 || proc == 2 {
extra = 2 * int32(gc.Widthptr)
}
gc.Setmaxarg(f.Type, extra)
}
switch proc {
default:
gc.Fatal("ginscall: bad proc %d", proc)
case 0, // normal call
-1: // normal call but no return
if f.Op == gc.ONAME && f.Class == gc.PFUNC {
if f == gc.Deferreturn {
// Deferred calls will appear to be returning to
// the CALL deferreturn(SB) that we are about to emit.
// However, the stack trace code will show the line
// of the instruction byte before the return PC.
// To avoid that being an unrelated instruction,
// insert a ppc64 NOP so that we will have the right line number.
// The ppc64 NOP is really or r0, r0, r0; use that description
// because the NOP pseudo-instruction would be removed by
// the linker.
var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.TINT], ppc64.REG_R0)
gins(ppc64.AOR, &reg, &reg)
}
p := gins(ppc64.ABL, nil, f)
gc.Afunclit(&p.To, f)
if proc == -1 || gc.Noreturn(p) {
gins(obj.AUNDEF, nil, nil)
}
break
}
var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.Tptr], ppc64.REGCTXT)
var r1 gc.Node
gc.Nodreg(&r1, gc.Types[gc.Tptr], ppc64.REG_R3)
gmove(f, &reg)
reg.Op = gc.OINDREG
gmove(&reg, &r1)
reg.Op = gc.OREGISTER
ginsBL(&reg, &r1)
case 3: // normal call of c function pointer
ginsBL(nil, f)
case 1, // call in new proc (go)
2: // deferred call (defer)
var con gc.Node
gc.Nodconst(&con, gc.Types[gc.TINT64], int64(gc.Argsize(f.Type)))
var reg gc.Node
gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
var reg2 gc.Node
gc.Nodreg(&reg2, gc.Types[gc.TINT64], ppc64.REG_R4)
gmove(f, &reg)
gmove(&con, &reg2)
p := gins(ppc64.AMOVW, &reg2, nil)
p.To.Type = obj.TYPE_MEM
p.To.Reg = ppc64.REGSP
p.To.Offset = 8
p = gins(ppc64.AMOVD, &reg, nil)
p.To.Type = obj.TYPE_MEM
p.To.Reg = ppc64.REGSP
p.To.Offset = 16
if proc == 1 {
ginscall(gc.Newproc, 0)
} else {
if gc.Hasdefer == 0 {
gc.Fatal("hasdefer=0 but has defer")
}
ginscall(gc.Deferproc, 0)
}
if proc == 2 {
gc.Nodreg(&reg, gc.Types[gc.TINT64], ppc64.REG_R3)
p := gins(ppc64.ACMP, &reg, nil)
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGZERO
p = gc.Gbranch(ppc64.ABEQ, nil, +1)
cgen_ret(nil)
gc.Patch(p, gc.Pc)
}
}
}
/*
* n is call to interface method.
* generate res = n.
*/
func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
i := n.Left
if i.Op != gc.ODOTINTER {
gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
}
f := i.Right // field
if f.Op != gc.ONAME {
gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
}
i = i.Left // interface
if i.Addable == 0 {
var tmpi gc.Node
gc.Tempname(&tmpi, i.Type)
cgen(i, &tmpi)
i = &tmpi
}
gc.Genlist(n.List) // assign the args
// i is now addable, prepare an indirected
// register to hold its address.
var nodi gc.Node
igen(i, &nodi, res) // REG = &inter
var nodsp gc.Node
gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], ppc64.REGSP)
nodsp.Xoffset = int64(gc.Widthptr)
if proc != 0 {
nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
}
nodi.Type = gc.Types[gc.Tptr]
nodi.Xoffset += int64(gc.Widthptr)
cgen(&nodi, &nodsp) // {8 or 24}(SP) = 8(REG) -- i.data
var nodo gc.Node
regalloc(&nodo, gc.Types[gc.Tptr], res)
nodi.Type = gc.Types[gc.Tptr]
nodi.Xoffset -= int64(gc.Widthptr)
cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
regfree(&nodi)
var nodr gc.Node
regalloc(&nodr, gc.Types[gc.Tptr], &nodo)
if n.Left.Xoffset == gc.BADWIDTH {
gc.Fatal("cgen_callinter: badwidth")
}
gc.Cgen_checknil(&nodo) // in case offset is huge
nodo.Op = gc.OINDREG
nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8
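// A reading of the offset above, assuming the runtime's itab layout of
// this era: 3*Widthptr skips three pointer fields (inter, _type, link),
// the +8 skips two int32s (bad, unused), so nodo now addresses
// i.tab->fun[f], with Xoffset supplying f's slot within fun.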
if proc == 0 {
// plain call: use direct c function pointer - more efficient
cgen(&nodo, &nodr) // REG = 32+offset(REG) -- i.tab->fun[f]
proc = 3
} else {
// go/defer. generate go func value.
p := gins(ppc64.AMOVD, &nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
p.From.Type = obj.TYPE_ADDR
}
nodr.Type = n.Left.Type
ginscall(&nodr, proc)
regfree(&nodr)
regfree(&nodo)
}
/*
* generate function call;
* proc=0 normal call
* proc=1 goroutine run in new proc
* proc=2 defer call save away stack
*/
func cgen_call(n *gc.Node, proc int) {
if n == nil {
return
}
var afun gc.Node
if n.Left.Ullman >= gc.UINF {
// if name involves a fn call
// precompute the address of the fn
gc.Tempname(&afun, gc.Types[gc.Tptr])
cgen(n.Left, &afun)
}
gc.Genlist(n.List) // assign the args
t := n.Left.Type
// call tempname pointer
if n.Left.Ullman >= gc.UINF {
var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, &afun)
nod.Type = t
ginscall(&nod, proc)
regfree(&nod)
return
}
// call pointer
if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
var nod gc.Node
regalloc(&nod, gc.Types[gc.Tptr], nil)
gc.Cgen_as(&nod, n.Left)
nod.Type = t
ginscall(&nod, proc)
regfree(&nod)
return
}
// call direct
n.Left.Method = 1
ginscall(n.Left, proc)
}
/*
* call to n has already been generated.
* generate:
* res = return value from call.
*/
func cgen_callret(n *gc.Node, res *gc.Node) {
t := n.Left.Type
if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
t = t.Type
}
var flist gc.Iter
fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_callret: nil")
}
var nod gc.Node
nod.Op = gc.OINDREG
nod.Val.U.Reg = ppc64.REGSP
nod.Addable = 1
nod.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved LR at 0(R1)
nod.Type = fp.Type
gc.Cgen_as(res, &nod)
}
/*
* call to n has already been generated.
* generate:
* res = &return value from call.
*/
func cgen_aret(n *gc.Node, res *gc.Node) {
t := n.Left.Type
if gc.Isptr[t.Etype] {
t = t.Type
}
var flist gc.Iter
fp := gc.Structfirst(&flist, gc.Getoutarg(t))
if fp == nil {
gc.Fatal("cgen_aret: nil")
}
var nod1 gc.Node
nod1.Op = gc.OINDREG
nod1.Val.U.Reg = ppc64.REGSP
nod1.Addable = 1
nod1.Xoffset = fp.Width + int64(gc.Widthptr) // +widthptr: saved lr at 0(SP)
nod1.Type = fp.Type
if res.Op != gc.OREGISTER {
var nod2 gc.Node
regalloc(&nod2, gc.Types[gc.Tptr], res)
agen(&nod1, &nod2)
gins(ppc64.AMOVD, &nod2, res)
regfree(&nod2)
} else {
agen(&nod1, res)
}
}
/*
* generate return.
* n->left is assignments to return values.
*/
func cgen_ret(n *gc.Node) {
if n != nil {
gc.Genlist(n.List) // copy out args
}
if gc.Hasdefer != 0 {
ginscall(gc.Deferreturn, 0)
}
gc.Genlist(gc.Curfn.Exit)
p := gins(obj.ARET, nil, nil)
if n != nil && n.Op == gc.ORETJMP {
p.To.Name = obj.NAME_EXTERN
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Linksym(n.Left.Sym)
}
}
var panicdiv *gc.Node
/*
* generate division.
@ -480,15 +160,15 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
a := optoas(gc.ODIV, t)
var tl gc.Node
regalloc(&tl, t0, nil)
gc.Regalloc(&tl, t0, nil)
var tr gc.Node
regalloc(&tr, t0, nil)
gc.Regalloc(&tr, t0, nil)
if nl.Ullman >= nr.Ullman {
cgen(nl, &tl)
cgen(nr, &tr)
gc.Cgen(nl, &tl)
gc.Cgen(nr, &tr)
} else {
cgen(nr, &tr)
cgen(nl, &tl)
gc.Cgen(nr, &tr)
gc.Cgen(nl, &tl)
}
if t != t0 {
@ -511,7 +191,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if panicdiv == nil {
panicdiv = gc.Sysfunc("panicdivide")
}
ginscall(panicdiv, -1)
gc.Ginscall(panicdiv, -1)
gc.Patch(p1, gc.Pc)
var p2 *obj.Prog
@ -539,12 +219,12 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
p1 = gins(a, &tr, &tl)
if op == gc.ODIV {
regfree(&tr)
gc.Regfree(&tr)
gmove(&tl, res)
} else {
// A%B = A-(A/B*B)
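// e.g. 7%3 = 7 - (7/3)*3 = 7 - 2*3 = 1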
var tm gc.Node
regalloc(&tm, t, nil)
gc.Regalloc(&tm, t, nil)
// patch div to use the 3 register form
// TODO(minux): add gins3?
@ -552,32 +232,18 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
p1.To.Reg = tm.Val.U.Reg
gins(optoas(gc.OMUL, t), &tr, &tm)
regfree(&tr)
gc.Regfree(&tr)
gins(optoas(gc.OSUB, t), &tm, &tl)
regfree(&tm)
gc.Regfree(&tm)
gmove(&tl, res)
}
regfree(&tl)
gc.Regfree(&tl)
if check != 0 {
gc.Patch(p2, gc.Pc)
}
}
/*
* generate division according to op, one of:
* res = nl / nr
* res = nl % nr
*/
func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
// TODO(minux): enable division by magic multiply (also need to fix longmod below)
//if(nr->op != OLITERAL)
// division and mod using (slow) hardware instruction
dodiv(op, nl, nr, res)
return
}
/*
* generate high multiply:
* res = (nl*nr) >> width
@ -593,9 +259,9 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
t := (*gc.Type)(nl.Type)
w := int(int(t.Width * 8))
var n1 gc.Node
cgenr(nl, &n1, res)
gc.Cgenr(nl, &n1, res)
var n2 gc.Node
cgenr(nr, &n2, nil)
gc.Cgenr(nr, &n2, nil)
switch gc.Simtype[t.Etype] {
case gc.TINT8,
gc.TINT16,
@ -625,9 +291,9 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
gc.Fatal("cgen_hmul %v", gc.Tconv(t, 0))
}
cgen(&n1, res)
regfree(&n1)
regfree(&n2)
gc.Cgen(&n1, res)
gc.Regfree(&n1)
gc.Regfree(&n2)
}
/*
@ -640,8 +306,8 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
if nr.Op == gc.OLITERAL {
var n1 gc.Node
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
gc.Regalloc(&n1, nl.Type, res)
gc.Cgen(nl, &n1)
sc := uint64(uint64(gc.Mpgetfix(nr.Val.U.Xval)))
if sc >= uint64(nl.Type.Width*8) {
// large shift gets 2 shifts by width-1
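// e.g. a signed 32-bit shift right by 40 is done as two shifts by 31,
// yielding 0 or -1 as the sign demands; unsigned and left shifts reach 0.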
@ -654,21 +320,21 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
gins(a, nr, &n1)
}
gmove(&n1, res)
regfree(&n1)
gc.Regfree(&n1)
return
}
if nl.Ullman >= gc.UINF {
var n4 gc.Node
gc.Tempname(&n4, nl.Type)
cgen(nl, &n4)
gc.Cgen(nl, &n4)
nl = &n4
}
if nr.Ullman >= gc.UINF {
var n5 gc.Node
gc.Tempname(&n5, nr.Type)
cgen(nr, &n5)
gc.Cgen(nr, &n5)
nr = &n5
}
@ -682,24 +348,24 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
}
var n1 gc.Node
regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
gc.Regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
var n3 gc.Node
regalloc(&n3, tcount, &n1) // to clear high bits of CX
gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX
var n2 gc.Node
regalloc(&n2, nl.Type, res)
gc.Regalloc(&n2, nl.Type, res)
if nl.Ullman >= nr.Ullman {
cgen(nl, &n2)
cgen(nr, &n1)
gc.Cgen(nl, &n2)
gc.Cgen(nr, &n1)
gmove(&n1, &n3)
} else {
cgen(nr, &n1)
gc.Cgen(nr, &n1)
gmove(&n1, &n3)
cgen(nl, &n2)
gc.Cgen(nl, &n2)
}
regfree(&n3)
gc.Regfree(&n3)
// test and fix up large shifts
if !bounded {
@ -721,8 +387,8 @@ func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
gmove(&n2, res)
regfree(&n1)
regfree(&n2)
gc.Regfree(&n1)
gc.Regfree(&n2)
}
func clearfat(nl *gc.Node) {
@ -741,16 +407,16 @@ func clearfat(nl *gc.Node) {
c := uint64(w % 8) // bytes
q := uint64(w / 8) // dwords
if reg[ppc64.REGRT1-ppc64.REG_R0] > 0 {
gc.Fatal("R%d in use during clearfat", ppc64.REGRT1-ppc64.REG_R0)
if gc.Reginuse(ppc64.REGRT1) {
gc.Fatal("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
}
var r0 gc.Node
gc.Nodreg(&r0, gc.Types[gc.TUINT64], ppc64.REGZERO)
var dst gc.Node
gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1)
reg[ppc64.REGRT1-ppc64.REG_R0]++
agen(nl, &dst)
gc.Regrealloc(&dst)
gc.Agen(nl, &dst)
var boff uint64
if q > 128 {
@ -759,7 +425,7 @@ func clearfat(nl *gc.Node) {
p.From.Offset = 8
var end gc.Node
regalloc(&end, gc.Types[gc.Tptr], nil)
gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
p = gins(ppc64.AMOVD, &dst, &end)
p.From.Type = obj.TYPE_ADDR
p.From.Offset = int64(q * 8)
@ -772,7 +438,7 @@ func clearfat(nl *gc.Node) {
p = gins(ppc64.ACMP, &dst, &end)
gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)
regfree(&end)
gc.Regfree(&end)
// The loop leaves R3 on the last zeroed dword
boff = 8
@ -807,7 +473,7 @@ func clearfat(nl *gc.Node) {
p.To.Offset = int64(t + boff)
}
reg[ppc64.REGRT1-ppc64.REG_R0]--
gc.Regfree(&dst)
}
// Called after regopt and peep have run.


@ -62,171 +62,6 @@ var resvd = []int{
ppc64.FREGTWO,
}
func ginit() {
for i := 0; i < len(reg); i++ {
reg[i] = 1
}
for i := 0; i < ppc64.NREG+ppc64.NFREG; i++ {
reg[i] = 0
}
for i := 0; i < len(resvd); i++ {
reg[resvd[i]-ppc64.REG_R0]++
}
}
var regpc [len(reg)]uint32
func gclean() {
for i := int(0); i < len(resvd); i++ {
reg[resvd[i]-ppc64.REG_R0]--
}
for i := int(0); i < len(reg); i++ {
if reg[i] != 0 {
gc.Yyerror("reg %v left allocated, %p\n", obj.Rconv(i+ppc64.REG_R0), regpc[i])
}
}
}
func anyregalloc() bool {
var j int
for i := int(0); i < len(reg); i++ {
if reg[i] == 0 {
goto ok
}
for j = 0; j < len(resvd); j++ {
if resvd[j] == i {
goto ok
}
}
return true
ok:
}
return false
}
/*
* allocate register of type t, leave in n.
* if o != N, o is desired fixed register.
* caller must regfree(n).
*/
func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
if t == nil {
gc.Fatal("regalloc: t nil")
}
et := int(int(gc.Simtype[t.Etype]))
if gc.Debug['r'] != 0 {
fixfree := int(0)
fltfree := int(0)
for i := int(ppc64.REG_R0); i < ppc64.REG_F31; i++ {
if reg[i-ppc64.REG_R0] == 0 {
if i < ppc64.REG_F0 {
fixfree++
} else {
fltfree++
}
}
}
fmt.Printf("regalloc fix %d flt %d free\n", fixfree, fltfree)
}
var i int
switch et {
case gc.TINT8,
gc.TUINT8,
gc.TINT16,
gc.TUINT16,
gc.TINT32,
gc.TUINT32,
gc.TINT64,
gc.TUINT64,
gc.TPTR32,
gc.TPTR64,
gc.TBOOL:
if o != nil && o.Op == gc.OREGISTER {
i = int(o.Val.U.Reg)
if i >= ppc64.REGMIN && i <= ppc64.REGMAX {
goto out
}
}
for i = ppc64.REGMIN; i <= ppc64.REGMAX; i++ {
if reg[i-ppc64.REG_R0] == 0 {
regpc[i-ppc64.REG_R0] = uint32(obj.Getcallerpc(&n))
goto out
}
}
gc.Flusherrors()
for i := int(ppc64.REG_R0); i < ppc64.REG_R0+ppc64.NREG; i++ {
fmt.Printf("R%d %p\n", i, regpc[i-ppc64.REG_R0])
}
gc.Fatal("out of fixed registers")
case gc.TFLOAT32,
gc.TFLOAT64:
if o != nil && o.Op == gc.OREGISTER {
i = int(o.Val.U.Reg)
if i >= ppc64.FREGMIN && i <= ppc64.FREGMAX {
goto out
}
}
for i = ppc64.FREGMIN; i <= ppc64.FREGMAX; i++ {
if reg[i-ppc64.REG_R0] == 0 {
regpc[i-ppc64.REG_R0] = uint32(obj.Getcallerpc(&n))
goto out
}
}
gc.Flusherrors()
for i := int(ppc64.REG_F0); i < ppc64.REG_F0+ppc64.NREG; i++ {
fmt.Printf("F%d %p\n", i, regpc[i-ppc64.REG_R0])
}
gc.Fatal("out of floating registers")
case gc.TCOMPLEX64,
gc.TCOMPLEX128:
gc.Tempname(n, t)
return
}
gc.Fatal("regalloc: unknown type %v", gc.Tconv(t, 0))
return
out:
reg[i-ppc64.REG_R0]++
gc.Nodreg(n, t, i)
}
func regfree(n *gc.Node) {
if n.Op == gc.ONAME {
return
}
if n.Op != gc.OREGISTER && n.Op != gc.OINDREG {
gc.Fatal("regfree: not a register")
}
i := int(int(n.Val.U.Reg) - ppc64.REG_R0)
if i == ppc64.REGSP-ppc64.REG_R0 {
return
}
if i < 0 || i >= len(reg) {
gc.Fatal("regfree: reg out of range")
}
if reg[i] <= 0 {
gc.Fatal("regfree: reg not allocated")
}
reg[i]--
if reg[i] == 0 {
regpc[i] = 0
}
}
/*
* generate
* as $c, n
@ -236,19 +71,19 @@ func ginscon(as int, c int64, n2 *gc.Node) {
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) {
if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) || n2.Op != gc.OREGISTER || as == ppc64.AMULLD {
// cannot have more than 16-bit of immediate in ADD, etc.
// instead, MOV into register first.
var ntmp gc.Node
regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gins(ppc64.AMOVD, &n1, &ntmp)
gins(as, &ntmp, n2)
regfree(&ntmp)
rawgins(ppc64.AMOVD, &n1, &ntmp)
rawgins(as, &ntmp, n2)
gc.Regfree(&ntmp)
return
}
gins(as, &n1, n2)
rawgins(as, &n1, n2)
}
/*
@ -266,24 +101,24 @@ func ginscon2(as int, n2 *gc.Node, c int64) {
case ppc64.ACMP:
if -ppc64.BIG <= c && c <= ppc64.BIG {
gins(as, n2, &n1)
rawgins(as, n2, &n1)
return
}
case ppc64.ACMPU:
if 0 <= c && c <= 2*ppc64.BIG {
gins(as, n2, &n1)
rawgins(as, n2, &n1)
return
}
}
// MOV n1 into register first
var ntmp gc.Node
regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
gins(ppc64.AMOVD, &n1, &ntmp)
gins(as, n2, &ntmp)
regfree(&ntmp)
rawgins(ppc64.AMOVD, &n1, &ntmp)
rawgins(as, n2, &ntmp)
gc.Regfree(&ntmp)
}
/*
@ -351,10 +186,10 @@ func gmove(f *gc.Node, t *gc.Node) {
var con gc.Node
gc.Convconst(&con, gc.Types[gc.TINT64], &f.Val)
var r1 gc.Node
regalloc(&r1, con.Type, t)
gc.Regalloc(&r1, con.Type, t)
gins(ppc64.AMOVD, &con, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
case gc.TUINT32,
@ -363,10 +198,10 @@ func gmove(f *gc.Node, t *gc.Node) {
var con gc.Node
gc.Convconst(&con, gc.Types[gc.TUINT64], &f.Val)
var r1 gc.Node
regalloc(&r1, con.Type, t)
gc.Regalloc(&r1, con.Type, t)
gins(ppc64.AMOVD, &con, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
}
@ -546,21 +381,21 @@ func gmove(f *gc.Node, t *gc.Node) {
bignodes()
var r1 gc.Node
regalloc(&r1, gc.Types[ft], f)
gc.Regalloc(&r1, gc.Types[ft], f)
gmove(f, &r1)
if tt == gc.TUINT64 {
regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
gmove(&bigf, &r2)
gins(ppc64.AFCMPU, &r1, &r2)
p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1))
gins(ppc64.AFSUB, &r2, &r1)
gc.Patch(p1, gc.Pc)
regfree(&r2)
gc.Regfree(&r2)
}
regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
var r3 gc.Node
regalloc(&r3, gc.Types[gc.TINT64], t)
gc.Regalloc(&r3, gc.Types[gc.TINT64], t)
gins(ppc64.AFCTIDZ, &r1, &r2)
p1 := (*obj.Prog)(gins(ppc64.AFMOVD, &r2, nil))
p1.To.Type = obj.TYPE_MEM
@ -570,8 +405,8 @@ func gmove(f *gc.Node, t *gc.Node) {
p1.From.Type = obj.TYPE_MEM
p1.From.Reg = ppc64.REGSP
p1.From.Offset = -8
regfree(&r2)
regfree(&r1)
gc.Regfree(&r2)
gc.Regfree(&r1)
if tt == gc.TUINT64 {
p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)) // use CR0 here again
gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
@ -581,7 +416,7 @@ func gmove(f *gc.Node, t *gc.Node) {
}
gmove(&r3, t)
regfree(&r3)
gc.Regfree(&r3)
return
//warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
@ -611,7 +446,7 @@ func gmove(f *gc.Node, t *gc.Node) {
bignodes()
var r1 gc.Node
regalloc(&r1, gc.Types[gc.TINT64], nil)
gc.Regalloc(&r1, gc.Types[gc.TINT64], nil)
gmove(f, &r1)
if ft == gc.TUINT64 {
gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
@ -624,7 +459,7 @@ func gmove(f *gc.Node, t *gc.Node) {
gc.Patch(p1, gc.Pc)
}
regalloc(&r2, gc.Types[gc.TFLOAT64], t)
gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
p1 := (*obj.Prog)(gins(ppc64.AMOVD, &r1, nil))
p1.To.Type = obj.TYPE_MEM
p1.To.Reg = ppc64.REGSP
@ -634,7 +469,7 @@ func gmove(f *gc.Node, t *gc.Node) {
p1.From.Reg = ppc64.REGSP
p1.From.Offset = -8
gins(ppc64.AFCFID, &r2, &r2)
regfree(&r1)
gc.Regfree(&r1)
if ft == gc.TUINT64 {
p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)) // use CR0 here again
gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
@ -643,7 +478,7 @@ func gmove(f *gc.Node, t *gc.Node) {
}
gmove(&r2, t)
regfree(&r2)
gc.Regfree(&r2)
return
/*
@ -670,29 +505,61 @@ func gmove(f *gc.Node, t *gc.Node) {
// requires register destination
rdst:
{
regalloc(&r1, t.Type, t)
gc.Regalloc(&r1, t.Type, t)
gins(a, f, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
}
// requires register intermediate
hard:
regalloc(&r1, cvt, t)
gc.Regalloc(&r1, cvt, t)
gmove(f, &r1)
gmove(&r1, t)
regfree(&r1)
gc.Regfree(&r1)
return
}
func intLiteral(n *gc.Node) (x int64, ok bool) {
if n == nil || n.Op != gc.OLITERAL {
return
}
switch n.Val.Ctype {
case gc.CTINT, gc.CTRUNE:
return gc.Mpgetfix(n.Val.U.Xval), true
case gc.CTBOOL:
return int64(n.Val.U.Bval), true
}
return
}
// gins is called by the front end.
// It synthesizes some multiple-instruction sequences
// so the front end can stay simpler.
func gins(as int, f, t *gc.Node) *obj.Prog {
if as >= obj.A_ARCHSPECIFIC {
if x, ok := intLiteral(f); ok {
ginscon(as, x, t)
return nil // caller must not use
}
}
if as == ppc64.ACMP || as == ppc64.ACMPU {
if x, ok := intLiteral(t); ok {
ginscon2(as, f, x)
return nil // caller must not use
}
}
return rawgins(as, f, t)
}
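A sketch of what this buys the front end (the operand values and the node
n2 are made up for illustration; ginscon and ginscon2 are the helpers in
this file): a portable caller can hand gins a literal operand and let the
back end legalize it.
var con gc.Node
gc.Nodconst(&con, gc.Types[gc.TINT64], 1<<20) // too wide for a 16-bit immediate
gins(ppc64.AADD, &con, &n2) // routed to ginscon: MOVD $const, Rtmp; ADD Rtmp, n2
gins(ppc64.ACMP, &n2, &con) // routed to ginscon2, which knows CMP's immediate range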
/*
* generate one instruction:
* as f, t
*/
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
// TODO(austin): Add self-move test like in 6g (but be careful
// of truncation moves)
@ -700,6 +567,41 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
gc.Naddr(&p.From, f)
gc.Naddr(&p.To, t)
switch as {
case obj.ACALL:
if p.To.Type == obj.TYPE_REG && p.To.Reg != ppc64.REG_CTR {
// Allow front end to emit CALL REG, and rewrite into MOV REG, CTR; CALL CTR.
pp := gc.Prog(as)
pp.From = p.From
pp.To.Type = obj.TYPE_REG
pp.To.Reg = ppc64.REG_CTR
p.As = ppc64.AMOVD
p.From = p.To
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REG_CTR
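// Roughly: the front end's portable CALL Rx is emitted as the pair
//	MOVD Rx, CTR
//	CALL CTR (assembling to a branch-and-link through CTR)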
if gc.Debug['g'] != 0 {
fmt.Printf("%v\n", p)
fmt.Printf("%v\n", pp)
}
return pp
}
// Bad things the front end has done to us. Crash to find call stack.
case ppc64.AAND, ppc64.AMULLD:
if p.From.Type == obj.TYPE_CONST {
gc.Debug['h'] = 1
gc.Fatal("bad inst: %v", p)
}
case ppc64.ACMP, ppc64.ACMPU:
if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
gc.Debug['h'] = 1
gc.Fatal("bad inst: %v", p)
}
}
if gc.Debug['g'] != 0 {
fmt.Printf("%v\n", p)
}
@ -831,10 +733,9 @@ func optoas(op int, t *gc.Type) int {
// ACMPU
gc.OLE<<16 | gc.TUINT16,
gc.OLE<<16 | gc.TUINT32,
gc.OLE<<16 | gc.TUINT64,
gc.OLE<<16 | gc.TFLOAT32,
// AFCMPU
gc.OLE<<16 | gc.TFLOAT64:
gc.OLE<<16 | gc.TUINT64:
// No OLE for floats, because it mishandles NaN.
// Front end must reverse comparison or use OLT and OEQ together.
a = ppc64.ABLE
case gc.OGT<<16 | gc.TINT8,
@ -856,9 +757,9 @@ func optoas(op int, t *gc.Type) int {
gc.OGE<<16 | gc.TUINT8,
gc.OGE<<16 | gc.TUINT16,
gc.OGE<<16 | gc.TUINT32,
gc.OGE<<16 | gc.TUINT64,
gc.OGE<<16 | gc.TFLOAT32,
gc.OGE<<16 | gc.TFLOAT64:
gc.OGE<<16 | gc.TUINT64:
// No OGE for floats, because it mishandles NaN.
// Front end must reverse comparison or use OLT and OEQ together.
a = ppc64.ABGE
case gc.OCMP<<16 | gc.TBOOL,

src/cmd/internal/gc/cgen.go: new file, 2567 lines (diff too large to display)

@ -25,21 +25,21 @@ func Complexbool(op int, nl *Node, nr *Node, true_ bool, likely int, to *obj.Pro
if nr != nil {
if nl.Ullman > nr.Ullman && nl.Addable == 0 {
Tempname(&tnl, nl.Type)
Thearch.Cgen(nl, &tnl)
Cgen(nl, &tnl)
nl = &tnl
}
if nr.Addable == 0 {
var tnr Node
Tempname(&tnr, nr.Type)
Thearch.Cgen(nr, &tnr)
Cgen(nr, &tnr)
nr = &tnr
}
}
if nl.Addable == 0 {
Tempname(&tnl, nl.Type)
Thearch.Cgen(nl, &tnl)
Cgen(nl, &tnl)
nl = &tnl
}
@ -78,7 +78,7 @@ func Complexbool(op int, nl *Node, nr *Node, true_ bool, likely int, to *obj.Pro
true_ = !true_
}
Thearch.Bgen(&na, true_, likely, to)
Bgen(&na, true_, likely, to)
}
// break addable nc-complex into nr-real and ni-imaginary
@ -111,7 +111,7 @@ func minus(nl *Node, res *Node) {
ra.Op = OMINUS
ra.Left = nl
ra.Type = nl.Type
Thearch.Cgen(&ra, res)
Cgen(&ra, res)
}
// build and execute tree
@ -150,14 +150,14 @@ func complexadd(op int, nl *Node, nr *Node, res *Node) {
ra.Left = &n1
ra.Right = &n3
ra.Type = n1.Type
Thearch.Cgen(&ra, &n5)
Cgen(&ra, &n5)
ra = Node{}
ra.Op = uint8(op)
ra.Left = &n2
ra.Right = &n4
ra.Type = n2.Type
Thearch.Cgen(&ra, &n6)
Cgen(&ra, &n6)
}
// build and execute tree
@ -197,7 +197,7 @@ func complexmul(nl *Node, nr *Node, res *Node) {
ra.Left = &rm1
ra.Right = &rm2
ra.Type = rm1.Type
Thearch.Cgen(&ra, &tmp)
Cgen(&ra, &tmp)
// imag part
rm1 = Node{}
@ -218,10 +218,10 @@ func complexmul(nl *Node, nr *Node, res *Node) {
ra.Left = &rm1
ra.Right = &rm2
ra.Type = rm1.Type
Thearch.Cgen(&ra, &n6)
Cgen(&ra, &n6)
// tmp ->real part
Thearch.Cgen(&tmp, &n5)
Cgen(&tmp, &n5)
}
func nodfconst(n *Node, t *Type, fval *Mpflt) {
@ -322,8 +322,8 @@ func Complexmove(f *Node, t *Node) {
var n3 Node
subnode(&n3, &n4, t)
Thearch.Cgen(&n1, &n3)
Thearch.Cgen(&n2, &n4)
Cgen(&n1, &n3)
Cgen(&n2, &n4)
}
}
@ -346,9 +346,9 @@ func Complexgen(n *Node, res *Node) {
subnode(&n1, &n2, res)
var tmp Node
Tempname(&tmp, n1.Type)
Thearch.Cgen(n.Left, &tmp)
Thearch.Cgen(n.Right, &n2)
Thearch.Cgen(&tmp, &n1)
Cgen(n.Left, &tmp)
Cgen(n.Right, &n2)
Cgen(&tmp, &n1)
return
}
@ -366,11 +366,11 @@ func Complexgen(n *Node, res *Node) {
var n2 Node
subnode(&n1, &n2, nl)
if n.Op == OREAL {
Thearch.Cgen(&n1, res)
Cgen(&n1, res)
return
}
Thearch.Cgen(&n2, res)
Cgen(&n2, res)
return
}
@ -394,9 +394,9 @@ func Complexgen(n *Node, res *Node) {
if res.Addable == 0 {
var n1 Node
Thearch.Igen(res, &n1, nil)
Thearch.Cgen(n, &n1)
Thearch.Regfree(&n1)
Igen(res, &n1, nil)
Cgen(n, &n1)
Regfree(&n1)
return
}
@ -419,10 +419,10 @@ func Complexgen(n *Node, res *Node) {
OCALLMETH,
OCALLINTER:
var n1 Node
Thearch.Igen(n, &n1, res)
Igen(n, &n1, res)
Complexmove(&n1, res)
Thearch.Regfree(&n1)
Regfree(&n1)
return
case OCONV,
@ -447,21 +447,21 @@ func Complexgen(n *Node, res *Node) {
if nr != nil {
if nl.Ullman > nr.Ullman && nl.Addable == 0 {
Tempname(&tnl, nl.Type)
Thearch.Cgen(nl, &tnl)
Cgen(nl, &tnl)
nl = &tnl
}
if nr.Addable == 0 {
var tnr Node
Tempname(&tnr, nr.Type)
Thearch.Cgen(nr, &tnr)
Cgen(nr, &tnr)
nr = &tnr
}
}
if nl.Addable == 0 {
Tempname(&tnl, nl.Type)
Thearch.Cgen(nl, &tnl)
Cgen(nl, &tnl)
nl = &tnl
}


@ -235,13 +235,13 @@ func cgen_proc(n *Node, proc int) {
Fatal("cgen_proc: unknown call %v", Oconv(int(n.Left.Op), 0))
case OCALLMETH:
Cgen_callmeth(n.Left, proc)
cgen_callmeth(n.Left, proc)
case OCALLINTER:
Thearch.Cgen_callinter(n.Left, nil, proc)
cgen_callinter(n.Left, nil, proc)
case OCALLFUNC:
Thearch.Cgen_call(n.Left, proc)
cgen_call(n.Left, proc)
}
}
@ -377,7 +377,7 @@ func Clearslim(n *Node) {
}
ullmancalc(&z)
Thearch.Cgen(&z, n)
Cgen(&z, n)
}
/*
@ -393,17 +393,17 @@ func Cgen_eface(n *Node, res *Node) {
*/
tmp := temp(Types[Tptr])
Thearch.Cgen(n.Right, tmp)
Cgen(n.Right, tmp)
Gvardef(res)
dst := *res
dst.Type = Types[Tptr]
dst.Xoffset += int64(Widthptr)
Thearch.Cgen(tmp, &dst)
Cgen(tmp, &dst)
dst.Xoffset -= int64(Widthptr)
Thearch.Cgen(n.Left, &dst)
Cgen(n.Left, &dst)
}
/*
@ -443,7 +443,7 @@ func Cgen_slice(n *Node, res *Node) {
var src Node
if isnil(n.Left) {
Tempname(&src, n.Left.Type)
Thearch.Cgen(n.Left, &src)
Cgen(n.Left, &src)
} else {
src = *n.Left
}
@ -455,11 +455,11 @@ func Cgen_slice(n *Node, res *Node) {
if !Isptr[n.Left.Type.Etype] {
Fatal("slicearr is supposed to work on pointer: %v\n", Nconv(n, obj.FmtSign))
}
Thearch.Cgen(&src, base)
Cgen(&src, base)
Cgen_checknil(base)
} else {
src.Type = Types[Tptr]
Thearch.Cgen(&src, base)
Cgen(&src, base)
}
// committed to the update
@ -468,10 +468,10 @@ func Cgen_slice(n *Node, res *Node) {
// compute len and cap.
// len = n-i, cap = m-i, and offs = i*width.
// computing offs last lets the multiply overwrite i.
Thearch.Cgen((*Node)(len), tmplen)
Cgen((*Node)(len), tmplen)
if n.Op != OSLICESTR {
Thearch.Cgen(cap, tmpcap)
Cgen(cap, tmpcap)
}
// if new cap != 0 { base += add }
@ -489,11 +489,11 @@ func Cgen_slice(n *Node, res *Node) {
Nodconst(&con, tmpcap.Type, 0)
cmp := Nod(OEQ, tmpcap, &con)
typecheck(&cmp, Erv)
Thearch.Bgen(cmp, true, -1, p2)
Bgen(cmp, true, -1, p2)
add := Nod(OADD, base, offs)
typecheck(&add, Erv)
Thearch.Cgen(add, base)
Cgen(add, base)
Patch(p2, Pc)
}
@ -503,14 +503,14 @@ func Cgen_slice(n *Node, res *Node) {
dst.Xoffset += int64(Array_array)
dst.Type = Types[Tptr]
Thearch.Cgen(base, &dst)
Cgen(base, &dst)
// dst.len = hi [ - lo ]
dst = *res
dst.Xoffset += int64(Array_nel)
dst.Type = Types[Simtype[TUINT]]
Thearch.Cgen(tmplen, &dst)
Cgen(tmplen, &dst)
if n.Op != OSLICESTR {
// dst.cap = cap [ - lo ]
@ -518,7 +518,7 @@ func Cgen_slice(n *Node, res *Node) {
dst.Xoffset += int64(Array_cap)
dst.Type = Types[Simtype[TUINT]]
Thearch.Cgen(tmpcap, &dst)
Cgen(tmpcap, &dst)
}
}
@ -620,7 +620,7 @@ func gen(n *Node) {
lno := setlineno(n)
wasregalloc := Thearch.Anyregalloc()
wasregalloc := Anyregalloc()
if n == nil {
goto ret
@ -760,10 +760,10 @@ func gen(n *Node) {
lab.Continpc = continpc
}
gen(n.Nincr) // contin: incr
Patch(p1, Pc) // test:
Thearch.Bgen(n.Ntest, false, -1, breakpc) // if(!test) goto break
Genlist(n.Nbody) // body
gen(n.Nincr) // contin: incr
Patch(p1, Pc) // test:
Bgen(n.Ntest, false, -1, breakpc) // if(!test) goto break
Genlist(n.Nbody) // body
gjmp(continpc)
Patch(breakpc, Pc) // done:
continpc = scontin
@ -774,15 +774,15 @@ func gen(n *Node) {
}
case OIF:
p1 := gjmp(nil) // goto test
p2 := gjmp(nil) // p2: goto else
Patch(p1, Pc) // test:
Thearch.Bgen(n.Ntest, false, int(-n.Likely), p2) // if(!test) goto p2
Genlist(n.Nbody) // then
p3 := gjmp(nil) // goto done
Patch(p2, Pc) // else:
Genlist(n.Nelse) // else
Patch(p3, Pc) // done:
p1 := gjmp(nil) // goto test
p2 := gjmp(nil) // p2: goto else
Patch(p1, Pc) // test:
Bgen(n.Ntest, false, int(-n.Likely), p2) // if(!test) goto p2
Genlist(n.Nbody) // then
p3 := gjmp(nil) // goto done
Patch(p2, Pc) // else:
Genlist(n.Nelse) // else
Patch(p3, Pc) // done:
case OSWITCH:
sbreak := breakpc
@ -832,13 +832,13 @@ func gen(n *Node) {
Cgen_as(n.Left, n.Right)
case OCALLMETH:
Cgen_callmeth(n, 0)
cgen_callmeth(n, 0)
case OCALLINTER:
Thearch.Cgen_callinter(n, nil, 0)
cgen_callinter(n, nil, 0)
case OCALLFUNC:
Thearch.Cgen_call(n, 0)
cgen_call(n, 0)
case OPROC:
cgen_proc(n, 1)
@ -848,7 +848,7 @@ func gen(n *Node) {
case ORETURN,
ORETJMP:
Thearch.Cgen_ret(n)
cgen_ret(n)
case OCHECKNIL:
Cgen_checknil(n.Left)
@ -858,7 +858,7 @@ func gen(n *Node) {
}
ret:
if Thearch.Anyregalloc() != wasregalloc {
if Anyregalloc() != wasregalloc {
Dump("node", n)
Fatal("registers left allocated")
}
@ -908,10 +908,10 @@ func Cgen_as(nl *Node, nr *Node) {
return
}
Thearch.Cgen(nr, nl)
Cgen(nr, nl)
}
func Cgen_callmeth(n *Node, proc int) {
func cgen_callmeth(n *Node, proc int) {
// generate a rewrite in n2 for the method call
// (p.f)(...) goes to (f)(p,...)
@ -929,7 +929,7 @@ func Cgen_callmeth(n *Node, proc int) {
if n2.Left.Op == ONAME {
n2.Left.Class = PFUNC
}
Thearch.Cgen_call(&n2, proc)
cgen_call(&n2, proc)
}
func checklabels() {
@ -1020,14 +1020,14 @@ func Componentgen(nr *Node, nl *Node) bool {
if nr != nil && !cadable(nr) {
goto no
}
Thearch.Igen(nl, &nodl, nil)
Igen(nl, &nodl, nil)
freel = 1
}
if nr != nil {
nodr = *nr
if !cadable(nr) {
Thearch.Igen(nr, &nodr, nil)
Igen(nr, &nodr, nil)
freer = 1
}
} else {
@ -1035,7 +1035,7 @@ func Componentgen(nr *Node, nl *Node) bool {
var tmp Node
Nodconst(&tmp, nl.Type, 0)
Thearch.Regalloc(&nodr, Types[TUINT], nil)
Regalloc(&nodr, Types[TUINT], nil)
Thearch.Gmove(&tmp, &nodr)
freer = 1
}
@ -1190,19 +1190,19 @@ func Componentgen(nr *Node, nl *Node) bool {
no:
if freer != 0 {
Thearch.Regfree(&nodr)
Regfree(&nodr)
}
if freel != 0 {
Thearch.Regfree(&nodl)
Regfree(&nodl)
}
return false
yes:
if freer != 0 {
Thearch.Regfree(&nodr)
Regfree(&nodr)
}
if freel != 0 {
Thearch.Regfree(&nodl)
Regfree(&nodl)
}
return true
}


@ -777,47 +777,60 @@ const (
)
type Arch struct {
Thechar int
Thestring string
Thelinkarch *obj.LinkArch
Typedefs []Typedef
REGSP int
REGCTXT int
MAXWIDTH int64
Anyregalloc func() bool
Betypeinit func()
Bgen func(*Node, bool, int, *obj.Prog)
Cgen func(*Node, *Node)
Cgen_call func(*Node, int)
Cgen_callinter func(*Node, *Node, int)
Cgen_ret func(*Node)
Clearfat func(*Node)
Defframe func(*obj.Prog)
Excise func(*Flow)
Expandchecks func(*obj.Prog)
Gclean func()
Ginit func()
Gins func(int, *Node, *Node) *obj.Prog
Ginscall func(*Node, int)
Gmove func(*Node, *Node)
Igen func(*Node, *Node, *Node)
Linkarchinit func()
Peep func(*obj.Prog)
Proginfo func(*obj.Prog) // fills in Prog.Info
Regalloc func(*Node, *Type, *Node)
Regfree func(*Node)
Regtyp func(*obj.Addr) bool
Sameaddr func(*obj.Addr, *obj.Addr) bool
Smallindir func(*obj.Addr, *obj.Addr) bool
Stackaddr func(*obj.Addr) bool
Excludedregs func() uint64
RtoB func(int) uint64
FtoB func(int) uint64
BtoR func(uint64) int
BtoF func(uint64) int
Optoas func(int, *Type) int
Doregbits func(int) uint64
Regnames func(*int) []string
Thechar int
Thestring string
Thelinkarch *obj.LinkArch
Typedefs []Typedef
REGSP int
REGCTXT int
REGCALLX int // BX
REGCALLX2 int // AX
REGRETURN int // AX
REGMIN int
REGMAX int
FREGMIN int
FREGMAX int
MAXWIDTH int64
ReservedRegs []int
AddIndex func(*Node, int64, *Node) bool // optional
Betypeinit func()
Bgen_float func(*Node, int, int, *obj.Prog) // optional
Cgen64 func(*Node, *Node) // only on 32-bit systems
Cgenindex func(*Node, *Node, bool) *obj.Prog
Cgen_bmul func(int, *Node, *Node, *Node) bool
Cgen_float func(*Node, *Node) // optional
Cgen_hmul func(*Node, *Node, *Node)
Cgen_shift func(int, bool, *Node, *Node, *Node)
Clearfat func(*Node)
Cmp64 func(*Node, *Node, int, int, *obj.Prog) // only on 32-bit systems
Defframe func(*obj.Prog)
Dodiv func(int, *Node, *Node, *Node)
Excise func(*Flow)
Expandchecks func(*obj.Prog)
Gins func(int, *Node, *Node) *obj.Prog
Ginscon func(int, int64, *Node)
Ginsnop func()
Gmove func(*Node, *Node)
Igenindex func(*Node, *Node, bool) *obj.Prog
Linkarchinit func()
Peep func(*obj.Prog)
Proginfo func(*obj.Prog) // fills in Prog.Info
Regtyp func(*obj.Addr) bool
Sameaddr func(*obj.Addr, *obj.Addr) bool
Smallindir func(*obj.Addr, *obj.Addr) bool
Stackaddr func(*obj.Addr) bool
Stackcopy func(*Node, *Node, int64, int64, int64)
Sudoaddable func(int, *Node, *obj.Addr) bool
Sudoclean func()
Excludedregs func() uint64
RtoB func(int) uint64
FtoB func(int) uint64
BtoR func(uint64) int
BtoF func(uint64) int
Optoas func(int, *Type) int
Doregbits func(int) uint64
Regnames func(*int) []string
}
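For orientation, a sketch of how a back end populates the new hooks,
following the existing galign.go pattern (only representative fields shown;
the exact set varies by architecture):
func main() {
	gc.Thearch.Thechar = thechar
	gc.Thearch.Thestring = thestring
	gc.Thearch.REGSP = ppc64.REGSP
	gc.Thearch.REGMIN = ppc64.REGMIN
	gc.Thearch.REGMAX = ppc64.REGMAX
	gc.Thearch.FREGMIN = ppc64.FREGMIN
	gc.Thearch.FREGMAX = ppc64.FREGMAX
	gc.Thearch.ReservedRegs = resvd
	gc.Thearch.Dodiv = dodiv
	gc.Thearch.Cgen_hmul = cgen_hmul
	gc.Thearch.Cgen_shift = cgen_shift
	gc.Thearch.Ginsnop = ginsnop
	gc.Thearch.Gmove = gmove
	gc.Thearch.Optoas = optoas
	gc.Main()
	gc.Exit(0)
}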
var pcloc int32


@ -30,7 +30,12 @@
package gc
import "cmd/internal/obj"
import (
"cmd/internal/obj"
"fmt"
"runtime"
"strings"
)
var ddumped int
@ -295,6 +300,8 @@ func Naddr(a *obj.Addr, n *Node) {
switch n.Op {
default:
a := a // copy to let escape into Ctxt.Dconv
Debug['h'] = 1
Dump("naddr", n)
Fatal("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
case OREGISTER:
@ -559,11 +566,8 @@ fp:
n.Op = OINDREG
n.Val.U.Reg = int16(Thearch.REGSP)
if Thearch.Thechar == '5' {
n.Xoffset += 4
}
if Thearch.Thechar == '7' || Thearch.Thechar == '9' {
n.Xoffset += 8
if HasLinkRegister() {
n.Xoffset += int64(Ctxt.Arch.Ptrsize)
}
case 1: // input arg
@ -571,10 +575,6 @@ fp:
case 2: // offset output arg
Fatal("shouldn't be used")
n.Op = OINDREG
n.Val.U.Reg = int16(Thearch.REGSP)
n.Xoffset += Types[Tptr].Width
}
n.Typecheck = 1
@ -598,3 +598,228 @@ func unpatch(p *obj.Prog) *obj.Prog {
p.To.Offset = 0
return q
}
var reg [100]int // count of references to reg
var regstk [100][]byte // allocation sites, when -v is given
func ginit() {
for r := range reg {
reg[r] = 1
}
for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
reg[r-Thearch.REGMIN] = 0
}
for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
reg[r-Thearch.REGMIN] = 0
}
for _, r := range Thearch.ReservedRegs {
reg[r-Thearch.REGMIN] = 1
}
}
func gclean() {
for _, r := range Thearch.ReservedRegs {
reg[r-Thearch.REGMIN]--
}
for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
n := reg[r-Thearch.REGMIN]
if n != 0 {
Yyerror("reg %v left allocated", obj.Rconv(r))
if Debug['v'] != 0 {
Regdump()
}
}
}
for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
n := reg[r-Thearch.REGMIN]
if n != 0 {
Yyerror("reg %v left allocated", obj.Rconv(r))
if Debug['v'] != 0 {
Regdump()
}
}
}
}
func Anyregalloc() bool {
n := 0
for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
if reg[r-Thearch.REGMIN] == 0 {
n++
}
}
return n > len(Thearch.ReservedRegs)
}
/*
* allocate register of type t, leave in n.
* if o != N, o may be reusable register.
* caller must Regfree(n).
*/
func Regalloc(n *Node, t *Type, o *Node) {
if t == nil {
Fatal("regalloc: t nil")
}
et := int(Simtype[t.Etype])
if Ctxt.Arch.Regsize == 4 && (et == TINT64 || et == TUINT64) {
Fatal("regalloc 64bit")
}
var i int
Switch:
switch et {
default:
Fatal("regalloc: unknown type %v", Tconv(t, 0))
case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TPTR32, TPTR64, TBOOL:
if o != nil && o.Op == OREGISTER {
i = int(o.Val.U.Reg)
if Thearch.REGMIN <= i && i <= Thearch.REGMAX {
break Switch
}
}
for i = Thearch.REGMIN; i <= Thearch.REGMAX; i++ {
if reg[i-Thearch.REGMIN] == 0 {
break Switch
}
}
Flusherrors()
Regdump()
Fatal("out of fixed registers")
case TFLOAT32, TFLOAT64:
if o != nil && o.Op == OREGISTER {
i = int(o.Val.U.Reg)
if Thearch.FREGMIN <= i && i <= Thearch.FREGMAX {
break Switch
}
}
for i = Thearch.FREGMIN; i <= Thearch.FREGMAX; i++ {
if reg[i-Thearch.REGMIN] == 0 { // note: REGMIN, not FREGMIN
break Switch
}
}
Flusherrors()
Regdump()
Fatal("out of floating registers")
case TCOMPLEX64, TCOMPLEX128:
Tempname(n, t)
return
}
ix := i - Thearch.REGMIN
if reg[ix] == 0 && Debug['v'] > 0 {
if regstk[ix] == nil {
regstk[ix] = make([]byte, 4096)
}
stk := regstk[ix]
n := runtime.Stack(stk[:cap(stk)], false)
regstk[ix] = stk[:n]
}
reg[ix]++
Nodreg(n, t, i)
}
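Typical pairing, as in the converted back ends above (nl and res stand for
whatever nodes the caller is working with):
var n1 gc.Node
gc.Regalloc(&n1, nl.Type, res) // reuse res's register when possible
gc.Cgen(nl, &n1)               // evaluate nl into the register
gmove(&n1, res)
gc.Regfree(&n1) // every Regalloc is matched by a Regfree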
func Regfree(n *Node) {
if n.Op == ONAME {
return
}
if n.Op != OREGISTER && n.Op != OINDREG {
Fatal("regfree: not a register")
}
i := int(n.Val.U.Reg)
if i == Thearch.REGSP {
return
}
switch {
case Thearch.REGMIN <= i && i <= Thearch.REGMAX,
Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
// ok
default:
Fatal("regfree: reg out of range")
}
i -= Thearch.REGMIN
if reg[i] <= 0 {
Fatal("regfree: reg not allocated")
}
reg[i]--
if reg[i] == 0 {
regstk[i] = regstk[i][:0]
}
}
// Reginuse reports whether r is in use.
func Reginuse(r int) bool {
switch {
case Thearch.REGMIN <= r && r <= Thearch.REGMAX,
Thearch.FREGMIN <= r && r <= Thearch.FREGMAX:
// ok
default:
Fatal("reginuse: reg out of range")
}
return reg[r-Thearch.REGMIN] > 0
}
// Regrealloc(n) undoes the effect of Regfree(n),
// so that a register can be given up but then reclaimed.
func Regrealloc(n *Node) {
if n.Op != OREGISTER && n.Op != OINDREG {
Fatal("regrealloc: not a register")
}
i := int(n.Val.U.Reg)
if i == Thearch.REGSP {
return
}
switch {
case Thearch.REGMIN <= i && i <= Thearch.REGMAX,
Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
// ok
default:
Fatal("regrealloc: reg out of range")
}
i -= Thearch.REGMIN
if reg[i] == 0 && Debug['v'] > 0 {
if regstk[i] == nil {
regstk[i] = make([]byte, 4096)
}
stk := regstk[i]
n := runtime.Stack(stk[:cap(stk)], false)
regstk[i] = stk[:n]
}
reg[i]++
}
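The clearfat conversion earlier in this commit shows the intended use:
claim a fixed scratch register in the shared tables, then release it
(a sketch; nl is the node being cleared):
var dst gc.Node
gc.Nodreg(&dst, gc.Types[gc.Tptr], ppc64.REGRT1) // fixed scratch register
gc.Regrealloc(&dst)                              // mark it allocated
gc.Agen(nl, &dst)
// ... zero memory through dst ...
gc.Regfree(&dst)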
func Regdump() {
if Debug['v'] == 0 {
fmt.Printf("run compiler with -v for register allocation sites\n")
return
}
dump := func(r int) {
stk := regstk[r-Thearch.REGMIN]
if len(stk) == 0 {
return
}
fmt.Printf("reg %v allocated at:\n", obj.Rconv(r))
fmt.Printf("\t%s\n", strings.Replace(strings.TrimSpace(string(stk)), "\n", "\n\t", -1))
}
for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
if reg[r-Thearch.REGMIN] != 0 {
dump(r)
}
}
for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
if reg[r-Thearch.REGMIN] != 0 {
dump(r)
}
}
}


@ -335,10 +335,10 @@ func Cgen_checknil(n *Node) {
if ((Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && n.Op != OREGISTER) || n.Addable == 0 || n.Op == OLITERAL {
var reg Node
Thearch.Regalloc(&reg, Types[Tptr], n)
Thearch.Cgen(n, &reg)
Regalloc(&reg, Types[Tptr], n)
Cgen(n, &reg)
Thearch.Gins(obj.ACHECKNIL, &reg, nil)
Thearch.Regfree(&reg)
Regfree(&reg)
return
}
@ -458,7 +458,7 @@ func compile(fn *Node) {
Afunclit(&ptxt.From, Curfn.Nname)
Thearch.Ginit()
ginit()
gcargs = makefuncdatasym("gcargs·%d", obj.FUNCDATA_ArgsPointerMaps)
gclocals = makefuncdatasym("gclocals·%d", obj.FUNCDATA_LocalsPointerMaps)
@ -484,7 +484,7 @@ func compile(fn *Node) {
Genlist(Curfn.Enter)
Genlist(Curfn.Nbody)
Thearch.Gclean()
gclean()
checklabels()
if nerrors != 0 {
goto ret
@ -494,13 +494,13 @@ func compile(fn *Node) {
}
if Curfn.Type.Outtuple != 0 {
Thearch.Ginscall(throwreturn, 0)
Ginscall(throwreturn, 0)
}
Thearch.Ginit()
ginit()
// TODO: Determine when the final cgen_ret can be omitted. Perhaps always?
Thearch.Cgen_ret(nil)
cgen_ret(nil)
if Hasdefer != 0 {
// deferreturn pretends to have one uintptr argument.
@ -510,7 +510,7 @@ func compile(fn *Node) {
}
}
Thearch.Gclean()
gclean()
if nerrors != 0 {
goto ret
}


@ -924,8 +924,6 @@ func varkillwalk(v *TempVar, f0 *Flow, gen uint32) {
// Assume that stack variables with address not taken can be loaded multiple times
// from memory without being rechecked. Other variables need to be checked on
// each load.
type NilVar struct {
}
var killed int // f->data is either nil or &killed


@ -1344,7 +1344,7 @@ loop2:
}
}
if Debug['v'] != 0 && strings.Contains(Curfn.Nname.Sym.Name, "Parse") {
if false && Debug['v'] != 0 && strings.Contains(Curfn.Nname.Sym.Name, "Parse") {
Warn("regions: %d\n", nregion)
}
if nregion >= MaxRgn {


@ -294,6 +294,7 @@ const (
OLROT // left rotate: AROL.
ORROTC // right rotate-carry: ARCR.
ORETJMP // return to other function
OPS // compare parity set (for x86 NaN check)
OEND
)


@ -232,6 +232,7 @@ var optab = []Optab{
{ABL, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0},
{AB, C_NONE, C_NONE, C_ZOREG, 6, 4, 0, 0, 0},
{ABL, C_NONE, C_NONE, C_REG, 6, 4, 0, 0, 0},
{ABL, C_REG, C_NONE, C_REG, 6, 4, 0, 0, 0},
{ABL, C_NONE, C_NONE, C_ZOREG, 6, 4, 0, 0, 0},
{obj.ARET, C_NONE, C_NONE, C_REG, 6, 4, 0, 0, 0},
{obj.ARET, C_NONE, C_NONE, C_ZOREG, 6, 4, 0, 0, 0},


@ -49,7 +49,7 @@ func addexp(s string) {
os.Exit(2)
}
func linksetexp() {
func init() {
for _, f := range strings.Split(goexperiment, ",") {
if f != "" {
addexp(f)


@ -123,8 +123,6 @@ func Headstr(v int) string {
}
func Linknew(arch *LinkArch) *Link {
linksetexp()
ctxt := new(Link)
ctxt.Hash = make(map[SymVer]*LSym)
ctxt.Arch = arch


@ -190,6 +190,18 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
}
}
// Rewrite MOVL/MOVQ $XXX(FP/SP) as LEAL/LEAQ.
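// e.g. (hypothetical operand):
//	MOVQ	$x+8(FP), AX
// becomes
//	LEAQ	x+8(FP), AX
// since $ applied to a stack address means "take the address", i.e. an LEA.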
if p.From.Type == obj.TYPE_ADDR && (ctxt.Arch.Thechar == '6' || p.From.Name != obj.NAME_EXTERN && p.From.Name != obj.NAME_STATIC) {
switch p.As {
case AMOVL:
p.As = ALEAL
p.From.Type = obj.TYPE_MEM
case AMOVQ:
p.As = ALEAQ
p.From.Type = obj.TYPE_MEM
}
}
if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
nacladdr(ctxt, p, &p.From3)
nacladdr(ctxt, p, &p.From)