[dev.cc] cmd/internal/gc, cmd/new6g etc: convert from cmd/gc, cmd/6g etc

First draft of converted Go compiler, using rsc.io/c2go rev 83d795a.

Change-Id: I29f4c7010de07d2ff1947bbca9865879d83c32c3
Reviewed-on: https://go-review.googlesource.com/4851
Reviewed-by: Rob Pike <r@golang.org>
This commit is contained in:
Russ Cox 2015-02-13 14:40:36 -05:00
parent c11882bc3e
commit 8c195bdf12
119 changed files with 88449 additions and 254 deletions

View file

@ -0,0 +1,735 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import "cmd/internal/obj"
/*
* machine size and rounding
* alignment is dictated around
* the size of a pointer, set in betypeinit
* (see ../6g/galign.c).
*/
// defercalc is nonzero while width calculations are deferred;
// see checkwidth, defercheckwidth, and resumecheckwidth below.
var defercalc int
// Rnd rounds o up to the nearest multiple of r.
// r must be a power of two in the range [1, 8].
func Rnd(o int64, r int64) int64 {
	switch {
	case r < 1, r > 8, r&(r-1) != 0:
		Fatal("rnd %d", r)
	}
	mask := r - 1
	return (o + mask) &^ mask
}
// offmod assigns offsets to the method fields of interface type t.
// Each method slot is one pointer wide.
func offmod(t *Type) {
	var offset int32
	for field := t.Type; field != nil; field = field.Down {
		if field.Etype != TFIELD {
			Fatal("offmod: not TFIELD: %v", Tconv(field, obj.FmtLong))
		}
		field.Width = int64(offset)
		offset += int32(Widthptr)
		if int64(offset) >= Thearch.MAXWIDTH {
			// Report once and restart offsets at a small value
			// so later fields still get distinct offsets.
			Yyerror("interface too large")
			offset = int32(Widthptr)
		}
	}
}
// widstruct lays out the fields of struct type t starting at offset o,
// recording each field's offset in its Width, and returns the offset
// just past the end of the struct. flag is the minimum alignment
// (0 disables the final rounding; used for argument-frame sections).
// errtype is the type to name in "too large" diagnostics.
func widstruct(errtype *Type, t *Type, o int64, flag int) int64 {
	var f *Type
	var w int64
	var maxalign int32
	var starto int64
	var lastzero int64

	starto = o
	maxalign = int32(flag)
	if maxalign < 1 {
		maxalign = 1
	}
	lastzero = 0
	for f = t.Type; f != nil; f = f.Down {
		if f.Etype != TFIELD {
			Fatal("widstruct: not TFIELD: %v", Tconv(f, obj.FmtLong))
		}
		if f.Type == nil {
			// broken field, just skip it so that other valid fields
			// get a width.
			continue
		}
		dowidth(f.Type)
		// struct alignment is the max of its fields' alignments
		if int32(f.Type.Align) > maxalign {
			maxalign = int32(f.Type.Align)
		}
		if f.Type.Width < 0 {
			Fatal("invalid width %d", f.Type.Width)
		}
		w = f.Type.Width
		if f.Type.Align > 0 {
			o = Rnd(o, int64(f.Type.Align))
		}
		f.Width = o // really offset for TFIELD
		if f.Nname != nil {
			// this same stackparam logic is in addrescapes
			// in typecheck.c. usually addrescapes runs after
			// widstruct, in which case we could drop this,
			// but function closure functions are the exception.
			if f.Nname.Stackparam != nil {
				f.Nname.Stackparam.Xoffset = o
				f.Nname.Xoffset = 0
			} else {
				f.Nname.Xoffset = o
			}
		}
		// remember the offset of the most recent zero-width field
		if w == 0 {
			lastzero = o
		}
		o += w
		if o >= Thearch.MAXWIDTH {
			Yyerror("type %v too large", Tconv(errtype, obj.FmtLong))
			o = 8 // small but nonzero
		}
	}
	// For nonzero-sized structs which end in a zero-sized thing, we add
	// an extra byte of padding to the type. This padding ensures that
	// taking the address of the zero-sized thing can't manufacture a
	// pointer to the next object in the heap. See issue 9401.
	if flag == 1 && o > starto && o == lastzero {
		o++
	}
	// final width is rounded
	if flag != 0 {
		o = Rnd(o, int64(maxalign))
	}
	t.Align = uint8(maxalign)
	// type width only includes back to first field's offset
	t.Width = o - starto
	return o
}
// dowidth computes the memory width (size in bytes) and alignment of
// type t, storing them in t.Width and t.Align. A Width of -2 marks a
// type whose computation is in progress, which is how illegal
// self-embedding recursive types are detected. While dowidth runs,
// checkwidth calls are deferred (via defercalc) and flushed by the
// outermost invocation.
func dowidth(t *Type) {
	var et int32
	var w int64
	var lno int
	var t1 *Type

	if Widthptr == 0 {
		Fatal("dowidth without betypeinit")
	}
	if t == nil {
		return
	}
	// already computed
	if t.Width > 0 {
		return
	}
	// Width == -2: dowidth is active on t further up the call stack,
	// so t contains itself without indirection.
	if t.Width == -2 {
		lno = int(lineno)
		lineno = int32(t.Lineno)
		if !(t.Broke != 0) {
			t.Broke = 1
			Yyerror("invalid recursive type %v", Tconv(t, 0))
		}
		t.Width = 0
		lineno = int32(lno)
		return
	}
	// break infinite recursion if the broken recursive type
	// is referenced again
	if t.Broke != 0 && t.Width == 0 {
		return
	}
	// defer checkwidth calls until after we're done
	defercalc++
	lno = int(lineno)
	lineno = int32(t.Lineno)
	t.Width = -2
	t.Align = 0
	et = int32(t.Etype)
	switch et {
	case TFUNC,
		TCHAN,
		TMAP,
		TSTRING:
		break
	/* simtype == 0 during bootstrap */
	default:
		if Simtype[t.Etype] != 0 {
			et = int32(Simtype[t.Etype])
		}
	}
	w = 0
	switch et {
	default:
		Fatal("dowidth: unknown type: %v", Tconv(t, 0))
	/* compiler-specific stuff */
	case TINT8,
		TUINT8,
		TBOOL:
		// bool is int8
		w = 1
	case TINT16,
		TUINT16:
		w = 2
	case TINT32,
		TUINT32,
		TFLOAT32:
		w = 4
	case TINT64,
		TUINT64,
		TFLOAT64,
		TCOMPLEX64:
		w = 8
		t.Align = uint8(Widthreg)
	case TCOMPLEX128:
		w = 16
		t.Align = uint8(Widthreg)
	case TPTR32:
		w = 4
		checkwidth(t.Type)
	case TPTR64:
		w = 8
		checkwidth(t.Type)
	case TUNSAFEPTR:
		w = int64(Widthptr)
	case TINTER: // implemented as 2 pointers
		w = 2 * int64(Widthptr)
		t.Align = uint8(Widthptr)
		offmod(t)
	case TCHAN: // implemented as pointer
		w = int64(Widthptr)
		checkwidth(t.Type)
		// make fake type to check later to
		// trigger channel argument check.
		t1 = typ(TCHANARGS)
		t1.Type = t
		checkwidth(t1)
	case TCHANARGS:
		t1 = t.Type
		dowidth(t.Type) // just in case
		if t1.Type.Width >= 1<<16 {
			Yyerror("channel element type too large (>64kB)")
		}
		t.Width = 1
	case TMAP: // implemented as pointer
		w = int64(Widthptr)
		checkwidth(t.Type)
		checkwidth(t.Down)
	case TFORW: // should have been filled in
		if !(t.Broke != 0) {
			Yyerror("invalid recursive type %v", Tconv(t, 0))
		}
		w = 1 // anything will do
	// dummy type; should be replaced before use.
	case TANY:
		if !(Debug['A'] != 0) {
			Fatal("dowidth any")
		}
		w = 1 // anything will do
	case TSTRING:
		if sizeof_String == 0 {
			Fatal("early dowidth string")
		}
		w = int64(sizeof_String)
		t.Align = uint8(Widthptr)
	case TARRAY:
		if t.Type == nil {
			break
		}
		if t.Bound >= 0 {
			// fixed-size array: verify the total size fits in
			// the address space before multiplying.
			var cap uint64
			dowidth(t.Type)
			if t.Type.Width != 0 {
				cap = (uint64(Thearch.MAXWIDTH) - 1) / uint64(t.Type.Width)
				if uint64(t.Bound) > cap {
					Yyerror("type %v larger than address space", Tconv(t, obj.FmtLong))
				}
			}
			w = t.Bound * t.Type.Width
			t.Align = t.Type.Align
		} else if t.Bound == -1 {
			// slice header
			w = int64(sizeof_Array)
			checkwidth(t.Type)
			t.Align = uint8(Widthptr)
		} else if t.Bound == -100 {
			// [...]T used outside an array literal
			if !(t.Broke != 0) {
				Yyerror("use of [...] array outside of array literal")
				t.Broke = 1
			}
		} else {
			Fatal("dowidth %v", Tconv(t, 0)) // probably [...]T
		}
	case TSTRUCT:
		if t.Funarg != 0 {
			Fatal("dowidth fn struct %v", Tconv(t, 0))
		}
		w = widstruct(t, t, 0, 1)
	// make fake type to check later to
	// trigger function argument computation.
	case TFUNC:
		t1 = typ(TFUNCARGS)
		t1.Type = t
		checkwidth(t1)
		// width of func type is pointer
		w = int64(Widthptr)
	// function is 3 cated structures;
	// compute their widths as side-effect.
	case TFUNCARGS:
		t1 = t.Type
		w = widstruct(t.Type, *getthis(t1), 0, 0)
		w = widstruct(t.Type, *getinarg(t1), w, Widthreg)
		w = widstruct(t.Type, *Getoutarg(t1), w, Widthreg)
		t1.Argwid = w
		if w%int64(Widthreg) != 0 {
			Warn("bad type %v %d\n", Tconv(t1, 0), w)
		}
		t.Align = 1
	}
	// on 32-bit pointer targets the width must fit in int32
	if Widthptr == 4 && w != int64(int32(w)) {
		Yyerror("type %v too large", Tconv(t, 0))
	}
	t.Width = w
	if t.Align == 0 {
		// default alignment is the width itself (must be a
		// power of two no larger than 8)
		if w > 8 || w&(w-1) != 0 {
			Fatal("invalid alignment for %v", Tconv(t, 0))
		}
		t.Align = uint8(w)
	}
	lineno = int32(lno)
	// flush deferred checkwidth calls if we are the outermost dowidth
	if defercalc == 1 {
		resumecheckwidth()
	} else {
		defercalc--
	}
}
/*
* when a type's width should be known, we call checkwidth
* to compute it. during a declaration like
*
* type T *struct { next T }
*
* it is necessary to defer the calculation of the struct width
* until after T has been initialized to be a pointer to that struct.
* similarly, during import processing structs may be used
* before their definition. in those situations, calling
* defercheckwidth() stops width calculations until
* resumecheckwidth() is called, at which point all the
* checkwidths that were deferred are executed.
* dowidth should only be called when the type's size
* is needed immediately. checkwidth makes sure the
* size is evaluated eventually.
*/
// TypeList is a linked-list entry in the queue of types whose width
// computation has been deferred (see checkwidth).
type TypeList struct {
	t    *Type
	next *TypeList
}

// tlfree is a free list of recycled TypeList entries.
var tlfree *TypeList

// tlq is the queue of types awaiting width computation.
var tlq *TypeList
// checkwidth ensures t's width will eventually be computed: it calls
// dowidth immediately unless width calculations are currently deferred
// (defercalc != 0), in which case t is queued on tlq for
// resumecheckwidth to process later.
func checkwidth(t *Type) {
	var l *TypeList

	if t == nil {
		return
	}
	// function arg structs should not be checked
	// outside of the enclosing function.
	if t.Funarg != 0 {
		Fatal("checkwidth %v", Tconv(t, 0))
	}
	if !(defercalc != 0) {
		dowidth(t)
		return
	}
	// already queued
	if t.Deferwidth != 0 {
		return
	}
	t.Deferwidth = 1
	// reuse a queue entry from the free list if one is available
	l = tlfree
	if l != nil {
		tlfree = l.next
	} else {
		l = new(TypeList)
	}
	// push onto the deferred queue
	l.t = t
	l.next = tlq
	tlq = l
}
func defercheckwidth() {
// we get out of sync on syntax errors, so don't be pedantic.
if defercalc != 0 && nerrors == 0 {
Fatal("defercheckwidth")
}
defercalc = 1
}
// resumecheckwidth computes the widths of all types queued by
// checkwidth while calculations were deferred, recycling each queue
// entry onto the free list, and re-enables immediate computation.
func resumecheckwidth() {
	var l *TypeList

	if !(defercalc != 0) {
		Fatal("resumecheckwidth")
	}
	// dowidth may push more entries onto tlq, so re-read tlq on
	// every iteration rather than walking a snapshot of the list.
	for l = tlq; l != nil; l = tlq {
		l.t.Deferwidth = 0
		tlq = l.next
		dowidth(l.t)
		// recycle the entry
		l.next = tlfree
		tlfree = l
	}
	defercalc = 0
}
// typeinit initializes the predeclared entries of Types, the type
// classification tables (Isint, Isfloat, okfor*, ...), the extreme
// values of each numeric type, the architecture-dependent type
// aliases, and the layout constants for strings and slices.
// It must run after betypeinit has set Widthptr.
func typeinit() {
	var i int
	var etype int
	var sameas int
	var t *Type
	var s *Sym
	var s1 *Sym

	if Widthptr == 0 {
		Fatal("typeinit before betypeinit")
	}
	// by default each type simulates itself
	for i = 0; i < NTYPE; i++ {
		Simtype[i] = uint8(i)
	}
	Types[TPTR32] = typ(TPTR32)
	dowidth(Types[TPTR32])
	Types[TPTR64] = typ(TPTR64)
	dowidth(Types[TPTR64])
	t = typ(TUNSAFEPTR)
	Types[TUNSAFEPTR] = t
	t.Sym = Pkglookup("Pointer", unsafepkg)
	t.Sym.Def = typenod(t)
	dowidth(Types[TUNSAFEPTR])
	// Tptr is the pointer kind for the target architecture.
	Tptr = TPTR32
	if Widthptr == 8 {
		Tptr = TPTR64
	}
	for i = TINT8; i <= TUINT64; i++ {
		Isint[i] = 1
	}
	Isint[TINT] = 1
	Isint[TUINT] = 1
	Isint[TUINTPTR] = 1
	Isfloat[TFLOAT32] = 1
	Isfloat[TFLOAT64] = 1
	Iscomplex[TCOMPLEX64] = 1
	Iscomplex[TCOMPLEX128] = 1
	Isptr[TPTR32] = 1
	Isptr[TPTR64] = 1
	isforw[TFORW] = 1
	Issigned[TINT] = 1
	Issigned[TINT8] = 1
	Issigned[TINT16] = 1
	Issigned[TINT32] = 1
	Issigned[TINT64] = 1
	/*
	 * initialize okfor
	 */
	for i = 0; i < NTYPE; i++ {
		if Isint[i] != 0 || i == TIDEAL {
			okforeq[i] = 1
			okforcmp[i] = 1
			okforarith[i] = 1
			okforadd[i] = 1
			okforand[i] = 1
			okforconst[i] = 1
			issimple[i] = 1
			Minintval[i] = new(Mpint)
			Maxintval[i] = new(Mpint)
		}
		if Isfloat[i] != 0 {
			okforeq[i] = 1
			okforcmp[i] = 1
			okforadd[i] = 1
			okforarith[i] = 1
			okforconst[i] = 1
			issimple[i] = 1
			minfltval[i] = new(Mpflt)
			maxfltval[i] = new(Mpflt)
		}
		if Iscomplex[i] != 0 {
			okforeq[i] = 1
			okforadd[i] = 1
			okforarith[i] = 1
			okforconst[i] = 1
			issimple[i] = 1
		}
	}
	issimple[TBOOL] = 1
	okforadd[TSTRING] = 1
	okforbool[TBOOL] = 1
	okforcap[TARRAY] = 1
	okforcap[TCHAN] = 1
	okforconst[TBOOL] = 1
	okforconst[TSTRING] = 1
	okforlen[TARRAY] = 1
	okforlen[TCHAN] = 1
	okforlen[TMAP] = 1
	okforlen[TSTRING] = 1
	okforeq[TPTR32] = 1
	okforeq[TPTR64] = 1
	okforeq[TUNSAFEPTR] = 1
	okforeq[TINTER] = 1
	okforeq[TCHAN] = 1
	okforeq[TSTRING] = 1
	okforeq[TBOOL] = 1
	okforeq[TMAP] = 1    // nil only; refined in typecheck
	okforeq[TFUNC] = 1   // nil only; refined in typecheck
	okforeq[TARRAY] = 1  // nil slice only; refined in typecheck
	okforeq[TSTRUCT] = 1 // it's complicated; refined in typecheck
	okforcmp[TSTRING] = 1
	// every operator is valid for nothing until proven otherwise
	for i = 0; i < len(okfor); i++ {
		okfor[i] = okfornone[:]
	}
	// binary
	okfor[OADD] = okforadd[:]
	okfor[OAND] = okforand[:]
	okfor[OANDAND] = okforbool[:]
	okfor[OANDNOT] = okforand[:]
	okfor[ODIV] = okforarith[:]
	okfor[OEQ] = okforeq[:]
	okfor[OGE] = okforcmp[:]
	okfor[OGT] = okforcmp[:]
	okfor[OLE] = okforcmp[:]
	okfor[OLT] = okforcmp[:]
	okfor[OMOD] = okforand[:]
	okfor[OMUL] = okforarith[:]
	okfor[ONE] = okforeq[:]
	okfor[OOR] = okforand[:]
	okfor[OOROR] = okforbool[:]
	okfor[OSUB] = okforarith[:]
	okfor[OXOR] = okforand[:]
	okfor[OLSH] = okforand[:]
	okfor[ORSH] = okforand[:]
	// unary
	okfor[OCOM] = okforand[:]
	okfor[OMINUS] = okforarith[:]
	okfor[ONOT] = okforbool[:]
	okfor[OPLUS] = okforarith[:]
	// special
	okfor[OCAP] = okforcap[:]
	okfor[OLEN] = okforlen[:]
	// comparison
	iscmp[OLT] = 1
	iscmp[OGT] = 1
	iscmp[OGE] = 1
	iscmp[OLE] = 1
	iscmp[OEQ] = 1
	iscmp[ONE] = 1
	// extreme values of the sized integer types
	mpatofix(Maxintval[TINT8], "0x7f")
	mpatofix(Minintval[TINT8], "-0x80")
	mpatofix(Maxintval[TINT16], "0x7fff")
	mpatofix(Minintval[TINT16], "-0x8000")
	mpatofix(Maxintval[TINT32], "0x7fffffff")
	mpatofix(Minintval[TINT32], "-0x80000000")
	mpatofix(Maxintval[TINT64], "0x7fffffffffffffff")
	mpatofix(Minintval[TINT64], "-0x8000000000000000")
	mpatofix(Maxintval[TUINT8], "0xff")
	mpatofix(Maxintval[TUINT16], "0xffff")
	mpatofix(Maxintval[TUINT32], "0xffffffff")
	mpatofix(Maxintval[TUINT64], "0xffffffffffffffff")
	/* f is valid float if min < f < max. (min and max are not themselves valid.) */
	mpatoflt(maxfltval[TFLOAT32], "33554431p103") /* 2^24-1 p (127-23) + 1/2 ulp*/
	mpatoflt(minfltval[TFLOAT32], "-33554431p103")
	mpatoflt(maxfltval[TFLOAT64], "18014398509481983p970") /* 2^53-1 p (1023-52) + 1/2 ulp */
	mpatoflt(minfltval[TFLOAT64], "-18014398509481983p970")
	// complex limits follow their component float type
	maxfltval[TCOMPLEX64] = maxfltval[TFLOAT32]
	minfltval[TCOMPLEX64] = minfltval[TFLOAT32]
	maxfltval[TCOMPLEX128] = maxfltval[TFLOAT64]
	minfltval[TCOMPLEX128] = minfltval[TFLOAT64]
	/* for walk to use in error messages */
	Types[TFUNC] = functype(nil, nil, nil)
	/* types used in front end */
	// types[TNIL] got set early in lexinit
	Types[TIDEAL] = typ(TIDEAL)
	Types[TINTER] = typ(TINTER)
	/* simple aliases */
	Simtype[TMAP] = uint8(Tptr)
	Simtype[TCHAN] = uint8(Tptr)
	Simtype[TFUNC] = uint8(Tptr)
	Simtype[TUNSAFEPTR] = uint8(Tptr)
	/* pick up the backend thearch.typedefs */
	for i = range Thearch.Typedefs {
		s = Lookup(Thearch.Typedefs[i].Name)
		s1 = Pkglookup(Thearch.Typedefs[i].Name, builtinpkg)
		etype = Thearch.Typedefs[i].Etype
		if etype < 0 || etype >= len(Types) {
			Fatal("typeinit: %s bad etype", s.Name)
		}
		sameas = Thearch.Typedefs[i].Sameas
		if sameas < 0 || sameas >= len(Types) {
			Fatal("typeinit: %s bad sameas", s.Name)
		}
		// the typedef behaves exactly like its sameas type
		Simtype[etype] = uint8(sameas)
		minfltval[etype] = minfltval[sameas]
		maxfltval[etype] = maxfltval[sameas]
		Minintval[etype] = Minintval[sameas]
		Maxintval[etype] = Maxintval[sameas]
		t = Types[etype]
		if t != nil {
			Fatal("typeinit: %s already defined", s.Name)
		}
		t = typ(etype)
		t.Sym = s1
		dowidth(t)
		Types[etype] = t
		s1.Def = typenod(t)
	}
	// layout of the runtime slice header: ptr, len, cap
	Array_array = int(Rnd(0, int64(Widthptr)))
	Array_nel = int(Rnd(int64(Array_array)+int64(Widthptr), int64(Widthint)))
	Array_cap = int(Rnd(int64(Array_nel)+int64(Widthint), int64(Widthint)))
	sizeof_Array = int(Rnd(int64(Array_cap)+int64(Widthint), int64(Widthptr)))
	// string is same as slice wo the cap
	sizeof_String = int(Rnd(int64(Array_nel)+int64(Widthint), int64(Widthptr)))
	dowidth(Types[TSTRING])
	dowidth(idealstring)
}
/*
* compute total size of f's in/out arguments.
*/
// Argsize returns the total size in bytes of function type t's
// argument frame (receiver, parameters, and results), rounded up to
// pointer alignment.
func Argsize(t *Type) int {
	var save Iter
	var fp *Type
	var w int64
	var x int64

	w = 0
	// the frame must extend past the widest result slot
	fp = Structfirst(&save, Getoutarg(t))
	for fp != nil {
		x = fp.Width + fp.Type.Width
		if x > w {
			w = x
		}
		fp = structnext(&save)
	}
	// ... and past the widest receiver/parameter slot
	fp = funcfirst(&save, t)
	for fp != nil {
		x = fp.Width + fp.Type.Width
		if x > w {
			w = x
		}
		fp = funcnext(&save)
	}
	// round up to pointer alignment
	w = (w + int64(Widthptr) - 1) &^ (int64(Widthptr) - 1)
	if int64(int(w)) != w {
		Fatal("argsize too big")
	}
	return int(w)
}

View file

@ -0,0 +1,9 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
const (
	// DEFAULTCAPACITY is a default initial capacity constant;
	// its consumers are not visible in this chunk — presumably used
	// to pre-size growable buffers. TODO(review): confirm usage.
	DEFAULTCAPACITY = 16
)

167
src/cmd/internal/gc/bits.go Normal file
View file

@ -0,0 +1,167 @@
// Inferno utils/cc/bits.c
// http://code.google.com/p/inferno-os/source/browse/utils/cc/bits.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package gc
import "fmt"
/*
Bits
bor(Bits a, Bits b)
{
Bits c;
int i;
for(i=0; i<BITS; i++)
c.b[i] = a.b[i] | b.b[i];
return c;
}
Bits
band(Bits a, Bits b)
{
Bits c;
int i;
for(i=0; i<BITS; i++)
c.b[i] = a.b[i] & b.b[i];
return c;
}
Bits
bnot(Bits a)
{
Bits c;
int i;
for(i=0; i<BITS; i++)
c.b[i] = ~a.b[i];
return c;
}
*/
// bany reports (as 0/1) whether any bit of a is set.
func bany(a *Bits) int {
	for i := 0; i < BITS; i++ {
		if a.b[i] != 0 {
			return 1
		}
	}
	return 0
}
/*
int
beq(Bits a, Bits b)
{
int i;
for(i=0; i<BITS; i++)
if(a.b[i] != b.b[i])
return 0;
return 1;
}
*/
// bnum returns the index of the lowest set bit in a.
// It is a fatal error if no bit is set.
func bnum(a Bits) int {
	for i := 0; i < BITS; i++ {
		if w := a.b[i]; w != 0 {
			return 64*i + Bitno(w)
		}
	}
	Fatal("bad in bnum")
	return 0
}
// blsh returns a Bits value with only bit n set.
func blsh(n uint) Bits {
	c := zbits
	c.b[n/64] = 1 << (n % 64)
	return c
}
// btest reports (as 0/1) whether bit n of a is set.
func btest(a *Bits, n uint) int {
	word, bit := n/64, n%64
	return bool2int(a.b[word]&(1<<bit) != 0)
}
// biset sets bit n of a.
func biset(a *Bits, n uint) {
	word, bit := n/64, n%64
	a.b[word] |= 1 << bit
}
// biclr clears bit n of a.
func biclr(a *Bits, n uint) {
	word, bit := n/64, n%64
	a.b[word] &^= 1 << bit
}
// Bitno returns the index of the lowest set bit in b.
// It is a fatal error if b is zero.
func Bitno(b uint64) int {
	for i := 0; i < 64; i++ {
		if b&1 != 0 {
			return i
		}
		b >>= 1
	}
	Fatal("bad in bitno")
	return 0
}
// Qconv formats the set of variables named by bits as a
// space-separated list, using "$i" for variables without a symbol.
// flag is accepted for conversion-routine symmetry and unused.
func Qconv(bits Bits, flag int) string {
	var out string
	sep := ""
	for bany(&bits) != 0 {
		i := bnum(bits)
		out += sep
		sep = " "
		if var_[i].node == nil || var_[i].node.Sym == nil {
			out += fmt.Sprintf("$%d", i)
		} else {
			out += fmt.Sprintf("%s(%d)", var_[i].node.Sym.Name, i)
			if var_[i].offset != 0 {
				out += fmt.Sprintf("%+d", int64(var_[i].offset))
			}
		}
		biclr(&bits, uint(i))
	}
	return out
}

File diff suppressed because one or more lines are too long

208
src/cmd/internal/gc/bv.go Normal file
View file

@ -0,0 +1,208 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import "fmt"
const (
WORDSIZE = 4
WORDBITS = 32
WORDMASK = WORDBITS - 1
WORDSHIFT = 5
)
// bvsize returns the number of bytes needed to hold n bits,
// rounded up to a whole number of words.
func bvsize(n uint32) uint32 {
	nword := (n + WORDBITS - 1) / WORDBITS
	return nword * WORDSIZE
}
// bvbits returns the number of bits in bv.
func bvbits(bv *Bvec) int32 {
	return bv.n
}
// bvwords returns the number of 32-bit words bv occupies.
func bvwords(bv *Bvec) int32 {
	return (bv.n + WORDBITS - 1) / WORDBITS
}
// bvalloc allocates a zeroed bit vector with capacity for n bits.
func bvalloc(n int32) *Bvec {
	nword := bvsize(uint32(n)) / WORDSIZE
	return &Bvec{n, make([]uint32, nword)}
}
// bvandnot sets dst = src1 &^ src2 (set difference).
// All three vectors must have the same length.
func bvandnot(dst *Bvec, src1 *Bvec, src2 *Bvec) {
	if dst.n != src1.n || dst.n != src2.n {
		// was "bvand": report the correct function name
		Fatal("bvandnot: lengths %d, %d, and %d are not equal", dst.n, src1.n, src2.n)
	}
	// operate word-at-a-time; all vectors have the same word count
	for w := range dst.b {
		dst.b[w] = src1.b[w] &^ src2.b[w]
	}
}
// bvcmp reports whether bv1 and bv2 differ: 0 if equal, 1 if not.
// The vectors must have the same length.
func bvcmp(bv1 *Bvec, bv2 *Bvec) int {
	if bv1.n != bv2.n {
		// was "bvequal": report the actual function name
		Fatal("bvcmp: lengths %d and %d are not equal", bv1.n, bv2.n)
	}
	for i, x := range bv1.b {
		if x != bv2.b[i] {
			return 1
		}
	}
	return 0
}
// bvcopy copies the bits of src into dst.
// dst must be at least as long as src.
func bvcopy(dst *Bvec, src *Bvec) {
	// built-in copy replaces the element-by-element loop
	copy(dst.b, src.b)
}
// bvconcat returns a new vector holding the bits of src1
// followed by the bits of src2.
func bvconcat(src1 *Bvec, src2 *Bvec) *Bvec {
	dst := bvalloc(src1.n + src2.n)
	for i := int32(0); i < src1.n; i++ {
		if bvget(src1, i) != 0 {
			bvset(dst, i)
		}
	}
	for i := int32(0); i < src2.n; i++ {
		if bvget(src2, i) != 0 {
			bvset(dst, src1.n+i)
		}
	}
	return dst
}
// bvget returns bit i of bv as 0 or 1. Out-of-range i is fatal.
func bvget(bv *Bvec, i int32) int {
	if i < 0 || i >= bv.n {
		Fatal("bvget: index %d is out of bounds with length %d\n", i, bv.n)
	}
	word := bv.b[i>>WORDSHIFT]
	return int((word >> uint(i&WORDMASK)) & 1)
}
// bvnext returns the smallest index >= i for which bvget(bv, i) == 1.
// If there is no such index, bvnext returns -1.
func bvnext(bv *Bvec, i int32) int {
	var w uint32

	if i >= bv.n {
		return -1
	}
	// Jump i ahead to next word with bits.
	if bv.b[i>>WORDSHIFT]>>uint(i&WORDMASK) == 0 {
		// skip the rest of the current word ...
		i &^= WORDMASK
		i += WORDBITS
		// ... then any fully zero words
		for i < bv.n && bv.b[i>>WORDSHIFT] == 0 {
			i += WORDBITS
		}
	}
	if i >= bv.n {
		return -1
	}
	// Find 1 bit.
	w = bv.b[i>>WORDSHIFT] >> uint(i&WORDMASK)
	for w&1 == 0 {
		w >>= 1
		i++
	}
	return int(i)
}
// bvisempty reports (as 0/1) whether no bit of bv is set.
func bvisempty(bv *Bvec) int {
	for i := int32(0); i < bv.n; i += WORDBITS {
		if bv.b[i>>WORDSHIFT] != 0 {
			return 0
		}
	}
	return 1
}
// bvnot inverts every bit of bv in place.
func bvnot(bv *Bvec) {
	for w := range bv.b {
		bv.b[w] = ^bv.b[w]
	}
}
// bvor sets dst = src1 | src2 (set union).
// All three vectors must have the same length.
func bvor(dst *Bvec, src1 *Bvec, src2 *Bvec) {
	if dst.n != src1.n || dst.n != src2.n {
		Fatal("bvor: lengths %d, %d, and %d are not equal", dst.n, src1.n, src2.n)
	}
	for w := range dst.b {
		dst.b[w] = src1.b[w] | src2.b[w]
	}
}
// bvand sets dst = src1 & src2 (set intersection).
// All three vectors must have the same length.
func bvand(dst *Bvec, src1 *Bvec, src2 *Bvec) {
	if dst.n != src1.n || dst.n != src2.n {
		// was "bvor": a copy/paste slip reported the wrong function name
		Fatal("bvand: lengths %d, %d, and %d are not equal", dst.n, src1.n, src2.n)
	}
	// operate word-at-a-time; all vectors have the same word count
	for w := range dst.b {
		dst.b[w] = src1.b[w] & src2.b[w]
	}
}
// bvprint prints bv to standard output as "#*" followed by one
// digit (0 or 1) per bit.
func bvprint(bv *Bvec) {
	fmt.Printf("#*")
	for i := int32(0); i < bv.n; i++ {
		fmt.Printf("%d", bvget(bv, i))
	}
}
// bvreset clears bit i of bv. Out-of-range i is fatal.
func bvreset(bv *Bvec, i int32) {
	if i < 0 || i >= bv.n {
		Fatal("bvreset: index %d is out of bounds with length %d\n", i, bv.n)
	}
	bv.b[i/WORDBITS] &^= 1 << uint(i%WORDBITS)
}
// bvresetall clears every bit of bv.
func bvresetall(bv *Bvec) {
	for i := 0; i < len(bv.b); i++ {
		bv.b[i] = 0
	}
}
// bvset sets bit i of bv. Out-of-range i is fatal.
func bvset(bv *Bvec, i int32) {
	if i < 0 || i >= bv.n {
		Fatal("bvset: index %d is out of bounds with length %d\n", i, bv.n)
	}
	bv.b[i/WORDBITS] |= 1 << uint(i%WORDBITS)
}

View file

@ -0,0 +1,696 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/internal/obj"
"fmt"
)
/*
* function literals aka closures
*/
// closurehdr is called at the start of parsing a function literal.
// It allocates the OCLOSURE node, opens the function scope (funchdr),
// and moves ntype's parameter/result declarations onto the closure
// node, leaving fresh copies with new name nodes in ntype.
func closurehdr(ntype *Node) {
	var n *Node
	var name *Node
	var a *Node
	var l *NodeList

	n = Nod(OCLOSURE, nil, nil)
	n.Ntype = ntype
	n.Funcdepth = Funcdepth
	funchdr(n)
	// steal ntype's argument names and
	// leave a fresh copy in their place.
	// references to these variables need to
	// refer to the variables in the external
	// function declared below; see walkclosure.
	n.List = ntype.List
	n.Rlist = ntype.Rlist
	ntype.List = nil
	ntype.Rlist = nil
	// rebuild parameter declarations with fresh name nodes
	for l = n.List; l != nil; l = l.Next {
		name = l.N.Left
		if name != nil {
			name = newname(name.Sym)
		}
		a = Nod(ODCLFIELD, name, l.N.Right)
		a.Isddd = l.N.Isddd
		if name != nil {
			name.Isddd = a.Isddd
		}
		ntype.List = list(ntype.List, a)
	}
	// rebuild result declarations with fresh name nodes
	for l = n.Rlist; l != nil; l = l.Next {
		name = l.N.Left
		if name != nil {
			name = newname(name.Sym)
		}
		ntype.Rlist = list(ntype.Rlist, Nod(ODCLFIELD, name, l.N.Right))
	}
}
// closurebody is called at the end of parsing a function literal.
// It attaches body to the closure, closes the function scope
// (funcbody), and for each captured variable records the expression
// (oldname) that reads it in the enclosing function.
func closurebody(body *NodeList) *Node {
	var func_ *Node
	var v *Node
	var l *NodeList

	// an empty literal body still needs one statement
	if body == nil {
		body = list1(Nod(OEMPTY, nil, nil))
	}
	func_ = Curfn
	func_.Nbody = body
	func_.Endlineno = lineno
	funcbody(func_)
	// closure-specific variables are hanging off the
	// ordinary ones in the symbol table; see oldname.
	// unhook them.
	// make the list of pointers for the closure call.
	for l = func_.Cvars; l != nil; l = l.Next {
		v = l.N
		v.Closure.Closure = v.Outer
		v.Outerexpr = oldname(v.Sym)
	}
	return func_
}
// typecheckclosure type checks closure func_: it marks captured
// variables, checks the closure's type expression and (when inside a
// function) its body, and appends the generated top-level function
// (makeclosure) to xtop. top is the context flag set by the caller.
func typecheckclosure(func_ *Node, top int) {
	var oldfn *Node
	var n *Node
	var l *NodeList
	var olddd int

	// mark each captured variable the first time it is captured
	for l = func_.Cvars; l != nil; l = l.Next {
		n = l.N.Closure
		if !(n.Captured != 0) {
			n.Captured = 1
			if n.Decldepth == 0 {
				Fatal("typecheckclosure: var %v does not have decldepth assigned", Nconv(n, obj.FmtShort))
			}
			// Ignore assignments to the variable in straightline code
			// preceding the first capturing by a closure.
			if n.Decldepth == decldepth {
				n.Assigned = 0
			}
		}
	}
	// parameters and results of the closure are at declaration depth 1
	for l = func_.Dcl; l != nil; l = l.Next {
		if l.N.Op == ONAME && (l.N.Class == PPARAM || l.N.Class == PPARAMOUT) {
			l.N.Decldepth = 1
		}
	}
	oldfn = Curfn
	typecheck(&func_.Ntype, Etype)
	func_.Type = func_.Ntype.Type
	func_.Top = top
	// Type check the body now, but only if we're inside a function.
	// At top level (in a variable initialization: curfn==nil) we're not
	// ready to type check code yet; we'll check it later, because the
	// underlying closure function we create is added to xtop.
	if Curfn != nil && func_.Type != nil {
		Curfn = func_
		olddd = decldepth
		decldepth = 1
		typechecklist(func_.Nbody, Etop)
		decldepth = olddd
		Curfn = oldfn
	}
	// Create top-level function
	xtop = list(xtop, makeclosure(func_))
}
// makeclosure_closgen numbers the generated closure functions
// (func·001, func·002, ...).
var makeclosure_closgen int

// makeclosure wraps the body of closure func_ in a new top-level
// ODCLFUNC with a generated name, moving the body, declarations, and
// line information over, and cross-links the two nodes via their
// Closure fields. It returns the new function.
func makeclosure(func_ *Node) *Node {
	var xtype *Node
	var xfunc *Node

	/*
	 * wrap body in external function
	 * that begins by reading closure parameters.
	 */
	xtype = Nod(OTFUNC, nil, nil)

	xtype.List = func_.List
	xtype.Rlist = func_.Rlist
	// create the function
	xfunc = Nod(ODCLFUNC, nil, nil)
	makeclosure_closgen++
	namebuf = fmt.Sprintf("func·%.3d", makeclosure_closgen)
	xfunc.Nname = newname(Lookup(namebuf))
	xfunc.Nname.Sym.Flags |= SymExported // disable export
	xfunc.Nname.Ntype = xtype
	xfunc.Nname.Defn = xfunc
	declare(xfunc.Nname, PFUNC)
	xfunc.Nname.Funcdepth = func_.Funcdepth
	xfunc.Funcdepth = func_.Funcdepth
	xfunc.Endlineno = func_.Endlineno
	// move the body and declarations to the new function
	xfunc.Nbody = func_.Nbody
	xfunc.Dcl = concat(func_.Dcl, xfunc.Dcl)
	if xfunc.Nbody == nil {
		Fatal("empty body - won't generate any code")
	}
	typecheck(&xfunc, Etop)
	// cross-link the closure expression and the generated function
	xfunc.Closure = func_
	func_.Closure = xfunc
	func_.Nbody = nil
	func_.List = nil
	func_.Rlist = nil
	return xfunc
}
// capturevars is called in a separate phase after all typechecking is done.
// It decides whether each variable captured by a closure should be captured
// by value or by reference.
// We use value capturing for values <= 128 bytes that are never reassigned
// after capturing (effectively constant).
func capturevars(xfunc *Node) {
	var func_ *Node
	var v *Node
	var outer *Node
	var l *NodeList
	var lno int

	lno = int(lineno)
	lineno = xfunc.Lineno
	func_ = xfunc.Closure
	// func_.Enter collects the expressions evaluated at the closure's
	// creation site, one per captured variable.
	func_.Enter = nil
	for l = func_.Cvars; l != nil; l = l.Next {
		v = l.N
		if v.Type == nil {
			// if v->type is nil, it means v looked like it was
			// going to be used in the closure but wasn't.
			// this happens because when parsing a, b, c := f()
			// the a, b, c gets parsed as references to older
			// a, b, c before the parser figures out this is a
			// declaration.
			v.Op = OXXX
			continue
		}
		// type check the & of closed variables outside the closure,
		// so that the outer frame also grabs them and knows they escape.
		dowidth(v.Type)
		outer = v.Outerexpr
		v.Outerexpr = nil
		// out parameters will be assigned to implicitly upon return.
		if outer.Class != PPARAMOUT && !(v.Closure.Addrtaken != 0) && !(v.Closure.Assigned != 0) && v.Type.Width <= 128 {
			// small and never reassigned: capture by value
			v.Byval = 1
		} else {
			// otherwise capture by reference (address of the outer var)
			v.Closure.Addrtaken = 1
			outer = Nod(OADDR, outer, nil)
		}
		// optional diagnostic describing the capture decision
		if Debug['m'] > 1 {
			var name *Sym
			var how string
			name = nil
			if v.Curfn != nil && v.Curfn.Nname != nil {
				name = v.Curfn.Nname.Sym
			}
			how = "ref"
			if v.Byval != 0 {
				how = "value"
			}
			Warnl(int(v.Lineno), "%v capturing by %s: %v (addr=%d assign=%d width=%d)", Sconv(name, 0), how, Sconv(v.Sym, 0), v.Closure.Addrtaken, v.Closure.Assigned, int32(v.Type.Width))
		}
		typecheck(&outer, Erv)
		func_.Enter = list(func_.Enter, outer)
	}
	lineno = int32(lno)
}
// transformclosure is called in a separate phase after escape analysis.
// It transform closure bodies to properly reference captured variables.
func transformclosure(xfunc *Node) {
	var func_ *Node
	var cv *Node
	var addr *Node
	var v *Node
	var f *Node
	var l *NodeList
	var body *NodeList
	var param **Type
	var fld *Type
	var offset int64
	var lno int
	var nvar int

	lno = int(lineno)
	lineno = xfunc.Lineno
	func_ = xfunc.Closure
	if func_.Top&Ecall != 0 {
		// If the closure is directly called, we transform it to a plain function call
		// with variables passed as args. This avoids allocation of a closure object.
		// Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
		// will complete the transformation later.
		// For illustration, the following closure:
		//	func(a int) {
		//		println(byval)
		//		byref++
		//	}(42)
		// becomes:
		//	func(a int, byval int, &byref *int) {
		//		println(byval)
		//		(*&byref)++
		//	}(42, byval, &byref)
		// f is ONAME of the actual function.
		f = xfunc.Nname
		// Get pointer to input arguments and rewind to the end.
		// We are going to append captured variables to input args.
		param = &getinargx(f.Type).Type
		for ; *param != nil; param = &(*param).Down {
		}
		for l = func_.Cvars; l != nil; l = l.Next {
			v = l.N
			if v.Op == OXXX {
				continue
			}
			fld = typ(TFIELD)
			fld.Funarg = 1
			if v.Byval != 0 {
				// If v is captured by value, we merely downgrade it to PPARAM.
				v.Class = PPARAM
				v.Ullman = 1
				fld.Nname = v
			} else {
				// If v of type T is captured by reference,
				// we introduce function param &v *T
				// and v remains PPARAMREF with &v heapaddr
				// (accesses will implicitly deref &v).
				namebuf = fmt.Sprintf("&%s", v.Sym.Name)
				addr = newname(Lookup(namebuf))
				addr.Type = Ptrto(v.Type)
				addr.Class = PPARAM
				v.Heapaddr = addr
				fld.Nname = addr
			}
			fld.Type = fld.Nname.Type
			fld.Sym = fld.Nname.Sym
			// Declare the new param and append it to input arguments.
			xfunc.Dcl = list(xfunc.Dcl, fld.Nname)
			*param = fld
			param = &fld.Down
		}
		// Recalculate param offsets.
		if f.Type.Width > 0 {
			Fatal("transformclosure: width is already calculated")
		}
		dowidth(f.Type)
		xfunc.Type = f.Type // update type of ODCLFUNC
	} else {
		// The closure is not called, so it is going to stay as closure.
		nvar = 0
		body = nil
		// the closure struct starts with the function pointer
		offset = int64(Widthptr)
		for l = func_.Cvars; l != nil; l = l.Next {
			v = l.N
			if v.Op == OXXX {
				continue
			}
			nvar++
			// cv refers to the field inside of closure OSTRUCTLIT.
			cv = Nod(OCLOSUREVAR, nil, nil)
			cv.Type = v.Type
			if !(v.Byval != 0) {
				cv.Type = Ptrto(v.Type)
			}
			// lay out the field inside the closure struct
			offset = Rnd(offset, int64(cv.Type.Align))
			cv.Xoffset = offset
			offset += cv.Type.Width
			if v.Byval != 0 && v.Type.Width <= int64(2*Widthptr) && Thearch.Thechar == '6' {
				// If it is a small variable captured by value, downgrade it to PAUTO.
				// This optimization is currently enabled only for amd64, see:
				// https://github.com/golang/go/issues/9865
				v.Class = PAUTO
				v.Ullman = 1
				xfunc.Dcl = list(xfunc.Dcl, v)
				body = list(body, Nod(OAS, v, cv))
			} else {
				// Declare variable holding addresses taken from closure
				// and initialize in entry prologue.
				namebuf = fmt.Sprintf("&%s", v.Sym.Name)
				addr = newname(Lookup(namebuf))
				addr.Ntype = Nod(OIND, typenod(v.Type), nil)
				addr.Class = PAUTO
				addr.Used = 1
				addr.Curfn = xfunc
				xfunc.Dcl = list(xfunc.Dcl, addr)
				v.Heapaddr = addr
				if v.Byval != 0 {
					cv = Nod(OADDR, cv, nil)
				}
				body = list(body, Nod(OAS, addr, cv))
			}
		}
		// the prologue assignments run on function entry
		typechecklist(body, Etop)
		walkstmtlist(body)
		xfunc.Enter = body
		xfunc.Needctxt = uint8(bool2int(nvar > 0))
	}
	lineno = int32(lno)
}
// walkclosure returns the expression to use for closure func_ at its
// use site: the bare generated function when nothing is captured, or
// otherwise a composite-literal closure struct converted (OCONVNOP)
// to the function type. init receives any statements walkexpr emits.
func walkclosure(func_ *Node, init **NodeList) *Node {
	var clos *Node
	var typ *Node
	var typ1 *Node
	var v *Node
	var l *NodeList

	// If no closure vars, don't bother wrapping.
	if func_.Cvars == nil {
		return func_.Closure.Nname
	}
	// Create closure in the form of a composite literal.
	// supposing the closure captures an int i and a string s
	// and has one float64 argument and no results,
	// the generated code looks like:
	//
	//	clos = &struct{F uintptr; A0 *int; A1 *string}{func·001, &i, &s}
	//
	// The use of the struct provides type information to the garbage
	// collector so that it can walk the closure. We could use (in this case)
	// [3]unsafe.Pointer instead, but that would leave the gc in the dark.
	// The information appears in the binary in the form of type descriptors;
	// the struct is unnamed so that closures in multiple packages with the
	// same struct type can share the descriptor.
	typ = Nod(OTSTRUCT, nil, nil)

	typ.List = list1(Nod(ODCLFIELD, newname(Lookup("F")), typenod(Types[TUINTPTR])))
	// one field per captured variable; by-reference captures hold a pointer
	for l = func_.Cvars; l != nil; l = l.Next {
		v = l.N
		if v.Op == OXXX {
			continue
		}
		typ1 = typenod(v.Type)
		if !(v.Byval != 0) {
			typ1 = Nod(OIND, typ1, nil)
		}
		typ.List = list(typ.List, Nod(ODCLFIELD, newname(v.Sym), typ1))
	}
	clos = Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
	clos.Esc = func_.Esc
	clos.Right.Implicit = 1
	clos.List = concat(list1(Nod(OCFUNC, func_.Closure.Nname, nil)), func_.Enter)
	// Force type conversion from *struct to the func type.
	clos = Nod(OCONVNOP, clos, nil)

	clos.Type = func_.Type
	typecheck(&clos, Erv)
	// typecheck will insert a PTRLIT node under CONVNOP,
	// tag it with escape analysis result.
	clos.Left.Esc = func_.Esc
	// non-escaping temp to use, if any.
	// orderexpr did not compute the type; fill it in now.
	if func_.Alloc != nil {
		func_.Alloc.Type = clos.Left.Left.Type
		func_.Alloc.Orig.Type = func_.Alloc.Type
		clos.Left.Right = func_.Alloc
		func_.Alloc = nil
	}
	walkexpr(&clos, init)
	return clos
}
// typecheckpartialcall converts the method value expression fn
// (x.M, with fn.Op ODOTINTER or ODOTMETH) into an OCALLPART node
// whose wrapper function is built by makepartialcall.
func typecheckpartialcall(fn *Node, sym *Node) {
	switch fn.Op {
	case ODOTINTER,
		ODOTMETH:
		break

	default:
		Fatal("invalid typecheckpartialcall")
	}

	// Create top-level function.
	fn.Nname = makepartialcall(fn, fn.Type, sym)

	fn.Right = sym
	fn.Op = OCALLPART
	fn.Type = fn.Nname.Type
}
var makepartialcall_gopkg *Pkg
// makepartialcall returns the ODCLFUNC node for the wrapper function
// implementing the method value fn (function type t0, method symbol
// meth).  The wrapper calls the method on the receiver stored in its
// closure word; wrappers are cached per (receiver type, method) via
// the SymUniq flag on the generated symbol.
func makepartialcall(fn *Node, t0 *Type, meth *Node) *Node {
	var ptr *Node
	var n *Node
	var fld *Node
	var call *Node
	var xtype *Node
	var xfunc *Node
	var cv *Node
	var savecurfn *Node
	var rcvrtype *Type
	var basetype *Type
	var t *Type
	var body *NodeList
	var l *NodeList
	var callargs *NodeList
	var retargs *NodeList
	var p string
	var sym *Sym
	var spkg *Pkg
	var i int
	var ddd int

	// TODO: names are not right
	rcvrtype = fn.Left.Type

	if exportname(meth.Sym.Name) {
		p = fmt.Sprintf("%v.%s·fm", Tconv(rcvrtype, obj.FmtLeft|obj.FmtShort), meth.Sym.Name)
	} else {
		p = fmt.Sprintf("%v.(%v)·fm", Tconv(rcvrtype, obj.FmtLeft|obj.FmtShort), Sconv(meth.Sym, obj.FmtLeft))
	}
	basetype = rcvrtype
	if Isptr[rcvrtype.Etype] != 0 {
		basetype = basetype.Type
	}
	if basetype.Etype != TINTER && basetype.Sym == nil {
		Fatal("missing base type for %v", Tconv(rcvrtype, 0))
	}

	// Declare the wrapper in the package of the receiver's base type,
	// falling back to a synthetic "go" package for interface types.
	spkg = nil

	if basetype.Sym != nil {
		spkg = basetype.Sym.Pkg
	}
	if spkg == nil {
		if makepartialcall_gopkg == nil {
			makepartialcall_gopkg = mkpkg(newstrlit("go"))
		}
		spkg = makepartialcall_gopkg
	}

	sym = Pkglookup(p, spkg)

	// Reuse a wrapper already generated for this (type, method) pair.
	if sym.Flags&SymUniq != 0 {
		return sym.Def
	}
	sym.Flags |= SymUniq

	savecurfn = Curfn
	Curfn = nil

	xtype = Nod(OTFUNC, nil, nil)
	i = 0
	l = nil
	callargs = nil
	ddd = 0
	xfunc = Nod(ODCLFUNC, nil, nil)
	Curfn = xfunc

	// Mirror the method's input parameters onto the wrapper,
	// remembering them as arguments for the forwarded call.
	for t = getinargx(t0).Type; t != nil; t = t.Down {
		namebuf = fmt.Sprintf("a%d", i)
		i++
		n = newname(Lookup(namebuf))
		n.Class = PPARAM
		xfunc.Dcl = list(xfunc.Dcl, n)
		callargs = list(callargs, n)
		fld = Nod(ODCLFIELD, n, typenod(t.Type))
		if t.Isddd != 0 {
			fld.Isddd = 1
			ddd = 1
		}

		l = list(l, fld)
	}

	xtype.List = l
	i = 0
	l = nil
	retargs = nil

	// Mirror the method's results, remembering them for the final
	// assignment and return.
	for t = getoutargx(t0).Type; t != nil; t = t.Down {
		namebuf = fmt.Sprintf("r%d", i)
		i++
		n = newname(Lookup(namebuf))
		n.Class = PPARAMOUT
		xfunc.Dcl = list(xfunc.Dcl, n)
		retargs = list(retargs, n)
		l = list(l, Nod(ODCLFIELD, n, typenod(t.Type)))
	}

	xtype.Rlist = l

	xfunc.Dupok = 1
	xfunc.Nname = newname(sym)
	xfunc.Nname.Sym.Flags |= SymExported // disable export
	xfunc.Nname.Ntype = xtype
	xfunc.Nname.Defn = xfunc
	declare(xfunc.Nname, PFUNC)

	// Declare and initialize variable holding receiver.
	body = nil

	xfunc.Needctxt = 1
	cv = Nod(OCLOSUREVAR, nil, nil)
	cv.Xoffset = int64(Widthptr)
	cv.Type = rcvrtype
	if int(cv.Type.Align) > Widthptr {
		cv.Xoffset = int64(cv.Type.Align)
	}
	ptr = Nod(ONAME, nil, nil)
	ptr.Sym = Lookup("rcvr")
	ptr.Class = PAUTO
	ptr.Addable = 1
	ptr.Ullman = 1
	ptr.Used = 1
	ptr.Curfn = xfunc
	xfunc.Dcl = list(xfunc.Dcl, ptr)

	// Pointer and interface receivers are stored directly in the
	// closure word; other receivers are stored by address.
	if Isptr[rcvrtype.Etype] != 0 || Isinter(rcvrtype) != 0 {
		ptr.Ntype = typenod(rcvrtype)
		body = list(body, Nod(OAS, ptr, cv))
	} else {
		ptr.Ntype = typenod(Ptrto(rcvrtype))
		body = list(body, Nod(OAS, ptr, Nod(OADDR, cv, nil)))
	}

	// Forward the call to the real method and return its results.
	call = Nod(OCALL, Nod(OXDOT, ptr, meth), nil)

	call.List = callargs
	call.Isddd = uint8(ddd)
	if t0.Outtuple == 0 {
		body = list(body, call)
	} else {
		n = Nod(OAS2, nil, nil)
		n.List = retargs
		n.Rlist = list1(call)
		body = list(body, n)
		n = Nod(ORETURN, nil, nil)
		body = list(body, n)
	}

	xfunc.Nbody = body

	typecheck(&xfunc, Etop)
	sym.Def = xfunc
	xtop = list(xtop, xfunc)
	Curfn = savecurfn

	return xfunc
}
// walkpartialcall converts the OCALLPART node n (a method value x.M)
// into a pointer to a composite literal holding the wrapper function
// and the receiver, appending any needed statements to init.
func walkpartialcall(n *Node, init **NodeList) *Node {
	var clos *Node
	var typ *Node

	// Create closure in the form of a composite literal.
	// For x.M with receiver (x) type T, the generated code looks like:
	//
	//	clos = &struct{F uintptr; R T}{M.T·f, x}
	//
	// Like walkclosure above.
	if Isinter(n.Left.Type) != 0 {
		// Trigger panic for method on nil interface now.
		// Otherwise it happens in the wrapper and is confusing.
		n.Left = cheapexpr(n.Left, init)

		checknil(n.Left, init)
	}

	typ = Nod(OTSTRUCT, nil, nil)
	typ.List = list1(Nod(ODCLFIELD, newname(Lookup("F")), typenod(Types[TUINTPTR])))
	typ.List = list(typ.List, Nod(ODCLFIELD, newname(Lookup("R")), typenod(n.Left.Type)))

	clos = Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
	clos.Esc = n.Esc
	clos.Right.Implicit = 1
	clos.List = list1(Nod(OCFUNC, n.Nname.Nname, nil))
	clos.List = list(clos.List, n.Left)

	// Force type conversion from *struct to the func type.
	clos = Nod(OCONVNOP, clos, nil)

	clos.Type = n.Type

	typecheck(&clos, Erv)

	// typecheck will insert a PTRLIT node under CONVNOP,
	// tag it with escape analysis result.
	clos.Left.Esc = n.Esc

	// non-escaping temp to use, if any.
	// orderexpr did not compute the type; fill it in now.
	if n.Alloc != nil {
		n.Alloc.Type = clos.Left.Left.Type
		n.Alloc.Orig.Type = n.Alloc.Type
		clos.Left.Right = n.Alloc
		n.Alloc = nil
	}

	walkexpr(&clos, init)

	return clos
}

1764
src/cmd/internal/gc/const.go Normal file

File diff suppressed because it is too large Load diff

503
src/cmd/internal/gc/cplx.go Normal file
View file

@ -0,0 +1,503 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import "cmd/internal/obj"
// CASE packs the pair (a, b) into a single int, with a in the upper
// 16 bits and b in the lower 16 bits, so two small codes can be
// switched on together.
func CASE(a int, b int) int {
	return (a << 16) | b
}
// overlap_cplx reports (as 0/1) whether f and t might be overlapping
// stack references.
// The check is not exact, because it's hard to test for the stack
// register in portable code; close enough: worst case we allocate an
// extra temporary and the registerizer cleans it up.
func overlap_cplx(f *Node, t *Node) int {
	if f.Op != OINDREG || t.Op != OINDREG {
		return 0
	}
	if f.Xoffset+f.Type.Width < t.Xoffset {
		return 0
	}
	if t.Xoffset+t.Type.Width < f.Xoffset {
		return 0
	}
	return 1
}
// Complexbool generates a branch to to for the complex comparison
// nl op nr (op is OEQ or ONE), taking the branch when the comparison
// result equals true_.  likely is the branch-prediction hint passed
// through to the back end.
func Complexbool(op int, nl *Node, nr *Node, true_ bool, likely int, to *obj.Prog) {
	var tnl Node
	var tnr Node
	var n1 Node
	var n2 Node
	var n3 Node
	var n4 Node
	var na Node
	var nb Node
	var nc Node

	// make both sides addable in ullman order
	if nr != nil {
		if nl.Ullman > nr.Ullman && !(nl.Addable != 0) {
			Tempname(&tnl, nl.Type)
			Thearch.Cgen(nl, &tnl)
			nl = &tnl
		}

		if !(nr.Addable != 0) {
			Tempname(&tnr, nr.Type)
			Thearch.Cgen(nr, &tnr)
			nr = &tnr
		}
	}

	if !(nl.Addable != 0) {
		Tempname(&tnl, nl.Type)
		Thearch.Cgen(nl, &tnl)
		nl = &tnl
	}

	// build tree
	// real(l) == real(r) && imag(l) == imag(r)

	subnode(&n1, &n2, nl)

	subnode(&n3, &n4, nr)

	// na = nb && nc; nb and nc are filled in just below.
	na = Node{}

	na.Op = OANDAND
	na.Left = &nb
	na.Right = &nc
	na.Type = Types[TBOOL]

	nb = Node{}
	nb.Op = OEQ
	nb.Left = &n1
	nb.Right = &n3
	nb.Type = Types[TBOOL]

	nc = Node{}
	nc.Op = OEQ
	nc.Left = &n2
	nc.Right = &n4
	nc.Type = Types[TBOOL]

	// != is the negation of the == tree.
	if op == ONE {
		true_ = !true_
	}

	Thearch.Bgen(&na, true_, likely, to)
}
// subnode breaks the addable complex node nc into nr (real part) and
// ni (imaginary part).  Complex constants split into two float
// literals; otherwise the imaginary part is the same operand at an
// offset of one float width.
func subnode(nr *Node, ni *Node, nc *Node) {
	var tc int
	var t *Type

	if !(nc.Addable != 0) {
		Fatal("subnode not addable")
	}

	// t is the float type of each component.
	tc = Simsimtype(nc.Type)

	tc = cplxsubtype(tc)
	t = Types[tc]

	if nc.Op == OLITERAL {
		nodfconst(nr, t, &nc.Val.U.Cval.Real)
		nodfconst(ni, t, &nc.Val.U.Cval.Imag)
		return
	}

	*nr = *nc
	nr.Type = t

	*ni = *nc
	ni.Type = t
	ni.Xoffset += t.Width
}
// minus generates code for res = -nl by building a transient OMINUS
// node and handing it to the architecture's code generator.
func minus(nl *Node, res *Node) {
	var neg Node

	neg = Node{}
	neg.Op = OMINUS
	neg.Left = nl
	neg.Type = nl.Type
	Thearch.Cgen(&neg, res)
}
// complexminus generates res = -nl for a complex operand by negating
// the real and imaginary halves independently:
//
//	real(res) = -real(nl)
//	imag(res) = -imag(nl)
func complexminus(nl *Node, res *Node) {
	var srcre, srcim, dstre, dstim Node

	subnode(&srcre, &srcim, nl)
	subnode(&dstre, &dstim, res)
	minus(&srcre, &dstre)
	minus(&srcim, &dstim)
}
// complexadd generates res = nl op nr (op is OADD or OSUB) for
// complex operands by applying op componentwise:
//
//	real(res) = real(nl) op real(nr)
//	imag(res) = imag(nl) op imag(nr)
func complexadd(op int, nl *Node, nr *Node, res *Node) {
	var lre, lim, rre, rim, dre, dim, expr Node

	subnode(&lre, &lim, nl)
	subnode(&rre, &rim, nr)
	subnode(&dre, &dim, res)

	// real part
	expr = Node{}
	expr.Op = uint8(op)
	expr.Left = &lre
	expr.Right = &rre
	expr.Type = lre.Type
	Thearch.Cgen(&expr, &dre)

	// imaginary part
	expr = Node{}
	expr.Op = uint8(op)
	expr.Left = &lim
	expr.Right = &rim
	expr.Type = lim.Type
	Thearch.Cgen(&expr, &dim)
}
// complexmul generates res = nl * nr for complex operands:
//
//	tmp       = real(nl)*real(nr) - imag(nl)*imag(nr)
//	imag(res) = real(nl)*imag(nr) + imag(nl)*real(nr)
//	real(res) = tmp
//
// The temporary holds the real part until the imaginary part has
// been computed, in case res aliases nl or nr.
func complexmul(nl *Node, nr *Node, res *Node) {
	var n1 Node
	var n2 Node
	var n3 Node
	var n4 Node
	var n5 Node
	var n6 Node
	var rm1 Node
	var rm2 Node
	var ra Node
	var tmp Node

	subnode(&n1, &n2, nl)
	subnode(&n3, &n4, nr)
	subnode(&n5, &n6, res)
	Tempname(&tmp, n5.Type)

	// real part -> tmp
	rm1 = Node{}

	rm1.Op = OMUL
	rm1.Left = &n1
	rm1.Right = &n3
	rm1.Type = n1.Type

	rm2 = Node{}
	rm2.Op = OMUL
	rm2.Left = &n2
	rm2.Right = &n4
	rm2.Type = n2.Type

	ra = Node{}
	ra.Op = OSUB
	ra.Left = &rm1
	ra.Right = &rm2
	ra.Type = rm1.Type
	Thearch.Cgen(&ra, &tmp)

	// imag part
	rm1 = Node{}

	rm1.Op = OMUL
	rm1.Left = &n1
	rm1.Right = &n4
	rm1.Type = n1.Type

	rm2 = Node{}
	rm2.Op = OMUL
	rm2.Left = &n2
	rm2.Right = &n3
	rm2.Type = n2.Type

	ra = Node{}
	ra.Op = OADD
	ra.Left = &rm1
	ra.Right = &rm2
	ra.Type = rm1.Type
	Thearch.Cgen(&ra, &n6)

	// tmp ->real part
	Thearch.Cgen(&tmp, &n5)
}
// nodfconst initializes n as an addable floating-point literal of
// type t with value fval.  t must be a float type.
func nodfconst(n *Node, t *Type, fval *Mpflt) {
	*n = Node{}
	n.Op = OLITERAL
	n.Addable = 1
	ullmancalc(n)
	n.Val.U.Fval = fval
	n.Val.Ctype = CTFLT
	n.Type = t

	if !(Isfloat[t.Etype] != 0) {
		Fatal("nodfconst: bad type %v", Tconv(t, 0))
	}
}
/*
 * cplx.c
 */

// Complexop reports (as 0/1) whether the expression n, with result
// destination res, should be handled by the complex-number code
// generator Complexgen.
func Complexop(n *Node, res *Node) int {
	maybe := false
	if n != nil && n.Type != nil && Iscomplex[n.Type.Etype] != 0 {
		maybe = true
	}
	if res != nil && res.Type != nil && Iscomplex[res.Type.Etype] != 0 {
		maybe = true
	}

	if maybe {
		// Only these ops are implemented for complex operands.
		switch n.Op {
		case OCONV, // implemented ops
			OADD,
			OSUB,
			OMUL,
			OMINUS,
			OCOMPLEX,
			OREAL,
			OIMAG:
			return 1

		case ODOT,
			ODOTPTR,
			OINDEX,
			OIND,
			ONAME:
			return 1
		}

		//dump("\ncomplex-no", n);
		return 0
	}

	// OREAL/OIMAG take a complex operand even when the result type
	// is not complex.
	if n.Op == OREAL || n.Op == OIMAG {
		//dump("\ncomplex-yes", n);
		return 1
	}

	return 0
}
// Complexmove copies the complex value f to the addable node t,
// converting between complex64 and complex128 as needed by moving
// the real and imaginary parts separately.
func Complexmove(f *Node, t *Node) {
	var ft int
	var tt int
	var n1 Node
	var n2 Node
	var n3 Node
	var n4 Node
	var tmp Node

	if Debug['g'] != 0 {
		Dump("\ncomplexmove-f", f)
		Dump("complexmove-t", t)
	}

	if !(t.Addable != 0) {
		Fatal("complexmove: to not addable")
	}

	ft = Simsimtype(f.Type)
	tt = Simsimtype(t.Type)
	switch uint32(ft)<<16 | uint32(tt) {
	default:
		Fatal("complexmove: unknown conversion: %v -> %v\n", Tconv(f.Type, 0), Tconv(t.Type, 0))
		fallthrough // unreachable: Fatal does not return (c2go artifact)

	// complex to complex move/convert.
	// make f addable.
	// also use temporary if possible stack overlap.
	case TCOMPLEX64<<16 | TCOMPLEX64,
		TCOMPLEX64<<16 | TCOMPLEX128,
		TCOMPLEX128<<16 | TCOMPLEX64,
		TCOMPLEX128<<16 | TCOMPLEX128:
		if !(f.Addable != 0) || overlap_cplx(f, t) != 0 {
			Tempname(&tmp, f.Type)
			Complexmove(f, &tmp)
			f = &tmp
		}

		subnode(&n1, &n2, f)
		subnode(&n3, &n4, t)

		// Moving the halves independently performs any float
		// width conversion.
		Thearch.Cgen(&n1, &n3)

		Thearch.Cgen(&n2, &n4)
	}
}
// Complexgen generates code to compute the complex-valued expression
// n into res, dispatching on the operation and falling back to
// Complexmove for plain copies and conversions.
func Complexgen(n *Node, res *Node) {
	var nl *Node
	var nr *Node
	var tnl Node
	var tnr Node
	var n1 Node
	var n2 Node
	var tmp Node
	var tl int
	var tr int

	if Debug['g'] != 0 {
		Dump("\ncomplexgen-n", n)
		Dump("complexgen-res", res)
	}

	// Conversions that do not change representation are skipped.
	for n.Op == OCONVNOP {
		n = n.Left
	}

	// pick off float/complex opcodes
	switch n.Op {
	case OCOMPLEX:
		if res.Addable != 0 {
			subnode(&n1, &n2, res)
			// tmp guards against res aliasing the operands.
			Tempname(&tmp, n1.Type)
			Thearch.Cgen(n.Left, &tmp)
			Thearch.Cgen(n.Right, &n2)
			Thearch.Cgen(&tmp, &n1)
			return
		}

	case OREAL,
		OIMAG:
		nl = n.Left
		if !(nl.Addable != 0) {
			Tempname(&tmp, nl.Type)
			Complexgen(nl, &tmp)
			nl = &tmp
		}

		subnode(&n1, &n2, nl)
		if n.Op == OREAL {
			Thearch.Cgen(&n1, res)
			return
		}

		Thearch.Cgen(&n2, res)
		return
	}

	// perform conversion from n to res
	tl = Simsimtype(res.Type)

	tl = cplxsubtype(tl)
	tr = Simsimtype(n.Type)
	tr = cplxsubtype(tr)
	if tl != tr {
		if !(n.Addable != 0) {
			Tempname(&n1, n.Type)
			Complexmove(n, &n1)
			n = &n1
		}

		Complexmove(n, res)
		return
	}

	if !(res.Addable != 0) {
		Thearch.Igen(res, &n1, nil)
		Thearch.Cgen(n, &n1)
		Thearch.Regfree(&n1)
		return
	}

	if n.Addable != 0 {
		Complexmove(n, res)
		return
	}

	switch n.Op {
	default:
		Dump("complexgen: unknown op", n)
		Fatal("complexgen: unknown op %v", Oconv(int(n.Op), 0))
		fallthrough // unreachable: Fatal does not return (c2go artifact)

	case ODOT,
		ODOTPTR,
		OINDEX,
		OIND,
		ONAME, // PHEAP or PPARAMREF var
		OCALLFUNC,
		OCALLMETH,
		OCALLINTER:
		Thearch.Igen(n, &n1, res)

		Complexmove(&n1, res)
		Thearch.Regfree(&n1)
		return

	case OCONV,
		OADD,
		OSUB,
		OMUL,
		OMINUS,
		OCOMPLEX,
		OREAL,
		OIMAG:
		break
	}

	nl = n.Left

	if nl == nil {
		return
	}
	nr = n.Right

	// make both sides addable in ullman order
	if nr != nil {
		if nl.Ullman > nr.Ullman && !(nl.Addable != 0) {
			Tempname(&tnl, nl.Type)
			Thearch.Cgen(nl, &tnl)
			nl = &tnl
		}

		if !(nr.Addable != 0) {
			Tempname(&tnr, nr.Type)
			Thearch.Cgen(nr, &tnr)
			nr = &tnr
		}
	}

	if !(nl.Addable != 0) {
		Tempname(&tnl, nl.Type)
		Thearch.Cgen(nl, &tnl)
		nl = &tnl
	}

	switch n.Op {
	default:
		Fatal("complexgen: unknown op %v", Oconv(int(n.Op), 0))

	case OCONV:
		Complexmove(nl, res)

	case OMINUS:
		complexminus(nl, res)

	case OADD,
		OSUB:
		complexadd(int(n.Op), nl, nr, res)

	case OMUL:
		complexmul(nl, nr, res)
	}
}

1565
src/cmd/internal/gc/dcl.go Normal file

File diff suppressed because it is too large Load diff

1437
src/cmd/internal/gc/esc.go Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,596 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/internal/obj"
"fmt"
"sort"
"unicode"
"unicode/utf8"
)
var asmlist *NodeList
// exportsym marks n's symbol as exported and queues n on exportlist,
// diagnosing a symbol that was already marked as package-local.
func exportsym(n *Node) {
	if n == nil || n.Sym == nil {
		return
	}
	s := n.Sym
	if s.Flags&(SymExport|SymPackage) == 0 {
		s.Flags |= SymExport
		if Debug['E'] != 0 {
			fmt.Printf("export symbol %v\n", Sconv(s, 0))
		}
		exportlist = list(exportlist, n)
		return
	}
	// Already seen; a package-local mark means the name was used
	// in a conflicting way earlier.
	if s.Flags&SymPackage != 0 {
		Yyerror("export/package mismatch: %v", Sconv(s, 0))
	}
}
// exportname reports whether the identifier s is exported, i.e.
// whether it starts with an upper-case letter.  s must be non-empty.
func exportname(s string) bool {
	// Fast path for ASCII identifiers.
	if s[0] < utf8.RuneSelf {
		return 'A' <= s[0] && s[0] <= 'Z'
	}
	r, _ := utf8.DecodeRuneInString(s)
	return unicode.IsUpper(r)
}
// initname reports (as 0/1) whether s is the name of the package
// initialization function "init".
func initname(s string) int {
	if s == "init" {
		return 1
	}
	return 0
}
// exportedsym reports (as 0/1) whether a symbol will be visible
// to files that import our package.
func exportedsym(sym *Sym) int {
	// Builtins are visible everywhere.
	if sym.Pkg == builtinpkg || sym.Origpkg == builtinpkg {
		return 1
	}
	// Otherwise only exported names of the package being compiled.
	if sym.Pkg == localpkg && exportname(sym.Name) {
		return 1
	}
	return 0
}
// autoexport marks the top-level declaration n (declaration context
// ctxt) for export when its name warrants it, and queues it for the
// -asmhdr output when that flag is in effect.
func autoexport(n *Node, ctxt int) {
	if n == nil || n.Sym == nil {
		return
	}
	if (ctxt != PEXTERN && ctxt != PFUNC) || dclcontext != PEXTERN {
		return
	}
	if n.Ntype != nil && n.Ntype.Op == OTFUNC && n.Ntype.Left != nil { // method
		return
	}

	// -A is for cmd/gc/mkbuiltin script, so export everything
	if Debug['A'] != 0 || exportname(n.Sym.Name) || initname(n.Sym.Name) != 0 {
		exportsym(n)
	}
	if asmhdr != "" && n.Sym.Pkg == localpkg && !(n.Sym.Flags&SymAsm != 0) {
		n.Sym.Flags |= SymAsm
		asmlist = list(asmlist, n)
	}
}
// dumppkg writes an import line for package p to the export data,
// at most once per package; nil, the package being compiled, and
// the builtin package are skipped.
func dumppkg(p *Pkg) {
	if p == nil || p == localpkg || p.Exported != 0 || p == builtinpkg {
		return
	}
	p.Exported = 1
	suffix := ""
	if p.Direct == 0 {
		// Only reached transitively, not imported directly.
		suffix = " // indirect"
	}
	fmt.Fprintf(bout, "\timport %s \"%v\"%s\n", p.Name, Zconv(p.Path, 0), suffix)
}
// reexportdeplist applies reexportdep to every node in ll, collecting
// anything an inline body will need when re-exported.
func reexportdeplist(ll *NodeList) {
	for l := ll; l != nil; l = l.Next {
		reexportdep(l.N)
	}
}
// reexportdep scans the inline-body node n for names (functions,
// types, constants) defined in this package that importers will
// need, queuing them on exportlist, then recurses into all child
// nodes and lists.
func reexportdep(n *Node) {
	var t *Type

	if !(n != nil) {
		return
	}

	//print("reexportdep %+hN\n", n);
	switch n.Op {
	case ONAME:
		switch n.Class &^ PHEAP {
		// methods will be printed along with their type
		// nodes for T.Method expressions
		case PFUNC:
			if n.Left != nil && n.Left.Op == OTYPE {
				break
			}

			// nodes for method calls.
			if !(n.Type != nil) || n.Type.Thistuple > 0 {
				break
			}
			fallthrough

			// fallthrough
		case PEXTERN:
			if n.Sym != nil && !(exportedsym(n.Sym) != 0) {
				if Debug['E'] != 0 {
					fmt.Printf("reexport name %v\n", Sconv(n.Sym, 0))
				}
				exportlist = list(exportlist, n)
			}
		}

	// Local variables in the bodies need their type.
	case ODCL:
		t = n.Left.Type

		if t != Types[t.Etype] && t != idealbool && t != idealstring {
			if Isptr[t.Etype] != 0 {
				t = t.Type
			}
			if t != nil && t.Sym != nil && t.Sym.Def != nil && !(exportedsym(t.Sym) != 0) {
				if Debug['E'] != 0 {
					fmt.Printf("reexport type %v from declaration\n", Sconv(t.Sym, 0))
				}
				exportlist = list(exportlist, t.Sym.Def)
			}
		}

	case OLITERAL:
		t = n.Type
		if t != Types[n.Type.Etype] && t != idealbool && t != idealstring {
			if Isptr[t.Etype] != 0 {
				t = t.Type
			}
			if t != nil && t.Sym != nil && t.Sym.Def != nil && !(exportedsym(t.Sym) != 0) {
				if Debug['E'] != 0 {
					fmt.Printf("reexport literal type %v\n", Sconv(t.Sym, 0))
				}
				exportlist = list(exportlist, t.Sym.Def)
			}
		}
		fallthrough

		// fallthrough
	case OTYPE:
		if n.Sym != nil && !(exportedsym(n.Sym) != 0) {
			if Debug['E'] != 0 {
				fmt.Printf("reexport literal/type %v\n", Sconv(n.Sym, 0))
			}
			exportlist = list(exportlist, n)
		}

	// for operations that need a type when rendered, put the type on the export list.
	case OCONV,
		OCONVIFACE,
		OCONVNOP,
		ORUNESTR,
		OARRAYBYTESTR,
		OARRAYRUNESTR,
		OSTRARRAYBYTE,
		OSTRARRAYRUNE,
		ODOTTYPE,
		ODOTTYPE2,
		OSTRUCTLIT,
		OARRAYLIT,
		OPTRLIT,
		OMAKEMAP,
		OMAKESLICE,
		OMAKECHAN:
		t = n.Type

		if !(t.Sym != nil) && t.Type != nil {
			t = t.Type
		}
		if t != nil && t.Sym != nil && t.Sym.Def != nil && !(exportedsym(t.Sym) != 0) {
			if Debug['E'] != 0 {
				fmt.Printf("reexport type for expression %v\n", Sconv(t.Sym, 0))
			}
			exportlist = list(exportlist, t.Sym.Def)
		}
	}

	// Recurse into every child node and node list.
	reexportdep(n.Left)

	reexportdep(n.Right)
	reexportdeplist(n.List)
	reexportdeplist(n.Rlist)
	reexportdeplist(n.Ninit)
	reexportdep(n.Ntest)
	reexportdep(n.Nincr)
	reexportdeplist(n.Nbody)
	reexportdeplist(n.Nelse)
}
// dumpexportconst writes the export data line for the constant
// named s, including its type when the constant is not ideal.
func dumpexportconst(s *Sym) {
	var n *Node
	var t *Type

	n = s.Def
	typecheck(&n, Erv)
	if n == nil || n.Op != OLITERAL {
		Fatal("dumpexportconst: oconst nil: %v", Sconv(s, 0))
	}

	t = n.Type // may or may not be specified
	dumpexporttype(t)

	if t != nil && !(isideal(t) != 0) {
		fmt.Fprintf(bout, "\tconst %v %v = %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp), Vconv(&n.Val, obj.FmtSharp))
	} else {
		fmt.Fprintf(bout, "\tconst %v = %v\n", Sconv(s, obj.FmtSharp), Vconv(&n.Val, obj.FmtSharp))
	}
}
// dumpexportvar writes the export data for the variable or function
// named s, including the inline body of inlinable functions.
func dumpexportvar(s *Sym) {
	var n *Node
	var t *Type

	n = s.Def
	typecheck(&n, Erv|Ecall)
	if n == nil || n.Type == nil {
		Yyerror("variable exported but not defined: %v", Sconv(s, 0))
		return
	}

	t = n.Type
	dumpexporttype(t)

	if t.Etype == TFUNC && n.Class == PFUNC {
		if n.Inl != nil {
			// when lazily typechecking inlined bodies, some re-exported ones may not have been typechecked yet.
			// currently that can leave unresolved ONONAMEs in import-dot-ed packages in the wrong package
			if Debug['l'] < 2 {
				typecheckinl(n)
			}

			// NOTE: The space after %#S here is necessary for ld's export data parser.
			fmt.Fprintf(bout, "\tfunc %v %v { %v }\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp), Hconv(n.Inl, obj.FmtSharp))

			reexportdeplist(n.Inl)
		} else {
			fmt.Fprintf(bout, "\tfunc %v %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp))
		}
	} else {
		fmt.Fprintf(bout, "\tvar %v %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp))
	}
}
type methodbyname []*Type
// Len implements sort.Interface.
func (x methodbyname) Len() int {
	return len(x)
}
// Swap implements sort.Interface.
func (x methodbyname) Swap(i, j int) {
	x[i], x[j] = x[j], x[i]
}
// Less implements sort.Interface, ordering methods by symbol name.
func (x methodbyname) Less(i, j int) bool {
	return stringsCompare(x[i].Sym.Name, x[j].Sym.Name) < 0
}
// dumpexporttype writes the export data for type t (at most once,
// guarded by t.Printed), followed by its methods sorted by name for
// deterministic output.
func dumpexporttype(t *Type) {
	var f *Type
	var m []*Type
	var i int
	var n int

	if t == nil {
		return
	}
	if t.Printed != 0 || t == Types[t.Etype] || t == bytetype || t == runetype || t == errortype {
		return
	}
	t.Printed = 1

	if t.Sym != nil && t.Etype != TFIELD {
		dumppkg(t.Sym.Pkg)
	}

	dumpexporttype(t.Type)
	dumpexporttype(t.Down)

	if t.Sym == nil || t.Etype == TFIELD {
		return
	}

	// Collect the methods, then sort them by name.
	n = 0

	for f = t.Method; f != nil; f = f.Down {
		dumpexporttype(f)
		n++
	}

	m = make([]*Type, n)
	i = 0
	for f = t.Method; f != nil; f = f.Down {
		m[i] = f
		i++
	}

	sort.Sort(methodbyname(m[:n]))

	fmt.Fprintf(bout, "\ttype %v %v\n", Sconv(t.Sym, obj.FmtSharp), Tconv(t, obj.FmtSharp|obj.FmtLong))
	for i = 0; i < n; i++ {
		f = m[i]
		if f.Nointerface != 0 {
			fmt.Fprintf(bout, "\t//go:nointerface\n")
		}
		if f.Type.Nname != nil && f.Type.Nname.Inl != nil { // nname was set by caninl
			// when lazily typechecking inlined bodies, some re-exported ones may not have been typechecked yet.
			// currently that can leave unresolved ONONAMEs in import-dot-ed packages in the wrong package
			if Debug['l'] < 2 {
				typecheckinl(f.Type.Nname)
			}
			fmt.Fprintf(bout, "\tfunc (%v) %v %v { %v }\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp), Hconv(f.Type.Nname.Inl, obj.FmtSharp))
			reexportdeplist(f.Type.Nname.Inl)
		} else {
			fmt.Fprintf(bout, "\tfunc (%v) %v %v\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp))
		}
	}
}
// dumpsym writes the export data for symbol s (at most once, guarded
// by SymExported), dispatching on the kind of its definition.
func dumpsym(s *Sym) {
	if s.Flags&SymExported != 0 {
		return
	}
	s.Flags |= SymExported

	if s.Def == nil {
		Yyerror("unknown export symbol: %v", Sconv(s, 0))
		return
	}

	// print("dumpsym %O %+S\n", s->def->op, s);
	dumppkg(s.Pkg)

	switch s.Def.Op {
	default:
		Yyerror("unexpected export symbol: %v %v", Oconv(int(s.Def.Op), 0), Sconv(s, 0))

	case OLITERAL:
		dumpexportconst(s)

	case OTYPE:
		if s.Def.Type.Etype == TFORW {
			Yyerror("export of incomplete type %v", Sconv(s, 0))
		} else {
			dumpexporttype(s.Def.Type)
		}

	case ONAME:
		dumpexportvar(s)
	}
}
// dumpexport writes the $$-delimited export data section: the
// package clause, directly imported packages, and every symbol
// queued on exportlist.
func dumpexport() {
	var l *NodeList
	var i int32
	var lno int32
	var p *Pkg

	// Preserve lineno: dumpsym sets it per symbol for diagnostics.
	lno = lineno

	fmt.Fprintf(bout, "\n$$\npackage %s", localpkg.Name)
	if safemode != 0 {
		fmt.Fprintf(bout, " safe")
	}
	fmt.Fprintf(bout, "\n")

	for i = 0; i < int32(len(phash)); i++ {
		for p = phash[i]; p != nil; p = p.Link {
			if p.Direct != 0 {
				dumppkg(p)
			}
		}
	}

	for l = exportlist; l != nil; l = l.Next {
		lineno = l.N.Lineno
		dumpsym(l.N.Sym)
	}

	fmt.Fprintf(bout, "\n$$\n")
	lineno = lno
}
/*
 * import
 */

// importsym returns the symbol s after recording that it arrived via
// an import; op is the kind of declaration expected for it.  A kind
// mismatch with an existing definition is reported as a
// redeclaration.
func importsym(s *Sym, op int) *Sym {
	if s.Def != nil && int(s.Def.Op) != op {
		redeclare(s, fmt.Sprintf("during import \"%v\"", Zconv(importpkg.Path, 0)))
	}

	// mark the symbol so it is not reexported
	if s.Def == nil {
		if exportname(s.Name) || initname(s.Name) != 0 {
			s.Flags |= SymExport
		} else {
			s.Flags |= SymPackage // package scope
		}
	}

	return s
}
/*
 * return the type pkg.name, forward declaring if needed
 */
func pkgtype(s *Sym) *Type {
	var t *Type

	importsym(s, OTYPE)
	// Create a forward (TFORW) declaration when the name is not yet
	// defined as a type; importtype completes it later.
	if s.Def == nil || s.Def.Op != OTYPE {
		t = typ(TFORW)
		t.Sym = s
		s.Def = typenod(t)
	}

	if s.Def.Type == nil {
		Yyerror("pkgtype %v", Sconv(s, 0))
	}
	return s.Def.Type
}
// importimport records (for human-readable messages) that package
// name s goes with import path z, checking for conflicting names and
// for an import cycle back into the package being compiled.
func importimport(s *Sym, z *Strlit) {
	// Informational: record package name
	// associated with import path, for use in
	// human-readable messages.
	var p *Pkg

	if isbadimport(z) {
		errorexit()
	}
	p = mkpkg(z)
	if p.Name == "" {
		p.Name = s.Name
		Pkglookup(s.Name, nil).Npkg++
	} else if p.Name != s.Name {
		Yyerror("conflicting names %s and %s for package \"%v\"", p.Name, s.Name, Zconv(p.Path, 0))
	}

	if !(incannedimport != 0) && myimportpath != "" && z.S == myimportpath {
		Yyerror("import \"%v\": package depends on \"%v\" (import cycle)", Zconv(importpkg.Path, 0), Zconv(z, 0))
		errorexit()
	}
}
// importconst declares the imported constant s with type t and
// value n; an existing definition is left untouched.
func importconst(s *Sym, t *Type, n *Node) {
	var n1 *Node

	importsym(s, OLITERAL)
	Convlit(&n, t)

	if s.Def != nil { // TODO: check if already the same.
		return
	}

	if n.Op != OLITERAL {
		Yyerror("expression must be a constant")
		return
	}

	if n.Sym != nil {
		// Copy the node so that declaring it below does not
		// modify the original, possibly shared, literal.
		n1 = Nod(OXXX, nil, nil)

		*n1 = *n
		n = n1
	}

	n.Orig = newname(s)
	n.Sym = s
	declare(n, PEXTERN)

	if Debug['E'] != 0 {
		fmt.Printf("import const %v\n", Sconv(s, 0))
	}
}
// importvar declares the imported variable s with type t, diagnosing
// a type that conflicts with an earlier import of the same name.
func importvar(s *Sym, t *Type) {
	var n *Node

	importsym(s, ONAME)
	if s.Def != nil && s.Def.Op == ONAME {
		// Identical re-import is fine; anything else is an error.
		if Eqtype(t, s.Def.Type) {
			return
		}
		Yyerror("inconsistent definition for var %v during import\n\t%v (in \"%v\")\n\t%v (in \"%v\")", Sconv(s, 0), Tconv(s.Def.Type, 0), Zconv(s.Importdef.Path, 0), Tconv(t, 0), Zconv(importpkg.Path, 0))
	}

	n = newname(s)
	s.Importdef = importpkg
	n.Type = t
	declare(n, PEXTERN)

	if Debug['E'] != 0 {
		fmt.Printf("import var %v %v\n", Sconv(s, 0), Tconv(t, obj.FmtLong))
	}
}
// importtype completes the imported type pt with definition t,
// resolving a forward declaration from pkgtype or checking
// consistency against a previous definition.
func importtype(pt *Type, t *Type) {
	var n *Node

	// override declaration in unsafe.go for Pointer.
	// there is no way in Go code to define unsafe.Pointer
	// so we have to supply it.
	if incannedimport != 0 && importpkg.Name == "unsafe" && pt.Nod.Sym.Name == "Pointer" {
		t = Types[TUNSAFEPTR]
	}

	if pt.Etype == TFORW {
		n = pt.Nod
		copytype(pt.Nod, t)
		pt.Nod = n // unzero nod
		pt.Sym.Importdef = importpkg
		pt.Sym.Lastlineno = int32(parserline())
		declare(n, PEXTERN)
		checkwidth(pt)
	} else if !Eqtype(pt.Orig, t) {
		Yyerror("inconsistent definition for type %v during import\n\t%v (in \"%v\")\n\t%v (in \"%v\")", Sconv(pt.Sym, 0), Tconv(pt, obj.FmtLong), Zconv(pt.Sym.Importdef.Path, 0), Tconv(t, obj.FmtLong), Zconv(importpkg.Path, 0))
	}

	if Debug['E'] != 0 {
		fmt.Printf("import type %v %v\n", Tconv(pt, 0), Tconv(t, obj.FmtLong))
	}
}
// dumpasmhdr writes the -asmhdr header file: #define lines for
// exported constants and for struct sizes and field offsets, for use
// from assembly source.
func dumpasmhdr() {
	var b *obj.Biobuf
	var l *NodeList
	var n *Node
	var t *Type

	b, err := obj.Bopenw(asmhdr)
	if err != nil {
		Fatal("%v", err)
	}
	fmt.Fprintf(b, "// generated by %cg -asmhdr from package %s\n\n", Thearch.Thechar, localpkg.Name)
	for l = asmlist; l != nil; l = l.Next {
		n = l.N
		if isblanksym(n.Sym) {
			continue
		}
		switch n.Op {
		case OLITERAL:
			fmt.Fprintf(b, "#define const_%s %v\n", n.Sym.Name, Vconv(&n.Val, obj.FmtSharp))

		case OTYPE:
			t = n.Type
			// Only plain structs get offsets; maps and function
			// argument frames are skipped.
			if t.Etype != TSTRUCT || t.Map != nil || t.Funarg != 0 {
				break
			}
			fmt.Fprintf(b, "#define %s__size %d\n", t.Sym.Name, int(t.Width))
			// For TFIELD, Width holds the field offset.
			for t = t.Type; t != nil; t = t.Down {
				if !isblanksym(t.Sym) {
					fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, t.Sym.Name, int(t.Width))
				}
			}
		}
	}

	obj.Bterm(b)
}

1953
src/cmd/internal/gc/fmt.go Normal file

File diff suppressed because it is too large Load diff

1017
src/cmd/internal/gc/gen.go Normal file

File diff suppressed because it is too large Load diff

1179
src/cmd/internal/gc/go.go Normal file

File diff suppressed because it is too large Load diff

2252
src/cmd/internal/gc/go.y Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,617 @@
// Derived from Inferno utils/6c/txt.c
// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package gc
import "cmd/internal/obj"
var ddumped int
var dfirst *obj.Prog
var dpc *obj.Prog
/*
 * Is this node a memory operand?
 */
func Ismem(n *Node) int {
	switch n.Op {
	case OITAB,
		OSPTR,
		OLEN,
		OCAP,
		OINDREG,
		ONAME,
		OPARAM,
		OCLOSUREVAR:
		return 1

	case OADDR:
		// because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too
		if Thearch.Thechar == '6' || Thearch.Thechar == '9' {
			return 1
		}
		return 0
	}

	return 0
}
// Samereg reports (as 0/1) whether a and b are both register nodes
// referring to the same machine register.
func Samereg(a *Node, b *Node) int {
	if a == nil || b == nil {
		return 0
	}
	if a.Op != OREGISTER || b.Op != OREGISTER {
		return 0
	}
	if a.Val.U.Reg != b.Val.U.Reg {
		return 0
	}
	return 1
}
/*
 * gsubr.c
 */

// Gbranch emits a branch instruction with opcode as and an as-yet
// unset target (to be filled in with Patch).  For conditional
// branches, likely (negative/zero/positive) encodes a
// branch-prediction hint on architectures that use one.  t is unused.
func Gbranch(as int, t *Type, likely int) *obj.Prog {
	var p *obj.Prog

	p = Prog(as)
	p.To.Type = obj.TYPE_BRANCH
	p.To.U.Branch = nil
	if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' {
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(bool2int(likely > 0))
	}

	return p
}
// Prog allocates and returns a new instruction with opcode as.
// ADATA/AGLOBL instructions go onto the deferred data list (flushed
// later by dumpdata); everything else is appended to the current
// text at Pc.
func Prog(as int) *obj.Prog {
	var p *obj.Prog

	if as == obj.ADATA || as == obj.AGLOBL {
		if ddumped != 0 {
			Fatal("already dumped data")
		}
		if dpc == nil {
			dpc = Ctxt.NewProg()
			dfirst = dpc
		}

		p = dpc
		dpc = Ctxt.NewProg()
		p.Link = dpc
	} else {
		p = Pc
		Pc = Ctxt.NewProg()
		Clearp(Pc)
		p.Link = Pc
	}

	if lineno == 0 {
		if Debug['K'] != 0 {
			Warn("prog: line 0")
		}
	}

	p.As = int16(as)
	p.Lineno = lineno
	return p
}
// Nodreg initializes n as an OREGISTER node of type t referring to
// machine register r.
func Nodreg(n *Node, t *Type, r int) {
	if t == nil {
		Fatal("nodreg: t nil")
	}

	*n = Node{}
	n.Op = OREGISTER
	n.Addable = 1
	ullmancalc(n)
	n.Val.U.Reg = int16(r)
	n.Type = t
}
// Nodindreg initializes n as an OINDREG node of type t: an indirect
// reference through machine register r.
func Nodindreg(n *Node, t *Type, r int) {
	Nodreg(n, t, r)
	n.Op = OINDREG
}
// Afunclit rewrites the operand a (referring to function node n)
// from an external address to a memory reference to the function's
// link symbol, as used for function literals.
func Afunclit(a *obj.Addr, n *Node) {
	if a.Type == obj.TYPE_ADDR && a.Name == obj.NAME_EXTERN {
		a.Type = obj.TYPE_MEM
		a.Sym = Linksym(n.Sym)
	}
}
// Clearp resets p to an AEND placeholder instruction and assigns it
// the next program-counter slot from the global pcloc counter.
func Clearp(p *obj.Prog) {
	obj.Nopout(p)
	p.As = obj.AEND
	p.Pc = int64(pcloc)
	pcloc++
}
// dumpdata flushes the deferred ADATA/AGLOBL instructions collected
// by Prog into a fresh plist.  After this, Prog must not be asked for
// further data instructions (guarded by ddumped).
func dumpdata() {
	ddumped = 1
	if dfirst == nil {
		return
	}
	newplist()
	*Pc = *dfirst
	Pc = dpc
	Clearp(Pc)
}
// fixautoused walks the instruction list p, unlinking ATYPE entries
// for unused autos, nop-ing out VARDEF/VARKILL of unused variables,
// and applying each auto's stack-offset delta (Stkdelta) to its
// operands.
func fixautoused(p *obj.Prog) {
	var lp **obj.Prog

	// lp points at the link slot of the previous kept instruction so
	// unwanted instructions can be unlinked in place.
	for lp = &p; ; {
		p = *lp
		if !(p != nil) {
			break
		}
		if p.As == obj.ATYPE && p.From.Node != nil && p.From.Name == obj.NAME_AUTO && !(((p.From.Node).(*Node)).Used != 0) {
			*lp = p.Link
			continue
		}

		if (p.As == obj.AVARDEF || p.As == obj.AVARKILL) && p.To.Node != nil && !(((p.To.Node).(*Node)).Used != 0) {
			// Cannot remove VARDEF instruction, because - unlike TYPE handled above -
			// VARDEFs are interspersed with other code, and a jump might be using the
			// VARDEF as a target. Replace with a no-op instead. A later pass will remove
			// the no-ops.
			obj.Nopout(p)

			continue
		}

		if p.From.Name == obj.NAME_AUTO && p.From.Node != nil {
			p.From.Offset += ((p.From.Node).(*Node)).Stkdelta
		}

		if p.To.Name == obj.NAME_AUTO && p.To.Node != nil {
			p.To.Offset += ((p.To.Node).(*Node)).Stkdelta
		}

		lp = &p.Link
	}
}
// ggloblnod emits an AGLOBL directive declaring the global variable
// nam, attaching its Go type descriptor and the RODATA/NOPTR
// attribute flags.
func ggloblnod(nam *Node) {
	var p *obj.Prog

	p = Thearch.Gins(obj.AGLOBL, nam, nil)
	p.Lineno = nam.Lineno
	p.From.Sym.Gotype = Linksym(ngotype(nam))
	p.To.Sym = nil
	p.To.Type = obj.TYPE_CONST
	p.To.Offset = nam.Type.Width
	if nam.Readonly != 0 {
		p.From3.Offset = obj.RODATA
	}
	// Pointer-free data need not be scanned by the garbage collector.
	if nam.Type != nil && !haspointers(nam.Type) {
		p.From3.Offset |= obj.NOPTR
	}
}
// ggloblsym emits an AGLOBL directive declaring symbol s with the
// given width in bytes and attribute flags (e.g. RODATA, NOPTR).
func ggloblsym(s *Sym, width int32, flags int8) {
	var p *obj.Prog

	p = Thearch.Gins(obj.AGLOBL, nil, nil)
	p.From.Type = obj.TYPE_MEM
	p.From.Name = obj.NAME_EXTERN
	p.From.Sym = Linksym(s)
	p.To.Type = obj.TYPE_CONST
	p.To.Offset = int64(width)
	p.From3.Offset = int64(flags)
}
// gjmp emits an unconditional jump.  When to is non-nil the jump is
// patched to target it immediately; otherwise the caller must Patch
// the returned instruction later.
func gjmp(to *obj.Prog) *obj.Prog {
	p := Gbranch(obj.AJMP, nil, 0)
	if to != nil {
		Patch(p, to)
	}
	return p
}
// gtrack emits an AUSEFIELD pseudo-instruction recording a use of
// field symbol s, for the linker's field-tracking feature.
func gtrack(s *Sym) {
	var p *obj.Prog

	p = Thearch.Gins(obj.AUSEFIELD, nil, nil)
	p.From.Type = obj.TYPE_MEM
	p.From.Name = obj.NAME_EXTERN
	p.From.Sym = Linksym(s)
}
// gused emits an ANOP use of n so the node is considered live.
func gused(n *Node) {
	Thearch.Gins(obj.ANOP, n, nil) // used
}
// Isfat reports (as 0/1) whether t is a multi-word ("fat") value
// type that does not fit in a single register: structs, arrays,
// strings, and interfaces.
func Isfat(t *Type) int {
	if t == nil {
		return 0
	}
	switch t.Etype {
	case TSTRUCT,
		TARRAY,
		TSTRING,
		TINTER: // maybe remove later
		return 1
	}
	return 0
}
// markautoused sets the Used flag on every variable referenced by a
// real instruction in the list p, skipping the
// ATYPE/AVARDEF/AVARKILL pseudo-instructions that merely mention
// variables without using them.
func markautoused(p *obj.Prog) {
	for ; p != nil; p = p.Link {
		if p.As == obj.ATYPE || p.As == obj.AVARDEF || p.As == obj.AVARKILL {
			continue
		}

		if p.From.Node != nil {
			((p.From.Node).(*Node)).Used = 1
		}

		if p.To.Node != nil {
			((p.To.Node).(*Node)).Used = 1
		}
	}
}
func Naddr(n *Node, a *obj.Addr, canemitcode int) {
var s *Sym
*a = obj.Zprog.From
if n == nil {
return
}
if n.Type != nil && n.Type.Etype != TIDEAL {
// TODO(rsc): This is undone by the selective clearing of width below,
// to match architectures that were not as aggressive in setting width
// during naddr. Those widths must be cleared to avoid triggering
// failures in gins when it detects real but heretofore latent (and one
// hopes innocuous) type mismatches.
// The type mismatches should be fixed and the clearing below removed.
dowidth(n.Type)
a.Width = n.Type.Width
}
switch n.Op {
default:
Fatal("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
case OREGISTER:
a.Type = obj.TYPE_REG
a.Reg = n.Val.U.Reg
a.Sym = nil
if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
a.Width = 0
}
case OINDREG:
a.Type = obj.TYPE_MEM
a.Reg = n.Val.U.Reg
a.Sym = Linksym(n.Sym)
a.Offset = n.Xoffset
if a.Offset != int64(int32(a.Offset)) {
Yyerror("offset %d too large for OINDREG", a.Offset)
}
if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
a.Width = 0
}
// n->left is PHEAP ONAME for stack parameter.
// compute address of actual parameter on stack.
case OPARAM:
a.Etype = Simtype[n.Left.Type.Etype]
a.Width = n.Left.Type.Width
a.Offset = n.Xoffset
a.Sym = Linksym(n.Left.Sym)
a.Type = obj.TYPE_MEM
a.Name = obj.NAME_PARAM
a.Node = n.Left.Orig
case OCLOSUREVAR:
if !(Curfn.Needctxt != 0) {
Fatal("closurevar without needctxt")
}
a.Type = obj.TYPE_MEM
a.Reg = int16(Thearch.REGCTXT)
a.Sym = nil
a.Offset = n.Xoffset
case OCFUNC:
Naddr(n.Left, a, canemitcode)
a.Sym = Linksym(n.Left.Sym)
case ONAME:
a.Etype = 0
if n.Type != nil {
a.Etype = Simtype[n.Type.Etype]
}
a.Offset = n.Xoffset
s = n.Sym
a.Node = n.Orig
//if(a->node >= (Node*)&n)
// fatal("stack node");
if s == nil {
s = Lookup(".noname")
}
if n.Method != 0 {
if n.Type != nil {
if n.Type.Sym != nil {
if n.Type.Sym.Pkg != nil {
s = Pkglookup(s.Name, n.Type.Sym.Pkg)
}
}
}
}
a.Type = obj.TYPE_MEM
switch n.Class {
default:
Fatal("naddr: ONAME class %v %d\n", Sconv(n.Sym, 0), n.Class)
fallthrough
case PEXTERN:
a.Name = obj.NAME_EXTERN
case PAUTO:
a.Name = obj.NAME_AUTO
case PPARAM,
PPARAMOUT:
a.Name = obj.NAME_PARAM
case PFUNC:
a.Name = obj.NAME_EXTERN
a.Type = obj.TYPE_ADDR
a.Width = int64(Widthptr)
s = funcsym(s)
}
a.Sym = Linksym(s)
case OLITERAL:
if Thearch.Thechar == '8' {
a.Width = 0
}
switch n.Val.Ctype {
default:
Fatal("naddr: const %v", Tconv(n.Type, obj.FmtLong))
case CTFLT:
a.Type = obj.TYPE_FCONST
a.U.Dval = mpgetflt(n.Val.U.Fval)
case CTINT,
CTRUNE:
a.Sym = nil
a.Type = obj.TYPE_CONST
a.Offset = Mpgetfix(n.Val.U.Xval)
case CTSTR:
datagostring(n.Val.U.Sval, a)
case CTBOOL:
a.Sym = nil
a.Type = obj.TYPE_CONST
a.Offset = int64(n.Val.U.Bval)
case CTNIL:
a.Sym = nil
a.Type = obj.TYPE_CONST
a.Offset = 0
}
case OADDR:
Naddr(n.Left, a, canemitcode)
a.Etype = uint8(Tptr)
if Thearch.Thechar != '5' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
a.Width = int64(Widthptr)
}
if a.Type != obj.TYPE_MEM {
Fatal("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), Oconv(int(n.Left.Op), 0))
}
a.Type = obj.TYPE_ADDR
// itable of interface value
case OITAB:
Naddr(n.Left, a, canemitcode)
if a.Type == obj.TYPE_CONST && a.Offset == 0 {
break // itab(nil)
}
a.Etype = uint8(Tptr)
a.Width = int64(Widthptr)
// pointer in a string or slice
case OSPTR:
Naddr(n.Left, a, canemitcode)
if a.Type == obj.TYPE_CONST && a.Offset == 0 {
break // ptr(nil)
}
a.Etype = Simtype[Tptr]
a.Offset += int64(Array_array)
a.Width = int64(Widthptr)
// len of string or slice
case OLEN:
Naddr(n.Left, a, canemitcode)
if a.Type == obj.TYPE_CONST && a.Offset == 0 {
break // len(nil)
}
a.Etype = Simtype[TUINT]
if Thearch.Thechar == '9' {
a.Etype = Simtype[TINT]
}
a.Offset += int64(Array_nel)
if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
a.Width = int64(Widthint)
}
// cap of string or slice
case OCAP:
Naddr(n.Left, a, canemitcode)
if a.Type == obj.TYPE_CONST && a.Offset == 0 {
break // cap(nil)
}
a.Etype = Simtype[TUINT]
if Thearch.Thechar == '9' {
a.Etype = Simtype[TINT]
}
a.Offset += int64(Array_cap)
if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
a.Width = int64(Widthint)
}
}
}
// newplist starts a fresh instruction list for the current function,
// resetting the global Pc to a cleared first instruction.
func newplist() *obj.Plist {
	pl := obj.Linknewplist(Ctxt)
	Pc = Ctxt.NewProg()
	Clearp(Pc)
	pl.Firstpc = Pc
	return pl
}
func nodarg(t *Type, fp int) *Node {
var n *Node
var l *NodeList
var first *Type
var savet Iter
// entire argument struct, not just one arg
if t.Etype == TSTRUCT && t.Funarg != 0 {
n = Nod(ONAME, nil, nil)
n.Sym = Lookup(".args")
n.Type = t
first = Structfirst(&savet, &t)
if first == nil {
Fatal("nodarg: bad struct")
}
if first.Width == BADWIDTH {
Fatal("nodarg: offset not computed for %v", Tconv(t, 0))
}
n.Xoffset = first.Width
n.Addable = 1
goto fp
}
if t.Etype != TFIELD {
Fatal("nodarg: not field %v", Tconv(t, 0))
}
if fp == 1 {
for l = Curfn.Dcl; l != nil; l = l.Next {
n = l.N
if (n.Class == PPARAM || n.Class == PPARAMOUT) && !isblanksym(t.Sym) && n.Sym == t.Sym {
return n
}
}
}
n = Nod(ONAME, nil, nil)
n.Type = t.Type
n.Sym = t.Sym
if t.Width == BADWIDTH {
Fatal("nodarg: offset not computed for %v", Tconv(t, 0))
}
n.Xoffset = t.Width
n.Addable = 1
n.Orig = t.Nname
// Rewrite argument named _ to __,
// or else the assignment to _ will be
// discarded during code generation.
fp:
if isblank(n) {
n.Sym = Lookup("__")
}
switch fp {
case 0: // output arg
n.Op = OINDREG
n.Val.U.Reg = int16(Thearch.REGSP)
if Thearch.Thechar == '5' {
n.Xoffset += 4
}
if Thearch.Thechar == '9' {
n.Xoffset += 8
}
case 1: // input arg
n.Class = PPARAM
case 2: // offset output arg
Fatal("shouldn't be used")
n.Op = OINDREG
n.Val.U.Reg = int16(Thearch.REGSP)
n.Xoffset += Types[Tptr].Width
}
n.Typecheck = 1
return n
}
// Patch makes the branch instruction p jump to instruction to.
// It is a fatal error if p is not a branch.
func Patch(p *obj.Prog, to *obj.Prog) {
	if p.To.Type != obj.TYPE_BRANCH {
		Fatal("patch: not a branch")
	}
	p.To.U.Branch = to
	p.To.Offset = to.Pc
}
// unpatch clears the branch target of p and returns the instruction it
// previously pointed at, so the branch can be retargeted later.
func unpatch(p *obj.Prog) *obj.Prog {
	if p.To.Type != obj.TYPE_BRANCH {
		Fatal("unpatch: not a branch")
	}
	q := p.To.U.Branch
	p.To.U.Branch = nil
	p.To.Offset = 0
	return q
}

232
src/cmd/internal/gc/init.go Normal file
View file

@ -0,0 +1,232 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import "fmt"
// case OADD:
// if(n->right->op == OLITERAL) {
// v = n->right->vconst;
// naddr(n->left, a, canemitcode);
// } else
// if(n->left->op == OLITERAL) {
// v = n->left->vconst;
// naddr(n->right, a, canemitcode);
// } else
// goto bad;
// a->offset += v;
// break;
/*
* a function named init is a special case.
* it is called by the initialization before
* main is run. to make it unique within a
* package and also uncallable, the name,
* normally "pkg.init", is altered to "pkg.init·1".
*/
var renameinit_initgen int
// renameinit returns a fresh symbol "init·N" for a user-written init
// function, making each one unique within the package and uncallable
// from ordinary Go source. It also leaves the name in namebuf.
func renameinit() *Sym {
	renameinit_initgen++
	namebuf = fmt.Sprintf("init·%d", renameinit_initgen)
	return Lookup(namebuf)
}
/*
* hand-craft the following initialization code
* var initdone· uint8 (1)
* func init() (2)
* if initdone· != 0 { (3)
* if initdone· == 2 (4)
* return
* throw(); (5)
* }
* initdone· = 1; (6)
* // over all matching imported symbols
* <pkg>.init() (7)
* { <init stmts> } (8)
* init·<n>() // if any (9)
* initdone· = 2; (10)
* return (11)
* }
*/
// anyinit reports (as 0 or 1) whether the package needs a generated
// init function: any interesting init statement in n, being package
// main, an explicit init·1 function, or any imported init symbol.
func anyinit(n *NodeList) int {
	var h uint32
	var s *Sym
	var l *NodeList
	// are there any interesting init statements
	for l = n; l != nil; l = l.Next {
		switch l.N.Op {
		case ODCLFUNC,
			ODCLCONST,
			ODCLTYPE,
			OEMPTY:
			break
		case OAS:
			// assignment of a discardable value to _ is not interesting
			if isblank(l.N.Left) && candiscard(l.N.Right) != 0 {
				break
			}
			fallthrough
		// fall through
		default:
			return 1
		}
	}
	// is this main
	if localpkg.Name == "main" {
		return 1
	}
	// is there an explicit init function
	namebuf = fmt.Sprintf("init·1")
	s = Lookup(namebuf)
	if s.Def != nil {
		return 1
	}
	// are there any imported init functions
	for h = 0; h < NHASH; h++ {
		for s = hash[h]; s != nil; s = s.Link {
			// cheap first-byte filter before the full string compare
			if s.Name[0] != 'i' || s.Name != "init" {
				continue
			}
			if s.Def == nil {
				continue
			}
			return 1
		}
	}
	// then none
	return 0
}
func fninit(n *NodeList) {
var i int
var gatevar *Node
var a *Node
var b *Node
var fn *Node
var r *NodeList
var h uint32
var s *Sym
var initsym *Sym
if Debug['A'] != 0 {
// sys.go or unsafe.go during compiler build
return
}
n = initfix(n)
if !(anyinit(n) != 0) {
return
}
r = nil
// (1)
namebuf = fmt.Sprintf("initdone·")
gatevar = newname(Lookup(namebuf))
addvar(gatevar, Types[TUINT8], PEXTERN)
// (2)
Maxarg = 0
namebuf = fmt.Sprintf("init")
fn = Nod(ODCLFUNC, nil, nil)
initsym = Lookup(namebuf)
fn.Nname = newname(initsym)
fn.Nname.Defn = fn
fn.Nname.Ntype = Nod(OTFUNC, nil, nil)
declare(fn.Nname, PFUNC)
funchdr(fn)
// (3)
a = Nod(OIF, nil, nil)
a.Ntest = Nod(ONE, gatevar, Nodintconst(0))
r = list(r, a)
// (4)
b = Nod(OIF, nil, nil)
b.Ntest = Nod(OEQ, gatevar, Nodintconst(2))
b.Nbody = list1(Nod(ORETURN, nil, nil))
a.Nbody = list1(b)
// (5)
b = syslook("throwinit", 0)
b = Nod(OCALL, b, nil)
a.Nbody = list(a.Nbody, b)
// (6)
a = Nod(OAS, gatevar, Nodintconst(1))
r = list(r, a)
// (7)
for h = 0; h < NHASH; h++ {
for s = hash[h]; s != nil; s = s.Link {
if s.Name[0] != 'i' || s.Name != "init" {
continue
}
if s.Def == nil {
continue
}
if s == initsym {
continue
}
// could check that it is fn of no args/returns
a = Nod(OCALL, s.Def, nil)
r = list(r, a)
}
}
// (8)
r = concat(r, n)
// (9)
// could check that it is fn of no args/returns
for i = 1; ; i++ {
namebuf = fmt.Sprintf("init·%d", i)
s = Lookup(namebuf)
if s.Def == nil {
break
}
a = Nod(OCALL, s.Def, nil)
r = list(r, a)
}
// (10)
a = Nod(OAS, gatevar, Nodintconst(2))
r = list(r, a)
// (11)
a = Nod(ORETURN, nil, nil)
r = list(r, a)
exportsym(fn.Nname)
fn.Nbody = r
funcbody(fn)
Curfn = fn
typecheck(&fn, Etop)
typechecklist(r, Etop)
Curfn = nil
funccompile(fn)
}

1040
src/cmd/internal/gc/inl.go Normal file

File diff suppressed because it is too large Load diff

3204
src/cmd/internal/gc/lex.go Normal file

File diff suppressed because it is too large Load diff

329
src/cmd/internal/gc/md5.go Normal file
View file

@ -0,0 +1,329 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
// 64-bit MD5 (does full MD5 but returns 64 bits only).
// Translation of ../../crypto/md5/md5*.go.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
type MD5 struct {
s [4]uint32
x [64]uint8
nx int
len uint64
}
const (
_Chunk = 64
)
const (
_Init0 = 0x67452301
_Init1 = 0xEFCDAB89
_Init2 = 0x98BADCFE
_Init3 = 0x10325476
)
// md5reset initializes d to the standard MD5 starting state.
func md5reset(d *MD5) {
	d.s = [4]uint32{_Init0, _Init1, _Init2, _Init3}
	d.nx = 0
	d.len = 0
}
// md5write absorbs the first nn bytes of p into the digest state,
// buffering partial blocks in d.x and hashing full 64-byte chunks.
func md5write(d *MD5, p []byte, nn int) {
	var i int
	var n int
	d.len += uint64(nn)
	// First top up any partially buffered chunk from a previous call.
	if d.nx > 0 {
		n = nn
		if n > _Chunk-d.nx {
			n = _Chunk - d.nx
		}
		for i = 0; i < n; i++ {
			d.x[d.nx+i] = uint8(p[i])
		}
		d.nx += n
		if d.nx == _Chunk {
			md5block(d, d.x[:], _Chunk)
			d.nx = 0
		}
		p = p[n:]
		nn -= n
	}
	// Hash as many whole chunks as possible directly from p.
	n = md5block(d, p, nn)
	p = p[n:]
	nn -= n
	// Buffer the remaining tail for the next call or md5sum.
	if nn > 0 {
		for i = 0; i < nn; i++ {
			d.x[i] = uint8(p[i])
		}
		d.nx = nn
	}
}
// md5sum finalizes the digest: it appends the 0x80 pad byte, zero
// padding to 56 bytes mod 64, and the 64-bit message length in bits,
// then returns s[0..1] as a 64-bit value; if hi is non-nil it receives
// s[2..3].
func md5sum(d *MD5, hi *uint64) uint64 {
	var tmp [64]uint8
	var i int
	var len uint64
	// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
	len = d.len
	tmp = [64]uint8{}
	tmp[0] = 0x80
	if len%64 < 56 {
		md5write(d, tmp[:], int(56-len%64))
	} else {
		md5write(d, tmp[:], int(64+56-len%64))
	}
	// Length in bits.
	len <<= 3
	for i = 0; i < 8; i++ {
		tmp[i] = uint8(len >> uint(8*i))
	}
	md5write(d, tmp[:], 8)
	// Padding must have ended exactly on a chunk boundary.
	if d.nx != 0 {
		Fatal("md5sum")
	}
	if hi != nil {
		*hi = uint64(d.s[2]) | uint64(d.s[3])<<32
	}
	return uint64(d.s[0]) | uint64(d.s[1])<<32
}
// MD5 block step.
// In its own file so that a faster assembly or C version
// can be substituted easily.
// table[i] = int((1<<32) * abs(sin(i+1 radians))).
var table = [64]uint32{
// round 1
0xd76aa478,
0xe8c7b756,
0x242070db,
0xc1bdceee,
0xf57c0faf,
0x4787c62a,
0xa8304613,
0xfd469501,
0x698098d8,
0x8b44f7af,
0xffff5bb1,
0x895cd7be,
0x6b901122,
0xfd987193,
0xa679438e,
0x49b40821,
// round 2
0xf61e2562,
0xc040b340,
0x265e5a51,
0xe9b6c7aa,
0xd62f105d,
0x2441453,
0xd8a1e681,
0xe7d3fbc8,
0x21e1cde6,
0xc33707d6,
0xf4d50d87,
0x455a14ed,
0xa9e3e905,
0xfcefa3f8,
0x676f02d9,
0x8d2a4c8a,
// round3
0xfffa3942,
0x8771f681,
0x6d9d6122,
0xfde5380c,
0xa4beea44,
0x4bdecfa9,
0xf6bb4b60,
0xbebfbc70,
0x289b7ec6,
0xeaa127fa,
0xd4ef3085,
0x4881d05,
0xd9d4d039,
0xe6db99e5,
0x1fa27cf8,
0xc4ac5665,
// round 4
0xf4292244,
0x432aff97,
0xab9423a7,
0xfc93a039,
0x655b59c3,
0x8f0ccc92,
0xffeff47d,
0x85845dd1,
0x6fa87e4f,
0xfe2ce6e0,
0xa3014314,
0x4e0811a1,
0xf7537e82,
0xbd3af235,
0x2ad7d2bb,
0xeb86d391,
}
var shift1 = []uint32{7, 12, 17, 22}
var shift2 = []uint32{5, 9, 14, 20}
var shift3 = []uint32{4, 11, 16, 23}
var shift4 = []uint32{6, 10, 15, 21}
func md5block(dig *MD5, p []byte, nn int) int {
var a uint32
var b uint32
var c uint32
var d uint32
var aa uint32
var bb uint32
var cc uint32
var dd uint32
var i int
var j int
var n int
var X [16]uint32
a = dig.s[0]
b = dig.s[1]
c = dig.s[2]
d = dig.s[3]
n = 0
for nn >= _Chunk {
aa = a
bb = b
cc = c
dd = d
for i = 0; i < 16; i++ {
j = i * 4
X[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24
}
// Round 1.
for i = 0; i < 16; i++ {
var x uint32
var t uint32
var s uint32
var f uint32
x = uint32(i)
t = uint32(i)
s = shift1[i%4]
f = ((c ^ d) & b) ^ d
a += f + X[x] + table[t]
a = a<<s | a>>(32-s)
a += b
t = d
d = c
c = b
b = a
a = t
}
// Round 2.
for i = 0; i < 16; i++ {
var x uint32
var t uint32
var s uint32
var g uint32
x = (1 + 5*uint32(i)) % 16
t = 16 + uint32(i)
s = shift2[i%4]
g = ((b ^ c) & d) ^ c
a += g + X[x] + table[t]
a = a<<s | a>>(32-s)
a += b
t = d
d = c
c = b
b = a
a = t
}
// Round 3.
for i = 0; i < 16; i++ {
var x uint32
var t uint32
var s uint32
var h uint32
x = (5 + 3*uint32(i)) % 16
t = 32 + uint32(i)
s = shift3[i%4]
h = b ^ c ^ d
a += h + X[x] + table[t]
a = a<<s | a>>(32-s)
a += b
t = d
d = c
c = b
b = a
a = t
}
// Round 4.
for i = 0; i < 16; i++ {
var x uint32
var s uint32
var t uint32
var ii uint32
x = (7 * uint32(i)) % 16
s = shift4[i%4]
t = 48 + uint32(i)
ii = c ^ (b | ^d)
a += ii + X[x] + table[t]
a = a<<s | a>>(32-s)
a += b
t = d
d = c
c = b
b = a
a = t
}
a += aa
b += bb
c += cc
d += dd
p = p[_Chunk:]
n += _Chunk
nn -= _Chunk
}
dig.s[0] = a
dig.s[1] = b
dig.s[2] = c
dig.s[3] = d
return n
}

View file

@ -0,0 +1,698 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/internal/obj"
"fmt"
"math"
)
/// uses arithmetic
// mpcmpfixflt compares integer constant a with float constant b,
// returning <0, 0, or >0. a is converted to a float via its decimal text.
func mpcmpfixflt(a *Mpint, b *Mpflt) int {
	var c Mpflt
	mpatoflt(&c, fmt.Sprintf("%v", Bconv(a, 0)))
	return mpcmpfltflt(&c, b)
}
// mpcmpfltfix compares float constant a with integer constant b,
// returning <0, 0, or >0. b is converted to a float via its decimal text.
func mpcmpfltfix(a *Mpflt, b *Mpint) int {
	var c Mpflt
	mpatoflt(&c, fmt.Sprintf("%v", Bconv(b, 0)))
	return mpcmpfltflt(a, &c)
}
// Mpcmpfixfix compares integer constants a and b, returning the sign
// of a-b as -1, 0, or +1.
func Mpcmpfixfix(a *Mpint, b *Mpint) int {
	diff := *a // mpmovefixfix is a plain struct copy
	mpsubfixfix(&diff, b)
	return mptestfix(&diff)
}
// mpcmpfixc compares integer constant b with machine integer c,
// returning the sign of b-c.
func mpcmpfixc(b *Mpint, c int64) int {
	var tmp Mpint
	Mpmovecfix(&tmp, c)
	return Mpcmpfixfix(b, &tmp)
}
// mpcmpfltflt compares float constants a and b, returning the sign
// of a-b as -1, 0, or +1.
func mpcmpfltflt(a *Mpflt, b *Mpflt) int {
	diff := *a // mpmovefltflt is a plain struct copy
	mpsubfltflt(&diff, b)
	return mptestflt(&diff)
}
// mpcmpfltc compares float constant b with machine float c,
// returning the sign of b-c.
func mpcmpfltc(b *Mpflt, c float64) int {
	var tmp Mpflt
	Mpmovecflt(&tmp, c)
	return mpcmpfltflt(b, &tmp)
}
// mpsubfixfix computes a -= b via negate-add-negate: a = -((-a) + b).
func mpsubfixfix(a *Mpint, b *Mpint) {
	mpnegfix(a)
	mpaddfixfix(a, b, 0)
	mpnegfix(a)
}
// mpsubfltflt computes a -= b via negate-add-negate: a = -((-a) + b).
func mpsubfltflt(a *Mpflt, b *Mpflt) {
	mpnegflt(a)
	mpaddfltflt(a, b)
	mpnegflt(a)
}
// mpaddcfix computes a += c for a machine integer c.
func mpaddcfix(a *Mpint, c int64) {
	var b Mpint
	Mpmovecfix(&b, c)
	mpaddfixfix(a, &b, 0)
}
// mpaddcflt computes a += c for a machine float c.
func mpaddcflt(a *Mpflt, c float64) {
	var b Mpflt
	Mpmovecflt(&b, c)
	mpaddfltflt(a, &b)
}
// mpmulcfix computes a *= c for a machine integer c.
func mpmulcfix(a *Mpint, c int64) {
	var b Mpint
	Mpmovecfix(&b, c)
	mpmulfixfix(a, &b)
}
// mpmulcflt computes a *= c for a machine float c.
func mpmulcflt(a *Mpflt, c float64) {
	var b Mpflt
	Mpmovecflt(&b, c)
	mpmulfltflt(a, &b)
}
// mpdivfixfix computes a = a / b (truncated quotient).
func mpdivfixfix(a *Mpint, b *Mpint) {
	var q Mpint
	var r Mpint
	mpdivmodfixfix(&q, &r, a, b)
	mpmovefixfix(a, &q)
}
// mpmodfixfix computes a = a % b (remainder of truncated division).
func mpmodfixfix(a *Mpint, b *Mpint) {
	var q Mpint
	var r Mpint
	mpdivmodfixfix(&q, &r, a, b)
	mpmovefixfix(a, &r)
}
// mpcomfix computes the bitwise complement a = ^a, as -a - 1.
func mpcomfix(a *Mpint) {
	var b Mpint
	Mpmovecfix(&b, 1)
	mpnegfix(a)
	mpsubfixfix(a, &b)
}
// Mpmovefixflt sets float a to the integer value b: mantissa b,
// exponent 0, then normalized.
func Mpmovefixflt(a *Mpflt, b *Mpint) {
	a.Val = *b
	a.Exp = 0
	mpnorm(a)
}
// convert (truncate) b to a.
// return -1 (but still convert) if b was non-integer.
// mpexactfltfix converts (truncates) float b to integer a.
// It returns -1 (but still converts) if b was not an exact integer,
// detected by converting back and comparing.
func mpexactfltfix(a *Mpint, b *Mpflt) int {
	var f Mpflt
	*a = b.Val
	Mpshiftfix(a, int(b.Exp))
	if b.Exp < 0 {
		// Bits were shifted off the bottom; round-trip to see if any
		// were non-zero.
		f.Val = *a
		f.Exp = 0
		mpnorm(&f)
		if mpcmpfltflt(b, &f) != 0 {
			return -1
		}
	}
	return 0
}
// mpmovefltfix converts float b to integer a, also accepting values a
// tiny rounding error away from an exact integer (it retries with the
// lowest mantissa word cleared, then with the mantissa bumped up).
// Returns 0 on success, -1 if b is not close enough to an integer.
func mpmovefltfix(a *Mpint, b *Mpflt) int {
	var f Mpflt
	var i int
	if mpexactfltfix(a, b) == 0 {
		return 0
	}
	// try rounding down a little
	f = *b
	f.Val.A[0] = 0
	if mpexactfltfix(a, &f) == 0 {
		return 0
	}
	// try rounding up a little
	for i = 1; i < Mpprec; i++ {
		f.Val.A[i]++
		if f.Val.A[i] != Mpbase {
			break
		}
		f.Val.A[i] = 0 // carry into the next word
	}
	mpnorm(&f)
	if mpexactfltfix(a, &f) == 0 {
		return 0
	}
	return -1
}
// mpmovefixfix copies integer constant b into a.
func mpmovefixfix(a *Mpint, b *Mpint) {
	*a = *b
}
// mpmovefltflt copies float constant b into a.
func mpmovefltflt(a *Mpflt, b *Mpflt) {
	*a = *b
}
var tab = []float64{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7}
// mppow10flt sets a = 10**p, using the tab lookup for small p and
// square-and-multiply recursion otherwise. p must be non-negative.
func mppow10flt(a *Mpflt, p int) {
	if p < 0 {
		panic("abort")
	}
	if p < len(tab) {
		Mpmovecflt(a, tab[p])
		return
	}
	// a = (10**(p/2))**2, times 10 more if p is odd.
	mppow10flt(a, p>>1)
	mpmulfltflt(a, a)
	if p&1 != 0 {
		mpmulcflt(a, 10)
	}
}
// mphextofix parses the hex digit string s (no 0x prefix, no sign)
// into a, OR-ing each digit's bits into the word array from the least
// significant digit up. Sets a.Ovf if more than Mpscale*Mpprec bits
// are needed. Characters outside 0-9/A-F are treated as lowercase hex;
// the caller is expected to have validated the string.
func mphextofix(a *Mpint, s string) {
	var c int8
	var d int
	var bit int
	var hexdigitp int
	var end int
	// leading zeros contribute nothing
	for s != "" && s[0] == '0' {
		s = s[1:]
	}
	// overflow
	if 4*len(s) > Mpscale*Mpprec {
		a.Ovf = 1
		return
	}
	end = len(s) - 1
	for hexdigitp = end; hexdigitp >= 0; hexdigitp-- {
		c = int8(s[hexdigitp])
		if c >= '0' && c <= '9' {
			d = int(c) - '0'
		} else if c >= 'A' && c <= 'F' {
			d = int(c) - 'A' + 10
		} else {
			d = int(c) - 'a' + 10
		}
		// scatter this digit's four bits into the word array
		bit = 4 * (end - hexdigitp)
		for d > 0 {
			if d&1 != 0 {
				a.A[bit/Mpscale] |= int(1) << uint(bit%Mpscale)
			}
			bit++
			d = d >> 1
		}
	}
}
//
// floating point input
// required syntax is [+-]d*[.]d*[e[+-]d*] or [+-]0xH*[e[+-]d*]
//
func mpatoflt(a *Mpflt, as string) {
var b Mpflt
var dp int
var c int
var f int
var ef int
var ex int
var eb int
var base int
var s string
var start string
for as[0] == ' ' || as[0] == '\t' {
as = as[1:]
}
/* determine base */
s = as
base = -1
for base == -1 {
if s == "" {
base = 10
break
}
c := s[0]
s = s[1:]
switch c {
case '-',
'+':
break
case '0':
if s != "" && s[0] == 'x' {
base = 16
} else {
base = 10
}
default:
base = 10
}
}
s = as
dp = 0 /* digits after decimal point */
f = 0 /* sign */
ex = 0 /* exponent */
eb = 0 /* binary point */
Mpmovecflt(a, 0.0)
if base == 16 {
start = ""
for {
c, _ = intstarstringplusplus(s)
if c == '-' {
f = 1
s = s[1:]
} else if c == '+' {
s = s[1:]
} else if c == '0' && s[1] == 'x' {
s = s[2:]
start = s
} else if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') {
s = s[1:]
} else {
break
}
}
if start == "" {
Yyerror("malformed hex constant: %s", as)
goto bad
}
mphextofix(&a.Val, start[:len(start)-len(s)])
if a.Val.Ovf != 0 {
Yyerror("constant too large: %s", as)
goto bad
}
a.Exp = 0
mpnorm(a)
}
for {
c, s = intstarstringplusplus(s)
switch c {
default:
Yyerror("malformed constant: %s (at %c)", as, c)
goto bad
case '-':
f = 1
fallthrough
case ' ',
'\t',
'+':
continue
case '.':
if base == 16 {
Yyerror("decimal point in hex constant: %s", as)
goto bad
}
dp = 1
continue
case '1',
'2',
'3',
'4',
'5',
'6',
'7',
'8',
'9',
'0':
mpmulcflt(a, 10)
mpaddcflt(a, float64(c)-'0')
if dp != 0 {
dp++
}
continue
case 'P',
'p':
eb = 1
fallthrough
case 'E',
'e':
ex = 0
ef = 0
for {
c, s = intstarstringplusplus(s)
if c == '+' || c == ' ' || c == '\t' {
continue
}
if c == '-' {
ef = 1
continue
}
if c >= '0' && c <= '9' {
ex = ex*10 + (c - '0')
if ex > 1e8 {
Yyerror("constant exponent out of range: %s", as)
errorexit()
}
continue
}
break
}
if ef != 0 {
ex = -ex
}
fallthrough
case 0:
break
}
break
}
if eb != 0 {
if dp != 0 {
Yyerror("decimal point and binary point in constant: %s", as)
goto bad
}
mpsetexp(a, int(a.Exp)+ex)
goto out
}
if dp != 0 {
dp--
}
if mpcmpfltc(a, 0.0) != 0 {
if ex >= dp {
mppow10flt(&b, ex-dp)
mpmulfltflt(a, &b)
} else {
// 4 approximates least_upper_bound(log2(10)).
if dp-ex >= 1<<(32-3) || int(int16(4*(dp-ex))) != 4*(dp-ex) {
Mpmovecflt(a, 0.0)
} else {
mppow10flt(&b, dp-ex)
mpdivfltflt(a, &b)
}
}
}
out:
if f != 0 {
mpnegflt(a)
}
return
bad:
Mpmovecflt(a, 0.0)
}
//
// fixed point input
// required syntax is [+-][0[x]]d*
//
func mpatofix(a *Mpint, as string) {
var c int
var f int
var s string
var s0 string
s = as
f = 0
Mpmovecfix(a, 0)
c, s = intstarstringplusplus(s)
switch c {
case '-':
f = 1
fallthrough
case '+':
c, s = intstarstringplusplus(s)
if c != '0' {
break
}
fallthrough
case '0':
goto oct
}
for c != 0 {
if c >= '0' && c <= '9' {
mpmulcfix(a, 10)
mpaddcfix(a, int64(c)-'0')
c, s = intstarstringplusplus(s)
continue
}
Yyerror("malformed decimal constant: %s", as)
goto bad
}
goto out
oct:
c, s = intstarstringplusplus(s)
if c == 'x' || c == 'X' {
goto hex
}
for c != 0 {
if c >= '0' && c <= '7' {
mpmulcfix(a, 8)
mpaddcfix(a, int64(c)-'0')
c, s = intstarstringplusplus(s)
continue
}
Yyerror("malformed octal constant: %s", as)
goto bad
}
goto out
hex:
s0 = s
c, _ = intstarstringplusplus(s)
for c != 0 {
if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') {
s = s[1:]
c, _ = intstarstringplusplus(s)
continue
}
Yyerror("malformed hex constant: %s", as)
goto bad
}
mphextofix(a, s0)
if a.Ovf != 0 {
Yyerror("constant too large: %s", as)
goto bad
}
out:
if f != 0 {
mpnegfix(a)
}
return
bad:
Mpmovecfix(a, 0)
}
func Bconv(xval *Mpint, flag int) string {
var buf [500]byte
var p int
var fp string
var q Mpint
var r Mpint
var ten Mpint
var sixteen Mpint
var f int
var digit int
mpmovefixfix(&q, xval)
f = 0
if mptestfix(&q) < 0 {
f = 1
mpnegfix(&q)
}
p = len(buf)
if flag&obj.FmtSharp != 0 /*untyped*/ {
// Hexadecimal
Mpmovecfix(&sixteen, 16)
for {
mpdivmodfixfix(&q, &r, &q, &sixteen)
digit = int(Mpgetfix(&r))
if digit < 10 {
p--
buf[p] = byte(digit + '0')
} else {
p--
buf[p] = byte(digit - 10 + 'A')
}
if mptestfix(&q) <= 0 {
break
}
}
p--
buf[p] = 'x'
p--
buf[p] = '0'
} else {
// Decimal
Mpmovecfix(&ten, 10)
for {
mpdivmodfixfix(&q, &r, &q, &ten)
p--
buf[p] = byte(Mpgetfix(&r) + '0')
if mptestfix(&q) <= 0 {
break
}
}
}
if f != 0 {
p--
buf[p] = '-'
}
fp += string(buf[p:])
return fp
}
func Fconv(fvp *Mpflt, flag int) string {
var buf string
var fp string
var fv Mpflt
var d float64
var dexp float64
var exp int
if flag&obj.FmtSharp != 0 /*untyped*/ {
// alternate form - decimal for error messages.
// for well in range, convert to double and use print's %g
exp = int(fvp.Exp) + sigfig(fvp)*Mpscale
if -900 < exp && exp < 900 {
d = mpgetflt(fvp)
if d >= 0 && (flag&obj.FmtSign != 0 /*untyped*/) {
fp += fmt.Sprintf("+")
}
fp += fmt.Sprintf("%g", d)
return fp
}
// very out of range. compute decimal approximation by hand.
// decimal exponent
dexp = float64(fvp.Exp) * 0.301029995663981195 // log_10(2)
exp = int(dexp)
// decimal mantissa
fv = *fvp
fv.Val.Neg = 0
fv.Exp = 0
d = mpgetflt(&fv)
d *= math.Pow(10, dexp-float64(exp))
for d >= 9.99995 {
d /= 10
exp++
}
if fvp.Val.Neg != 0 {
fp += fmt.Sprintf("-")
} else if flag&obj.FmtSign != 0 /*untyped*/ {
fp += fmt.Sprintf("+")
}
fp += fmt.Sprintf("%.5fe+%d", d, exp)
return fp
}
if sigfig(fvp) == 0 {
buf = fmt.Sprintf("0p+0")
goto out
}
fv = *fvp
for fv.Val.A[0] == 0 {
Mpshiftfix(&fv.Val, -Mpscale)
fv.Exp += Mpscale
}
for fv.Val.A[0]&1 == 0 {
Mpshiftfix(&fv.Val, -1)
fv.Exp += 1
}
if fv.Exp >= 0 {
buf = fmt.Sprintf("%vp+%d", Bconv(&fv.Val, obj.FmtSharp), fv.Exp)
goto out
}
buf = fmt.Sprintf("%vp-%d", Bconv(&fv.Val, obj.FmtSharp), -fv.Exp)
out:
fp += buf
return fp
}

View file

@ -0,0 +1,728 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
//
// return the significant
// words of the argument
//
// mplen returns the number of significant words of a: the index just
// past the highest non-zero word, or 0 when a is zero.
func mplen(a *Mpint) int {
	last := -1
	for i := 0; i < Mpprec; i++ {
		if a.A[i] != 0 {
			last = i
		}
	}
	return last + 1
}
//
// left shift mpint by one
// ignores sign
//
// mplsh shifts a left by one bit, propagating the carry word to word.
// The sign is ignored. On overflow a.Ovf is set and, unless quiet,
// an error is reported.
func mplsh(a *Mpint, quiet int) {
	var x int
	var i int
	var c int
	c = 0
	for i = 0; i < Mpprec; i++ {
		x = (a.A[i] << 1) + c
		c = 0
		if x >= Mpbase {
			x -= Mpbase
			c = 1 // carry into the next word
		}
		a.A[i] = x
	}
	a.Ovf = uint8(c)
	if a.Ovf != 0 && !(quiet != 0) {
		Yyerror("constant shift overflow")
	}
}
//
// left shift mpint by Mpscale
// ignores sign
//
// mplshw shifts a left by one full word (Mpscale bits), ignoring sign.
// On overflow a.Ovf is set and, unless quiet, an error is reported.
func mplshw(a *Mpint, quiet int) {
	if a.A[Mpprec-1] != 0 {
		a.Ovf = 1
		if quiet == 0 {
			Yyerror("constant shift overflow")
		}
	}
	for i := Mpprec - 1; i > 0; i-- {
		a.A[i] = a.A[i-1]
	}
	a.A[0] = 0
}
//
// right shift mpint by one
// ignores sign and overflow
//
// mprsh shifts a right by one bit, ignoring sign and overflow.
// For negative values the dropped low bit is compensated with a -1,
// so the result rounds toward negative infinity.
func mprsh(a *Mpint) {
	var x int
	var lo int
	var i int
	var c int
	c = 0
	lo = a.A[0] & 1 // the bit that falls off the bottom
	for i = Mpprec - 1; i >= 0; i-- {
		x = a.A[i]
		a.A[i] = (x + c) >> 1
		c = 0
		if x&1 != 0 {
			c = Mpbase // carry the dropped bit into the next lower word
		}
	}
	if a.Neg != 0 && lo != 0 {
		mpaddcfix(a, -1)
	}
}
//
// right shift mpint by Mpscale
// ignores sign and overflow
//
// mprshw shifts a right by one full word (Mpscale bits), ignoring sign
// and overflow; negative values round toward negative infinity.
func mprshw(a *Mpint) {
	lo := a.A[0]
	for i := 0; i < Mpprec-1; i++ {
		a.A[i] = a.A[i+1]
	}
	a.A[Mpprec-1] = 0
	if a.Neg != 0 && lo != 0 {
		mpaddcfix(a, -1)
	}
}
//
// return the sign of (abs(a)-abs(b))
//
// mpcmp returns the sign of abs(a)-abs(b) as -1, 0, or +1.
// Overflowed operands compare as equal after a diagnostic.
func mpcmp(a *Mpint, b *Mpint) int {
	if a.Ovf != 0 || b.Ovf != 0 {
		if nsavederrors+nerrors == 0 {
			Yyerror("ovf in cmp")
		}
		return 0
	}
	for i := Mpprec - 1; i >= 0; i-- {
		switch d := a.A[i] - b.A[i]; {
		case d > 0:
			return +1
		case d < 0:
			return -1
		}
	}
	return 0
}
//
// negate a
// ignore sign and ovf
//
// mpneg negates the magnitude of a in place word by word
// (two's-complement style), ignoring the sign and overflow flags.
func mpneg(a *Mpint) {
	borrow := 0
	for i := 0; i < Mpprec; i++ {
		w := -a.A[i] - borrow
		borrow = 0
		if w < 0 {
			w += Mpbase
			borrow = 1
		}
		a.A[i] = w
	}
}
// shift left by s (or right by -s)
// Mpshiftfix shifts a left by s bits, or right by -s bits when s is
// negative, moving whole words first and then single bits.
func Mpshiftfix(a *Mpint, s int) {
	if s < 0 {
		for s = -s; s >= Mpscale; s -= Mpscale {
			mprshw(a)
		}
		for ; s > 0; s-- {
			mprsh(a)
		}
		return
	}
	for ; s >= Mpscale; s -= Mpscale {
		mplshw(a, 0)
	}
	for ; s > 0; s-- {
		mplsh(a, 0)
	}
}
/// implements fix arihmetic
// mpaddfixfix computes a += b on sign-magnitude values. When the signs
// differ it subtracts the smaller magnitude from the larger instead,
// flipping a's sign if abs(b) > abs(a). On overflow a.Ovf is set and,
// unless quiet, an error is reported.
func mpaddfixfix(a *Mpint, b *Mpint, quiet int) {
	var i int
	var c int
	var x int
	if a.Ovf != 0 || b.Ovf != 0 {
		if nsavederrors+nerrors == 0 {
			Yyerror("ovf in mpaddxx")
		}
		a.Ovf = 1
		return
	}
	c = 0
	if a.Neg != b.Neg {
		goto sub
	}
	// perform a+b
	for i = 0; i < Mpprec; i++ {
		x = a.A[i] + b.A[i] + c
		c = 0
		if x >= Mpbase {
			x -= Mpbase
			c = 1
		}
		a.A[i] = x
	}
	a.Ovf = uint8(c)
	if a.Ovf != 0 && !(quiet != 0) {
		Yyerror("constant addition overflow")
	}
	return
	// perform a-b
sub:
	switch mpcmp(a, b) {
	case 0: // equal magnitudes cancel
		Mpmovecfix(a, 0)
	case 1: // abs(a) > abs(b): subtract b, keep a's sign
		for i = 0; i < Mpprec; i++ {
			x = a.A[i] - b.A[i] - c
			c = 0
			if x < 0 {
				x += Mpbase
				c = 1
			}
			a.A[i] = x
		}
	case -1: // abs(a) < abs(b): compute b-a and flip a's sign
		a.Neg ^= 1
		for i = 0; i < Mpprec; i++ {
			x = b.A[i] - a.A[i] - c
			c = 0
			if x < 0 {
				x += Mpbase
				c = 1
			}
			a.A[i] = x
		}
	}
}
func mpmulfixfix(a *Mpint, b *Mpint) {
var i int
var j int
var na int
var nb int
var x int
var s Mpint
var q Mpint
var c *Mpint
if a.Ovf != 0 || b.Ovf != 0 {
if nsavederrors+nerrors == 0 {
Yyerror("ovf in mpmulfixfix")
}
a.Ovf = 1
return
}
// pick the smaller
// to test for bits
na = mplen(a)
nb = mplen(b)
if na > nb {
mpmovefixfix(&s, a)
c = b
na = nb
} else {
mpmovefixfix(&s, b)
c = a
}
s.Neg = 0
Mpmovecfix(&q, 0)
for i = 0; i < na; i++ {
x = c.A[i]
for j = 0; j < Mpscale; j++ {
if x&1 != 0 {
if s.Ovf != 0 {
q.Ovf = 1
goto out
}
mpaddfixfix(&q, &s, 1)
if q.Ovf != 0 {
goto out
}
}
mplsh(&s, 1)
x >>= 1
}
}
out:
q.Neg = a.Neg ^ b.Neg
mpmovefixfix(a, &q)
if a.Ovf != 0 {
Yyerror("constant multiplication overflow")
}
}
func mpmulfract(a *Mpint, b *Mpint) {
var i int
var j int
var x int
var s Mpint
var q Mpint
if a.Ovf != 0 || b.Ovf != 0 {
if nsavederrors+nerrors == 0 {
Yyerror("ovf in mpmulflt")
}
a.Ovf = 1
return
}
mpmovefixfix(&s, b)
s.Neg = 0
Mpmovecfix(&q, 0)
i = Mpprec - 1
x = a.A[i]
if x != 0 {
Yyerror("mpmulfract not normal")
}
for i--; i >= 0; i-- {
x = a.A[i]
if x == 0 {
mprshw(&s)
continue
}
for j = 0; j < Mpscale; j++ {
x <<= 1
if x&Mpbase != 0 {
mpaddfixfix(&q, &s, 1)
}
mprsh(&s)
}
}
q.Neg = a.Neg ^ b.Neg
mpmovefixfix(a, &q)
if a.Ovf != 0 {
Yyerror("constant multiplication overflow")
}
}
// mporfixfix computes a |= b on sign-magnitude values by temporarily
// converting negative operands to two's-complement form and converting
// the result (and b) back afterwards.
func mporfixfix(a *Mpint, b *Mpint) {
	if a.Ovf != 0 || b.Ovf != 0 {
		if nsavederrors+nerrors == 0 {
			Yyerror("ovf in mporfixfix")
		}
		Mpmovecfix(a, 0)
		a.Ovf = 1
		return
	}
	if a.Neg != 0 {
		a.Neg = 0
		mpneg(a)
	}
	if b.Neg != 0 {
		mpneg(b)
	}
	x := 0
	for i := 0; i < Mpprec; i++ {
		x = a.A[i] | b.A[i]
		a.A[i] = x
	}
	if b.Neg != 0 {
		mpneg(b) // restore b
	}
	if x&Mpsign != 0 {
		a.Neg = 1
		mpneg(a)
	}
}
// mpandfixfix computes a &= b on sign-magnitude values by temporarily
// converting negative operands to two's-complement form and converting
// the result (and b) back afterwards.
func mpandfixfix(a *Mpint, b *Mpint) {
	if a.Ovf != 0 || b.Ovf != 0 {
		if nsavederrors+nerrors == 0 {
			Yyerror("ovf in mpandfixfix")
		}
		Mpmovecfix(a, 0)
		a.Ovf = 1
		return
	}
	if a.Neg != 0 {
		a.Neg = 0
		mpneg(a)
	}
	if b.Neg != 0 {
		mpneg(b)
	}
	x := 0
	for i := 0; i < Mpprec; i++ {
		x = a.A[i] & b.A[i]
		a.A[i] = x
	}
	if b.Neg != 0 {
		mpneg(b) // restore b
	}
	if x&Mpsign != 0 {
		a.Neg = 1
		mpneg(a)
	}
}
// mpandnotfixfix computes a &^= b on sign-magnitude values by
// temporarily converting negative operands to two's-complement form
// and converting the result (and b) back afterwards.
func mpandnotfixfix(a *Mpint, b *Mpint) {
	if a.Ovf != 0 || b.Ovf != 0 {
		if nsavederrors+nerrors == 0 {
			Yyerror("ovf in mpandnotfixfix")
		}
		Mpmovecfix(a, 0)
		a.Ovf = 1
		return
	}
	if a.Neg != 0 {
		a.Neg = 0
		mpneg(a)
	}
	if b.Neg != 0 {
		mpneg(b)
	}
	x := 0
	for i := 0; i < Mpprec; i++ {
		x = a.A[i] &^ b.A[i]
		a.A[i] = x
	}
	if b.Neg != 0 {
		mpneg(b) // restore b
	}
	if x&Mpsign != 0 {
		a.Neg = 1
		mpneg(a)
	}
}
// mpxorfixfix computes a ^= b on sign-magnitude values by temporarily
// converting negative operands to two's-complement form and converting
// the result (and b) back afterwards.
func mpxorfixfix(a *Mpint, b *Mpint) {
	var i int
	var x int
	x = 0
	if a.Ovf != 0 || b.Ovf != 0 {
		if nsavederrors+nerrors == 0 {
			// was "ovf in mporfixfix": copy-paste error naming the
			// wrong function in the diagnostic.
			Yyerror("ovf in mpxorfixfix")
		}
		Mpmovecfix(a, 0)
		a.Ovf = 1
		return
	}
	if a.Neg != 0 {
		a.Neg = 0
		mpneg(a)
	}
	if b.Neg != 0 {
		mpneg(b)
	}
	for i = 0; i < Mpprec; i++ {
		x = a.A[i] ^ b.A[i]
		a.A[i] = x
	}
	if b.Neg != 0 {
		mpneg(b) // restore b
	}
	if x&Mpsign != 0 {
		a.Neg = 1
		mpneg(a)
	}
}
// mplshfixfix computes a <<= b. Shift counts outside
// [0, Mpprec*Mpscale) are diagnosed and yield 0.
func mplshfixfix(a *Mpint, b *Mpint) {
	var s int64
	if a.Ovf != 0 || b.Ovf != 0 {
		if nsavederrors+nerrors == 0 {
			// was "ovf in mporfixfix": copy-paste error naming the
			// wrong function in the diagnostic.
			Yyerror("ovf in mplshfixfix")
		}
		Mpmovecfix(a, 0)
		a.Ovf = 1
		return
	}
	s = Mpgetfix(b)
	if s < 0 || s >= Mpprec*Mpscale {
		Yyerror("stupid shift: %d", s)
		Mpmovecfix(a, 0)
		return
	}
	Mpshiftfix(a, int(s))
}
// mprshfixfix computes a >>= b (arithmetic shift). Out-of-range shift
// counts are diagnosed and yield -1 for negative a, 0 otherwise.
func mprshfixfix(a *Mpint, b *Mpint) {
	if a.Ovf != 0 || b.Ovf != 0 {
		if nsavederrors+nerrors == 0 {
			Yyerror("ovf in mprshfixfix")
		}
		Mpmovecfix(a, 0)
		a.Ovf = 1
		return
	}
	s := Mpgetfix(b)
	if s < 0 || s >= Mpprec*Mpscale {
		Yyerror("stupid shift: %d", s)
		if a.Neg != 0 {
			Mpmovecfix(a, -1)
		} else {
			Mpmovecfix(a, 0)
		}
		return
	}
	Mpshiftfix(a, int(-s))
}
// mpnegfix negates a by flipping its sign flag.
func mpnegfix(a *Mpint) {
	a.Neg ^= 1
}
// Mpgetfix returns the value of a as an int64, assembled from the low
// three words (3*Mpscale bits) and negated when a is negative.
// An overflowed constant yields 0 after a diagnostic.
func Mpgetfix(a *Mpint) int64 {
	if a.Ovf != 0 {
		if nsavederrors+nerrors == 0 {
			Yyerror("constant overflow")
		}
		return 0
	}
	v := int64(uint64(a.A[0]))
	v |= int64(uint64(a.A[1]) << Mpscale)
	v |= int64(uint64(a.A[2]) << (Mpscale + Mpscale))
	if a.Neg != 0 {
		v = int64(-uint64(v))
	}
	return v
}
// Mpmovecfix sets a to the machine integer c, splitting its magnitude
// into Mpscale-bit words.
func Mpmovecfix(a *Mpint, c int64) {
	a.Neg = 0
	a.Ovf = 0
	x := c
	if x < 0 {
		a.Neg = 1
		x = int64(-uint64(x)) // safe even for MinInt64
	}
	for i := 0; i < Mpprec; i++ {
		a.A[i] = int(x & Mpmask)
		x >>= Mpscale
	}
}
func mpdivmodfixfix(q *Mpint, r *Mpint, n *Mpint, d *Mpint) {
var i int
var ns int
var ds int
ns = int(n.Neg)
ds = int(d.Neg)
n.Neg = 0
d.Neg = 0
mpmovefixfix(r, n)
Mpmovecfix(q, 0)
// shift denominator until it
// is larger than numerator
for i = 0; i < Mpprec*Mpscale; i++ {
if mpcmp(d, r) > 0 {
break
}
mplsh(d, 1)
}
// if it never happens
// denominator is probably zero
if i >= Mpprec*Mpscale {
q.Ovf = 1
r.Ovf = 1
n.Neg = uint8(ns)
d.Neg = uint8(ds)
Yyerror("constant division overflow")
return
}
// shift denominator back creating
// quotient a bit at a time
// when done the remaining numerator
// will be the remainder
for ; i > 0; i-- {
mplsh(q, 1)
mprsh(d)
if mpcmp(d, r) <= 0 {
mpaddcfix(q, 1)
mpsubfixfix(r, d)
}
}
n.Neg = uint8(ns)
d.Neg = uint8(ds)
r.Neg = uint8(ns)
q.Neg = uint8(ns ^ ds)
}
// mpiszero reports (as 0 or 1) whether the magnitude of a is zero.
func mpiszero(a *Mpint) int {
	for i := Mpprec - 1; i >= 0; i-- {
		if a.A[i] != 0 {
			return 0
		}
	}
	return 1
}
func mpdivfract(a *Mpint, b *Mpint) {
var n Mpint
var d Mpint
var i int
var j int
var neg int
var x int
mpmovefixfix(&n, a) // numerator
mpmovefixfix(&d, b) // denominator
neg = int(n.Neg) ^ int(d.Neg)
n.Neg = 0
d.Neg = 0
for i = Mpprec - 1; i >= 0; i-- {
x = 0
for j = 0; j < Mpscale; j++ {
x <<= 1
if mpcmp(&d, &n) <= 0 {
if !(mpiszero(&d) != 0) {
x |= 1
}
mpsubfixfix(&n, &d)
}
mprsh(&d)
}
a.A[i] = x
}
a.Neg = uint8(neg)
}
// mptestfix returns the sign of a as -1, 0, or +1.
func mptestfix(a *Mpint) int {
	var zero Mpint
	Mpmovecfix(&zero, 0)
	r := mpcmp(a, &zero)
	if a.Neg != 0 {
		// mpcmp compares magnitudes; flip the result for negative a.
		if r > 0 {
			return -1
		}
		if r < 0 {
			return +1
		}
	}
	return r
}

View file

@ -0,0 +1,377 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"fmt"
"math"
)
/*
* returns the leading non-zero
* word of the number
*/
// sigfig returns the number of significant words in the mantissa of a,
// i.e. one past the index of the highest non-zero word; 0 if a is zero.
func sigfig(a *Mpflt) int {
	i := Mpprec - 1
	for ; i >= 0; i-- {
		if a.Val.A[i] != 0 {
			break
		}
	}

	//print("sigfig %d %d\n", i-z+1, z);
	return i + 1
}
/*
* sets the exponent.
* a too large exponent is an error.
* a too small exponent rounds the number to zero.
*/
// mpsetexp sets a's exponent to exp. An exponent too large for int16 is
// an error (pinned at 0x7fff); one too small rounds a to zero.
func mpsetexp(a *Mpflt, exp int) {
	if int(int16(exp)) == exp {
		a.Exp = int16(exp)
		return
	}
	if exp > 0 {
		Yyerror("float constant is too large")
		a.Exp = 0x7fff
		return
	}
	Mpmovecflt(a, 0)
}
/*
* shifts the leading non-zero
* word of the number to Mpnorm
*/
// mpnorm normalizes a so its top mantissa bit sits in word Mpnorm-1,
// adjusting the exponent to keep the value unchanged. A zero value is
// canonicalized to exponent 0, sign 0.
func mpnorm(a *Mpflt) {
	var s int
	var os int
	var x int

	os = sigfig(a)
	if os == 0 {
		// zero
		a.Exp = 0
		a.Val.Neg = 0
		return
	}

	// this will normalize to the nearest word
	x = a.Val.A[os-1]

	s = (Mpnorm - os) * Mpscale

	// further normalize to the nearest bit
	for {
		x <<= 1
		if x&Mpbase != 0 {
			break
		}
		s++
		if x == 0 {
			// this error comes from trying to
			// convert an Inf or something
			// where the initial x=0x80000000
			s = (Mpnorm - os) * Mpscale

			break
		}
	}

	// shift the mantissa and compensate in the exponent
	Mpshiftfix(&a.Val, s)
	mpsetexp(a, int(a.Exp)-s)
}
/// implements float arihmetic
// mpaddfltflt adds b into a (a += b). The mantissa of the operand with
// the smaller exponent is shifted right to align before the fixed-point
// add, and the result is renormalized.
func mpaddfltflt(a *Mpflt, b *Mpflt) {
	var sa int
	var sb int
	var s int
	var c Mpflt

	if Mpdebug != 0 {
		fmt.Printf("\n%v + %v", Fconv(a, 0), Fconv(b, 0))
	}

	sa = sigfig(a)
	if sa == 0 {
		// a is zero: the sum is just b
		mpmovefltflt(a, b)
		goto out
	}

	sb = sigfig(b)
	if sb == 0 {
		// b is zero: the sum is just a
		goto out
	}

	s = int(a.Exp) - int(b.Exp)
	if s > 0 {
		// a is larger, shift b right
		mpmovefltflt(&c, b)

		Mpshiftfix(&c.Val, -s)
		mpaddfixfix(&a.Val, &c.Val, 0)
		goto out
	}

	if s < 0 {
		// b is larger, shift a right
		Mpshiftfix(&a.Val, s)

		mpsetexp(a, int(a.Exp)-s)
		mpaddfixfix(&a.Val, &b.Val, 0)
		goto out
	}

	// equal exponents: add mantissas directly
	mpaddfixfix(&a.Val, &b.Val, 0)

out:
	mpnorm(a)
	if Mpdebug != 0 {
		fmt.Printf(" = %v\n\n", Fconv(a, 0))
	}
}
// mpmulfltflt multiplies a by b in place (a *= b): a fixed-point
// fraction multiply of the mantissas plus a sum of the exponents
// (with a rescaling constant), then renormalization.
func mpmulfltflt(a *Mpflt, b *Mpflt) {
	var sa int
	var sb int

	if Mpdebug != 0 {
		fmt.Printf("%v\n * %v\n", Fconv(a, 0), Fconv(b, 0))
	}

	sa = sigfig(a)
	if sa == 0 {
		// zero
		a.Exp = 0

		a.Val.Neg = 0
		return
	}

	sb = sigfig(b)
	if sb == 0 {
		// zero times anything is zero; copy b to pick up its zero form
		mpmovefltflt(a, b)

		return
	}

	mpmulfract(&a.Val, &b.Val)
	mpsetexp(a, (int(a.Exp)+int(b.Exp))+Mpscale*Mpprec-Mpscale-1)

	mpnorm(a)
	if Mpdebug != 0 {
		fmt.Printf(" = %v\n\n", Fconv(a, 0))
	}
}
// mpdivfltflt divides a by b in place (a /= b) via fixed-point fraction
// division of the mantissas. Division by zero reports an error and marks
// a overflowed.
func mpdivfltflt(a *Mpflt, b *Mpflt) {
	var sa int
	var sb int
	var c Mpflt

	if Mpdebug != 0 {
		fmt.Printf("%v\n / %v\n", Fconv(a, 0), Fconv(b, 0))
	}

	sb = sigfig(b)
	if sb == 0 {
		// zero and ovfl
		a.Exp = 0

		a.Val.Neg = 0
		a.Val.Ovf = 1
		Yyerror("constant division by zero")
		return
	}

	sa = sigfig(a)
	if sa == 0 {
		// zero
		a.Exp = 0

		a.Val.Neg = 0
		return
	}

	// adjust b to top
	mpmovefltflt(&c, b)

	Mpshiftfix(&c.Val, Mpscale)

	// divide
	mpdivfract(&a.Val, &c.Val)

	mpsetexp(a, (int(a.Exp)-int(c.Exp))-Mpscale*(Mpprec-1)+1)

	mpnorm(a)
	if Mpdebug != 0 {
		fmt.Printf(" = %v\n\n", Fconv(a, 0))
	}
}
// mpgetfltN converts a to a float64 holding prec mantissa bits with
// minimum exponent bias (53/-1023 for float64 semantics, 24/-127 for
// float32), applying round-to-nearest-even and gradual underflow for
// denormals.
func mpgetfltN(a *Mpflt, prec int, bias int) float64 {
	var s int
	var i int
	var e int
	var minexp int
	var v uint64
	var f float64

	if a.Val.Ovf != 0 && nsavederrors+nerrors == 0 {
		Yyerror("mpgetflt ovf")
	}

	s = sigfig(a)
	if s == 0 {
		return 0
	}

	if s != Mpnorm {
		Yyerror("mpgetflt norm")
		mpnorm(a)
	}

	// shift until the top bit of the top word is set
	for a.Val.A[Mpnorm-1]&Mpsign == 0 {
		Mpshiftfix(&a.Val, 1)
		mpsetexp(a, int(a.Exp)-1) // can set 'a' to zero
		s = sigfig(a)
		if s == 0 {
			return 0
		}
	}

	// pick up the mantissa, a rounding bit, and a tie-breaking bit in a uvlong
	s = prec + 2

	v = 0
	for i = Mpnorm - 1; s >= Mpscale; i-- {
		v = v<<Mpscale | uint64(a.Val.A[i])
		s -= Mpscale
	}

	if s > 0 {
		v = v<<uint(s) | uint64(a.Val.A[i])>>uint(Mpscale-s)
		// sticky bit: any discarded low-order bits were non-zero
		if a.Val.A[i]&((1<<uint(Mpscale-s))-1) != 0 {
			v |= 1
		}
		i--
	}

	// fold all remaining words into the sticky bit
	for ; i >= 0; i-- {
		if a.Val.A[i] != 0 {
			v |= 1
		}
	}

	// gradual underflow
	e = Mpnorm*Mpscale + int(a.Exp) - prec

	minexp = bias + 1 - prec + 1
	if e < minexp {
		// shift the mantissa right, preserving stickiness, and pin
		// the exponent at the denormal minimum
		s = minexp - e
		if s > prec+1 {
			s = prec + 1
		}
		if v&((1<<uint(s))-1) != 0 {
			v |= 1 << uint(s)
		}
		v >>= uint(s)
		e = minexp
	}

	// round to even
	v |= (v & 4) >> 2

	v += v & 1
	v >>= 2

	f = float64(v)
	f = math.Ldexp(f, e)

	if a.Val.Neg != 0 {
		f = -f
	}

	return f
}
// mpgetflt converts a to float64 (53 mantissa bits, IEEE double bias).
func mpgetflt(a *Mpflt) float64 {
	return mpgetfltN(a, 53, -1023)
}
// mpgetflt32 converts a using float32 parameters (24 mantissa bits,
// single-precision bias), returning the result widened to float64.
func mpgetflt32(a *Mpflt) float64 {
	return mpgetfltN(a, 24, -127)
}
// Mpmovecflt sets a to the multi-precision value of the float64 c,
// extracting the mantissa Mpscale bits at a time via Frexp/Floor.
func Mpmovecflt(a *Mpflt, c float64) {
	var i int
	var f float64
	var l int

	if Mpdebug != 0 {
		fmt.Printf("\nconst %g", c)
	}
	Mpmovecfix(&a.Val, 0)
	a.Exp = 0
	if c == 0 {
		goto out
	}
	if c < 0 {
		a.Val.Neg = 1
		c = -c
	}

	f, i = math.Frexp(c)
	a.Exp = int16(i)

	// peel off Mpscale mantissa bits per iteration; the cap of 10
	// words bounds the loop even if f never becomes exactly zero
	for i = 0; i < 10; i++ {
		f = f * Mpbase
		l = int(math.Floor(f))
		f = f - float64(l)
		a.Exp -= Mpscale
		a.Val.A[0] = l
		if f == 0 {
			break
		}
		Mpshiftfix(&a.Val, Mpscale)
	}

out:
	mpnorm(a)
	if Mpdebug != 0 {
		fmt.Printf(" = %v\n", Fconv(a, 0))
	}
}
// mpnegflt negates a in place by toggling its sign bit.
func mpnegflt(a *Mpflt) {
	a.Val.Neg ^= 1
}
// mptestflt returns the sign of a: -1 if negative, 0 if zero, +1 if positive.
func mptestflt(a *Mpflt) int {
	if Mpdebug != 0 {
		fmt.Printf("\n%v?", Fconv(a, 0))
	}
	s := sigfig(a)
	if s != 0 {
		if a.Val.Neg != 0 {
			s = -1
		} else {
			s = +1
		}
	}
	if Mpdebug != 0 {
		fmt.Printf(" = %d\n", s)
	}
	return s
}

481
src/cmd/internal/gc/obj.go Normal file
View file

@ -0,0 +1,481 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/internal/obj"
"fmt"
)
/*
* architecture-independent object file output
*/
const (
	// ArhdrSize is the size in bytes of a Unix ar(1) archive member header.
	ArhdrSize = 60
)
// formathdr fills arhdr with a Unix ar(1) member header for the given
// member name and size (mode 0644, zero uid/gid/date).
func formathdr(arhdr []byte, name string, size int64) {
	hdr := fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size)
	copy(arhdr, hdr)
}
// dumpobj writes the compiled package to outfile: when writing an
// archive, a __.PKGDEF member (export data) and a _go_.N member (object
// code) whose ar headers are backpatched once their sizes are known;
// otherwise a bare object file. Cgo pragmas, global data, and type
// descriptors are emitted along the way.
func dumpobj() {
	var externs *NodeList
	var tmp *NodeList
	var arhdr [ArhdrSize]byte
	var startobj int64
	var size int64
	var zero *Sym
	var err error

	bout, err = obj.Bopenw(outfile)
	if err != nil {
		Flusherrors()
		fmt.Printf("can't create %s: %v\n", outfile, err)
		errorexit()
	}

	startobj = 0
	if writearchive != 0 {
		// reserve space for the member header; it is rewritten below
		// once the member size is known
		obj.Bwritestring(bout, "!<arch>\n")
		arhdr = [ArhdrSize]byte{}
		obj.Bwrite(bout, arhdr[:])
		startobj = obj.Boffset(bout)
	}

	fmt.Fprintf(bout, "go object %s %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring())
	dumpexport()

	if writearchive != 0 {
		obj.Bflush(bout)
		size = obj.Boffset(bout) - startobj
		if size&1 != 0 {
			// ar members are 2-byte aligned
			obj.Bputc(bout, 0)
		}
		obj.Bseek(bout, startobj-ArhdrSize, 0)
		formathdr(arhdr[:], "__.PKGDEF", size)
		obj.Bwrite(bout, arhdr[:])
		obj.Bflush(bout)

		// start the object-code member, again with a placeholder header
		obj.Bseek(bout, startobj+size+(size&1), 0)
		arhdr = [ArhdrSize]byte{}
		obj.Bwrite(bout, arhdr[:])
		startobj = obj.Boffset(bout)
		fmt.Fprintf(bout, "go object %s %s %s %s\n", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring())
	}

	if pragcgobuf != "" {
		if writearchive != 0 {
			// write empty export section; must be before cgo section
			fmt.Fprintf(bout, "\n$$\n\n$$\n\n")
		}

		fmt.Fprintf(bout, "\n$$ // cgo\n")
		fmt.Fprintf(bout, "%s\n$$\n\n", pragcgobuf)
	}

	fmt.Fprintf(bout, "\n!\n")

	externs = nil
	if externdcl != nil {
		externs = externdcl.End
	}

	dumpglobls()
	dumptypestructs()

	// Dump extra globals.
	tmp = externdcl

	if externs != nil {
		externdcl = externs.Next
	}
	dumpglobls()
	externdcl = tmp

	zero = Pkglookup("zerovalue", Runtimepkg)
	ggloblsym(zero, int32(zerosize), obj.DUPOK|obj.RODATA)

	dumpdata()
	obj.Writeobjdirect(Ctxt, bout)

	if writearchive != 0 {
		// backpatch the object-code member header with its final size
		obj.Bflush(bout)
		size = obj.Boffset(bout) - startobj
		if size&1 != 0 {
			obj.Bputc(bout, 0)
		}
		obj.Bseek(bout, startobj-ArhdrSize, 0)
		namebuf = fmt.Sprintf("_go_.%c", Thearch.Thechar)
		formathdr(arhdr[:], namebuf, size)
		obj.Bwrite(bout, arhdr[:])
	}

	obj.Bterm(bout)
}
// dumpglobls emits object data for this package's global variables and
// for the indirection symbols of functions whose address was taken
// (funcsyms). funcsyms is cleared so a second call does not re-emit them.
func dumpglobls() {
	var n *Node
	var l *NodeList

	// add globals
	for l = externdcl; l != nil; l = l.Next {
		n = l.N
		if n.Op != ONAME {
			continue
		}

		if n.Type == nil {
			Fatal("external %v nil type\n", Nconv(n, 0))
		}
		if n.Class == PFUNC {
			continue
		}
		if n.Sym.Pkg != localpkg {
			continue
		}
		dowidth(n.Type)
		ggloblnod(n)
	}

	for l = funcsyms; l != nil; l = l.Next {
		n = l.N
		// each funcsym is a read-only pointer to the function itself
		dsymptr(n.Sym, 0, n.Sym.Def.Shortname.Sym, 0)
		ggloblsym(n.Sym, int32(Widthptr), obj.DUPOK|obj.RODATA)
	}

	// Do not reprocess funcsyms on next dumpglobls call.
	funcsyms = nil
}
// Bputname writes the name of s to b as a NUL-terminated string.
func Bputname(b *obj.Biobuf, s *obj.LSym) {
	obj.Bwritestring(b, s.Name)
	obj.Bputc(b, 0)
}
// Linksym returns the liblink symbol for s, creating it on first use and
// caching it in s.Lsym. Blank symbols map to "_", symbols with an
// explicit linkname use it, and all others use "pkgprefix.name".
func Linksym(s *Sym) *obj.LSym {
	if s == nil {
		return nil
	}
	if s.Lsym == nil {
		switch {
		case isblanksym(s):
			s.Lsym = obj.Linklookup(Ctxt, "_", 0)
		case s.Linkname != "":
			s.Lsym = obj.Linklookup(Ctxt, s.Linkname, 0)
		default:
			name := fmt.Sprintf("%s.%s", s.Pkg.Prefix, s.Name)
			s.Lsym = obj.Linklookup(Ctxt, name, 0)
		}
	}
	return s.Lsym
}
// duintxx writes the wid-byte unsigned integer v into the data of s at
// offset off (first rounded up to wid-byte alignment) and returns the
// offset just past it.
func duintxx(s *Sym, off int, v uint64, wid int) int {
	// Update symbol data directly instead of generating a
	// DATA instruction that liblink will have to interpret later.
	// This reduces compilation time and memory usage.
	off = int(Rnd(int64(off), int64(wid)))

	return int(obj.Setuintxx(Ctxt, Linksym(s), int64(off), v, int64(wid)))
}
// duint8 writes the byte v into s at off; returns the next offset.
func duint8(s *Sym, off int, v uint8) int {
	return duintxx(s, off, uint64(v), 1)
}
// duint16 writes the 16-bit value v into s at off; returns the next offset.
func duint16(s *Sym, off int, v uint16) int {
	return duintxx(s, off, uint64(v), 2)
}
// duint32 writes the 32-bit value v into s at off; returns the next offset.
func duint32(s *Sym, off int, v uint32) int {
	return duintxx(s, off, uint64(v), 4)
}
// duint64 writes the 64-bit value v into s at off; returns the next offset.
func duint64(s *Sym, off int, v uint64) int {
	return duintxx(s, off, v, 8)
}
// duintptr writes v as a pointer-sized value into s at off; returns the
// next offset.
func duintptr(s *Sym, off int, v uint64) int {
	return duintxx(s, off, v, Widthptr)
}
// stringsym_gen numbers the anonymous symbols created for huge strings.
var stringsym_gen int
// stringsym returns a read-only symbol containing the string constant s:
// a string header (pointer, length) followed by the bytes and a
// terminating NUL. Short strings are named after their contents so
// identical strings from different modules share one symbol; strings
// longer than 100 bytes get a generated per-package name. The data is
// emitted only on the first request (guarded by SymUniq).
func stringsym(s string) *Sym {
	var sym *Sym
	var off int
	var n int
	var m int
	var tmp struct {
		lit Strlit
		buf string
	}
	var pkg *Pkg

	if len(s) > 100 {
		// huge strings are made static to avoid long names
		stringsym_gen++

		namebuf = fmt.Sprintf(".gostring.%d", stringsym_gen)

		pkg = localpkg
	} else {
		// small strings get named by their contents,
		// so that multiple modules using the same string
		// can share it.
		tmp.lit.S = s

		namebuf = fmt.Sprintf("\"%v\"", Zconv(&tmp.lit, 0))
		pkg = gostringpkg
	}

	sym = Pkglookup(namebuf, pkg)

	// SymUniq flag indicates that data is generated already
	if sym.Flags&SymUniq != 0 {
		return sym
	}
	sym.Flags |= SymUniq
	sym.Def = newname(sym)

	off = 0

	// string header
	off = dsymptr(sym, off, sym, Widthptr+Widthint)

	off = duintxx(sym, off, uint64(len(s)), Widthint)

	// string data, written 8 bytes per DATA instruction
	for n = 0; n < len(s); n += m {
		m = 8
		if m > len(s)-n {
			m = len(s) - n
		}
		off = dsname(sym, off, s[n:n+m])
	}

	off = duint8(sym, off, 0) // terminating NUL for runtime
	off = (off + Widthptr - 1) &^ (Widthptr - 1) // round to pointer alignment
	ggloblsym(sym, int32(off), obj.DUPOK|obj.RODATA)

	return sym
}
// slicebytes_gen numbers the anonymous backing-array symbols for slicebytes.
var slicebytes_gen int
// slicebytes initializes the global slice nam with the first len bytes
// of s: it emits an anonymous backing-array symbol holding the data,
// then writes nam's slice header (pointer, length, capacity=length).
// Note: the parameter is named len and shadows the builtin inside.
func slicebytes(nam *Node, s string, len int) {
	var off int
	var n int
	var m int
	var sym *Sym

	slicebytes_gen++
	namebuf = fmt.Sprintf(".gobytes.%d", slicebytes_gen)
	sym = Pkglookup(namebuf, localpkg)
	sym.Def = newname(sym)

	// backing array data, 8 bytes per DATA instruction
	off = 0
	for n = 0; n < len; n += m {
		m = 8
		if m > len-n {
			m = len - n
		}
		off = dsname(sym, off, s[n:n+m])
	}

	ggloblsym(sym, int32(off), obj.NOPTR)

	if nam.Op != ONAME {
		Fatal("slicebytes %v", Nconv(nam, 0))
	}

	// slice header: data pointer, length, capacity
	off = int(nam.Xoffset)
	off = dsymptr(nam.Sym, off, sym, 0)
	off = duintxx(nam.Sym, off, uint64(len), Widthint)
	duintxx(nam.Sym, off, uint64(len), Widthint)
}
// dstringptr emits a DATA instruction storing, at pointer-aligned offset
// off in s, a pointer to the data of the string constant str (with an
// appended NUL); returns the offset just past the pointer.
func dstringptr(s *Sym, off int, str string) int {
	var p *obj.Prog

	off = int(Rnd(int64(off), int64(Widthptr)))
	p = Thearch.Gins(obj.ADATA, nil, nil)
	p.From.Type = obj.TYPE_MEM
	p.From.Name = obj.NAME_EXTERN
	p.From.Sym = Linksym(s)
	p.From.Offset = int64(off)
	p.From3.Type = obj.TYPE_CONST
	p.From3.Offset = int64(Widthptr)

	Datastring(str+"\x00", &p.To) // TODO(rsc): Remove NUL
	p.To.Type = obj.TYPE_ADDR
	p.To.Etype = Simtype[TINT]
	off += Widthptr

	return off
}
/*
* gobj.c
*/
// Datastring initializes a to address the raw data of the string
// constant s, past the (pointer, length) header of its symbol.
func Datastring(s string, a *obj.Addr) {
	sym := stringsym(s)
	a.Type = obj.TYPE_MEM
	a.Name = obj.NAME_EXTERN
	a.Node = sym.Def
	a.Sym = Linksym(sym)
	a.Offset = int64(Widthptr) + int64(Widthint) // skip header
	a.Etype = Simtype[TINT]
}
// datagostring initializes a to address the string header symbol for the
// string literal sval (offset 0, i.e. the header itself).
func datagostring(sval *Strlit, a *obj.Addr) {
	sym := stringsym(sval.S)
	a.Type = obj.TYPE_MEM
	a.Name = obj.NAME_EXTERN
	a.Node = sym.Def
	a.Sym = Linksym(sym)
	a.Offset = 0 // header
	a.Etype = TSTRING
}
// dgostringptr writes, at offset off in s, a pointer to a Go string with
// contents str; the empty string becomes a nil pointer. Returns the
// offset just past the pointer.
func dgostringptr(s *Sym, off int, str string) int {
	if str == "" {
		return duintptr(s, off, 0)
	}
	lit := new(Strlit)
	lit.S = str
	return dgostrlitptr(s, off, lit)
}
// dgostrlitptr emits a DATA instruction storing, at pointer-aligned
// offset off in s, a pointer to the string header for lit; nil becomes a
// nil pointer. Returns the offset just past the pointer.
func dgostrlitptr(s *Sym, off int, lit *Strlit) int {
	var p *obj.Prog

	if lit == nil {
		return duintptr(s, off, 0)
	}

	off = int(Rnd(int64(off), int64(Widthptr)))
	p = Thearch.Gins(obj.ADATA, nil, nil)
	p.From.Type = obj.TYPE_MEM
	p.From.Name = obj.NAME_EXTERN
	p.From.Sym = Linksym(s)
	p.From.Offset = int64(off)
	p.From3.Type = obj.TYPE_CONST
	p.From3.Offset = int64(Widthptr)
	datagostring(lit, &p.To)
	p.To.Type = obj.TYPE_ADDR
	p.To.Etype = Simtype[TINT]
	off += Widthptr

	return off
}
// dsname emits a DATA instruction storing the raw bytes of t into s at
// offset off and returns the offset just past them.
func dsname(s *Sym, off int, t string) int {
	p := Thearch.Gins(obj.ADATA, nil, nil)
	p.From.Type = obj.TYPE_MEM
	p.From.Name = obj.NAME_EXTERN
	p.From.Sym = Linksym(s)
	p.From.Offset = int64(off)
	p.From3.Type = obj.TYPE_CONST
	p.From3.Offset = int64(len(t))
	p.To.Type = obj.TYPE_SCONST
	p.To.U.Sval = t
	return off + len(t)
}
// dsymptr emits a DATA instruction storing the address x+xoff into s at
// offset off (rounded up to pointer alignment) and returns the offset
// just past the pointer.
func dsymptr(s *Sym, off int, x *Sym, xoff int) int {
	off = int(Rnd(int64(off), int64(Widthptr)))

	p := Thearch.Gins(obj.ADATA, nil, nil)
	p.From.Type = obj.TYPE_MEM
	p.From.Name = obj.NAME_EXTERN
	p.From.Sym = Linksym(s)
	p.From.Offset = int64(off)
	p.From3.Type = obj.TYPE_CONST
	p.From3.Offset = int64(Widthptr)
	p.To.Type = obj.TYPE_ADDR
	p.To.Name = obj.NAME_EXTERN
	p.To.Sym = Linksym(x)
	p.To.Offset = int64(xoff)
	return off + Widthptr
}
// gdata emits wid bytes of initialization data for nam from the value
// nr, dispatching complex and string literals to their specialized
// emitters.
func gdata(nam *Node, nr *Node, wid int) {
	if nr.Op == OLITERAL {
		switch nr.Val.Ctype {
		case CTCPLX:
			gdatacomplex(nam, nr.Val.U.Cval)
			return

		case CTSTR:
			gdatastring(nam, nr.Val.U.Sval)
			return
		}
	}
	p := Thearch.Gins(obj.ADATA, nam, nr)
	p.From3.Type = obj.TYPE_CONST
	p.From3.Offset = int64(wid)
}
// gdatacomplex emits initialization data for the complex constant cval
// into nam: the real part at nam's offset, the imaginary part one
// component-width later.
func gdatacomplex(nam *Node, cval *Mpcplx) {
	var p *obj.Prog
	var w int // width in bytes of one component (float32 or float64)

	w = cplxsubtype(int(nam.Type.Etype))
	w = int(Types[w].Width)

	// real part
	p = Thearch.Gins(obj.ADATA, nam, nil)
	p.From3.Type = obj.TYPE_CONST
	p.From3.Offset = int64(w)
	p.To.Type = obj.TYPE_FCONST
	p.To.U.Dval = mpgetflt(&cval.Real)

	// imaginary part, offset by one component width
	p = Thearch.Gins(obj.ADATA, nam, nil)
	p.From3.Type = obj.TYPE_CONST
	p.From3.Offset = int64(w)
	p.From.Offset += int64(w)
	p.To.Type = obj.TYPE_FCONST
	p.To.U.Dval = mpgetflt(&cval.Imag)
}
// gdatastring emits initialization data for the string constant sval
// into nam: the data pointer first, then the length one pointer-width
// later.
func gdatastring(nam *Node, sval *Strlit) {
	var p *obj.Prog
	var nod1 Node

	// pointer to the string data
	p = Thearch.Gins(obj.ADATA, nam, nil)
	Datastring(sval.S, &p.To)
	p.From3.Type = obj.TYPE_CONST
	p.From3.Offset = Types[Tptr].Width
	p.To.Type = obj.TYPE_ADDR

	//print("%P\n", p);

	// length, stored after the pointer
	Nodconst(&nod1, Types[TINT], int64(len(sval.S)))

	p = Thearch.Gins(obj.ADATA, nam, &nod1)
	p.From3.Type = obj.TYPE_CONST
	p.From3.Offset = int64(Widthint)
	p.From.Offset += int64(Widthptr)
}

View file

@ -0,0 +1,162 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
// auto generated by go tool dist
// opnames maps each Op code to its printable name (the constant's name
// without the leading O). Generated table; keep entries in sync with the
// Op constants.
var opnames = []string{
	OXXX: "XXX",
	ONAME: "NAME",
	ONONAME: "NONAME",
	OTYPE: "TYPE",
	OPACK: "PACK",
	OLITERAL: "LITERAL",
	OADD: "ADD",
	OSUB: "SUB",
	OOR: "OR",
	OXOR: "XOR",
	OADDSTR: "ADDSTR",
	OADDR: "ADDR",
	OANDAND: "ANDAND",
	OAPPEND: "APPEND",
	OARRAYBYTESTR: "ARRAYBYTESTR",
	OARRAYBYTESTRTMP: "ARRAYBYTESTRTMP",
	OARRAYRUNESTR: "ARRAYRUNESTR",
	OSTRARRAYBYTE: "STRARRAYBYTE",
	OSTRARRAYBYTETMP: "STRARRAYBYTETMP",
	OSTRARRAYRUNE: "STRARRAYRUNE",
	OAS: "AS",
	OAS2: "AS2",
	OAS2FUNC: "AS2FUNC",
	OAS2RECV: "AS2RECV",
	OAS2MAPR: "AS2MAPR",
	OAS2DOTTYPE: "AS2DOTTYPE",
	OASOP: "ASOP",
	OCALL: "CALL",
	OCALLFUNC: "CALLFUNC",
	OCALLMETH: "CALLMETH",
	OCALLINTER: "CALLINTER",
	OCALLPART: "CALLPART",
	OCAP: "CAP",
	OCLOSE: "CLOSE",
	OCLOSURE: "CLOSURE",
	OCMPIFACE: "CMPIFACE",
	OCMPSTR: "CMPSTR",
	OCOMPLIT: "COMPLIT",
	OMAPLIT: "MAPLIT",
	OSTRUCTLIT: "STRUCTLIT",
	OARRAYLIT: "ARRAYLIT",
	OPTRLIT: "PTRLIT",
	OCONV: "CONV",
	OCONVIFACE: "CONVIFACE",
	OCONVNOP: "CONVNOP",
	OCOPY: "COPY",
	ODCL: "DCL",
	ODCLFUNC: "DCLFUNC",
	ODCLFIELD: "DCLFIELD",
	ODCLCONST: "DCLCONST",
	ODCLTYPE: "DCLTYPE",
	ODELETE: "DELETE",
	ODOT: "DOT",
	ODOTPTR: "DOTPTR",
	ODOTMETH: "DOTMETH",
	ODOTINTER: "DOTINTER",
	OXDOT: "XDOT",
	ODOTTYPE: "DOTTYPE",
	ODOTTYPE2: "DOTTYPE2",
	OEQ: "EQ",
	ONE: "NE",
	OLT: "LT",
	OLE: "LE",
	OGE: "GE",
	OGT: "GT",
	OIND: "IND",
	OINDEX: "INDEX",
	OINDEXMAP: "INDEXMAP",
	OKEY: "KEY",
	OPARAM: "PARAM",
	OLEN: "LEN",
	OMAKE: "MAKE",
	OMAKECHAN: "MAKECHAN",
	OMAKEMAP: "MAKEMAP",
	OMAKESLICE: "MAKESLICE",
	OMUL: "MUL",
	ODIV: "DIV",
	OMOD: "MOD",
	OLSH: "LSH",
	ORSH: "RSH",
	OAND: "AND",
	OANDNOT: "ANDNOT",
	ONEW: "NEW",
	ONOT: "NOT",
	OCOM: "COM",
	OPLUS: "PLUS",
	OMINUS: "MINUS",
	OOROR: "OROR",
	OPANIC: "PANIC",
	OPRINT: "PRINT",
	OPRINTN: "PRINTN",
	OPAREN: "PAREN",
	OSEND: "SEND",
	OSLICE: "SLICE",
	OSLICEARR: "SLICEARR",
	OSLICESTR: "SLICESTR",
	OSLICE3: "SLICE3",
	OSLICE3ARR: "SLICE3ARR",
	ORECOVER: "RECOVER",
	ORECV: "RECV",
	ORUNESTR: "RUNESTR",
	OSELRECV: "SELRECV",
	OSELRECV2: "SELRECV2",
	OIOTA: "IOTA",
	OREAL: "REAL",
	OIMAG: "IMAG",
	OCOMPLEX: "COMPLEX",
	OBLOCK: "BLOCK",
	OBREAK: "BREAK",
	OCASE: "CASE",
	OXCASE: "XCASE",
	OCONTINUE: "CONTINUE",
	ODEFER: "DEFER",
	OEMPTY: "EMPTY",
	OFALL: "FALL",
	OXFALL: "XFALL",
	OFOR: "FOR",
	OGOTO: "GOTO",
	OIF: "IF",
	OLABEL: "LABEL",
	OPROC: "PROC",
	ORANGE: "RANGE",
	ORETURN: "RETURN",
	OSELECT: "SELECT",
	OSWITCH: "SWITCH",
	OTYPESW: "TYPESW",
	OTCHAN: "TCHAN",
	OTMAP: "TMAP",
	OTSTRUCT: "TSTRUCT",
	OTINTER: "TINTER",
	OTFUNC: "TFUNC",
	OTARRAY: "TARRAY",
	ODDD: "DDD",
	ODDDARG: "DDDARG",
	OINLCALL: "INLCALL",
	OEFACE: "EFACE",
	OITAB: "ITAB",
	OSPTR: "SPTR",
	OCLOSUREVAR: "CLOSUREVAR",
	OCFUNC: "CFUNC",
	OCHECKNIL: "CHECKNIL",
	OVARKILL: "VARKILL",
	OREGISTER: "REGISTER",
	OINDREG: "INDREG",
	OCMP: "CMP",
	ODEC: "DEC",
	OINC: "INC",
	OEXTEND: "EXTEND",
	OHMUL: "HMUL",
	OLROT: "LROT",
	ORROTC: "RROTC",
	ORETJMP: "RETJMP",
	OEND: "END",
}

1188
src/cmd/internal/gc/order.go Normal file

File diff suppressed because it is too large Load diff

597
src/cmd/internal/gc/pgen.go Normal file
View file

@ -0,0 +1,597 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/internal/obj"
"fmt"
"strings"
)
// "Portable" code generation.
// Compiled separately for 5g, 6g, and 8g, so allowed to use gg.h, opt.h.
// Must code to the intersection of the three back ends.
//#include "opt.h"
// makefuncdatasym_nsym numbers the symbols created by makefuncdatasym.
var makefuncdatasym_nsym int32
// makefuncdatasym creates a uniquely named external symbol (namefmt must
// contain one %d verb) and emits an AFUNCDATA instruction associating it
// with table funcdatakind for the function currently being compiled.
func makefuncdatasym(namefmt string, funcdatakind int64) *Sym {
	var nod Node
	var pnod *Node
	var sym *Sym

	namebuf = fmt.Sprintf(namefmt, makefuncdatasym_nsym)
	makefuncdatasym_nsym++
	sym = Lookup(namebuf)
	pnod = newname(sym)
	pnod.Class = PEXTERN
	Nodconst(&nod, Types[TINT32], funcdatakind)
	Thearch.Gins(obj.AFUNCDATA, &nod, pnod)
	return sym
}
// gvardef inserts a VARDEF for n into the instruction stream.
// VARDEF is an annotation for the liveness analysis, marking a place
// where a complete initialization (definition) of a variable begins.
// Since the liveness analysis can see initialization of single-word
// variables quite easy, gvardef is usually only called for multi-word
// or 'fat' variables, those satisfying isfat(n->type).
// However, gvardef is also called when a non-fat variable is initialized
// via a block move; the only time this happens is when you have
// return f()
// for a function with multiple return values exactly matching the return
// types of the current function.
//
// A 'VARDEF x' annotation in the instruction stream tells the liveness
// analysis to behave as though the variable x is being initialized at that
// point in the instruction stream. The VARDEF must appear before the
// actual (multi-instruction) initialization, and it must also appear after
// any uses of the previous value, if any. For example, if compiling:
//
// x = x[1:]
//
// it is important to generate code like:
//
// base, len, cap = pieces of x[1:]
// VARDEF x
// x = {base, len, cap}
//
// If instead the generated code looked like:
//
// VARDEF x
// base, len, cap = pieces of x[1:]
// x = {base, len, cap}
//
// then the liveness analysis would decide the previous value of x was
// unnecessary even though it is about to be used by the x[1:] computation.
// Similarly, if the generated code looked like:
//
// base, len, cap = pieces of x[1:]
// x = {base, len, cap}
// VARDEF x
//
// then the liveness analysis will not preserve the new value of x, because
// the VARDEF appears to have "overwritten" it.
//
// VARDEF is a bit of a kludge to work around the fact that the instruction
// stream is working on single-word values but the liveness analysis
// wants to work on individual variables, which might be multi-word
// aggregates. It might make sense at some point to look into letting
// the liveness analysis work on single-word values as well, although
// there are complications around interface values, slices, and strings,
// all of which cannot be treated as individual words.
//
// VARKILL is the opposite of VARDEF: it marks a value as no longer needed,
// even if its address has been taken. That is, a VARKILL annotation asserts
// that its argument is certainly dead, for use when the liveness analysis
// would not otherwise be able to deduce that fact.
// gvardefx emits the liveness pseudo-instruction as (AVARDEF or
// AVARKILL) for n. Only stack-resident variables (PAUTO, PPARAM,
// PPARAMOUT) are annotated; other classes are ignored.
func gvardefx(n *Node, as int) {
	if n == nil {
		Fatal("gvardef nil")
	}
	if n.Op != ONAME {
		Yyerror("gvardef %v; %v", Oconv(int(n.Op), obj.FmtSharp), Nconv(n, 0))
		return
	}

	switch n.Class {
	case PAUTO,
		PPARAM,
		PPARAMOUT:
		Thearch.Gins(as, nil, n)
	}
}
// Gvardef emits a VARDEF annotation for n (see the comment above).
func Gvardef(n *Node) {
	gvardefx(n, obj.AVARDEF)
}
// gvarkill emits a VARKILL annotation for n (see the comment above).
func gvarkill(n *Node) {
	gvardefx(n, obj.AVARKILL)
}
// removevardef unlinks every AVARDEF/AVARKILL pseudo-instruction from
// the list starting at firstp and retargets branch destinations that
// pointed at a removed instruction to the next real one.
func removevardef(firstp *obj.Prog) {
	var p *obj.Prog

	for p = firstp; p != nil; p = p.Link {
		// skip runs of annotations following p
		for p.Link != nil && (p.Link.As == obj.AVARDEF || p.Link.As == obj.AVARKILL) {
			p.Link = p.Link.Link
		}
		if p.To.Type == obj.TYPE_BRANCH {
			// branch targets must land on a real instruction too
			for p.To.U.Branch != nil && (p.To.U.Branch.As == obj.AVARDEF || p.To.U.Branch.As == obj.AVARKILL) {
				p.To.U.Branch = p.To.U.Branch.Link
			}
		}
	}
}
// gcsymdup renames the GC bitmap symbol s after the MD5 of its contents
// ("gclocals·<hash>") and marks it dup-ok, so identical bitmaps from
// different functions collapse to one symbol at link time. The symbol
// must have no relocations.
func gcsymdup(s *Sym) {
	var ls *obj.LSym
	var lo uint64
	var hi uint64

	ls = Linksym(s)
	if len(ls.R) > 0 {
		Fatal("cannot rosymdup %s with relocations", ls.Name)
	}
	var d MD5
	md5reset(&d)
	md5write(&d, ls.P, len(ls.P))
	lo = md5sum(&d, &hi)
	ls.Name = fmt.Sprintf("gclocals·%016x%016x", lo, hi)
	ls.Dupok = 1
}
// emitptrargsmap emits the <fn>.args_stackmap symbol for the current
// (body-less, i.e. assembly) function: one pointer bitmap over the
// arguments, plus a second one covering the results when present.
func emitptrargsmap() {
	var nptr int
	var nbitmap int
	var j int
	var off int
	var xoffset int64
	var bv *Bvec
	var sym *Sym

	sym = Lookup(fmt.Sprintf("%s.args_stackmap", Curfn.Nname.Sym.Name))

	nptr = int(Curfn.Type.Argwid / int64(Widthptr))
	bv = bvalloc(int32(nptr) * 2)
	nbitmap = 1
	if Curfn.Type.Outtuple > 0 {
		nbitmap = 2
	}
	off = duint32(sym, 0, uint32(nbitmap))
	off = duint32(sym, off, uint32(bv.n))
	// bitmap over receiver and input arguments
	if Curfn.Type.Thistuple > 0 {
		xoffset = 0
		twobitwalktype1(getthisx(Curfn.Type), &xoffset, bv)
	}

	if Curfn.Type.Intuple > 0 {
		xoffset = 0
		twobitwalktype1(getinargx(Curfn.Type), &xoffset, bv)
	}

	for j = 0; int32(j) < bv.n; j += 32 {
		off = duint32(sym, off, bv.b[j/32])
	}
	// second bitmap additionally covering the results
	if Curfn.Type.Outtuple > 0 {
		xoffset = 0
		twobitwalktype1(getoutargx(Curfn.Type), &xoffset, bv)
		for j = 0; int32(j) < bv.n; j += 32 {
			off = duint32(sym, off, bv.b[j/32])
		}
	}

	ggloblsym(sym, int32(off), obj.RODATA)
}
// Sort the list of stack variables. Autos after anything else,
// within autos, unused after used, within used, things with
// pointers first, zeroed things first, and then decreasing size.
// Because autos are laid out in decreasing addresses
// on the stack, pointers first, zeroed things first and decreasing size
// really means, in memory, things with pointers needing zeroing at
// the top of the stack and increasing in size.
// Non-autos sort on offset.
// cmpstackvar is the comparison function used to sort a function's
// declarations for frame layout (see the comment above): non-autos
// first by offset; autos ordered used-before-unused, then
// pointer-bearing first, needs-zeroing first, larger first, and finally
// by symbol name as a deterministic tiebreak.
func cmpstackvar(a *Node, b *Node) int {
	var ap int
	var bp int

	if a.Class != b.Class {
		if a.Class == PAUTO {
			return +1
		}
		return -1
	}

	if a.Class != PAUTO {
		if a.Xoffset < b.Xoffset {
			return -1
		}
		if a.Xoffset > b.Xoffset {
			return +1
		}
		return 0
	}

	if (a.Used == 0) != (b.Used == 0) {
		return int(b.Used) - int(a.Used)
	}

	ap = bool2int(haspointers(a.Type))
	bp = bool2int(haspointers(b.Type))
	if ap != bp {
		return bp - ap
	}

	ap = int(a.Needzero)
	bp = int(b.Needzero)
	if ap != bp {
		return bp - ap
	}

	// note the inverted sense: wider variables sort earlier
	if a.Type.Width < b.Type.Width {
		return +1
	}
	if a.Type.Width > b.Type.Width {
		return -1
	}

	return stringsCompare(a.Sym.Name, b.Sym.Name)
}
// TODO(lvd) find out where the PAUTO/OLITERAL nodes come from.
// allocauto assigns stack offsets to the function's local variables:
// it marks and sorts the PAUTOs, drops the unused ones, accumulates
// Stksize/stkptrsize with per-variable alignment, and finally rewrites
// the instruction stream (fixautoused) and the symbols' offsets.
// TODO(lvd) find out where the PAUTO/OLITERAL nodes come from.
func allocauto(ptxt *obj.Prog) {
	var ll *NodeList
	var n *Node
	var w int64

	Stksize = 0
	stkptrsize = 0

	if Curfn.Dcl == nil {
		return
	}

	// Mark the PAUTO's unused.
	for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
		if ll.N.Class == PAUTO {
			ll.N.Used = 0
		}
	}

	markautoused(ptxt)

	listsort(&Curfn.Dcl, cmpstackvar)

	// Unused autos are at the end, chop 'em off.
	ll = Curfn.Dcl

	n = ll.N
	if n.Class == PAUTO && n.Op == ONAME && !(n.Used != 0) {
		// No locals used at all
		Curfn.Dcl = nil

		fixautoused(ptxt)
		return
	}

	for ll = Curfn.Dcl; ll.Next != nil; ll = ll.Next {
		n = ll.Next.N
		if n.Class == PAUTO && n.Op == ONAME && !(n.Used != 0) {
			ll.Next = nil
			Curfn.Dcl.End = ll
			break
		}
	}

	// Reassign stack offsets of the locals that are still there.
	for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
		n = ll.N
		if n.Class != PAUTO || n.Op != ONAME {
			continue
		}

		dowidth(n.Type)
		w = n.Type.Width
		if w >= Thearch.MAXWIDTH || w < 0 {
			Fatal("bad width")
		}
		Stksize += w
		Stksize = Rnd(Stksize, int64(n.Type.Align))
		// sorting placed pointer-bearing autos first, so the running
		// Stksize here is also the pointer-scanned prefix of the frame
		if haspointers(n.Type) {
			stkptrsize = Stksize
		}
		if Thearch.Thechar == '5' || Thearch.Thechar == '9' {
			Stksize = Rnd(Stksize, int64(Widthptr))
		}
		if Stksize >= 1<<31 {
			setlineno(Curfn)
			Yyerror("stack frame too large (>2GB)")
		}

		// offsets are negative, relative to the frame pointer
		n.Stkdelta = -Stksize - n.Xoffset
	}

	Stksize = Rnd(Stksize, int64(Widthreg))
	stkptrsize = Rnd(stkptrsize, int64(Widthreg))

	fixautoused(ptxt)

	// The debug information needs accurate offsets on the symbols.
	for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
		if ll.N.Class != PAUTO || ll.N.Op != ONAME {
			continue
		}
		ll.N.Xoffset += ll.N.Stkdelta
		ll.N.Stkdelta = 0
	}
}
// movelarge applies movelargefn to every function declaration in l.
func movelarge(l *NodeList) {
	for ll := l; ll != nil; ll = ll.Next {
		if ll.N.Op == ODCLFUNC {
			movelargefn(ll.N)
		}
	}
}
// movelargefn forces stack variables of fn wider than MaxStackVarSize
// to the heap by marking their addresses as escaping.
func movelargefn(fn *Node) {
	for l := fn.Dcl; l != nil; l = l.Next {
		n := l.N
		if n.Class == PAUTO && n.Type != nil && n.Type.Width > MaxStackVarSize {
			addrescapes(n)
		}
	}
}
// Cgen_checknil emits a nil-check pseudo-instruction (ACHECKNIL) for n,
// first materializing n into a register when the backend requires it
// (always on arm/ppc64, or when n is not directly addressable).
func Cgen_checknil(n *Node) {
	var reg Node

	if Disable_checknil != 0 {
		return
	}

	// Ideally we wouldn't see any integer types here, but we do.
	if n.Type == nil || (!(Isptr[n.Type.Etype] != 0) && !(Isint[n.Type.Etype] != 0) && n.Type.Etype != TUNSAFEPTR) {
		Dump("checknil", n)
		Fatal("bad checknil")
	}

	if ((Thearch.Thechar == '5' || Thearch.Thechar == '9') && n.Op != OREGISTER) || !(n.Addable != 0) || n.Op == OLITERAL {
		Thearch.Regalloc(&reg, Types[Tptr], n)
		Thearch.Cgen(n, &reg)
		Thearch.Gins(obj.ACHECKNIL, &reg, nil)
		Thearch.Regfree(&reg)
		return
	}

	Thearch.Gins(obj.ACHECKNIL, n, nil)
}
/*
* ggen.c
*/
// compile generates machine code for one top-level function: it orders
// and walks the body (optionally race-instrumenting it), emits the ATEXT
// prologue with its flags, generates code for the body, runs the
// portable optimizers, lays out the stack frame, and emits the GC
// liveness symbols.
func compile(fn *Node) {
	var pl *obj.Plist
	var nod1 Node
	var n *Node
	var ptxt *obj.Prog
	var p *obj.Prog
	var lno int32
	var t *Type
	var save Iter
	var oldstksize int64
	var l *NodeList
	var nam *Node
	var gcargs *Sym
	var gclocals *Sym

	// look up the runtime helpers once, on the first compile
	if Newproc == nil {
		Newproc = Sysfunc("newproc")
		Deferproc = Sysfunc("deferproc")
		Deferreturn = Sysfunc("deferreturn")
		Panicindex = Sysfunc("panicindex")
		panicslice = Sysfunc("panicslice")
		throwreturn = Sysfunc("throwreturn")
	}

	lno = setlineno(fn)

	Curfn = fn
	dowidth(Curfn.Type)

	if fn.Nbody == nil {
		// body-less function: assembly implementation, or an error
		// NOTE(review): fn has no matching verb in the format string —
		// confirm Yyerror tolerates the extra argument.
		if pure_go != 0 || strings.HasPrefix(fn.Nname.Sym.Name, "init·") {
			Yyerror("missing function body", fn)
			goto ret
		}

		if Debug['A'] != 0 {
			goto ret
		}
		emitptrargsmap()
		goto ret
	}

	saveerrors()

	// set up domain for labels
	clearlabels()

	if Curfn.Type.Outnamed != 0 {
		// add clearing of the output parameters
		t = Structfirst(&save, Getoutarg(Curfn.Type))

		for t != nil {
			if t.Nname != nil {
				n = Nod(OAS, t.Nname, nil)
				typecheck(&n, Etop)
				Curfn.Nbody = concat(list1(n), Curfn.Nbody)
			}

			t = structnext(&save)
		}
	}

	order(Curfn)
	if nerrors != 0 {
		goto ret
	}

	Hasdefer = 0
	walk(Curfn)
	if nerrors != 0 {
		goto ret
	}
	if flag_race != 0 {
		racewalk(Curfn)
	}
	if nerrors != 0 {
		goto ret
	}

	continpc = nil
	breakpc = nil

	pl = newplist()
	pl.Name = Linksym(Curfn.Nname.Sym)

	setlineno(Curfn)

	Nodconst(&nod1, Types[TINT32], 0)
	nam = Curfn.Nname
	if isblank(nam) {
		nam = nil
	}
	ptxt = Thearch.Gins(obj.ATEXT, nam, &nod1)
	// transfer the function's properties onto the TEXT flags word
	if fn.Dupok != 0 {
		ptxt.From3.Offset |= obj.DUPOK
	}
	if fn.Wrapper != 0 {
		ptxt.From3.Offset |= obj.WRAPPER
	}
	if fn.Needctxt != 0 {
		ptxt.From3.Offset |= obj.NEEDCTXT
	}
	if fn.Nosplit {
		ptxt.From3.Offset |= obj.NOSPLIT
	}

	// Clumsy but important.
	// See test/recover.go for test cases and src/reflect/value.go
	// for the actual functions being considered.
	if myimportpath != "" && myimportpath == "reflect" {
		if Curfn.Nname.Sym.Name == "callReflect" || Curfn.Nname.Sym.Name == "callMethod" {
			ptxt.From3.Offset |= obj.WRAPPER
		}
	}

	Afunclit(&ptxt.From, Curfn.Nname)

	Thearch.Ginit()

	gcargs = makefuncdatasym("gcargs·%d", obj.FUNCDATA_ArgsPointerMaps)
	gclocals = makefuncdatasym("gclocals·%d", obj.FUNCDATA_LocalsPointerMaps)

	for t = Curfn.Paramfld; t != nil; t = t.Down {
		gtrack(tracksym(t.Type))
	}

	// emit ATYPE annotations for the stack variables
	for l = fn.Dcl; l != nil; l = l.Next {
		n = l.N
		if n.Op != ONAME { // might be OTYPE or OLITERAL
			continue
		}
		switch n.Class {
		case PAUTO,
			PPARAM,
			PPARAMOUT:
			Nodconst(&nod1, Types[TUINTPTR], l.N.Type.Width)
			p = Thearch.Gins(obj.ATYPE, l.N, &nod1)
			p.From.Gotype = Linksym(ngotype(l.N))
		}
	}

	Genlist(Curfn.Enter)
	Genlist(Curfn.Nbody)
	Thearch.Gclean()
	checklabels()
	if nerrors != 0 {
		goto ret
	}
	if Curfn.Endlineno != 0 {
		lineno = Curfn.Endlineno
	}

	// falling off the end of a function with results is a throw
	if Curfn.Type.Outtuple != 0 {
		Thearch.Ginscall(throwreturn, 0)
	}

	Thearch.Ginit()

	// TODO: Determine when the final cgen_ret can be omitted. Perhaps always?
	Thearch.Cgen_ret(nil)

	if Hasdefer != 0 {
		// deferreturn pretends to have one uintptr argument.
		// Reserve space for it so stack scanner is happy.
		if Maxarg < int64(Widthptr) {
			Maxarg = int64(Widthptr)
		}
	}

	Thearch.Gclean()
	if nerrors != 0 {
		goto ret
	}

	Pc.As = obj.ARET // overwrite AEND
	Pc.Lineno = lineno

	fixjmp(ptxt)
	if !(Debug['N'] != 0) || Debug['R'] != 0 || Debug['P'] != 0 {
		regopt(ptxt)
		nilopt(ptxt)
	}

	Thearch.Expandchecks(ptxt)

	oldstksize = Stksize
	allocauto(ptxt)
	if false {
		fmt.Printf("allocauto: %d to %d\n", oldstksize, int64(Stksize))
	}

	setlineno(Curfn)
	if int64(Stksize)+Maxarg > 1<<31 {
		Yyerror("stack frame too large (>2GB)")
		goto ret
	}

	// Emit garbage collection symbols.
	liveness(Curfn, ptxt, gcargs, gclocals)

	gcsymdup(gcargs)
	gcsymdup(gclocals)

	Thearch.Defframe(ptxt)
	if Debug['f'] != 0 {
		frame(0)
	}

	// Remove leftover instrumentation from the instruction stream.
	removevardef(ptxt)

ret:
	lineno = lno
}

2018
src/cmd/internal/gc/plive.go Normal file

File diff suppressed because it is too large Load diff

1283
src/cmd/internal/gc/popt.go Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,681 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"fmt"
"strings"
)
// The racewalk pass modifies the code tree for the function as follows:
//
// 1. It inserts a call to racefuncenter at the beginning of each function.
// 2. It inserts a call to racefuncexit at the end of each function.
// 3. It inserts a call to raceread before each memory read.
// 4. It inserts a call to racewrite before each memory write.
//
// The rewriting is not yet complete. Certain nodes are not rewritten
// but should be.
// TODO(dvyukov): do not instrument initialization as writes:
// a := make([]int, 10)
// Do not instrument the following packages at all,
// at best instrumentation would cause infinite recursion.
var omit_pkgs = []string{"runtime", "runtime/race"}
// Only insert racefuncenter/racefuncexit into the following packages.
// Memory accesses in the packages are either uninteresting or will cause false positives.
var noinst_pkgs = []string{"sync", "sync/atomic"}
// ispkgin reports, as 0 or 1, whether the package being compiled is one
// of pkgs (always 0 when the import path is unknown).
func ispkgin(pkgs []string) int {
	if myimportpath != "" {
		for _, p := range pkgs {
			if myimportpath == p {
				return 1
			}
		}
	}
	return 0
}
// isforkfunc reports, as 0 or 1, whether fn must be exempt from race
// instrumentation because it runs in a forked child.
func isforkfunc(fn *Node) int {
	// Special case for syscall.forkAndExecInChild.
	// In the child, this function must not acquire any locks, because
	// they might have been locked at the time of the fork. This means
	// no rescheduling, no malloc calls, and no new stack segments.
	// Race instrumentation does all of the above.
	return bool2int(myimportpath != "" && myimportpath == "syscall" && fn.Nname.Sym.Name == "forkAndExecInChild")
}
// racewalk instruments fn for the race detector.
// Functions in omit_pkgs and syscall.forkAndExecInChild are skipped
// entirely; functions in noinst_pkgs get only the
// racefuncenter/racefuncexit bracketing, not per-access instrumentation.
func racewalk(fn *Node) {
	var nd *Node
	var nodpc *Node
	var s string
	if ispkgin(omit_pkgs) != 0 || isforkfunc(fn) != 0 {
		return
	}
	if !(ispkgin(noinst_pkgs) != 0) {
		// Instrument the body and the exit list.
		racewalklist(fn.Nbody, nil)
		// nothing interesting for race detector in fn->enter
		racewalklist(fn.Exit, nil)
	}
	// nodpc is the PC of the caller as extracted by
	// getcallerpc. We use -widthptr(FP) for x86.
	// BUG: this will not work on arm.
	nodpc = Nod(OXXX, nil, nil)
	*nodpc = *nodfp
	nodpc.Type = Types[TUINTPTR]
	nodpc.Xoffset = int64(-Widthptr)
	// Bracket the function: racefuncenter(pc) first in Enter,
	// racefuncexit() last in Exit.
	nd = mkcall("racefuncenter", nil, nil, nodpc)
	fn.Enter = concat(list1(nd), fn.Enter)
	nd = mkcall("racefuncexit", nil, nil)
	fn.Exit = list(fn.Exit, nd)
	// Debug dump of the instrumented function under -W.
	if Debug['W'] != 0 {
		s = fmt.Sprintf("after racewalk %v", Sconv(fn.Nname.Sym, 0))
		dumplist(s, fn.Nbody)
		s = fmt.Sprintf("enter %v", Sconv(fn.Nname.Sym, 0))
		dumplist(s, fn.Enter)
		s = fmt.Sprintf("exit %v", Sconv(fn.Nname.Sym, 0))
		dumplist(s, fn.Exit)
	}
}
// racewalklist instruments every node in l.
// Instrumentation generated for a node is appended to that node's own
// Ninit list when init is nil; otherwise it accumulates into *init.
func racewalklist(l *NodeList, init **NodeList) {
	for it := l; it != nil; it = it.Next {
		var instr *NodeList
		racewalknode(&it.N, &instr, 0, 0)
		if init != nil {
			*init = concat(*init, instr)
			continue
		}
		it.N.Ninit = concat(it.N.Ninit, instr)
	}
}
// walkexpr and walkstmt combined
// walks the tree and adds calls to the
// instrumentation code to top-level (statement) nodes' init
//
// wr is 1 when the node being visited is the target of a write,
// 0 for a read. skip is passed through to callinstr to suppress
// instrumentation of this particular node.
func racewalknode(np **Node, init **NodeList, wr int, skip int) {
	var n *Node
	var n1 *Node
	var l *NodeList
	var fini *NodeList
	n = *np
	if n == nil {
		return
	}
	if Debug['w'] > 1 {
		Dump("racewalk-before", n)
	}
	setlineno(n)
	if init == nil {
		Fatal("racewalk: bad init list")
	}
	if init == &n.Ninit {
		// If init == &n->ninit and n->ninit is non-nil,
		// racewalknode might append it to itself.
		// nil it out and handle it separately before putting it back.
		l = n.Ninit
		n.Ninit = nil
		racewalklist(l, nil)
		racewalknode(&n, &l, wr, skip) // recurse with nil n->ninit
		appendinit(&n, l)
		*np = n
		return
	}
	racewalklist(n.Ninit, nil)
	switch n.Op {
	default:
		Fatal("racewalk: unknown node type %v", Oconv(int(n.Op), 0))
		fallthrough
	case OAS,
		OAS2FUNC:
		// Assignment: left side is a write, right side a read.
		racewalknode(&n.Left, init, 1, 0)
		racewalknode(&n.Right, init, 0, 0)
		goto ret
	// can't matter
	case OCFUNC,
		OVARKILL:
		goto ret
	case OBLOCK:
		if n.List == nil {
			goto ret
		}
		switch n.List.N.Op {
		// Blocks are used for multiple return function calls.
		// x, y := f() becomes BLOCK{CALL f, AS x [SP+0], AS y [SP+n]}
		// We don't want to instrument between the statements because it will
		// smash the results.
		case OCALLFUNC,
			OCALLMETH,
			OCALLINTER:
			racewalknode(&n.List.N, &n.List.N.Ninit, 0, 0)
			fini = nil
			racewalklist(n.List.Next, &fini)
			n.List = concat(n.List, fini)
		// Ordinary block, for loop initialization or inlined bodies.
		default:
			racewalklist(n.List, nil)
		}
		goto ret
	case ODEFER:
		racewalknode(&n.Left, init, 0, 0)
		goto ret
	case OPROC:
		racewalknode(&n.Left, init, 0, 0)
		goto ret
	case OCALLINTER:
		racewalknode(&n.Left, init, 0, 0)
		goto ret
	// Instrument dst argument of runtime.writebarrier* calls
	// as we do not instrument runtime code.
	// typedslicecopy is instrumented in runtime.
	case OCALLFUNC:
		if n.Left.Sym != nil && n.Left.Sym.Pkg == Runtimepkg && (strings.HasPrefix(n.Left.Sym.Name, "writebarrier") || n.Left.Sym.Name == "typedmemmove") {
			// Find the dst argument.
			// The list can be reordered, so it's not necessary just the first or the second element.
			// typedmemmove's dst is the argument at offset Widthptr;
			// writebarrier*'s dst is the argument at offset 0.
			for l = n.List; l != nil; l = l.Next {
				if n.Left.Sym.Name == "typedmemmove" {
					if l.N.Left.Xoffset == int64(Widthptr) {
						break
					}
				} else {
					if l.N.Left.Xoffset == 0 {
						break
					}
				}
			}
			if l == nil {
				Fatal("racewalk: writebarrier no arg")
			}
			if l.N.Right.Op != OADDR {
				Fatal("racewalk: writebarrier bad arg")
			}
			// Instrument the store target as a write.
			callinstr(&l.N.Right.Left, init, 1, 0)
		}
		racewalknode(&n.Left, init, 0, 0)
		goto ret
	// Unary operators: operand inherits this node's read/write mode.
	case ONOT,
		OMINUS,
		OPLUS,
		OREAL,
		OIMAG,
		OCOM:
		racewalknode(&n.Left, init, wr, 0)
		goto ret
	case ODOTINTER:
		racewalknode(&n.Left, init, 0, 0)
		goto ret
	case ODOT:
		racewalknode(&n.Left, init, 0, 1)
		callinstr(&n, init, wr, skip)
		goto ret
	case ODOTPTR: // dst = (*x).f with implicit *; otherwise it's ODOT+OIND
		racewalknode(&n.Left, init, 0, 0)
		callinstr(&n, init, wr, skip)
		goto ret
	case OIND: // *p
		racewalknode(&n.Left, init, 0, 0)
		callinstr(&n, init, wr, skip)
		goto ret
	case OSPTR,
		OLEN,
		OCAP:
		racewalknode(&n.Left, init, 0, 0)
		if Istype(n.Left.Type, TMAP) != 0 {
			// len/cap of a map reads the map header:
			// synthesize *(*uint8)(m) and instrument it as a read.
			n1 = Nod(OCONVNOP, n.Left, nil)
			n1.Type = Ptrto(Types[TUINT8])
			n1 = Nod(OIND, n1, nil)
			typecheck(&n1, Erv)
			callinstr(&n1, init, 0, skip)
		}
		goto ret
	// Binary operators: both operands inherit this node's mode.
	case OLSH,
		ORSH,
		OLROT,
		OAND,
		OANDNOT,
		OOR,
		OXOR,
		OSUB,
		OMUL,
		OHMUL,
		OEQ,
		ONE,
		OLT,
		OLE,
		OGE,
		OGT,
		OADD,
		OCOMPLEX:
		racewalknode(&n.Left, init, wr, 0)
		racewalknode(&n.Right, init, wr, 0)
		goto ret
	case OANDAND,
		OOROR:
		racewalknode(&n.Left, init, wr, 0)
		// walk has ensured the node has moved to a location where
		// side effects are safe.
		// n->right may not be executed,
		// so instrumentation goes to n->right->ninit, not init.
		racewalknode(&n.Right, &n.Right.Ninit, wr, 0)
		goto ret
	case ONAME:
		callinstr(&n, init, wr, skip)
		goto ret
	case OCONV:
		racewalknode(&n.Left, init, wr, 0)
		goto ret
	case OCONVNOP:
		racewalknode(&n.Left, init, wr, 0)
		goto ret
	case ODIV,
		OMOD:
		racewalknode(&n.Left, init, wr, 0)
		racewalknode(&n.Right, init, wr, 0)
		goto ret
	case OINDEX:
		if !(Isfixedarray(n.Left.Type) != 0) {
			racewalknode(&n.Left, init, 0, 0)
		} else if !(islvalue(n.Left) != 0) {
			// index of unaddressable array, like Map[k][i].
			racewalknode(&n.Left, init, wr, 0)
			racewalknode(&n.Right, init, 0, 0)
			goto ret
		}
		racewalknode(&n.Right, init, 0, 0)
		if n.Left.Type.Etype != TSTRING {
			callinstr(&n, init, wr, skip)
		}
		goto ret
	// Seems to only lead to double instrumentation.
	//racewalknode(&n->left, init, 0, 0);
	case OSLICE,
		OSLICEARR,
		OSLICE3,
		OSLICE3ARR:
		goto ret
	case OADDR:
		racewalknode(&n.Left, init, 0, 1)
		goto ret
	// n->left is Type* which is not interesting.
	case OEFACE:
		racewalknode(&n.Right, init, 0, 0)
		goto ret
	case OITAB:
		racewalknode(&n.Left, init, 0, 0)
		goto ret
	// should not appear in AST by now
	case OSEND,
		ORECV,
		OCLOSE,
		ONEW,
		OXCASE,
		OXFALL,
		OCASE,
		OPANIC,
		ORECOVER,
		OCONVIFACE,
		OCMPIFACE,
		OMAKECHAN,
		OMAKEMAP,
		OMAKESLICE,
		OCALL,
		OCOPY,
		OAPPEND,
		ORUNESTR,
		OARRAYBYTESTR,
		OARRAYRUNESTR,
		OSTRARRAYBYTE,
		OSTRARRAYRUNE,
		OINDEXMAP,
		// lowered to call
		OCMPSTR,
		OADDSTR,
		ODOTTYPE,
		ODOTTYPE2,
		OAS2DOTTYPE,
		OCALLPART,
		// lowered to PTRLIT
		OCLOSURE, // lowered to PTRLIT
		ORANGE,   // lowered to ordinary for loop
		OARRAYLIT, // lowered to assignments
		OMAPLIT,
		OSTRUCTLIT,
		OAS2,
		OAS2RECV,
		OAS2MAPR,
		OASOP:
		Yyerror("racewalk: %v must be lowered by now", Oconv(int(n.Op), 0))
		goto ret
	// impossible nodes: only appear in backend.
	case ORROTC,
		OEXTEND:
		Yyerror("racewalk: %v cannot exist now", Oconv(int(n.Op), 0))
		goto ret
	// just do generic traversal
	case OFOR,
		OIF,
		OCALLMETH,
		ORETURN,
		ORETJMP,
		OSWITCH,
		OSELECT,
		OEMPTY,
		OBREAK,
		OCONTINUE,
		OFALL,
		OGOTO,
		OLABEL:
		goto ret
	// does not require instrumentation
	case OPRINT, // don't bother instrumenting it
		OPRINTN,     // don't bother instrumenting it
		OCHECKNIL,   // always followed by a read.
		OPARAM,      // it appears only in fn->exit to copy heap params back
		OCLOSUREVAR, // immutable pointer to captured variable
		ODOTMETH,    // either part of CALLMETH or CALLPART (lowered to PTRLIT)
		OINDREG,     // at this stage, only n(SP) nodes from nodarg
		ODCL,        // declarations (without value) cannot be races
		ODCLCONST,
		ODCLTYPE,
		OTYPE,
		ONONAME,
		OLITERAL,
		OSLICESTR,
		// always preceded by bounds checking, avoid double instrumentation.
		OTYPESW: // ignored by code generation, do not instrument.
		goto ret
	}
	// Generic traversal of the remaining child links of n.
ret:
	if n.Op != OBLOCK { // OBLOCK is handled above in a special way.
		racewalklist(n.List, init)
	}
	if n.Ntest != nil {
		racewalknode(&n.Ntest, &n.Ntest.Ninit, 0, 0)
	}
	if n.Nincr != nil {
		racewalknode(&n.Nincr, &n.Nincr.Ninit, 0, 0)
	}
	racewalklist(n.Nbody, nil)
	racewalklist(n.Nelse, nil)
	racewalklist(n.Rlist, nil)
	*np = n
}
// isartificial reports (as 0/1) whether n is a compiler-emitted
// artificial node that should not be instrumented: such a node
// can't possibly participate in a data race.
func isartificial(n *Node) int {
	if n.Op != ONAME || n.Sym == nil || n.Sym.Name == "" {
		return 0
	}
	name := n.Sym.Name
	switch {
	case name == "_":
		// The blank identifier.
		return 1
	case strings.HasPrefix(name, "autotmp_"):
		// autotmp's are always local.
		return 1
	case strings.HasPrefix(name, "statictmp_"):
		// statictmp's are read-only.
		return 1
	}
	// go.itab is accessed only by the compiler and runtime (assume safe).
	if n.Sym.Pkg != nil && n.Sym.Pkg.Name == "go.itab" {
		return 1
	}
	return 0
}
// callinstr instruments the single memory access *np, appending a call
// to raceread/racewrite (or the racereadrange/racewriterange variants
// for structs and fixed arrays) to *init. wr selects write (1) vs.
// read (0); skip suppresses instrumentation. It returns 1 if a call
// was emitted, 0 otherwise.
func callinstr(np **Node, init **NodeList, wr int, skip int) int {
	var name string
	var f *Node
	var b *Node
	var n *Node
	var t *Type
	var class int
	var hascalls int
	n = *np
	//print("callinstr for %+N [ %O ] etype=%E class=%d\n",
	//	  n, n->op, n->type ? n->type->etype : -1, n->class);
	if skip != 0 || n.Type == nil || n.Type.Etype >= TIDEAL {
		return 0
	}
	t = n.Type
	if isartificial(n) != 0 {
		return 0
	}
	b = outervalue(n)
	// it skips e.g. stores to ... parameter array
	if isartificial(b) != 0 {
		return 0
	}
	class = int(b.Class)
	// BUG: we _may_ want to instrument PAUTO sometimes
	// e.g. if we've got a local variable/method receiver
	// that has got a pointer inside. Whether it points to
	// the heap or not is impossible to know at compile time
	if (class&PHEAP != 0) || class == PPARAMREF || class == PEXTERN || b.Op == OINDEX || b.Op == ODOTPTR || b.Op == OIND {
		hascalls = 0
		foreach(n, hascallspred, &hascalls)
		if hascalls != 0 {
			// Evaluate the expression's address once into a temp
			// so instrumentation does not re-run embedded calls.
			n = detachexpr(n, init)
			*np = n
		}
		n = treecopy(n)
		makeaddable(n)
		if t.Etype == TSTRUCT || Isfixedarray(t) != 0 {
			// Whole aggregate: report the full [addr, addr+width) range.
			name = "racereadrange"
			if wr != 0 {
				name = "racewriterange"
			}
			f = mkcall(name, nil, init, uintptraddr(n), Nodintconst(t.Width))
		} else {
			name = "raceread"
			if wr != 0 {
				name = "racewrite"
			}
			f = mkcall(name, nil, init, uintptraddr(n))
		}
		*init = list(*init, f)
		return 1
	}
	return 0
}
// makeaddable returns a node whose memory location is the
// same as n, but which is addressable in the Go language
// sense.
// This is different from functions like cheapexpr that may make
// a copy of their argument.
func makeaddable(n *Node) {
	// The arguments to uintptraddr technically have an address but
	// may not be addressable in the Go sense: for example, in the case
	// of T(v).Field where T is a struct type and v is
	// an addressable value.
	switch n.Op {
	case OINDEX:
		// An index into a fixed-size array is addressable when
		// the array itself is; recurse into it.
		if Isfixedarray(n.Left.Type) != 0 {
			makeaddable(n.Left)
		}
	case ODOT, OXDOT:
		// Turn T(v).Field into v.Field.
		if n.Left.Op == OCONVNOP {
			n.Left = n.Left.Left
		}
		makeaddable(n.Left)
	}
	// ODOTPTR and every other op need no rewriting.
}
// uintptraddr builds the expression uintptr(unsafe.Pointer(&n)),
// with bounds checking suppressed on the address operation.
func uintptraddr(n *Node) *Node {
	addr := Nod(OADDR, n, nil)
	addr.Bounded = 1
	return conv(conv(addr, Types[TUNSAFEPTR]), Types[TUINTPTR])
}
// detachexpr evaluates &n once into a fresh pointer temporary
// (appending the assignment to *init) and returns an indirection
// through that temporary, so n's location is computed exactly once.
func detachexpr(n *Node, init **NodeList) *Node {
	addr := Nod(OADDR, n, nil)
	tmp := temp(Ptrto(n.Type))
	assign := Nod(OAS, tmp, addr)
	typecheck(&assign, Etop)
	walkexpr(&assign, init)
	*init = list(*init, assign)
	deref := Nod(OIND, tmp, nil)
	typecheck(&deref, Erv)
	walkexpr(&deref, init)
	return deref
}
// foreachnode applies f(n, c) unless n is nil.
func foreachnode(n *Node, f func(*Node, interface{}), c interface{}) {
	if n == nil {
		return
	}
	f(n, c)
}
// foreachlist applies f to every node of l, via foreachnode.
func foreachlist(l *NodeList, f func(*Node, interface{}), c interface{}) {
	for it := l; it != nil; it = it.Next {
		foreachnode(it.N, f, c)
	}
}
// foreach applies f to each direct child of n: every child node link
// and every node in each child list. It visits only one level; any
// deeper recursion is up to f itself.
func foreach(n *Node, f func(*Node, interface{}), c interface{}) {
	foreachlist(n.Ninit, f, c)
	foreachnode(n.Left, f, c)
	foreachnode(n.Right, f, c)
	foreachlist(n.List, f, c)
	foreachnode(n.Ntest, f, c)
	foreachnode(n.Nincr, f, c)
	foreachlist(n.Nbody, f, c)
	foreachlist(n.Nelse, f, c)
	foreachlist(n.Rlist, f, c)
}
// hascallspred is a foreach callback: it increments the int counter
// pointed to by c whenever n is any kind of call node.
func hascallspred(n *Node, c interface{}) {
	switch n.Op {
	case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER:
		counter := c.(*int)
		*counter++
	}
}
// appendinit is like addinit in subr.c
// but appends rather than prepends.
func appendinit(np **Node, init *NodeList) {
	if init == nil {
		return
	}
	n := *np
	switch n.Op {
	case ONAME, OLITERAL:
		// There may be multiple refs to this node;
		// introduce OCONVNOP to hold init list.
		wrapper := Nod(OCONVNOP, n, nil)
		wrapper.Type = wrapper.Left.Type
		wrapper.Typecheck = 1
		*np = wrapper
		n = wrapper
	}
	n.Ninit = concat(n.Ninit, init)
	n.Ullman = UINF
}

View file

@ -0,0 +1,426 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import "cmd/internal/obj"
/*
* range
*/
// typecheckrange typechecks a range statement n: the range expression,
// the iteration variables, and the loop body, in the careful order
// described below.
func typecheckrange(n *Node) {
	var toomany int
	var why string
	var t *Type
	var t1 *Type
	var t2 *Type
	var v1 *Node
	var v2 *Node
	var ll *NodeList
	// Typechecking order is important here:
	// 0. first typecheck range expression (slice/map/chan),
	//	it is evaluated only once and so logically it is not part of the loop.
	// 1. typcheck produced values,
	//	this part can declare new vars and so it must be typechecked before body,
	//	because body can contain a closure that captures the vars.
	// 2. decldepth++ to denote loop body.
	// 3. typecheck body.
	// 4. decldepth--.
	typecheck(&n.Right, Erv)
	t = n.Right.Type
	if t == nil {
		goto out
	}
	// delicate little dance. see typecheckas2
	for ll = n.List; ll != nil; ll = ll.Next {
		if ll.N.Defn != n {
			typecheck(&ll.N, Erv|Easgn)
		}
	}
	// Ranging over a pointer to a fixed array ranges over the array.
	if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) != 0 {
		t = t.Type
	}
	n.Type = t
	toomany = 0
	// t1/t2 are the types of the first/second iteration variables.
	switch t.Etype {
	default:
		Yyerror("cannot range over %v", Nconv(n.Right, obj.FmtLong))
		goto out
	case TARRAY:
		t1 = Types[TINT]
		t2 = t.Type
	case TMAP:
		t1 = t.Down
		t2 = t.Type
	case TCHAN:
		if !(t.Chan&Crecv != 0) {
			Yyerror("invalid operation: range %v (receive from send-only type %v)", Nconv(n.Right, 0), Tconv(n.Right.Type, 0))
			goto out
		}
		t1 = t.Type
		t2 = nil
		if count(n.List) == 2 {
			toomany = 1
		}
	case TSTRING:
		t1 = Types[TINT]
		t2 = runetype
	}
	if count(n.List) > 2 || toomany != 0 {
		Yyerror("too many variables in range")
	}
	v1 = nil
	if n.List != nil {
		v1 = n.List.N
	}
	v2 = nil
	if n.List != nil && n.List.Next != nil {
		v2 = n.List.Next.N
	}
	// this is not only a optimization but also a requirement in the spec.
	// "if the second iteration variable is the blank identifier, the range
	// clause is equivalent to the same clause with only the first variable
	// present."
	if isblank(v2) {
		if v1 != nil {
			n.List = list1(v1)
		}
		v2 = nil
	}
	if v1 != nil {
		if v1.Defn == n {
			v1.Type = t1
		} else if v1.Type != nil && assignop(t1, v1.Type, &why) == 0 {
			Yyerror("cannot assign type %v to %v in range%s", Tconv(t1, 0), Nconv(v1, obj.FmtLong), why)
		}
		checkassign(n, v1)
	}
	if v2 != nil {
		if v2.Defn == n {
			v2.Type = t2
		} else if v2.Type != nil && assignop(t2, v2.Type, &why) == 0 {
			Yyerror("cannot assign type %v to %v in range%s", Tconv(t2, 0), Nconv(v2, obj.FmtLong), why)
		}
		checkassign(n, v2)
	}
	// second half of dance
out:
	n.Typecheck = 1
	for ll = n.List; ll != nil; ll = ll.Next {
		if ll.N.Typecheck == 0 {
			typecheck(&ll.N, Erv|Easgn)
		}
	}
	decldepth++
	typechecklist(n.Nbody, Etop)
	decldepth--
}
// walkrange lowers a range statement n (ORANGE) into an ordinary OFOR
// loop (or an OIF/memclr for the zeroing special case), depending on
// the type being ranged over.
func walkrange(n *Node) {
	// Redistributed from the converted C source's columnar comments —
	// TODO confirm against the original cmd/gc/range.c layout.
	var ohv1 *Node // hidden (old) val 1
	var hv1 *Node  // hidden val 1
	var hv2 *Node  // hidden val 2
	var ha *Node   // hidden aggregate
	var hit *Node  // hidden iterator
	var hn *Node   // hidden len
	var hp *Node   // hidden pointer
	var hb *Node   // hidden bool
	var a *Node    // not hidden aggregate
	var v1 *Node   // val 1
	var v2 *Node   // val 2
	var fn *Node
	var tmp *Node
	var keyname *Node
	var valname *Node
	var key *Node
	var val *Node
	var body *NodeList
	var init *NodeList
	var th *Type
	var t *Type
	var lno int
	t = n.Type
	init = nil
	a = n.Right
	lno = int(setlineno(a))
	v1 = nil
	if n.List != nil {
		v1 = n.List.N
	}
	v2 = nil
	if n.List != nil && n.List.Next != nil && !isblank(n.List.Next.N) {
		v2 = n.List.Next.N
	}
	// n->list has no meaning anymore, clear it
	// to avoid erroneous processing by racewalk.
	n.List = nil
	hv2 = nil
	switch t.Etype {
	default:
		Fatal("walkrange")
		fallthrough
	// Lower n into runtime·memclr if possible, for
	// fast zeroing of slices and arrays (issue 5373).
	// Look for instances of
	//
	// for i := range a {
	// 	a[i] = zero
	// }
	//
	// in which the evaluation of a is side-effect-free.
	case TARRAY:
		if !(Debug['N'] != 0) {
			if !(flag_race != 0) {
				if v1 != nil {
					if v2 == nil {
						if n.Nbody != nil {
							if n.Nbody.N != nil { // at least one statement in body
								if n.Nbody.Next == nil { // at most one statement in body
									tmp = n.Nbody.N // first statement of body
									if tmp.Op == OAS {
										if tmp.Left.Op == OINDEX {
											if samesafeexpr(tmp.Left.Left, a) != 0 {
												if samesafeexpr(tmp.Left.Right, v1) != 0 {
													if t.Type.Width > 0 {
														if iszero(tmp.Right) != 0 {
															// Convert to
															// if len(a) != 0 {
															// 	hp = &a[0]
															// 	hn = len(a)*sizeof(elem(a))
															// 	memclr(hp, hn)
															// 	i = len(a) - 1
															// }
															n.Op = OIF
															n.Nbody = nil
															n.Ntest = Nod(ONE, Nod(OLEN, a, nil), Nodintconst(0))
															n.Nincr = nil
															// hp = &a[0]
															hp = temp(Ptrto(Types[TUINT8]))
															tmp = Nod(OINDEX, a, Nodintconst(0))
															tmp.Bounded = 1
															tmp = Nod(OADDR, tmp, nil)
															tmp = Nod(OCONVNOP, tmp, nil)
															tmp.Type = Ptrto(Types[TUINT8])
															n.Nbody = list(n.Nbody, Nod(OAS, hp, tmp))
															// hn = len(a) * sizeof(elem(a))
															hn = temp(Types[TUINTPTR])
															tmp = Nod(OLEN, a, nil)
															tmp = Nod(OMUL, tmp, Nodintconst(t.Type.Width))
															tmp = conv(tmp, Types[TUINTPTR])
															n.Nbody = list(n.Nbody, Nod(OAS, hn, tmp))
															// memclr(hp, hn)
															fn = mkcall("memclr", nil, nil, hp, hn)
															n.Nbody = list(n.Nbody, fn)
															// i = len(a) - 1
															v1 = Nod(OAS, v1, Nod(OSUB, Nod(OLEN, a, nil), Nodintconst(1)))
															n.Nbody = list(n.Nbody, v1)
															typecheck(&n.Ntest, Erv)
															typechecklist(n.Nbody, Etop)
															walkstmt(&n)
															lineno = int32(lno)
															return
														}
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
		// orderstmt arranged for a copy of the array/slice variable if needed.
		ha = a
		hv1 = temp(Types[TINT])
		hn = temp(Types[TINT])
		hp = nil
		init = list(init, Nod(OAS, hv1, nil))
		init = list(init, Nod(OAS, hn, Nod(OLEN, ha, nil)))
		if v2 != nil {
			hp = temp(Ptrto(n.Type.Type))
			tmp = Nod(OINDEX, ha, Nodintconst(0))
			tmp.Bounded = 1
			init = list(init, Nod(OAS, hp, Nod(OADDR, tmp, nil)))
		}
		// for hv1 < hn; hv1++ { v1, v2 = hv1, *hp }
		n.Ntest = Nod(OLT, hv1, hn)
		n.Nincr = Nod(OAS, hv1, Nod(OADD, hv1, Nodintconst(1)))
		if v1 == nil {
			body = nil
		} else if v2 == nil {
			body = list1(Nod(OAS, v1, hv1))
		} else {
			a = Nod(OAS2, nil, nil)
			a.List = list(list1(v1), v2)
			a.Rlist = list(list1(hv1), Nod(OIND, hp, nil))
			body = list1(a)
			// Advance pointer as part of increment.
			// We used to advance the pointer before executing the loop body,
			// but doing so would make the pointer point past the end of the
			// array during the final iteration, possibly causing another unrelated
			// piece of memory not to be garbage collected until the loop finished.
			// Advancing during the increment ensures that the pointer p only points
			// pass the end of the array during the final "p++; i++; if(i >= len(x)) break;",
			// after which p is dead, so it cannot confuse the collector.
			tmp = Nod(OADD, hp, Nodintconst(t.Type.Width))
			tmp.Type = hp.Type
			tmp.Typecheck = 1
			tmp.Right.Type = Types[Tptr]
			tmp.Right.Typecheck = 1
			a = Nod(OAS, hp, tmp)
			typecheck(&a, Etop)
			n.Nincr.Ninit = list1(a)
		}
	// orderstmt allocated the iterator for us.
	// we only use a once, so no copy needed.
	case TMAP:
		ha = a
		th = hiter(t)
		hit = n.Alloc
		hit.Type = th
		n.Left = nil
		keyname = newname(th.Type.Sym)      // depends on layout of iterator struct. See reflect.c:hiter
		valname = newname(th.Type.Down.Sym) // ditto
		// mapiterinit(typename(t), ha, &hit); loop while hit.key != nil.
		fn = syslook("mapiterinit", 1)
		argtype(fn, t.Down)
		argtype(fn, t.Type)
		argtype(fn, th)
		init = list(init, mkcall1(fn, nil, nil, typename(t), ha, Nod(OADDR, hit, nil)))
		n.Ntest = Nod(ONE, Nod(ODOT, hit, keyname), nodnil())
		fn = syslook("mapiternext", 1)
		argtype(fn, th)
		n.Nincr = mkcall1(fn, nil, nil, Nod(OADDR, hit, nil))
		key = Nod(ODOT, hit, keyname)
		key = Nod(OIND, key, nil)
		if v1 == nil {
			body = nil
		} else if v2 == nil {
			body = list1(Nod(OAS, v1, key))
		} else {
			val = Nod(ODOT, hit, valname)
			val = Nod(OIND, val, nil)
			a = Nod(OAS2, nil, nil)
			a.List = list(list1(v1), v2)
			a.Rlist = list(list1(key), val)
			body = list1(a)
		}
	// orderstmt arranged for a copy of the channel variable.
	case TCHAN:
		ha = a
		n.Ntest = nil
		hv1 = temp(t.Type)
		hv1.Typecheck = 1
		if haspointers(t.Type) {
			init = list(init, Nod(OAS, hv1, nil))
		}
		hb = temp(Types[TBOOL])
		// loop while hv1, hb = <-ha; hb != false.
		n.Ntest = Nod(ONE, hb, Nodbool(0))
		a = Nod(OAS2RECV, nil, nil)
		a.Typecheck = 1
		a.List = list(list1(hv1), hb)
		a.Rlist = list1(Nod(ORECV, ha, nil))
		n.Ntest.Ninit = list1(a)
		if v1 == nil {
			body = nil
		} else {
			body = list1(Nod(OAS, v1, hv1))
		}
	// orderstmt arranged for a copy of the string variable.
	case TSTRING:
		ha = a
		ohv1 = temp(Types[TINT])
		hv1 = temp(Types[TINT])
		init = list(init, Nod(OAS, hv1, nil))
		// stringiter / stringiter2 return the next index (0 terminates).
		if v2 == nil {
			a = Nod(OAS, hv1, mkcall("stringiter", Types[TINT], nil, ha, hv1))
		} else {
			hv2 = temp(runetype)
			a = Nod(OAS2, nil, nil)
			a.List = list(list1(hv1), hv2)
			fn = syslook("stringiter2", 0)
			a.Rlist = list1(mkcall1(fn, getoutargx(fn.Type), nil, ha, hv1))
		}
		n.Ntest = Nod(ONE, hv1, Nodintconst(0))
		n.Ntest.Ninit = list(list1(Nod(OAS, ohv1, hv1)), a)
		body = nil
		if v1 != nil {
			body = list1(Nod(OAS, v1, ohv1))
		}
		if v2 != nil {
			body = list(body, Nod(OAS, v2, hv2))
		}
	}
	// Common tail: turn n into the assembled OFOR loop and typecheck
	// the pieces built above.
	n.Op = OFOR
	typechecklist(init, Etop)
	n.Ninit = concat(n.Ninit, init)
	typechecklist(n.Ntest.Ninit, Etop)
	typecheck(&n.Ntest, Erv)
	typecheck(&n.Nincr, Etop)
	typechecklist(body, Etop)
	n.Nbody = concat(body, n.Nbody)
	walkstmt(&n)
	lineno = int32(lno)
}

File diff suppressed because it is too large Load diff

1401
src/cmd/internal/gc/reg.go Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,389 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
/*
* select
*/
// typecheckselect typechecks a select statement sel, normalizing each
// case into OSELRECV/OSELRECV2/OSEND form and recording the case
// count in sel.Xoffset.
func typecheckselect(sel *Node) {
	var ncase *Node
	var n *Node
	var def *Node
	var l *NodeList
	var lno int
	var count int
	def = nil
	lno = int(setlineno(sel))
	count = 0
	typechecklist(sel.Ninit, Etop)
	for l = sel.List; l != nil; l = l.Next {
		count++
		ncase = l.N
		setlineno(ncase)
		if ncase.Op != OXCASE {
			Fatal("typecheckselect %v", Oconv(int(ncase.Op), 0))
		}
		if ncase.List == nil {
			// default
			if def != nil {
				Yyerror("multiple defaults in select (first at %v)", def.Line())
			} else {
				def = ncase
			}
		} else if ncase.List.Next != nil {
			Yyerror("select cases cannot be lists")
		} else {
			n = typecheck(&ncase.List.N, Etop)
			ncase.Left = n
			ncase.List = nil
			setlineno(n)
			switch n.Op {
			default:
				Yyerror("select case must be receive, send or assign recv")
			// convert x = <-c into OSELRECV(x, <-c).
			// remove implicit conversions; the eventual assignment
			// will reintroduce them.
			case OAS:
				if (n.Right.Op == OCONVNOP || n.Right.Op == OCONVIFACE) && n.Right.Implicit != 0 {
					n.Right = n.Right.Left
				}
				if n.Right.Op != ORECV {
					Yyerror("select assignment must have receive on right hand side")
					break
				}
				n.Op = OSELRECV
			// convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok
			case OAS2RECV:
				if n.Rlist.N.Op != ORECV {
					Yyerror("select assignment must have receive on right hand side")
					break
				}
				n.Op = OSELRECV2
				n.Left = n.List.N
				n.Ntest = n.List.Next.N
				n.List = nil
				n.Right = n.Rlist.N
				n.Rlist = nil
			// convert <-c into OSELRECV(N, <-c)
			case ORECV:
				n = Nod(OSELRECV, nil, n)
				n.Typecheck = 1
				ncase.Left = n
			case OSEND:
				break
			}
		}
		typechecklist(ncase.Nbody, Etop)
	}
	// Remember the case count for walkselect.
	sel.Xoffset = int64(count)
	lineno = int32(lno)
}
// walkselect lowers a select statement sel into runtime calls:
// zero cases become block(); one case becomes the bare operation;
// two cases with a default become a single non-blocking op; the
// general case builds a select descriptor and registers each case
// with selectsend/selectrecv/selectrecv2/selectdefault before
// running selectgo.
func walkselect(sel *Node) {
	var lno int
	var i int
	var n *Node
	var r *Node
	var a *Node
	var var_ *Node
	var selv *Node
	var cas *Node
	var dflt *Node
	var ch *Node
	var l *NodeList
	var init *NodeList
	if sel.List == nil && sel.Xoffset != 0 {
		Fatal("double walkselect") // already rewrote
	}
	lno = int(setlineno(sel))
	i = count(sel.List)
	// optimization: zero-case select
	if i == 0 {
		sel.Nbody = list1(mkcall("block", nil, nil))
		goto out
	}
	// optimization: one-case select: single op.
	// TODO(rsc): Reenable optimization once order.c can handle it.
	// golang.org/issue/7672.
	if i == 1 {
		cas = sel.List.N
		setlineno(cas)
		l = cas.Ninit
		if cas.Left != nil { // not default:
			n = cas.Left
			l = concat(l, n.Ninit)
			n.Ninit = nil
			switch n.Op {
			default:
				Fatal("select %v", Oconv(int(n.Op), 0))
				fallthrough
			// ok already
			case OSEND:
				ch = n.Left
			case OSELRECV,
				OSELRECV2:
				ch = n.Right.Left
				if n.Op == OSELRECV || n.Ntest == nil {
					if n.Left == nil {
						n = n.Right
					} else {
						n.Op = OAS
					}
					break
				}
				// Rewrite the two-result receive as an ordinary OAS2.
				if n.Left == nil {
					typecheck(&nblank, Erv|Easgn)
					n.Left = nblank
				}
				n.Op = OAS2
				n.List = list(list1(n.Left), n.Ntest)
				n.Rlist = list1(n.Right)
				n.Right = nil
				n.Left = nil
				n.Ntest = nil
				n.Typecheck = 0
				typecheck(&n, Etop)
			}
			// if ch == nil { block() }; n;
			a = Nod(OIF, nil, nil)
			a.Ntest = Nod(OEQ, ch, nodnil())
			a.Nbody = list1(mkcall("block", nil, &l))
			typecheck(&a, Etop)
			l = list(l, a)
			l = list(l, n)
		}
		l = concat(l, cas.Nbody)
		sel.Nbody = l
		goto out
	}
	// convert case value arguments to addresses.
	// this rewrite is used by both the general code and the next optimization.
	for l = sel.List; l != nil; l = l.Next {
		cas = l.N
		setlineno(cas)
		n = cas.Left
		if n == nil {
			continue
		}
		switch n.Op {
		case OSEND:
			n.Right = Nod(OADDR, n.Right, nil)
			typecheck(&n.Right, Erv)
		case OSELRECV,
			OSELRECV2:
			if n.Op == OSELRECV2 && n.Ntest == nil {
				n.Op = OSELRECV
			}
			if n.Op == OSELRECV2 {
				n.Ntest = Nod(OADDR, n.Ntest, nil)
				typecheck(&n.Ntest, Erv)
			}
			if n.Left == nil {
				n.Left = nodnil()
			} else {
				n.Left = Nod(OADDR, n.Left, nil)
				typecheck(&n.Left, Erv)
			}
		}
	}
	// optimization: two-case select but one is default: single non-blocking op.
	if i == 2 && (sel.List.N.Left == nil || sel.List.Next.N.Left == nil) {
		if sel.List.N.Left == nil {
			cas = sel.List.Next.N
			dflt = sel.List.N
		} else {
			dflt = sel.List.Next.N
			cas = sel.List.N
		}
		n = cas.Left
		setlineno(n)
		r = Nod(OIF, nil, nil)
		r.Ninit = cas.Ninit
		switch n.Op {
		default:
			Fatal("select %v", Oconv(int(n.Op), 0))
			fallthrough
		// if selectnbsend(c, v) { body } else { default body }
		case OSEND:
			ch = n.Left
			r.Ntest = mkcall1(chanfn("selectnbsend", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), ch, n.Right)
		// if c != nil && selectnbrecv(&v, c) { body } else { default body }
		case OSELRECV:
			r = Nod(OIF, nil, nil)
			r.Ninit = cas.Ninit
			ch = n.Right.Left
			r.Ntest = mkcall1(chanfn("selectnbrecv", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), n.Left, ch)
		// if c != nil && selectnbrecv2(&v, c) { body } else { default body }
		case OSELRECV2:
			r = Nod(OIF, nil, nil)
			r.Ninit = cas.Ninit
			ch = n.Right.Left
			r.Ntest = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), n.Left, n.Ntest, ch)
		}
		typecheck(&r.Ntest, Erv)
		r.Nbody = cas.Nbody
		r.Nelse = concat(dflt.Ninit, dflt.Nbody)
		sel.Nbody = list1(r)
		goto out
	}
	init = sel.Ninit
	sel.Ninit = nil
	// generate sel-struct
	setlineno(sel)
	selv = temp(selecttype(int32(sel.Xoffset)))
	r = Nod(OAS, selv, nil)
	typecheck(&r, Etop)
	init = list(init, r)
	var_ = conv(conv(Nod(OADDR, selv, nil), Types[TUNSAFEPTR]), Ptrto(Types[TUINT8]))
	r = mkcall("newselect", nil, nil, var_, Nodintconst(selv.Type.Width), Nodintconst(sel.Xoffset))
	typecheck(&r, Etop)
	init = list(init, r)
	// register cases
	for l = sel.List; l != nil; l = l.Next {
		cas = l.N
		setlineno(cas)
		n = cas.Left
		r = Nod(OIF, nil, nil)
		r.Ninit = cas.Ninit
		cas.Ninit = nil
		if n != nil {
			r.Ninit = concat(r.Ninit, n.Ninit)
			n.Ninit = nil
		}
		if n == nil {
			// selectdefault(sel *byte);
			r.Ntest = mkcall("selectdefault", Types[TBOOL], &r.Ninit, var_)
		} else {
			switch n.Op {
			default:
				Fatal("select %v", Oconv(int(n.Op), 0))
				fallthrough
			// selectsend(sel *byte, hchan *chan any, elem *any) (selected bool);
			case OSEND:
				r.Ntest = mkcall1(chanfn("selectsend", 2, n.Left.Type), Types[TBOOL], &r.Ninit, var_, n.Left, n.Right)
			// selectrecv(sel *byte, hchan *chan any, elem *any) (selected bool);
			case OSELRECV:
				r.Ntest = mkcall1(chanfn("selectrecv", 2, n.Right.Left.Type), Types[TBOOL], &r.Ninit, var_, n.Right.Left, n.Left)
			// selectrecv2(sel *byte, hchan *chan any, elem *any, received *bool) (selected bool);
			case OSELRECV2:
				r.Ntest = mkcall1(chanfn("selectrecv2", 2, n.Right.Left.Type), Types[TBOOL], &r.Ninit, var_, n.Right.Left, n.Left, n.Ntest)
			}
		}
		// selv is no longer alive after use.
		r.Nbody = list(r.Nbody, Nod(OVARKILL, selv, nil))
		r.Nbody = concat(r.Nbody, cas.Nbody)
		r.Nbody = list(r.Nbody, Nod(OBREAK, nil, nil))
		init = list(init, r)
	}
	// run the select
	setlineno(sel)
	init = list(init, mkcall("selectgo", nil, nil, var_))
	sel.Nbody = init
out:
	sel.List = nil
	walkstmtlist(sel.Nbody)
	lineno = int32(lno)
}
// Keep in sync with src/runtime/chan.h.
// selecttype builds the pseudo-struct type used to hold a select
// descriptor with size cases.
func selecttype(size int32) *Type {
	// TODO(dvyukov): it's possible to generate SudoG and Scase only once
	// and then cache; and also cache Select per size.

	// Helpers for assembling the struct literals below. Each call
	// produces a fresh node, exactly as the open-coded original did.
	bytePtr := func() *Node { return typenod(Ptrto(Types[TUINT8])) }
	addfield := func(st *Node, name string, typ *Node) {
		st.List = list(st.List, Nod(ODCLFIELD, newname(Lookup(name)), typ))
	}

	// NOTE(review): sudog is built and typechecked but not referenced
	// by scase or sel below — presumably kept only to stay in sync with
	// the runtime; confirm before removing.
	sudog := Nod(OTSTRUCT, nil, nil)
	addfield(sudog, "g", bytePtr())
	addfield(sudog, "selectdone", bytePtr())
	addfield(sudog, "next", bytePtr())
	addfield(sudog, "prev", bytePtr())
	addfield(sudog, "elem", bytePtr())
	addfield(sudog, "releasetime", typenod(Types[TUINT64]))
	addfield(sudog, "nrelease", typenod(Types[TINT32]))
	addfield(sudog, "waitlink", bytePtr())
	typecheck(&sudog, Etype)
	sudog.Type.Noalg = 1
	sudog.Type.Local = 1

	scase := Nod(OTSTRUCT, nil, nil)
	addfield(scase, "elem", bytePtr())
	addfield(scase, "chan", bytePtr())
	addfield(scase, "pc", typenod(Types[TUINTPTR]))
	addfield(scase, "kind", typenod(Types[TUINT16]))
	addfield(scase, "so", typenod(Types[TUINT16]))
	addfield(scase, "receivedp", bytePtr())
	addfield(scase, "releasetime", typenod(Types[TUINT64]))
	typecheck(&scase, Etype)
	scase.Type.Noalg = 1
	scase.Type.Local = 1

	// The select header, followed by size-dependent trailing arrays.
	sel := Nod(OTSTRUCT, nil, nil)
	addfield(sel, "tcase", typenod(Types[TUINT16]))
	addfield(sel, "ncase", typenod(Types[TUINT16]))
	addfield(sel, "pollorder", bytePtr())
	addfield(sel, "lockorder", bytePtr())
	addfield(sel, "scase", Nod(OTARRAY, Nodintconst(int64(size)), scase))
	addfield(sel, "lockorderarr", Nod(OTARRAY, Nodintconst(int64(size)), bytePtr()))
	addfield(sel, "pollorderarr", Nod(OTARRAY, Nodintconst(int64(size)), typenod(Types[TUINT16])))
	typecheck(&sel, Etype)
	sel.Type.Noalg = 1
	sel.Type.Local = 1
	return sel.Type
}

1602
src/cmd/internal/gc/sinit.go Normal file

File diff suppressed because it is too large Load diff

3932
src/cmd/internal/gc/subr.go Normal file

File diff suppressed because it is too large Load diff

1028
src/cmd/internal/gc/swt.go Normal file

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,178 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import "cmd/internal/obj"
/*
* look for
* unsafe.Sizeof
* unsafe.Offsetof
* unsafe.Alignof
* rewrite with a constant
*/
// unsafenmagic recognizes a call nn to unsafe.Sizeof, unsafe.Offsetof,
// or unsafe.Alignof and rewrites it into an OLITERAL uintptr constant.
// It returns nil if nn is not such a call.
func unsafenmagic(nn *Node) *Node {
	var r *Node
	var n *Node
	var base *Node
	var r1 *Node
	var s *Sym
	var t *Type
	var tr *Type
	var v int64
	var val Val
	var fn *Node
	var args *NodeList
	fn = nn.Left
	args = nn.List
	if safemode != 0 || fn == nil || fn.Op != ONAME {
		goto no
	}
	s = fn.Sym
	if s == nil {
		goto no
	}
	if s.Pkg != unsafepkg {
		goto no
	}
	if args == nil {
		Yyerror("missing argument for %v", Sconv(s, 0))
		goto no
	}
	r = args.N
	if s.Name == "Sizeof" {
		// Sizeof: width of the argument's type.
		typecheck(&r, Erv)
		defaultlit(&r, nil)
		tr = r.Type
		if tr == nil {
			goto bad
		}
		dowidth(tr)
		v = tr.Width
		goto yes
	}
	if s.Name == "Offsetof" {
		// must be a selector.
		if r.Op != OXDOT {
			goto bad
		}
		// Remember base of selector to find it back after dot insertion.
		// Since r->left may be mutated by typechecking, check it explicitly
		// first to track it correctly.
		typecheck(&r.Left, Erv)
		base = r.Left
		typecheck(&r, Erv)
		switch r.Op {
		case ODOT,
			ODOTPTR:
			break
		case OCALLPART:
			Yyerror("invalid expression %v: argument is a method value", Nconv(nn, 0))
			v = 0
			goto ret
		default:
			goto bad
		}
		v = 0
		// add offsets for inserted dots.
		for r1 = r; r1.Left != base; r1 = r1.Left {
			switch r1.Op {
			case ODOT:
				v += r1.Xoffset
			case ODOTPTR:
				Yyerror("invalid expression %v: selector implies indirection of embedded %v", Nconv(nn, 0), Nconv(r1.Left, 0))
				goto ret
			default:
				Dump("unsafenmagic", r)
				Fatal("impossible %v node after dot insertion", Oconv(int(r1.Op), obj.FmtSharp))
				goto bad
			}
		}
		v += r1.Xoffset
		goto yes
	}
	if s.Name == "Alignof" {
		typecheck(&r, Erv)
		defaultlit(&r, nil)
		tr = r.Type
		if tr == nil {
			goto bad
		}
		// make struct { byte; T; }
		t = typ(TSTRUCT)
		t.Type = typ(TFIELD)
		t.Type.Type = Types[TUINT8]
		t.Type.Down = typ(TFIELD)
		t.Type.Down.Type = tr
		// compute struct widths
		dowidth(t)
		// the offset of T is its required alignment
		v = t.Type.Down.Width
		goto yes
	}
no:
	return nil
bad:
	Yyerror("invalid expression %v", Nconv(nn, 0))
	v = 0
	goto ret
yes:
	if args.Next != nil {
		Yyerror("extra arguments for %v", Sconv(s, 0))
	}
	// any side effects disappear; ignore init
ret:
	// Build the replacement uintptr constant literal.
	val.Ctype = CTINT
	val.U.Xval = new(Mpint)
	Mpmovecfix(val.U.Xval, v)
	n = Nod(OLITERAL, nil, nil)
	n.Orig = nn
	n.Val = val
	n.Type = Types[TUINTPTR]
	nn.Type = Types[TUINTPTR]
	return n
}
// isunsafebuiltin reports, as a C-style 0/1 int, whether n is a direct
// reference to one of the magic builtins of package unsafe:
// Sizeof, Offsetof, or Alignof.
func isunsafebuiltin(n *Node) int {
	if n == nil || n.Op != ONAME || n.Sym == nil || n.Sym.Pkg != unsafepkg {
		return 0
	}
	switch n.Sym.Name {
	case "Sizeof", "Offsetof", "Alignof":
		return 1
	}
	return 0
}

View file

@ -0,0 +1,70 @@
package gc
import (
"cmd/internal/obj"
"strconv"
"strings"
)
// bool2int converts a boolean to its C-style integer value:
// 1 for true, 0 for false.
func bool2int(b bool) int {
	v := 0
	if b {
		v = 1
	}
	return v
}
// Line returns the source position of n as a string, formatted by the
// linker's line-history machinery (obj.Linklinefmt) using the global Ctxt.
func (n *Node) Line() string {
	return obj.Linklinefmt(Ctxt, int(n.Lineno), false, false)
}
// atoi converts s to an int on a best-effort basis.
// NOTE: Not strconv.Atoi: base 0 additionally accepts hex ("0x...") and
// octal ("0...") prefixes, and a parse failure deliberately yields 0.
func atoi(s string) int {
	v, _ := strconv.ParseInt(s, 0, 0)
	return int(v)
}
func isalnum(c int) bool {
return isalpha(c) || isdigit(c)
}
// isalpha reports whether c is an ASCII letter (A-Z or a-z).
func isalpha(c int) bool {
	switch {
	case 'a' <= c && c <= 'z':
		return true
	case 'A' <= c && c <= 'Z':
		return true
	}
	return false
}
// isdigit reports whether c is an ASCII decimal digit (0-9).
func isdigit(c int) bool {
	return c >= '0' && c <= '9'
}
// plan9quote returns s quoted in Plan 9 rc style if necessary: an empty
// string, or one containing a control character, space, or single quote,
// is wrapped in single quotes with embedded quotes doubled. Otherwise
// s is returned unchanged.
func plan9quote(s string) string {
	needquote := s == ""
	for i := 0; !needquote && i < len(s); i++ {
		if s[i] <= ' ' || s[i] == '\'' {
			needquote = true
		}
	}
	if !needquote {
		return s
	}
	return "'" + strings.Replace(s, "'", "''", -1) + "'"
}
// intstarstringplusplus simulates the C idiom int(*s++): it returns the
// first byte of s as an int together with the remainder of s, or (0, "")
// when s is empty.
func intstarstringplusplus(s string) (int, string) {
	if len(s) == 0 {
		return 0, ""
	}
	return int(s[0]), s[1:]
}
// stringsCompare is a local copy of strings.Compare (which was only
// introduced in Go 1.5): it returns -1 if a < b, +1 if a > b, and 0
// if the strings are equal.
func stringsCompare(a, b string) int {
	switch {
	case a < b:
		return -1
	case a > b:
		return +1
	}
	return 0
}

4531
src/cmd/internal/gc/walk.go Normal file

File diff suppressed because it is too large Load diff

3524
src/cmd/internal/gc/y.go Normal file

File diff suppressed because it is too large Load diff

10411
src/cmd/internal/gc/y.output Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,45 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package obj
// Inferno utils/include/ar.h
// http://code.google.com/p/inferno-os/source/browse/utils/include/ar.h
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// ar_hdr is the member header of a Unix/Plan 9 archive (ar) file,
// mirroring the C struct from the Inferno utils/include/ar.h referenced
// above. Each field holds the raw fixed-width ASCII text read from the
// archive, not a parsed value.
type ar_hdr struct {
	Name string // member file name
	Date string // modification date text
	Uid  string // user id text
	Gid  string // group id text
	Mode string // file mode text
	Size string // member size text
	Fmag string // trailing header magic
}

View file

@ -87,6 +87,7 @@ const (
REGEXT = REG_R10 REGEXT = REG_R10
REGG = REGEXT - 0 REGG = REGEXT - 0
REGM = REGEXT - 1 REGM = REGEXT - 1
REGCTXT = REG_R7
REGTMP = REG_R11 REGTMP = REG_R11
REGSP = REG_R13 REGSP = REG_R13
REGLINK = REG_R14 REGLINK = REG_R14

View file

@ -87,6 +87,8 @@ var optab = []Optab{
Optab{ABL, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, Optab{ABL, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0},
Optab{ABX, C_NONE, C_NONE, C_SBRA, 74, 20, 0, 0, 0}, Optab{ABX, C_NONE, C_NONE, C_SBRA, 74, 20, 0, 0, 0},
Optab{ABEQ, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, Optab{ABEQ, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0},
Optab{ABEQ, C_RCON, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // prediction hinted form, hint ignored
Optab{AB, C_NONE, C_NONE, C_ROREG, 6, 4, 0, LPOOL, 0}, Optab{AB, C_NONE, C_NONE, C_ROREG, 6, 4, 0, LPOOL, 0},
Optab{ABL, C_NONE, C_NONE, C_ROREG, 7, 4, 0, 0, 0}, Optab{ABL, C_NONE, C_NONE, C_ROREG, 7, 4, 0, 0, 0},
Optab{ABL, C_REG, C_NONE, C_ROREG, 7, 4, 0, 0, 0}, Optab{ABL, C_REG, C_NONE, C_ROREG, 7, 4, 0, 0, 0},
@ -272,12 +274,6 @@ var xcmp [C_GOK + 1][C_GOK + 1]uint8
var deferreturn *obj.LSym var deferreturn *obj.LSym
func nocache(p *obj.Prog) {
p.Optab = 0
p.From.Class = 0
p.To.Class = 0
}
/* size of a case statement including jump table */ /* size of a case statement including jump table */
func casesz(ctxt *obj.Link, p *obj.Prog) int32 { func casesz(ctxt *obj.Link, p *obj.Prog) int32 {
var jt int = 0 var jt int = 0
@ -341,7 +337,6 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
case APLD: case APLD:
out[0] = 0xe1a01001 // (MOVW R1, R1) out[0] = 0xe1a01001 // (MOVW R1, R1)
break
} }
} }
@ -452,8 +447,8 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
} else { } else {
a2 = &q.From a2 = &q.From
} }
nocache(q) obj.Nocache(q)
nocache(p) obj.Nocache(p)
// insert q after p // insert q after p
q.Link = p.Link q.Link = p.Link
@ -526,8 +521,8 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
} else { } else {
a2 = &q.From a2 = &q.From
} }
nocache(q) obj.Nocache(q)
nocache(p) obj.Nocache(p)
// insert q after p // insert q after p
q.Link = p.Link q.Link = p.Link
@ -557,7 +552,6 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
} else if out != nil { } else if out != nil {
asmout(ctxt, p, o, out) asmout(ctxt, p, o, out)
} }
break
} }
// destination register specific // destination register specific
@ -574,7 +568,6 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
p.Pc += 4 p.Pc += 4
} }
size += 4 size += 4
break
} }
} }
@ -667,7 +660,6 @@ func span5(ctxt *obj.Link, cursym *obj.LSym) {
if p.Scond&C_SCOND == C_SCOND_NONE { if p.Scond&C_SCOND == C_SCOND_NONE {
flushpool(ctxt, p, 0, 0) flushpool(ctxt, p, 0, 0)
} }
break
} }
if p.As == AMOVW && p.To.Type == obj.TYPE_REG && p.To.Reg == REGPC && p.Scond&C_SCOND == C_SCOND_NONE { if p.As == AMOVW && p.To.Type == obj.TYPE_REG && p.To.Reg == REGPC && p.Scond&C_SCOND == C_SCOND_NONE {
@ -936,7 +928,6 @@ func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
C_LACON: C_LACON:
t.To.Type = obj.TYPE_CONST t.To.Type = obj.TYPE_CONST
t.To.Offset = ctxt.Instoffset t.To.Offset = ctxt.Instoffset
break
} }
if t.Pcrel == nil { if t.Pcrel == nil {
@ -1719,8 +1710,11 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
// runtime.tlsg is special. // runtime.tlsg is special.
// Its "address" is the offset from the TLS thread pointer // Its "address" is the offset from the TLS thread pointer
// to the thread-local g and m pointers. // to the thread-local g and m pointers.
// Emit a TLS relocation instead of a standard one. // Emit a TLS relocation instead of a standard one if its
if rel.Sym == ctxt.Tlsg { // type is not explicitly set by runtime. This assumes that
// all references to runtime.tlsg should be accompanied with
// its type declaration if necessary.
if rel.Sym == ctxt.Tlsg && ctxt.Tlsg.Type == 0 {
rel.Type = obj.R_TLS rel.Type = obj.R_TLS
if ctxt.Flag_shared != 0 { if ctxt.Flag_shared != 0 {
rel.Add += ctxt.Pc - p.Pcrel.Pc - 8 - int64(rel.Siz) rel.Add += ctxt.Pc - p.Pcrel.Pc - 8 - int64(rel.Siz)
@ -1932,7 +1926,6 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o1 |= uint32(p.To.Offset & 0xffff) o1 |= uint32(p.To.Offset & 0xffff)
o1 |= (uint32(p.From.Reg) & 15) << 16 o1 |= (uint32(p.From.Reg) & 15) << 16
aclass(ctxt, &p.From) aclass(ctxt, &p.From)
break
} }
if ctxt.Instoffset != 0 { if ctxt.Instoffset != 0 {
@ -2473,7 +2466,6 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if p.As == ADATABUNDLE { if p.As == ADATABUNDLE {
o1 = 0xe125be70 o1 = 0xe125be70
} }
break
} }
out[0] = o1 out[0] = o1

View file

@ -189,7 +189,7 @@ func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
str = fmt.Sprintf("$%.17g", a.U.Dval) str = fmt.Sprintf("$%.17g", a.U.Dval)
case obj.TYPE_SCONST: case obj.TYPE_SCONST:
str = fmt.Sprintf("$\"%q\"", a.U.Sval) str = fmt.Sprintf("$%q", a.U.Sval)
case obj.TYPE_REGREG: case obj.TYPE_REGREG:
str = fmt.Sprintf("(%v, %v)", Rconv(int(a.Reg)), Rconv(int(a.Offset))) str = fmt.Sprintf("(%v, %v)", Rconv(int(a.Reg)), Rconv(int(a.Offset)))
@ -318,7 +318,6 @@ func Mconv(a *obj.Addr) string {
case obj.NAME_PARAM: case obj.NAME_PARAM:
str = fmt.Sprintf("%s+%d(FP)", s.Name, int(a.Offset)) str = fmt.Sprintf("%s+%d(FP)", s.Name, int(a.Offset))
break
} }
out: out:

View file

@ -38,10 +38,11 @@ import (
"math" "math"
) )
var progedit_tlsfallback *obj.LSym
func progedit(ctxt *obj.Link, p *obj.Prog) { func progedit(ctxt *obj.Link, p *obj.Prog) {
var literal string var literal string
var s *obj.LSym var s *obj.LSym
var tlsfallback *obj.LSym
p.From.Class = 0 p.From.Class = 0
p.To.Class = 0 p.To.Class = 0
@ -55,7 +56,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil { if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
} }
break
} }
// Replace TLS register fetches on older ARM procesors. // Replace TLS register fetches on older ARM procesors.
@ -71,8 +71,8 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
if ctxt.Goarm < 7 { if ctxt.Goarm < 7 {
// Replace it with BL runtime.read_tls_fallback(SB) for ARM CPUs that lack the tls extension. // Replace it with BL runtime.read_tls_fallback(SB) for ARM CPUs that lack the tls extension.
if tlsfallback == nil { if progedit_tlsfallback == nil {
tlsfallback = obj.Linklookup(ctxt, "runtime.read_tls_fallback", 0) progedit_tlsfallback = obj.Linklookup(ctxt, "runtime.read_tls_fallback", 0)
} }
// MOVW LR, R11 // MOVW LR, R11
@ -88,7 +88,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
p.As = ABL p.As = ABL
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
p.To.Sym = tlsfallback p.To.Sym = progedit_tlsfallback
p.To.Offset = 0 p.To.Offset = 0
// MOVW R11, LR // MOVW R11, LR
@ -105,8 +105,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
// Otherwise, MRC/MCR instructions need no further treatment. // Otherwise, MRC/MCR instructions need no further treatment.
p.As = AWORD p.As = AWORD
break
} }
// Rewrite float constants to values stored in memory. // Rewrite float constants to values stored in memory.
@ -148,8 +146,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
p.From.Name = obj.NAME_EXTERN p.From.Name = obj.NAME_EXTERN
p.From.Offset = 0 p.From.Offset = 0
} }
break
} }
if ctxt.Flag_shared != 0 { if ctxt.Flag_shared != 0 {
@ -191,12 +187,6 @@ func linkcase(casep *obj.Prog) {
} }
} }
func nocache5(p *obj.Prog) {
p.Optab = 0
p.From.Class = 0
p.To.Class = 0
}
func preprocess(ctxt *obj.Link, cursym *obj.LSym) { func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
var p *obj.Prog var p *obj.Prog
var pl *obj.Prog var pl *obj.Prog
@ -363,8 +353,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
p.Pcond = q1 p.Pcond = q1
} }
} }
break
} }
q = p q = p
@ -503,7 +491,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
} }
case obj.ARET: case obj.ARET:
nocache5(p) obj.Nocache(p)
if cursym.Text.Mark&LEAF != 0 { if cursym.Text.Mark&LEAF != 0 {
if !(autosize != 0) { if !(autosize != 0) {
p.As = AB p.As = AB
@ -609,7 +597,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
case AMODU: case AMODU:
p.To.Sym = ctxt.Sym_modu p.To.Sym = ctxt.Sym_modu
break
} }
/* MOV REGTMP, b */ /* MOV REGTMP, b */
@ -671,7 +658,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
if p.From.Type == obj.TYPE_ADDR && p.From.Reg == REGSP && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP { if p.From.Type == obj.TYPE_ADDR && p.From.Reg == REGSP && p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP {
p.Spadj = int32(-p.From.Offset) p.Spadj = int32(-p.From.Offset)
} }
break
} }
} }
} }
@ -1076,6 +1062,8 @@ loop:
} }
var Linkarm = obj.LinkArch{ var Linkarm = obj.LinkArch{
Dconv: Dconv,
Rconv: Rconv,
ByteOrder: binary.LittleEndian, ByteOrder: binary.LittleEndian,
Pconv: Pconv, Pconv: Pconv,
Name: "arm", Name: "arm",

View file

@ -142,7 +142,7 @@ func Setuintxx(ctxt *Link, s *LSym, off int64, v uint64, wid int64) int64 {
return off + wid return off + wid
} }
func adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 { func Adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
var off int64 var off int64
off = s.Size off = s.Size
@ -150,23 +150,23 @@ func adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
return off return off
} }
func adduint8(ctxt *Link, s *LSym, v uint8) int64 { func Adduint8(ctxt *Link, s *LSym, v uint8) int64 {
return adduintxx(ctxt, s, uint64(v), 1) return Adduintxx(ctxt, s, uint64(v), 1)
} }
func adduint16(ctxt *Link, s *LSym, v uint16) int64 { func Adduint16(ctxt *Link, s *LSym, v uint16) int64 {
return adduintxx(ctxt, s, uint64(v), 2) return Adduintxx(ctxt, s, uint64(v), 2)
} }
func Adduint32(ctxt *Link, s *LSym, v uint32) int64 { func Adduint32(ctxt *Link, s *LSym, v uint32) int64 {
return adduintxx(ctxt, s, uint64(v), 4) return Adduintxx(ctxt, s, uint64(v), 4)
} }
func Adduint64(ctxt *Link, s *LSym, v uint64) int64 { func Adduint64(ctxt *Link, s *LSym, v uint64) int64 {
return adduintxx(ctxt, s, v, 8) return Adduintxx(ctxt, s, v, 8)
} }
func setuint8(ctxt *Link, s *LSym, r int64, v uint8) int64 { func Setuint8(ctxt *Link, s *LSym, r int64, v uint8) int64 {
return Setuintxx(ctxt, s, r, uint64(v), 1) return Setuintxx(ctxt, s, r, uint64(v), 1)
} }
@ -174,7 +174,7 @@ func setuint16(ctxt *Link, s *LSym, r int64, v uint16) int64 {
return Setuintxx(ctxt, s, r, uint64(v), 2) return Setuintxx(ctxt, s, r, uint64(v), 2)
} }
func setuint32(ctxt *Link, s *LSym, r int64, v uint32) int64 { func Setuint32(ctxt *Link, s *LSym, r int64, v uint32) int64 {
return Setuintxx(ctxt, s, r, uint64(v), 4) return Setuintxx(ctxt, s, r, uint64(v), 4)
} }
@ -182,7 +182,7 @@ func setuint64(ctxt *Link, s *LSym, r int64, v uint64) int64 {
return Setuintxx(ctxt, s, r, v, 8) return Setuintxx(ctxt, s, r, v, 8)
} }
func addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 { func Addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
var i int64 var i int64
var r *Reloc var r *Reloc
@ -222,11 +222,11 @@ func addpcrelplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
return i + int64(r.Siz) return i + int64(r.Siz)
} }
func addaddr(ctxt *Link, s *LSym, t *LSym) int64 { func Addaddr(ctxt *Link, s *LSym, t *LSym) int64 {
return addaddrplus(ctxt, s, t, 0) return Addaddrplus(ctxt, s, t, 0)
} }
func setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 { func Setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
var r *Reloc var r *Reloc
if s.Type == 0 { if s.Type == 0 {
@ -247,11 +247,11 @@ func setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
return off + int64(r.Siz) return off + int64(r.Siz)
} }
func setaddr(ctxt *Link, s *LSym, off int64, t *LSym) int64 { func Setaddr(ctxt *Link, s *LSym, off int64, t *LSym) int64 {
return setaddrplus(ctxt, s, off, t, 0) return Setaddrplus(ctxt, s, off, t, 0)
} }
func addsize(ctxt *Link, s *LSym, t *LSym) int64 { func Addsize(ctxt *Link, s *LSym, t *LSym) int64 {
var i int64 var i int64
var r *Reloc var r *Reloc
@ -270,7 +270,7 @@ func addsize(ctxt *Link, s *LSym, t *LSym) int64 {
return i + int64(r.Siz) return i + int64(r.Siz)
} }
func addaddrplus4(ctxt *Link, s *LSym, t *LSym, add int64) int64 { func Addaddrplus4(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
var i int64 var i int64
var r *Reloc var r *Reloc

View file

@ -0,0 +1,120 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package obj
import (
"flag"
"fmt"
"os"
"strconv"
)
// Flagfn2 would register a flag invoking a two-argument callback.
// It is not implemented in the Go translation and panics if called.
func Flagfn2(string, string, func(string, string)) { panic("flag") }

// Flagcount defines a counting flag backed by *val: a bare -name
// increments the count, -name=N sets it (see the count type below).
func Flagcount(name, usage string, val *int) {
	flag.Var((*count)(val), name, usage)
}

// Flagint32 defines an int32 flag backed by *val (see int32Value below).
func Flagint32(name, usage string, val *int32) {
	flag.Var((*int32Value)(val), name, usage)
}

// Flagint64 defines an int64 flag backed by *val; *val supplies the default.
func Flagint64(name, usage string, val *int64) {
	flag.Int64Var(val, name, *val, usage)
}

// Flagstr defines a string flag backed by *val; *val supplies the default.
func Flagstr(name, usage string, val *string) {
	flag.StringVar(val, name, *val, usage)
}

// Flagfn0 defines a flag that invokes f when the flag is set (no value).
func Flagfn0(name, usage string, f func()) {
	flag.Var(fn0(f), name, usage)
}

// Flagfn1 defines a flag that invokes f with the flag's string value.
func Flagfn1(name, usage string, f func(string)) {
	flag.Var(fn1(f), name, usage)
}

// Flagprint prints the registered flags' defaults to standard output
// when fd is 1; otherwise they go to the flag package's default output.
func Flagprint(fd int) {
	if fd == 1 {
		flag.CommandLine.SetOutput(os.Stdout)
	}
	flag.PrintDefaults()
}

// Flagparse installs usage as the usage handler and parses the
// command line.
func Flagparse(usage func()) {
	flag.Usage = usage
	flag.Parse()
}
// count is a flag.Value that behaves like a flag.Bool and a flag.Int
// combined: a bare -name increments the count, -name=false resets it
// to zero, and -name=N sets it to N. It backs the verbose flag -v.
type count int

// String implements flag.Value.
func (c *count) String() string {
	return strconv.Itoa(int(*c))
}

// Set implements flag.Value, accepting "true", "false", or an integer.
func (c *count) Set(s string) error {
	if s == "true" {
		*c++
		return nil
	}
	if s == "false" {
		*c = 0
		return nil
	}
	n, err := strconv.Atoi(s)
	if err != nil {
		return fmt.Errorf("invalid count %q", s)
	}
	*c = count(n)
	return nil
}

// IsBoolFlag marks the flag as boolean, so it may appear without "=value".
func (c *count) IsBoolFlag() bool {
	return true
}
// int32Value adapts an *int32 to the flag.Value interface. Input is
// parsed with strconv.ParseInt base 0, so decimal, hex (0x), and octal
// (0) forms are all accepted.
type int32Value int32

// newIntValue stores val in *p and returns p viewed as an *int32Value.
func newIntValue(val int32, p *int32) *int32Value {
	*p = val
	return (*int32Value)(p)
}

// Set implements flag.Value.
func (i *int32Value) Set(s string) error {
	n, err := strconv.ParseInt(s, 0, 64)
	*i = int32Value(n)
	return err
}

// Get implements flag.Getter, reporting the value as a plain int32.
func (i *int32Value) Get() interface{} { return int32(*i) }

// String implements flag.Value.
func (i *int32Value) String() string { return fmt.Sprintf("%v", *i) }
// fn0 adapts a no-argument function to the flag.Value interface,
// so that passing -name invokes f.
type fn0 func()

// Set implements flag.Value; the provided value is ignored.
func (f fn0) Set(s string) error {
	f()
	return nil
}

// Get implements flag.Getter; fn0 carries no value to report.
func (f fn0) Get() interface{} { return nil }

// String implements flag.Value.
func (f fn0) String() string { return "" }

// IsBoolFlag marks the flag as boolean, so it may appear without "=value".
func (f fn0) IsBoolFlag() bool {
	return true
}

// fn1 adapts a one-argument function to the flag.Value interface,
// so that -name=x invokes f(x).
type fn1 func(string)

// Set implements flag.Value by passing the flag's value to f.
func (f fn1) Set(s string) error {
	f(s)
	return nil
}

// String implements flag.Value.
func (f fn1) String() string { return "" }

View file

@ -14,28 +14,6 @@
package obj package obj
// (The comments in this file were copied from the manpage files rune.3,
// isalpharune.3, and runestrcat.3. Some formatting changes were also made
// to conform to Google style. /JRM 11/11/05)
type Fmt struct {
runes uint8
start interface{}
to interface{}
stop interface{}
flush func(*Fmt) int
farg interface{}
nfmt int
args []interface{}
r uint
width int
prec int
flags uint32
decimal string
thousands string
grouping string
}
const ( const (
FmtWidth = 1 FmtWidth = 1
FmtLeft = FmtWidth << 1 FmtLeft = FmtWidth << 1
@ -54,7 +32,3 @@ const (
FmtLDouble = FmtByte << 1 FmtLDouble = FmtByte << 1
FmtFlag = FmtLDouble << 1 FmtFlag = FmtLDouble << 1
) )
var fmtdoquote func(int) int
/* Edit .+1,/^$/ | cfn $PLAN9/src/lib9/fmt/?*.c | grep -v static |grep -v __ */

View file

@ -15,7 +15,7 @@ import (
var Framepointer_enabled int var Framepointer_enabled int
var fieldtrack_enabled int var Fieldtrack_enabled int
var Zprog Prog var Zprog Prog
@ -30,7 +30,7 @@ var exper = []struct {
struct { struct {
name string name string
val *int val *int
}{"fieldtrack", &fieldtrack_enabled}, }{"fieldtrack", &Fieldtrack_enabled},
struct { struct {
name string name string
val *int val *int
@ -61,7 +61,79 @@ func linksetexp() {
} }
} }
func expstring() string { // replace all "". with pkg.
func Expandpkg(t0 string, pkg string) string {
return strings.Replace(t0, `"".`, pkg+".", -1)
}
func double2ieee(ieee *uint64, f float64) {
*ieee = math.Float64bits(f)
}
func Nopout(p *Prog) {
p.As = ANOP
p.Scond = Zprog.Scond
p.From = Zprog.From
p.From3 = Zprog.From3
p.Reg = Zprog.Reg
p.To = Zprog.To
}
func Nocache(p *Prog) {
p.Optab = 0
p.From.Class = 0
p.From3.Class = 0
p.To.Class = 0
}
/*
* bv.c
*/
/*
* closure.c
*/
/*
* const.c
*/
/*
* cplx.c
*/
/*
* dcl.c
*/
/*
* esc.c
*/
/*
* export.c
*/
/*
* fmt.c
*/
/*
* gen.c
*/
/*
* init.c
*/
/*
* inl.c
*/
/*
* lex.c
*/
func Expstring() string {
buf := "X" buf := "X"
for i := range exper { for i := range exper {
if *exper[i].val != 0 { if *exper[i].val != 0 {
@ -73,12 +145,3 @@ func expstring() string {
} }
return "X:" + buf[2:] return "X:" + buf[2:]
} }
// replace all "". with pkg.
func expandpkg(t0 string, pkg string) string {
return strings.Replace(t0, `"".`, pkg+".", -1)
}
func double2ieee(ieee *uint64, f float64) {
*ieee = math.Float64bits(f)
}

View file

@ -590,4 +590,5 @@ const (
FREGRET = REG_F0 FREGRET = REG_F0
REGSP = REG_SP REGSP = REG_SP
REGTMP = REG_DI REGTMP = REG_DI
REGCTXT = REG_DX
) )

View file

@ -2237,7 +2237,6 @@ func asmidx(ctxt *obj.Link, scale int, index int, base int) {
REG_SI, REG_SI,
REG_DI: REG_DI:
i = reg[index] << 3 i = reg[index] << 3
break
} }
switch scale { switch scale {
@ -2255,7 +2254,6 @@ func asmidx(ctxt *obj.Link, scale int, index int, base int) {
case 8: case 8:
i |= 3 << 6 i |= 3 << 6
break
} }
bas: bas:
@ -2275,7 +2273,6 @@ bas:
REG_SI, REG_SI,
REG_DI: REG_DI:
i |= reg[base] i |= reg[base]
break
} }
ctxt.Andptr[0] = byte(i) ctxt.Andptr[0] = byte(i)
@ -2404,7 +2401,6 @@ func asmand(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int) {
case obj.NAME_AUTO, case obj.NAME_AUTO,
obj.NAME_PARAM: obj.NAME_PARAM:
base = REG_SP base = REG_SP
break
} }
if base == REG_NONE { if base == REG_NONE {
@ -2446,7 +2442,6 @@ func asmand(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int) {
case obj.NAME_AUTO, case obj.NAME_AUTO,
obj.NAME_PARAM: obj.NAME_PARAM:
base = REG_SP base = REG_SP
break
} }
if base == REG_TLS { if base == REG_TLS {
@ -3166,7 +3161,6 @@ func byteswapreg(ctxt *obj.Link, a *obj.Addr) int {
REG_DL, REG_DL,
REG_DH: REG_DH:
cand = 0 cand = 0
break
} }
} }
@ -3183,7 +3177,6 @@ func byteswapreg(ctxt *obj.Link, a *obj.Addr) int {
case REG_DX: case REG_DX:
cand = 0 cand = 0
break
} }
} }
@ -3259,7 +3252,6 @@ func mediaop(ctxt *obj.Link, o *Optab, op int, osize int, z int) int {
ctxt.Andptr[0] = Pm ctxt.Andptr[0] = Pm
ctxt.Andptr = ctxt.Andptr[1:] ctxt.Andptr = ctxt.Andptr[1:]
} }
break
} }
ctxt.Andptr[0] = byte(op) ctxt.Andptr[0] = byte(op)
@ -3900,7 +3892,6 @@ mfound:
ctxt.Andptr = ctxt.Andptr[1:] ctxt.Andptr = ctxt.Andptr[1:]
ctxt.Andptr[0] = 0xb5 ctxt.Andptr[0] = 0xb5
ctxt.Andptr = ctxt.Andptr[1:] ctxt.Andptr = ctxt.Andptr[1:]
break
} }
asmand(ctxt, p, &p.From, reg[p.To.Reg]) asmand(ctxt, p, &p.From, reg[p.To.Reg])
@ -3931,10 +3922,7 @@ mfound:
ctxt.Andptr[0] = t[5] ctxt.Andptr[0] = t[5]
ctxt.Andptr = ctxt.Andptr[1:] ctxt.Andptr = ctxt.Andptr[1:]
asmand(ctxt, p, &p.To, reg[p.From.Index]) asmand(ctxt, p, &p.To, reg[p.From.Index])
break
} }
break
} }
case 7: /* imul rm,r */ case 7: /* imul rm,r */
@ -4004,10 +3992,7 @@ mfound:
ctxt.Andptr[0] = 0x8B ctxt.Andptr[0] = 0x8B
ctxt.Andptr = ctxt.Andptr[1:] ctxt.Andptr = ctxt.Andptr[1:]
asmand(ctxt, p, &pp.From, reg[p.To.Reg]) asmand(ctxt, p, &pp.From, reg[p.To.Reg])
break
} }
break
} }
} }

View file

@ -59,13 +59,13 @@ func Pconv(p *obj.Prog) string {
default: default:
str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), Dconv(p, 0, &p.To)) str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
// TODO(rsc): This special case is for SHRQ $32, AX:DX, which encodes as // TODO(rsc): This special case is for SHRQ $32, AX:DX, which encodes as
// SHRQ $32(DX*0), AX // SHRQ $32(DX*0), AX
// Remove. // Remove.
if (p.From.Type == obj.TYPE_REG || p.From.Type == obj.TYPE_CONST) && p.From.Index != 0 { if (p.From.Type == obj.TYPE_REG || p.From.Type == obj.TYPE_CONST) && p.From.Index != 0 {
str += fmt.Sprintf(":%s", Rconv(int(p.From.Index))) str += fmt.Sprintf(":%v", Rconv(int(p.From.Index)))
} }
break
} }
fp += str fp += str
@ -145,7 +145,6 @@ func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
} else { } else {
str = fmt.Sprintf("%d(FP)", a.Offset) str = fmt.Sprintf("%d(FP)", a.Offset)
} }
break
} }
if a.Index != REG_NONE { if a.Index != REG_NONE {
@ -167,13 +166,12 @@ func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
str = fmt.Sprintf("$(%.17g)", a.U.Dval) str = fmt.Sprintf("$(%.17g)", a.U.Dval)
case obj.TYPE_SCONST: case obj.TYPE_SCONST:
str = fmt.Sprintf("$\"%q\"", a.U.Sval) str = fmt.Sprintf("$%q", a.U.Sval)
case obj.TYPE_ADDR: case obj.TYPE_ADDR:
a.Type = obj.TYPE_MEM a.Type = obj.TYPE_MEM
str = fmt.Sprintf("$%v", Dconv(p, 0, a)) str = fmt.Sprintf("$%v", Dconv(p, 0, a))
a.Type = obj.TYPE_ADDR a.Type = obj.TYPE_ADDR
break
} }
fp += str fp += str

View file

@ -124,7 +124,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil { if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
} }
break
} }
// Rewrite float constants to values stored in memory. // Rewrite float constants to values stored in memory.
@ -224,8 +223,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
p.From.Sym = s p.From.Sym = s
p.From.Offset = 0 p.From.Offset = 0
} }
break
} }
} }
@ -912,6 +909,8 @@ loop:
} }
var Link386 = obj.LinkArch{ var Link386 = obj.LinkArch{
Dconv: Dconv,
Rconv: Rconv,
ByteOrder: binary.LittleEndian, ByteOrder: binary.LittleEndian,
Pconv: Pconv, Pconv: Pconv,
Name: "386", Name: "386",

View file

@ -4,10 +4,6 @@
package i386 package i386
const (
fmtLong = 1 << iota
)
func bool2int(b bool) int { func bool2int(b bool) int {
if b { if b {
return 1 return 1

View file

@ -72,7 +72,7 @@ func addlib(ctxt *Link, src, obj, pathname string) {
if ctxt.Debugvlog > 1 && ctxt.Bso != nil { if ctxt.Debugvlog > 1 && ctxt.Bso != nil {
fmt.Fprintf(ctxt.Bso, "%5.2f addlib: %s %s pulls in %s\n", Cputime(), obj, src, pname) fmt.Fprintf(ctxt.Bso, "%5.2f addlib: %s %s pulls in %s\n", Cputime(), obj, src, pname)
} }
addlibpath(ctxt, src, obj, pname, name) Addlibpath(ctxt, src, obj, pname, name)
} }
/* /*
@ -82,7 +82,7 @@ func addlib(ctxt *Link, src, obj, pathname string) {
* file: object file, e.g., /home/rsc/go/pkg/container/vector.a * file: object file, e.g., /home/rsc/go/pkg/container/vector.a
* pkg: package import path, e.g. container/vector * pkg: package import path, e.g. container/vector
*/ */
func addlibpath(ctxt *Link, srcref, objref, file, pkg string) { func Addlibpath(ctxt *Link, srcref, objref, file, pkg string) {
for _, lib := range ctxt.Library { for _, lib := range ctxt.Library {
if lib.File == file { if lib.File == file {
return return

View file

@ -0,0 +1,20 @@
package obj
// AEXIST is a compatibility constant carried over from the Plan 9 C
// sources, retained for the c2go-converted code that still references it.
const (
	AEXIST = 0
)

// GOEXPERIMENT records the experiment string the toolchain was built
// with; consulted by the experiment handling in this package.
var GOEXPERIMENT string

// Plan 9 libc compatibility constants (open modes, signal numbers,
// floating-point control values, and the Unicode byte-order mark),
// retained verbatim from the converted C sources.
const (
	OREAD = iota
	OWRITE
	ORDWR
	SIGBUS
	SIGSEGV
	NDFLT
	FPPDBL
	FPRNR
	HEADER_IO
	BOM = 0xFEFF
)

View file

@ -241,6 +241,8 @@ type Plist struct {
type LinkArch struct { type LinkArch struct {
Pconv func(*Prog) string Pconv func(*Prog) string
Dconv func(*Prog, int, *Addr) string
Rconv func(int) string
ByteOrder binary.ByteOrder ByteOrder binary.ByteOrder
Name string Name string
Thechar int Thechar int
@ -278,14 +280,14 @@ type Pcdata struct {
} }
type Pciter struct { type Pciter struct {
d Pcdata D Pcdata
p []byte P []byte
pc uint32 Pc uint32
nextpc uint32 Nextpc uint32
pcscale uint32 Pcscale uint32
value int32 Value int32
start int Start int
done int Done int
} }
// An Addr is an argument to an instruction. // An Addr is an argument to an instruction.

View file

@ -0,0 +1,37 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package obj
// Garbage collector liveness bitmap generation.
// The command line flag -live causes this code to print debug information.
// The levels are:
//
// -live (aka -live=1): print liveness lists as code warnings at safe points
// -live=2: print an assembly listing with liveness annotations
// -live=3: print information during each computation phase (much chattier)
//
// Each level includes the earlier output as well.
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Used by cmd/gc.
// Constants describing the garbage collector's pointer/liveness bitmaps
// and the gcprog instruction stream. Per the comment above, these are
// used by cmd/gc and must stay in sync with it.
const (
	GcBits         = 4
	BitsPerPointer = 2
	BitsDead       = 0
	BitsScalar     = 1
	BitsPointer    = 2
	BitsMask       = 3

	PointersPerByte = 8 / BitsPerPointer

	// gcprog instruction opcodes. The "1 + iota - 7" is a c2go artifact:
	// iota is 7 at this spec, so InsData = 1, InsArray = 2,
	// InsArrayEnd = 3, InsEnd = 4.
	InsData = 1 + iota - 7
	InsArray
	InsArrayEnd
	InsEnd

	MaxGCMask = 65536
)

View file

@ -316,45 +316,45 @@ func getvarint(pp *[]byte) uint32 {
return v return v
} }
func pciternext(it *Pciter) { func Pciternext(it *Pciter) {
var v uint32 var v uint32
var dv int32 var dv int32
it.pc = it.nextpc it.Pc = it.Nextpc
if it.done != 0 { if it.Done != 0 {
return return
} }
if -cap(it.p) >= -cap(it.d.P[len(it.d.P):]) { if -cap(it.P) >= -cap(it.D.P[len(it.D.P):]) {
it.done = 1 it.Done = 1
return return
} }
// value delta // value delta
v = getvarint(&it.p) v = getvarint(&it.P)
if v == 0 && !(it.start != 0) { if v == 0 && !(it.Start != 0) {
it.done = 1 it.Done = 1
return return
} }
it.start = 0 it.Start = 0
dv = int32(v>>1) ^ (int32(v<<31) >> 31) dv = int32(v>>1) ^ (int32(v<<31) >> 31)
it.value += dv it.Value += dv
// pc delta // pc delta
v = getvarint(&it.p) v = getvarint(&it.P)
it.nextpc = it.pc + v*it.pcscale it.Nextpc = it.Pc + v*it.Pcscale
} }
func pciterinit(ctxt *Link, it *Pciter, d *Pcdata) { func Pciterinit(ctxt *Link, it *Pciter, d *Pcdata) {
it.d = *d it.D = *d
it.p = it.d.P it.P = it.D.P
it.pc = 0 it.Pc = 0
it.nextpc = 0 it.Nextpc = 0
it.value = -1 it.Value = -1
it.start = 1 it.Start = 1
it.done = 0 it.Done = 0
it.pcscale = uint32(ctxt.Arch.Minlc) it.Pcscale = uint32(ctxt.Arch.Minlc)
pciternext(it) Pciternext(it)
} }

View file

@ -134,7 +134,7 @@ const (
REGRT1 = REG_R3 REGRT1 = REG_R3
REGRT2 = REG_R4 REGRT2 = REG_R4
REGMIN = REG_R7 REGMIN = REG_R7
REGENV = REG_R11 REGCTXT = REG_R11
REGTLS = REG_R13 REGTLS = REG_R13
REGMAX = REG_R27 REGMAX = REG_R27
REGEXT = REG_R30 REGEXT = REG_R30

View file

@ -2443,7 +2443,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
//if(dlm) reloc(&p->from, p->pc, 1); //if(dlm) reloc(&p->from, p->pc, 1);
break
} }
out[0] = o1 out[0] = o1

View file

@ -181,8 +181,7 @@ func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
str = fmt.Sprintf("$%.17g", a.U.Dval) str = fmt.Sprintf("$%.17g", a.U.Dval)
case obj.TYPE_SCONST: case obj.TYPE_SCONST:
str = fmt.Sprintf("$\"%q\"", a.U.Sval) str = fmt.Sprintf("$%q", a.U.Sval)
break
} }
fp += str fp += str
@ -241,7 +240,6 @@ func Mconv(a *obj.Addr) string {
} else { } else {
str = fmt.Sprintf("%s+%d(FP)", s.Name, a.Offset) str = fmt.Sprintf("%s+%d(FP)", s.Name, a.Offset)
} }
break
} }
//out: //out:

View file

@ -53,7 +53,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
if p.To.Sym != nil { if p.To.Sym != nil {
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
} }
break
} }
// Rewrite float constants to values stored in memory. // Rewrite float constants to values stored in memory.
@ -118,8 +117,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
p.From.Offset = -p.From.Offset p.From.Offset = -p.From.Offset
p.As = AADD p.As = AADD
} }
break
} }
} }
@ -596,7 +593,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST { if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST {
p.Spadj = int32(-p.From.Offset) p.Spadj = int32(-p.From.Offset)
} }
break
} }
} }
} }
@ -969,6 +965,8 @@ loop:
} }
var Linkppc64 = obj.LinkArch{ var Linkppc64 = obj.LinkArch{
Dconv: Dconv,
Rconv: Rconv,
ByteOrder: binary.BigEndian, ByteOrder: binary.BigEndian,
Pconv: Pconv, Pconv: Pconv,
Name: "ppc64", Name: "ppc64",
@ -984,6 +982,8 @@ var Linkppc64 = obj.LinkArch{
} }
var Linkppc64le = obj.LinkArch{ var Linkppc64le = obj.LinkArch{
Dconv: Dconv,
Rconv: Rconv,
ByteOrder: binary.LittleEndian, ByteOrder: binary.LittleEndian,
Pconv: Pconv, Pconv: Pconv,
Name: "ppc64le", Name: "ppc64le",

View file

@ -4,10 +4,6 @@
package ppc64 package ppc64
const (
fmtLong = 1 << iota
)
func bool2int(b bool) int { func bool2int(b bool) int {
if b { if b {
return 1 return 1

View file

@ -40,7 +40,8 @@ package obj
// TODO(rsc): Share Go definitions with linkers directly. // TODO(rsc): Share Go definitions with linkers directly.
const ( const (
StackSystem = 0 STACKSYSTEM = 0
StackSystem = STACKSYSTEM
StackBig = 4096 StackBig = 4096
StackGuard = 640 + StackSystem StackGuard = 640 + StackSystem
StackSmall = 128 StackSmall = 128

View file

@ -100,7 +100,7 @@ var headers = []struct {
}{"windowsgui", Hwindows}, }{"windowsgui", Hwindows},
} }
func headtype(name string) int { func Headtype(name string) int {
var i int var i int
for i = 0; i < len(headers); i++ { for i = 0; i < len(headers); i++ {
@ -111,8 +111,9 @@ func headtype(name string) int {
return -1 return -1
} }
var headstr_buf string
func Headstr(v int) string { func Headstr(v int) string {
var buf string
var i int var i int
for i = 0; i < len(headers); i++ { for i = 0; i < len(headers); i++ {
@ -120,8 +121,8 @@ func Headstr(v int) string {
return headers[i].name return headers[i].name
} }
} }
buf = fmt.Sprintf("%d", v) headstr_buf = fmt.Sprintf("%d", v)
return buf return headstr_buf
} }
func Linknew(arch *LinkArch) *Link { func Linknew(arch *LinkArch) *Link {
@ -145,7 +146,7 @@ func Linknew(arch *LinkArch) *Link {
ctxt.Pathname = buf ctxt.Pathname = buf
ctxt.Headtype = headtype(Getgoos()) ctxt.Headtype = Headtype(Getgoos())
if ctxt.Headtype < 0 { if ctxt.Headtype < 0 {
log.Fatalf("unknown goos %s", Getgoos()) log.Fatalf("unknown goos %s", Getgoos())
} }
@ -178,15 +179,14 @@ func Linknew(arch *LinkArch) *Link {
default: default:
log.Fatalf("unknown thread-local storage offset for nacl/%s", ctxt.Arch.Name) log.Fatalf("unknown thread-local storage offset for nacl/%s", ctxt.Arch.Name)
case '5':
ctxt.Tlsoffset = 0
case '6': case '6':
ctxt.Tlsoffset = 0 ctxt.Tlsoffset = 0
case '8': case '8':
ctxt.Tlsoffset = -8 ctxt.Tlsoffset = -8
case '5':
ctxt.Tlsoffset = 0
break
} }
/* /*
@ -203,10 +203,10 @@ func Linknew(arch *LinkArch) *Link {
case '8': case '8':
ctxt.Tlsoffset = 0x468 ctxt.Tlsoffset = 0x468
break
}
break case '5':
ctxt.Tlsoffset = 0 // dummy value, not needed
}
} }
// On arm, record goarm. // On arm, record goarm.
@ -222,7 +222,7 @@ func Linknew(arch *LinkArch) *Link {
return ctxt return ctxt
} }
func linknewsym(ctxt *Link, symb string, v int) *LSym { func Linknewsym(ctxt *Link, symb string, v int) *LSym {
var s *LSym var s *LSym
s = new(LSym) s = new(LSym)
@ -265,7 +265,7 @@ func _lookup(ctxt *Link, symb string, v int, creat int) *LSym {
return nil return nil
} }
s = linknewsym(ctxt, symb, v) s = Linknewsym(ctxt, symb, v)
s.Extname = s.Name s.Extname = s.Name
s.Hash = ctxt.Hash[h] s.Hash = ctxt.Hash[h]
ctxt.Hash[h] = s ctxt.Hash[h] = s
@ -278,6 +278,13 @@ func Linklookup(ctxt *Link, name string, v int) *LSym {
} }
// read-only lookup // read-only lookup
func linkrlookup(ctxt *Link, name string, v int) *LSym { func Linkrlookup(ctxt *Link, name string, v int) *LSym {
return _lookup(ctxt, name, v, 0) return _lookup(ctxt, name, v, 0)
} }
// Linksymfmt returns a printable name for the symbol s,
// or the placeholder "<nil>" when s is nil.
func Linksymfmt(s *LSym) string {
	if s != nil {
		return s.Name
	}
	return "<nil>"
}

View file

@ -0,0 +1,45 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package obj
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Must match runtime and reflect.
// Included by cmd/gc.
const (
	// Type kinds, one per Go type shape; KindBool starts at 1 so the
	// zero value is an invalid kind.
	// NOTE(review): the comment above this block says these must match
	// package reflect and the runtime — verify against reflect.Kind
	// before renumbering.
	KindBool = 1 + iota
	KindInt
	KindInt8
	KindInt16
	KindInt32
	KindInt64
	KindUint
	KindUint8
	KindUint16
	KindUint32
	KindUint64
	KindUintptr
	KindFloat32
	KindFloat64
	KindComplex64
	KindComplex128
	KindArray
	KindChan
	KindFunc
	KindInterface
	KindMap
	KindPtr
	KindSlice
	KindString
	KindStruct
	KindUnsafePointer

	// Flag bits OR'd into the kind byte; KindMask extracts the kind itself.
	KindDirectIface = 1 << 5
	KindGCProg      = 1 << 6
	KindNoPointers  = 1 << 7
	KindMask        = (1 << 5) - 1
)

View file

@ -8,6 +8,7 @@ import (
"bufio" "bufio"
"fmt" "fmt"
"io" "io"
"log"
"os" "os"
"strconv" "strconv"
"time" "time"
@ -23,21 +24,30 @@ func Cputime() float64 {
} }
type Biobuf struct { type Biobuf struct {
unget int unget [2]int
haveUnget bool numUnget int
f *os.File f *os.File
r *bufio.Reader r *bufio.Reader
w *bufio.Writer w *bufio.Writer
linelen int
} }
func Bopenw(name string) (*Biobuf, error) { func Bopenw(name string) (*Biobuf, error) {
f, err := os.Open(name) f, err := os.Create(name)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &Biobuf{f: f, w: bufio.NewWriter(f)}, nil return &Biobuf{f: f, w: bufio.NewWriter(f)}, nil
} }
// Bopenr opens the named file and wraps it in a Biobuf for
// buffered reading.
func Bopenr(name string) (*Biobuf, error) {
	f, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	b := &Biobuf{f: f, r: bufio.NewReader(f)}
	return b, nil
}
func Binitw(w io.Writer) *Biobuf { func Binitw(w io.Writer) *Biobuf {
return &Biobuf{w: bufio.NewWriter(w)} return &Biobuf{w: bufio.NewWriter(w)}
} }
@ -46,6 +56,41 @@ func (b *Biobuf) Write(p []byte) (int, error) {
return b.w.Write(p) return b.w.Write(p)
} }
func Bwritestring(b *Biobuf, p string) (int, error) {
return b.w.WriteString(p)
}
// Bseek repositions b's underlying file to offset, interpreted
// according to whence (as in os.File.Seek), and returns the new
// offset. For a writer it first flushes pending output; for a reader
// with whence==1 (seek-relative) it adjusts offset by the number of
// buffered-but-unconsumed bytes so the seek is relative to the
// logical read position, then resets the read buffer.
// Any failure is fatal.
func Bseek(b *Biobuf, offset int64, whence int) int64 {
	if b.w != nil {
		if err := b.w.Flush(); err != nil {
			// was log.Fatal with a %v verb, which printed the verb
			// literally; Fatalf actually formats the error.
			log.Fatalf("writing output: %v", err)
		}
	} else if b.r != nil {
		if whence == 1 {
			offset -= int64(b.r.Buffered())
		}
	}
	off, err := b.f.Seek(offset, whence)
	if err != nil {
		log.Fatalf("seeking in output: %v", err)
	}
	if b.r != nil {
		b.r.Reset(b.f)
	}
	return off
}
// Boffset returns the current file offset of b after flushing any
// pending buffered output. Any failure is fatal.
// NOTE(review): this assumes b was opened for writing (b.w non-nil);
// calling it on a read-only Biobuf would dereference nil — confirm
// callers before relying on it for readers.
func Boffset(b *Biobuf) int64 {
	if err := b.w.Flush(); err != nil {
		// was log.Fatal with a %v verb, which printed the verb
		// literally; Fatalf actually formats the error.
		log.Fatalf("writing output: %v", err)
	}
	off, err := b.f.Seek(0, 1)
	if err != nil {
		log.Fatalf("seeking in output: %v", err)
	}
	return off
}
func (b *Biobuf) Flush() error { func (b *Biobuf) Flush() error {
return b.w.Flush() return b.w.Flush()
} }
@ -58,26 +103,86 @@ func Bputc(b *Biobuf, c byte) {
b.w.WriteByte(c) b.w.WriteByte(c)
} }
const Beof = -1
// Bread reads len(p) bytes into p and returns the count actually
// read. It returns 0 at clean EOF and -1 when no bytes were read
// because of a non-EOF error; a partial read returns its length.
func Bread(b *Biobuf, p []byte) int {
	n, err := io.ReadFull(b.r, p)
	if n == 0 && err != nil && err != io.EOF {
		return -1
	}
	return n
}
func Bgetc(b *Biobuf) int { func Bgetc(b *Biobuf) int {
if b.haveUnget { if b.numUnget > 0 {
b.haveUnget = false b.numUnget--
return int(b.unget) return int(b.unget[b.numUnget])
} }
c, err := b.r.ReadByte() c, err := b.r.ReadByte()
r := int(c)
if err != nil {
r = -1
}
b.unget[1] = b.unget[0]
b.unget[0] = r
return r
}
func Bgetrune(b *Biobuf) int {
r, _, err := b.r.ReadRune()
if err != nil { if err != nil {
b.unget = -1
return -1 return -1
} }
b.unget = int(c) return int(r)
return int(c) }
func Bungetrune(b *Biobuf) {
b.r.UnreadRune()
}
func (b *Biobuf) Read(p []byte) (int, error) {
return b.r.Read(p)
}
// Brdline reads up to and including the next delim byte, records the
// chunk length (retrievable via Blinelen), and returns the chunk.
// A read failure is fatal.
func Brdline(b *Biobuf, delim int) string {
	line, err := b.r.ReadBytes(byte(delim))
	if err != nil {
		log.Fatalf("reading input: %v", err)
	}
	b.linelen = len(line)
	return string(line)
}
// Brdstr reads up to and including the next delim byte and returns
// the result as a string. When cut is nonzero the trailing delimiter
// is stripped. A read failure is fatal.
func Brdstr(b *Biobuf, delim int, cut int) string {
	s, err := b.r.ReadString(byte(delim))
	if err != nil {
		log.Fatalf("reading input: %v", err)
	}
	if cut > 0 && len(s) > 0 {
		s = s[:len(s)-1]
	}
	return s
}
// Access reports whether the named file exists, mimicking the C
// access(2) helper: it returns 0 on success and -1 on failure.
// Only mode 0 (existence check) is supported; any other mode is a
// programmer error and panics.
func Access(name string, mode int) int {
	if mode != 0 {
		panic("bad access")
	}
	if _, err := os.Stat(name); err != nil {
		return -1
	}
	return 0
}
func Blinelen(b *Biobuf) int {
return b.linelen
} }
func Bungetc(b *Biobuf) { func Bungetc(b *Biobuf) {
b.haveUnget = true b.numUnget++
}
func Boffset(b *Biobuf) int64 {
panic("Boffset")
} }
func Bflush(b *Biobuf) error { func Bflush(b *Biobuf) error {
@ -85,7 +190,10 @@ func Bflush(b *Biobuf) error {
} }
func Bterm(b *Biobuf) error { func Bterm(b *Biobuf) error {
err := b.w.Flush() var err error
if b.w != nil {
err = b.w.Flush()
}
err1 := b.f.Close() err1 := b.f.Close()
if err == nil { if err == nil {
err = err1 err = err1
@ -116,6 +224,10 @@ func Getgoarm() string {
return envOr("GOARM", defaultGOARM) return envOr("GOARM", defaultGOARM)
} }
// Getgo386 returns the GO386 setting: the value of the GO386
// environment variable, or the build-time default when unset.
func Getgo386() string {
	return envOr("GO386", defaultGO386)
}
func Getgoversion() string { func Getgoversion() string {
return version return version
} }
@ -145,3 +257,15 @@ func (ctxt *Link) NewProg() *Prog {
func (ctxt *Link) Line(n int) string { func (ctxt *Link) Line(n int) string {
return Linklinefmt(ctxt, n, false, false) return Linklinefmt(ctxt, n, false, false)
} }
// Dconv formats the address a using the architecture-specific
// operand formatter, with no enclosing instruction and flag 0.
func (ctxt *Link) Dconv(a *Addr) string {
	return ctxt.Arch.Dconv(nil, 0, a)
}
// Rconv formats the register number reg using the
// architecture-specific register formatter.
func (ctxt *Link) Rconv(reg int) string {
	return ctxt.Arch.Rconv(reg)
}
// Getcallerpc is a stub standing in for the runtime intrinsic of the
// same name; it always returns the dummy value 1 rather than a real
// caller PC.
func Getcallerpc(interface{}) uintptr {
	return 1
}

View file

@ -789,6 +789,7 @@ const (
FREGRET = REG_X0 FREGRET = REG_X0
REGSP = REG_SP REGSP = REG_SP
REGTMP = REG_DI REGTMP = REG_DI
REGCTXT = REG_DX
REGEXT = REG_R15 REGEXT = REG_R15
FREGMIN = REG_X0 + 5 FREGMIN = REG_X0 + 5
FREGEXT = REG_X0 + 15 FREGEXT = REG_X0 + 15

View file

@ -2864,7 +2864,6 @@ func asmidx(ctxt *obj.Link, scale int, index int, base int) {
REG_SI, REG_SI,
REG_DI: REG_DI:
i = reg[index] << 3 i = reg[index] << 3
break
} }
switch scale { switch scale {
@ -2882,7 +2881,6 @@ func asmidx(ctxt *obj.Link, scale int, index int, base int) {
case 8: case 8:
i |= 3 << 6 i |= 3 << 6
break
} }
bas: bas:
@ -2915,7 +2913,6 @@ bas:
REG_SI, REG_SI,
REG_DI: REG_DI:
i |= reg[base] i |= reg[base]
break
} }
ctxt.Andptr[0] = byte(i) ctxt.Andptr[0] = byte(i)
@ -3086,7 +3083,6 @@ func asmandsz(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int)
case obj.NAME_AUTO, case obj.NAME_AUTO,
obj.NAME_PARAM: obj.NAME_PARAM:
base = REG_SP base = REG_SP
break
} }
ctxt.Rexflag |= regrex[int(a.Index)]&Rxx | regrex[base]&Rxb | rex ctxt.Rexflag |= regrex[int(a.Index)]&Rxx | regrex[base]&Rxb | rex
@ -3132,7 +3128,6 @@ func asmandsz(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int)
case obj.NAME_AUTO, case obj.NAME_AUTO,
obj.NAME_PARAM: obj.NAME_PARAM:
base = REG_SP base = REG_SP
break
} }
if base == REG_TLS { if base == REG_TLS {
@ -3443,7 +3438,6 @@ func mediaop(ctxt *obj.Link, o *Optab, op int, osize int, z int) int {
ctxt.Andptr[0] = Pm ctxt.Andptr[0] = Pm
ctxt.Andptr = ctxt.Andptr[1:] ctxt.Andptr = ctxt.Andptr[1:]
} }
break
} }
ctxt.Andptr[0] = byte(op) ctxt.Andptr[0] = byte(op)
@ -3569,7 +3563,6 @@ found:
if p.Mode != 64 { if p.Mode != 64 {
ctxt.Diag("asmins: illegal in %d-bit mode: %v", p.Mode, p) ctxt.Diag("asmins: illegal in %d-bit mode: %v", p.Mode, p)
} }
break
} }
if z >= len(o.op) { if z >= len(o.op) {
@ -4075,8 +4068,6 @@ found:
} }
} }
} }
break
} }
return return
@ -4231,7 +4222,6 @@ mfound:
ctxt.Andptr = ctxt.Andptr[1:] ctxt.Andptr = ctxt.Andptr[1:]
ctxt.Andptr[0] = 0xb5 ctxt.Andptr[0] = 0xb5
ctxt.Andptr = ctxt.Andptr[1:] ctxt.Andptr = ctxt.Andptr[1:]
break
} }
asmand(ctxt, p, &p.From, &p.To) asmand(ctxt, p, &p.From, &p.To)
@ -4274,7 +4264,6 @@ mfound:
ctxt.Andptr[0] = t[1] ctxt.Andptr[0] = t[1]
ctxt.Andptr = ctxt.Andptr[1:] ctxt.Andptr = ctxt.Andptr[1:]
asmandsz(ctxt, p, &p.To, reg[int(p.From.Index)], regrex[int(p.From.Index)], 0) asmandsz(ctxt, p, &p.To, reg[int(p.From.Index)], regrex[int(p.From.Index)], 0)
break
} }
} }
@ -4335,10 +4324,7 @@ mfound:
ctxt.Andptr[0] = 0x8B ctxt.Andptr[0] = 0x8B
ctxt.Andptr = ctxt.Andptr[1:] ctxt.Andptr = ctxt.Andptr[1:]
asmand(ctxt, p, &pp.From, &p.To) asmand(ctxt, p, &pp.From, &p.To)
break
} }
break
} }
} }
@ -4512,7 +4498,6 @@ func asmins(ctxt *obj.Link, p *obj.Prog) {
AMOVSQ: AMOVSQ:
copy(ctxt.Andptr, naclmovs) copy(ctxt.Andptr, naclmovs)
ctxt.Andptr = ctxt.Andptr[len(naclmovs):] ctxt.Andptr = ctxt.Andptr[len(naclmovs):]
break
} }
if ctxt.Rep != 0 { if ctxt.Rep != 0 {
@ -4585,7 +4570,6 @@ func asmins(ctxt *obj.Link, p *obj.Prog) {
case REG_BP: case REG_BP:
copy(ctxt.Andptr, naclbpfix) copy(ctxt.Andptr, naclbpfix)
ctxt.Andptr = ctxt.Andptr[len(naclbpfix):] ctxt.Andptr = ctxt.Andptr[len(naclbpfix):]
break
} }
} }
} }

View file

@ -71,13 +71,13 @@ func Pconv(p *obj.Prog) string {
default: default:
str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), Dconv(p, 0, &p.To)) str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
// TODO(rsc): This special case is for SHRQ $32, AX:DX, which encodes as // TODO(rsc): This special case is for SHRQ $32, AX:DX, which encodes as
// SHRQ $32(DX*0), AX // SHRQ $32(DX*0), AX
// Remove. // Remove.
if (p.From.Type == obj.TYPE_REG || p.From.Type == obj.TYPE_CONST) && p.From.Index != 0 { if (p.From.Type == obj.TYPE_REG || p.From.Type == obj.TYPE_CONST) && p.From.Index != REG_NONE {
str += fmt.Sprintf(":%s", Rconv(int(p.From.Index))) str += fmt.Sprintf(":%v", Rconv(int(p.From.Index)))
} }
break
} }
fp += str fp += str
@ -157,7 +157,6 @@ func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
} else { } else {
str = fmt.Sprintf("%d(FP)", a.Offset) str = fmt.Sprintf("%d(FP)", a.Offset)
} }
break
} }
if a.Index != REG_NONE { if a.Index != REG_NONE {
@ -179,13 +178,12 @@ func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
str = fmt.Sprintf("$(%.17g)", a.U.Dval) str = fmt.Sprintf("$(%.17g)", a.U.Dval)
case obj.TYPE_SCONST: case obj.TYPE_SCONST:
str = fmt.Sprintf("$\"%q\"", a.U.Sval) str = fmt.Sprintf("$%q", a.U.Sval)
case obj.TYPE_ADDR: case obj.TYPE_ADDR:
a.Type = obj.TYPE_MEM a.Type = obj.TYPE_MEM
str = fmt.Sprintf("$%v", Dconv(p, 0, a)) str = fmt.Sprintf("$%v", Dconv(p, 0, a))
a.Type = obj.TYPE_ADDR a.Type = obj.TYPE_ADDR
break
} }
fp += str fp += str

View file

@ -38,16 +38,6 @@ import (
"math" "math"
) )
func nopout(p *obj.Prog) {
p.As = obj.ANOP
p.From.Type = obj.TYPE_NONE
p.From.Reg = 0
p.From.Name = 0
p.To.Type = obj.TYPE_NONE
p.To.Reg = 0
p.To.Name = 0
}
func canuselocaltls(ctxt *obj.Link) int { func canuselocaltls(ctxt *obj.Link) int {
switch ctxt.Headtype { switch ctxt.Headtype {
case obj.Hplan9, case obj.Hplan9,
@ -109,7 +99,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
// guarantee we are producing byte-identical binaries as before this code. // guarantee we are producing byte-identical binaries as before this code.
// But it should be unnecessary. // But it should be unnecessary.
if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_REG && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 && ctxt.Headtype != obj.Hsolaris { if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_REG && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 && ctxt.Headtype != obj.Hsolaris {
nopout(p) obj.Nopout(p)
} }
if p.From.Type == obj.TYPE_MEM && p.From.Index == REG_TLS && REG_AX <= p.From.Reg && p.From.Reg <= REG_R15 { if p.From.Type == obj.TYPE_MEM && p.From.Index == REG_TLS && REG_AX <= p.From.Reg && p.From.Reg <= REG_R15 {
p.From.Reg = REG_TLS p.From.Reg = REG_TLS
@ -175,12 +165,10 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
32, 32,
64: 64:
ctxt.Mode = int(p.From.Offset) ctxt.Mode = int(p.From.Offset)
break
} }
} }
nopout(p) obj.Nopout(p)
break
} }
// Rewrite CALL/JMP/RET to symbol as TYPE_BRANCH. // Rewrite CALL/JMP/RET to symbol as TYPE_BRANCH.
@ -191,7 +179,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil { if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
p.To.Type = obj.TYPE_BRANCH p.To.Type = obj.TYPE_BRANCH
} }
break
} }
// Rewrite float constants to values stored in memory. // Rewrite float constants to values stored in memory.
@ -290,8 +277,6 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
p.From.Sym = s p.From.Sym = s
p.From.Offset = 0 p.From.Offset = 0
} }
break
} }
} }
@ -325,7 +310,6 @@ func nacladdr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
a.Scale = 1 a.Scale = 1
} }
a.Reg = REG_R15 a.Reg = REG_R15
break
} }
} }
} }
@ -1063,7 +1047,6 @@ loop:
q.To.Type = obj.TYPE_BRANCH q.To.Type = obj.TYPE_BRANCH
q.To.Offset = p.Pc q.To.Offset = p.Pc
q.Pcond = p q.Pcond = p
q.Ctxt = p.Ctxt
p = q p = q
} }
@ -1128,6 +1111,8 @@ loop:
} }
var Linkamd64 = obj.LinkArch{ var Linkamd64 = obj.LinkArch{
Dconv: Dconv,
Rconv: Rconv,
ByteOrder: binary.LittleEndian, ByteOrder: binary.LittleEndian,
Pconv: Pconv, Pconv: Pconv,
Name: "amd64", Name: "amd64",
@ -1143,6 +1128,8 @@ var Linkamd64 = obj.LinkArch{
} }
var Linkamd64p32 = obj.LinkArch{ var Linkamd64p32 = obj.LinkArch{
Dconv: Dconv,
Rconv: Rconv,
ByteOrder: binary.LittleEndian, ByteOrder: binary.LittleEndian,
Pconv: Pconv, Pconv: Pconv,
Name: "amd64p32", Name: "amd64p32",

View file

@ -4,10 +4,6 @@
package x86 package x86
const (
fmtLong = 1 << iota
)
func bool2int(b bool) int { func bool2int(b bool) int {
if b { if b {
return 1 return 1

2004
src/cmd/new5g/cgen.go Normal file

File diff suppressed because it is too large Load diff

836
src/cmd/new5g/cgen64.go Normal file
View file

@ -0,0 +1,836 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"cmd/internal/obj"
"cmd/internal/obj/arm"
)
import "cmd/internal/gc"
/*
 * attempt to generate 64-bit
 *	res = n
 * res must be OINDREG or ONAME (checked below); anything else is fatal.
 * (The "return 1 on success, 0 if op not handled" wording is carried
 * over from the C original; this version Fatals on unhandled ops
 * instead of returning a status.)
 */
func cgen64(n *gc.Node, res *gc.Node) {
	var t1 gc.Node
	var t2 gc.Node
	var l *gc.Node
	var r *gc.Node
	var lo1 gc.Node
	var lo2 gc.Node
	var hi1 gc.Node
	var hi2 gc.Node
	var al gc.Node
	var ah gc.Node
	var bl gc.Node
	var bh gc.Node
	var cl gc.Node
	var ch gc.Node
	var s gc.Node
	var n1 gc.Node
	var creg gc.Node
	var p1 *obj.Prog
	var p2 *obj.Prog
	var p3 *obj.Prog
	var p4 *obj.Prog
	var p5 *obj.Prog
	var p6 *obj.Prog
	var v uint64

	if res.Op != gc.OINDREG && res.Op != gc.ONAME {
		gc.Dump("n", n)
		gc.Dump("res", res)
		gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
	}

	// Materialize the left operand somewhere addressable so it can be
	// split into 32-bit halves.
	l = n.Left
	if !(l.Addable != 0) {
		gc.Tempname(&t1, l.Type)
		cgen(l, &t1)
		l = &t1
	}

	split64(l, &lo1, &hi1)

	// Unary ops are handled completely here and return early;
	// binary ops fall through to the common setup below.
	switch n.Op {
	default:
		gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
		fallthrough

	case gc.OMINUS:
		// 64-bit negate: 0 - lo with flags, then 0 - hi with borrow.
		split64(res, &lo2, &hi2)

		regalloc(&t1, lo1.Type, nil)
		regalloc(&al, lo1.Type, nil)
		regalloc(&ah, hi1.Type, nil)

		gins(arm.AMOVW, &lo1, &al)
		gins(arm.AMOVW, &hi1, &ah)

		gmove(ncon(0), &t1)
		p1 = gins(arm.ASUB, &al, &t1)
		p1.Scond |= arm.C_SBIT // set carry for the SBC below
		gins(arm.AMOVW, &t1, &lo2)

		gmove(ncon(0), &t1)
		gins(arm.ASBC, &ah, &t1)
		gins(arm.AMOVW, &t1, &hi2)

		regfree(&t1)
		regfree(&al)
		regfree(&ah)
		splitclean()
		splitclean()
		return

	case gc.OCOM:
		// 64-bit complement: XOR each half with all-ones.
		regalloc(&t1, lo1.Type, nil)
		gmove(ncon(^uint32(0)), &t1)

		split64(res, &lo2, &hi2)
		regalloc(&n1, lo1.Type, nil)

		gins(arm.AMOVW, &lo1, &n1)
		gins(arm.AEOR, &t1, &n1)
		gins(arm.AMOVW, &n1, &lo2)

		gins(arm.AMOVW, &hi1, &n1)
		gins(arm.AEOR, &t1, &n1)
		gins(arm.AMOVW, &n1, &hi2)

		regfree(&t1)
		regfree(&n1)
		splitclean()
		splitclean()
		return

	// binary operators.
	// common setup below.
	case gc.OADD,
		gc.OSUB,
		gc.OMUL,
		gc.OLSH,
		gc.ORSH,
		gc.OAND,
		gc.OOR,
		gc.OXOR,
		gc.OLROT:
		break
	}

	// setup for binary operators
	r = n.Right

	if r != nil && !(r.Addable != 0) {
		gc.Tempname(&t2, r.Type)
		cgen(r, &t2)
		r = &t2
	}

	if gc.Is64(r.Type) != 0 {
		split64(r, &lo2, &hi2)
	}

	regalloc(&al, lo1.Type, nil)
	regalloc(&ah, hi1.Type, nil)

	// Do op. Leave result in ah:al.
	switch n.Op {
	default:
		gc.Fatal("cgen64: not implemented: %v\n", gc.Nconv(n, 0))
		fallthrough

	// TODO: Constants
	case gc.OADD:
		// add-with-carry across the two halves.
		regalloc(&bl, gc.Types[gc.TPTR32], nil)

		regalloc(&bh, gc.Types[gc.TPTR32], nil)
		gins(arm.AMOVW, &hi1, &ah)
		gins(arm.AMOVW, &lo1, &al)
		gins(arm.AMOVW, &hi2, &bh)
		gins(arm.AMOVW, &lo2, &bl)
		p1 = gins(arm.AADD, &bl, &al)
		p1.Scond |= arm.C_SBIT // carry out of the low add feeds ADC
		gins(arm.AADC, &bh, &ah)
		regfree(&bl)
		regfree(&bh)

	// TODO: Constants.
	case gc.OSUB:
		// subtract-with-borrow across the two halves.
		regalloc(&bl, gc.Types[gc.TPTR32], nil)

		regalloc(&bh, gc.Types[gc.TPTR32], nil)
		gins(arm.AMOVW, &lo1, &al)
		gins(arm.AMOVW, &hi1, &ah)
		gins(arm.AMOVW, &lo2, &bl)
		gins(arm.AMOVW, &hi2, &bh)
		p1 = gins(arm.ASUB, &bl, &al)
		p1.Scond |= arm.C_SBIT // borrow out of the low sub feeds SBC
		gins(arm.ASBC, &bh, &ah)
		regfree(&bl)
		regfree(&bh)

	// TODO(kaib): this can be done with 4 regs and does not need 6
	case gc.OMUL:
		regalloc(&bl, gc.Types[gc.TPTR32], nil)

		regalloc(&bh, gc.Types[gc.TPTR32], nil)
		regalloc(&cl, gc.Types[gc.TPTR32], nil)
		regalloc(&ch, gc.Types[gc.TPTR32], nil)

		// load args into bh:bl and ch:cl.
		gins(arm.AMOVW, &hi1, &bh)

		gins(arm.AMOVW, &lo1, &bl)
		gins(arm.AMOVW, &hi2, &ch)
		gins(arm.AMOVW, &lo2, &cl)

		// bl * cl -> ah al
		p1 = gins(arm.AMULLU, nil, nil)

		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = bl.Val.U.Reg
		p1.Reg = cl.Val.U.Reg
		p1.To.Type = obj.TYPE_REGREG
		p1.To.Reg = ah.Val.U.Reg
		p1.To.Offset = int64(al.Val.U.Reg)

		//print("%P\n", p1);

		// bl * ch + ah -> ah
		p1 = gins(arm.AMULA, nil, nil)

		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = bl.Val.U.Reg
		p1.Reg = ch.Val.U.Reg
		p1.To.Type = obj.TYPE_REGREG2
		p1.To.Reg = ah.Val.U.Reg
		p1.To.Offset = int64(ah.Val.U.Reg)

		//print("%P\n", p1);

		// bh * cl + ah -> ah
		p1 = gins(arm.AMULA, nil, nil)

		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = bh.Val.U.Reg
		p1.Reg = cl.Val.U.Reg
		p1.To.Type = obj.TYPE_REGREG2
		p1.To.Reg = ah.Val.U.Reg
		p1.To.Offset = int64(ah.Val.U.Reg)

		//print("%P\n", p1);

		regfree(&bh)
		regfree(&bl)
		regfree(&ch)
		regfree(&cl)

	// We only rotate by a constant c in [0,64).
	// if c >= 32:
	//	lo, hi = hi, lo
	//	c -= 32
	// if c == 0:
	//	no-op
	// else:
	//	t = hi
	//	shld hi:lo, c
	//	shld lo:t, c
	case gc.OLROT:
		v = uint64(gc.Mpgetfix(r.Val.U.Xval))

		regalloc(&bl, lo1.Type, nil)
		regalloc(&bh, hi1.Type, nil)
		if v >= 32 {
			// reverse during load to do the first 32 bits of rotate
			v -= 32

			gins(arm.AMOVW, &hi1, &bl)
			gins(arm.AMOVW, &lo1, &bh)
		} else {
			gins(arm.AMOVW, &hi1, &bh)
			gins(arm.AMOVW, &lo1, &bl)
		}

		if v == 0 {
			gins(arm.AMOVW, &bh, &ah)
			gins(arm.AMOVW, &bl, &al)
		} else {
			// rotate by 1 <= v <= 31
			//	MOVW	bl<<v, al
			//	MOVW	bh<<v, ah
			//	OR	bl>>(32-v), ah
			//	OR	bh>>(32-v), al
			gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)

			gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)
			gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
			gshift(arm.AORR, &bh, arm.SHIFT_LR, int32(32-v), &al)
		}

		regfree(&bl)
		regfree(&bh)

	case gc.OLSH:
		regalloc(&bl, lo1.Type, nil)
		regalloc(&bh, hi1.Type, nil)
		gins(arm.AMOVW, &hi1, &bh)
		gins(arm.AMOVW, &lo1, &bl)

		// Constant shift count: emit the exact sequence for each range.
		if r.Op == gc.OLITERAL {
			v = uint64(gc.Mpgetfix(r.Val.U.Xval))
			if v >= 64 {
				// TODO(kaib): replace with gins(AMOVW, nodintconst(0), &al)
				// here and below (verify it optimizes to EOR)
				gins(arm.AEOR, &al, &al)

				gins(arm.AEOR, &ah, &ah)
			} else if v > 32 {
				gins(arm.AEOR, &al, &al)

				// MOVW	bl<<(v-32), ah
				gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v-32), &ah)
			} else if v == 32 {
				gins(arm.AEOR, &al, &al)
				gins(arm.AMOVW, &bl, &ah)
			} else if v > 0 {
				// MOVW	bl<<v, al
				gshift(arm.AMOVW, &bl, arm.SHIFT_LL, int32(v), &al)

				// MOVW	bh<<v, ah
				gshift(arm.AMOVW, &bh, arm.SHIFT_LL, int32(v), &ah)

				// OR	bl>>(32-v), ah
				gshift(arm.AORR, &bl, arm.SHIFT_LR, int32(32-v), &ah)
			} else {
				gins(arm.AMOVW, &bl, &al)
				gins(arm.AMOVW, &bh, &ah)
			}

			goto olsh_break
		}

		// Variable shift count: compare against 32 and 64 at runtime
		// and use conditionally-executed instructions for each range.
		regalloc(&s, gc.Types[gc.TUINT32], nil)

		regalloc(&creg, gc.Types[gc.TUINT32], nil)
		if gc.Is64(r.Type) != 0 {
			// shift is >= 1<<32
			split64(r, &cl, &ch)

			gmove(&ch, &s)
			gins(arm.ATST, &s, nil)
			p6 = gc.Gbranch(arm.ABNE, nil, 0)
			gmove(&cl, &s)
			splitclean()
		} else {
			gmove(r, &s)
			p6 = nil
		}

		gins(arm.ATST, &s, nil)

		// shift == 0
		p1 = gins(arm.AMOVW, &bl, &al)

		p1.Scond = arm.C_SCOND_EQ
		p1 = gins(arm.AMOVW, &bh, &ah)
		p1.Scond = arm.C_SCOND_EQ
		p2 = gc.Gbranch(arm.ABEQ, nil, 0)

		// shift is < 32
		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)

		gmove(&n1, &creg)
		gcmp(arm.ACMP, &s, &creg)

		// MOVW.LO	bl<<s, al
		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &al)

		p1.Scond = arm.C_SCOND_LO

		// MOVW.LO	bh<<s, ah
		p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LL, &s, &ah)

		p1.Scond = arm.C_SCOND_LO

		// SUB.LO	s, creg
		p1 = gins(arm.ASUB, &s, &creg)

		p1.Scond = arm.C_SCOND_LO

		// OR.LO	bl>>creg, ah
		p1 = gregshift(arm.AORR, &bl, arm.SHIFT_LR, &creg, &ah)

		p1.Scond = arm.C_SCOND_LO

		// BLO	end
		p3 = gc.Gbranch(arm.ABLO, nil, 0)

		// shift == 32
		p1 = gins(arm.AEOR, &al, &al)

		p1.Scond = arm.C_SCOND_EQ
		p1 = gins(arm.AMOVW, &bl, &ah)
		p1.Scond = arm.C_SCOND_EQ
		p4 = gc.Gbranch(arm.ABEQ, nil, 0)

		// shift is < 64
		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)

		gmove(&n1, &creg)
		gcmp(arm.ACMP, &s, &creg)

		// EOR.LO	al, al
		p1 = gins(arm.AEOR, &al, &al)

		p1.Scond = arm.C_SCOND_LO

		// MOVW.LO	creg>>1, creg
		p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)

		p1.Scond = arm.C_SCOND_LO

		// SUB.LO	creg, s
		p1 = gins(arm.ASUB, &creg, &s)

		p1.Scond = arm.C_SCOND_LO

		// MOVW	bl<<s, ah
		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LL, &s, &ah)

		p1.Scond = arm.C_SCOND_LO
		p5 = gc.Gbranch(arm.ABLO, nil, 0)

		// shift >= 64
		if p6 != nil {
			gc.Patch(p6, gc.Pc)
		}
		gins(arm.AEOR, &al, &al)
		gins(arm.AEOR, &ah, &ah)

		gc.Patch(p2, gc.Pc)
		gc.Patch(p3, gc.Pc)
		gc.Patch(p4, gc.Pc)
		gc.Patch(p5, gc.Pc)
		regfree(&s)
		regfree(&creg)

	olsh_break:
		regfree(&bl)
		regfree(&bh)

	case gc.ORSH:
		regalloc(&bl, lo1.Type, nil)
		regalloc(&bh, hi1.Type, nil)
		gins(arm.AMOVW, &hi1, &bh)
		gins(arm.AMOVW, &lo1, &bl)

		// Constant shift count. TINT32 gets arithmetic (sign-extending)
		// shifts of the high word; unsigned gets logical shifts/zeroing.
		if r.Op == gc.OLITERAL {
			v = uint64(gc.Mpgetfix(r.Val.U.Xval))
			if v >= 64 {
				if bh.Type.Etype == gc.TINT32 {
					// MOVW	bh->31, al
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al)

					// MOVW	bh->31, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
				} else {
					gins(arm.AEOR, &al, &al)
					gins(arm.AEOR, &ah, &ah)
				}
			} else if v > 32 {
				if bh.Type.Etype == gc.TINT32 {
					// MOVW	bh->(v-32), al
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v-32), &al)

					// MOVW	bh->31, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
				} else {
					// MOVW	bh>>(v-32), al
					gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v-32), &al)

					gins(arm.AEOR, &ah, &ah)
				}
			} else if v == 32 {
				gins(arm.AMOVW, &bh, &al)
				if bh.Type.Etype == gc.TINT32 {
					// MOVW	bh->31, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
				} else {
					gins(arm.AEOR, &ah, &ah)
				}
			} else if v > 0 {
				// MOVW	bl>>v, al
				gshift(arm.AMOVW, &bl, arm.SHIFT_LR, int32(v), &al)

				// OR	bh<<(32-v), al
				gshift(arm.AORR, &bh, arm.SHIFT_LL, int32(32-v), &al)

				if bh.Type.Etype == gc.TINT32 {
					// MOVW	bh->v, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_AR, int32(v), &ah)
				} else {
					// MOVW	bh>>v, ah
					gshift(arm.AMOVW, &bh, arm.SHIFT_LR, int32(v), &ah)
				}
			} else {
				gins(arm.AMOVW, &bl, &al)
				gins(arm.AMOVW, &bh, &ah)
			}

			goto orsh_break
		}

		// Variable shift count: runtime range checks mirroring OLSH.
		regalloc(&s, gc.Types[gc.TUINT32], nil)

		regalloc(&creg, gc.Types[gc.TUINT32], nil)
		if gc.Is64(r.Type) != 0 {
			// shift is >= 1<<32
			split64(r, &cl, &ch)

			gmove(&ch, &s)
			gins(arm.ATST, &s, nil)
			if bh.Type.Etype == gc.TINT32 {
				p1 = gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
			} else {
				p1 = gins(arm.AEOR, &ah, &ah)
			}
			p1.Scond = arm.C_SCOND_NE
			p6 = gc.Gbranch(arm.ABNE, nil, 0)
			gmove(&cl, &s)
			splitclean()
		} else {
			gmove(r, &s)
			p6 = nil
		}

		gins(arm.ATST, &s, nil)

		// shift == 0
		p1 = gins(arm.AMOVW, &bl, &al)

		p1.Scond = arm.C_SCOND_EQ
		p1 = gins(arm.AMOVW, &bh, &ah)
		p1.Scond = arm.C_SCOND_EQ
		p2 = gc.Gbranch(arm.ABEQ, nil, 0)

		// check if shift is < 32
		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 32)

		gmove(&n1, &creg)
		gcmp(arm.ACMP, &s, &creg)

		// MOVW.LO	bl>>s, al
		p1 = gregshift(arm.AMOVW, &bl, arm.SHIFT_LR, &s, &al)

		p1.Scond = arm.C_SCOND_LO

		// SUB.LO	s,creg
		p1 = gins(arm.ASUB, &s, &creg)

		p1.Scond = arm.C_SCOND_LO

		// OR.LO	bh<<(32-s), al
		p1 = gregshift(arm.AORR, &bh, arm.SHIFT_LL, &creg, &al)

		p1.Scond = arm.C_SCOND_LO

		if bh.Type.Etype == gc.TINT32 {
			// MOVW	bh->s, ah
			p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &ah)
		} else {
			// MOVW	bh>>s, ah
			p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &ah)
		}

		p1.Scond = arm.C_SCOND_LO

		// BLO	end
		p3 = gc.Gbranch(arm.ABLO, nil, 0)

		// shift == 32
		p1 = gins(arm.AMOVW, &bh, &al)

		p1.Scond = arm.C_SCOND_EQ
		if bh.Type.Etype == gc.TINT32 {
			gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &ah)
		} else {
			gins(arm.AEOR, &ah, &ah)
		}
		p4 = gc.Gbranch(arm.ABEQ, nil, 0)

		// check if shift is < 64
		gc.Nodconst(&n1, gc.Types[gc.TUINT32], 64)

		gmove(&n1, &creg)
		gcmp(arm.ACMP, &s, &creg)

		// MOVW.LO	creg>>1, creg
		p1 = gshift(arm.AMOVW, &creg, arm.SHIFT_LR, 1, &creg)

		p1.Scond = arm.C_SCOND_LO

		// SUB.LO	creg, s
		p1 = gins(arm.ASUB, &creg, &s)

		p1.Scond = arm.C_SCOND_LO
		if bh.Type.Etype == gc.TINT32 {
			// MOVW	bh->(s-32), al
			p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_AR, &s, &al)

			p1.Scond = arm.C_SCOND_LO
		} else {
			// MOVW	bh>>(v-32), al
			p1 = gregshift(arm.AMOVW, &bh, arm.SHIFT_LR, &s, &al)

			p1.Scond = arm.C_SCOND_LO
		}

		// BLO	end
		p5 = gc.Gbranch(arm.ABLO, nil, 0)

		// s >= 64
		if p6 != nil {
			gc.Patch(p6, gc.Pc)
		}
		if bh.Type.Etype == gc.TINT32 {
			// MOVW	bh->31, al
			gshift(arm.AMOVW, &bh, arm.SHIFT_AR, 31, &al)
		} else {
			gins(arm.AEOR, &al, &al)
		}

		gc.Patch(p2, gc.Pc)
		gc.Patch(p3, gc.Pc)
		gc.Patch(p4, gc.Pc)
		gc.Patch(p5, gc.Pc)
		regfree(&s)
		regfree(&creg)

	orsh_break:
		regfree(&bl)
		regfree(&bh)

	// TODO(kaib): literal optimizations
	// make constant the right side (it usually is anyway).
	//	if(lo1.op == OLITERAL) {
	//		nswap(&lo1, &lo2);
	//		nswap(&hi1, &hi2);
	//	}
	//	if(lo2.op == OLITERAL) {
	//		// special cases for constants.
	//		lv = mpgetfix(lo2.val.u.xval);
	//		hv = mpgetfix(hi2.val.u.xval);
	//		splitclean();	// right side
	//		split64(res, &lo2, &hi2);
	//		switch(n->op) {
	//		case OXOR:
	//			gmove(&lo1, &lo2);
	//			gmove(&hi1, &hi2);
	//			switch(lv) {
	//			case 0:
	//				break;
	//			case 0xffffffffu:
	//				gins(ANOTL, N, &lo2);
	//				break;
	//			default:
	//				gins(AXORL, ncon(lv), &lo2);
	//				break;
	//			}
	//			switch(hv) {
	//			case 0:
	//				break;
	//			case 0xffffffffu:
	//				gins(ANOTL, N, &hi2);
	//				break;
	//			default:
	//				gins(AXORL, ncon(hv), &hi2);
	//				break;
	//			}
	//			break;
	//		case OAND:
	//			switch(lv) {
	//			case 0:
	//				gins(AMOVL, ncon(0), &lo2);
	//				break;
	//			default:
	//				gmove(&lo1, &lo2);
	//				if(lv != 0xffffffffu)
	//					gins(AANDL, ncon(lv), &lo2);
	//				break;
	//			}
	//			switch(hv) {
	//			case 0:
	//				gins(AMOVL, ncon(0), &hi2);
	//				break;
	//			default:
	//				gmove(&hi1, &hi2);
	//				if(hv != 0xffffffffu)
	//					gins(AANDL, ncon(hv), &hi2);
	//				break;
	//			}
	//			break;
	//		case OOR:
	//			switch(lv) {
	//			case 0:
	//				gmove(&lo1, &lo2);
	//				break;
	//			case 0xffffffffu:
	//				gins(AMOVL, ncon(0xffffffffu), &lo2);
	//				break;
	//			default:
	//				gmove(&lo1, &lo2);
	//				gins(AORL, ncon(lv), &lo2);
	//				break;
	//			}
	//			switch(hv) {
	//			case 0:
	//				gmove(&hi1, &hi2);
	//				break;
	//			case 0xffffffffu:
	//				gins(AMOVL, ncon(0xffffffffu), &hi2);
	//				break;
	//			default:
	//				gmove(&hi1, &hi2);
	//				gins(AORL, ncon(hv), &hi2);
	//				break;
	//			}
	//			break;
	//		}
	//		splitclean();
	//		splitclean();
	//		goto out;
	//	}
	case gc.OXOR,
		gc.OAND,
		gc.OOR:
		// bitwise ops apply independently to each 32-bit half.
		regalloc(&n1, lo1.Type, nil)

		gins(arm.AMOVW, &lo1, &al)
		gins(arm.AMOVW, &hi1, &ah)
		gins(arm.AMOVW, &lo2, &n1)
		gins(optoas(int(n.Op), lo1.Type), &n1, &al)
		gins(arm.AMOVW, &hi2, &n1)
		gins(optoas(int(n.Op), lo1.Type), &n1, &ah)
		regfree(&n1)
	}

	if gc.Is64(r.Type) != 0 {
		splitclean()
	}

	splitclean()

	// Store the ah:al result into res.
	split64(res, &lo1, &hi1)
	gins(arm.AMOVW, &al, &lo1)
	gins(arm.AMOVW, &ah, &hi1)
	splitclean()

	//out:
	regfree(&al)

	regfree(&ah)
}
/*
 * generate comparison of nl, nr, both 64-bit.
 * nl is memory; nr is constant or memory.
 * op is the comparison operator (OEQ, ONE, OLT, OLE, OGE, OGT);
 * the generated code branches to `to` when the comparison holds,
 * with `likely` as the branch-prediction hint.
 */
func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
	var lo1 gc.Node
	var hi1 gc.Node
	var lo2 gc.Node
	var hi2 gc.Node
	var r1 gc.Node
	var r2 gc.Node
	var br *obj.Prog
	var t *gc.Type

	// Split each 64-bit operand into addressable 32-bit halves.
	split64(nl, &lo1, &hi1)
	split64(nr, &lo2, &hi2)

	// compare most significant word;
	// if they differ, we're done.
	t = hi1.Type

	regalloc(&r1, gc.Types[gc.TINT32], nil)
	regalloc(&r2, gc.Types[gc.TINT32], nil)
	gins(arm.AMOVW, &hi1, &r1)
	gins(arm.AMOVW, &hi2, &r2)
	gcmp(arm.ACMP, &r1, &r2)
	regfree(&r1)
	regfree(&r2)

	// br, when non-nil, is patched past the low-word comparison:
	// the high words already decided the answer.
	br = nil

	switch op {
	default:
		gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
		fallthrough

	// cmp hi
	// bne L
	// cmp lo
	// beq to
	// L:
	case gc.OEQ:
		br = gc.Gbranch(arm.ABNE, nil, -likely)

	// cmp hi
	// bne to
	// cmp lo
	// bne to
	case gc.ONE:
		gc.Patch(gc.Gbranch(arm.ABNE, nil, likely), to)

	// cmp hi
	// bgt to
	// blt L
	// cmp lo
	// bge to (or bgt to)
	// L:
	case gc.OGE,
		gc.OGT:
		gc.Patch(gc.Gbranch(optoas(gc.OGT, t), nil, likely), to)
		br = gc.Gbranch(optoas(gc.OLT, t), nil, -likely)

	// cmp hi
	// blt to
	// bgt L
	// cmp lo
	// ble to (or jlt to)
	// L:
	case gc.OLE,
		gc.OLT:
		gc.Patch(gc.Gbranch(optoas(gc.OLT, t), nil, likely), to)
		br = gc.Gbranch(optoas(gc.OGT, t), nil, -likely)
	}

	// compare least significant word
	t = lo1.Type

	regalloc(&r1, gc.Types[gc.TINT32], nil)
	regalloc(&r2, gc.Types[gc.TINT32], nil)
	gins(arm.AMOVW, &lo1, &r1)
	gins(arm.AMOVW, &lo2, &r2)
	gcmp(arm.ACMP, &r1, &r2)
	regfree(&r1)
	regfree(&r2)

	// jump again
	gc.Patch(gc.Gbranch(optoas(op, t), nil, likely), to)

	// point first branch down here if appropriate
	if br != nil {
		gc.Patch(br, gc.Pc)
	}

	splitclean()
	splitclean()
}

84
src/cmd/new5g/galign.go Normal file
View file

@ -0,0 +1,84 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"cmd/internal/obj"
"cmd/internal/obj/arm"
)
import "cmd/internal/gc"
// Back-end identification for the ARM ('5') compiler.
var thechar int = '5'

var thestring string = "arm"

var thelinkarch *obj.LinkArch = &arm.Linkarm
// linkarchinit performs architecture-specific link setup.
// Nothing to do for ARM.
func linkarchinit() {
}
var MAXWIDTH int64 = (1 << 32) - 1
/*
 * go declares several platform-specific type aliases:
 * int, uint, float, and uintptr
 */
var typedefs = []gc.Typedef{
	{"int", gc.TINT, gc.TINT32},
	{"uint", gc.TUINT, gc.TUINT32},
	{"uintptr", gc.TUINTPTR, gc.TUINT32},
}
// betypeinit sets the back-end-specific type sizes:
// pointers, ints, and registers are all 4 bytes on ARM.
func betypeinit() {
	gc.Widthptr = 4
	gc.Widthint = 4
	gc.Widthreg = 4
}
// main wires the ARM ('5') back end into the portable compiler front
// end by filling in the gc.Thearch function table, then hands control
// to gc.Main.
func main() {
	gc.Thearch.Thechar = thechar
	gc.Thearch.Thestring = thestring
	gc.Thearch.Thelinkarch = thelinkarch
	gc.Thearch.Typedefs = typedefs
	gc.Thearch.REGSP = arm.REGSP
	gc.Thearch.REGCTXT = arm.REGCTXT
	gc.Thearch.MAXWIDTH = MAXWIDTH
	gc.Thearch.Anyregalloc = anyregalloc
	gc.Thearch.Betypeinit = betypeinit
	gc.Thearch.Bgen = bgen
	gc.Thearch.Cgen = cgen
	gc.Thearch.Cgen_call = cgen_call
	gc.Thearch.Cgen_callinter = cgen_callinter
	gc.Thearch.Cgen_ret = cgen_ret
	gc.Thearch.Clearfat = clearfat
	gc.Thearch.Defframe = defframe
	gc.Thearch.Excise = excise
	gc.Thearch.Expandchecks = expandchecks
	gc.Thearch.Gclean = gclean
	gc.Thearch.Ginit = ginit
	gc.Thearch.Gins = gins
	gc.Thearch.Ginscall = ginscall
	gc.Thearch.Igen = igen
	gc.Thearch.Linkarchinit = linkarchinit
	gc.Thearch.Peep = peep
	gc.Thearch.Proginfo = proginfo
	gc.Thearch.Regalloc = regalloc
	gc.Thearch.Regfree = regfree
	gc.Thearch.Regtyp = regtyp
	gc.Thearch.Sameaddr = sameaddr
	gc.Thearch.Smallindir = smallindir
	gc.Thearch.Stackaddr = stackaddr
	gc.Thearch.Excludedregs = excludedregs
	gc.Thearch.RtoB = RtoB
	// On ARM, RtoB handles both integer and floating-point register
	// numbers (see RtoB in reg.go), so it serves as FtoB too.
	gc.Thearch.FtoB = RtoB
	gc.Thearch.BtoR = BtoR
	gc.Thearch.BtoF = BtoF
	gc.Thearch.Optoas = optoas
	gc.Thearch.Doregbits = doregbits
	gc.Thearch.Regnames = regnames
	gc.Main()
}

32
src/cmd/new5g/gg.go Normal file
View file

@ -0,0 +1,32 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "cmd/internal/obj/arm"
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Register allocation ranges: general-purpose registers run from R0
// up to the external-register boundary, floating-point registers from
// F0 likewise.
const (
	REGALLOC_R0   = arm.REG_R0
	REGALLOC_RMAX = arm.REGEXT
	REGALLOC_F0   = arm.REG_F0
	REGALLOC_FMAX = arm.FREGEXT
)

// reg holds per-register allocation reference counts, indexed by
// register number up to REGALLOC_FMAX.
var reg [REGALLOC_FMAX + 1]uint8
/*
* cgen
*/
/*
* list.c
*/
/*
* reg.c
*/

822
src/cmd/new5g/ggen.go Normal file
View file

@ -0,0 +1,822 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"cmd/internal/obj"
"cmd/internal/obj/arm"
)
import "cmd/internal/gc"
// defframe fills in the final argument and frame sizes on the
// function's TEXT instruction and emits code zeroing any ambiguously
// live stack ranges, so the garbage collector only ever sees
// initialized values when scanning for pointers.
func defframe(ptxt *obj.Prog) {
	var frame uint32
	var r0 uint32
	var p *obj.Prog
	var hi int64
	var lo int64
	var l *gc.NodeList
	var n *gc.Node

	// fill in argument size, stack size
	ptxt.To.Type = obj.TYPE_TEXTSIZE

	ptxt.To.U.Argsize = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
	frame = uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
	ptxt.To.Offset = int64(frame)

	// insert code to contain ambiguously live variables
	// so that garbage collector only sees initialized values
	// when it looks for pointers.
	p = ptxt

	hi = 0
	lo = hi
	r0 = 0
	for l = gc.Curfn.Dcl; l != nil; l = l.Next {
		n = l.N
		// Was `!(n.Needzero != 0)` (c2go artifact); same test, idiomatic form.
		if n.Needzero == 0 {
			continue
		}
		if n.Class != gc.PAUTO {
			gc.Fatal("needzero class %d", n.Class)
		}
		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
			gc.Fatal("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
		}
		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthptr) {
			// merge with range we already have
			lo = gc.Rnd(n.Xoffset, int64(gc.Widthptr))

			continue
		}

		// zero old range
		p = zerorange(p, int64(frame), lo, hi, &r0)

		// set new range
		hi = n.Xoffset + n.Type.Width

		lo = n.Xoffset
	}

	// zero final range
	zerorange(p, int64(frame), lo, hi, &r0)
}
// zerorange emits code to zero the stack range [lo, hi) within the
// frame, appending instructions after p and returning the last one
// emitted.  *r0 records whether R0 already holds zero, so successive
// calls reuse the register.
func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
	var cnt int64
	var i int64
	var p1 *obj.Prog
	var f *gc.Node

	cnt = hi - lo
	if cnt == 0 {
		return p
	}
	if *r0 == 0 {
		// Load zero into R0 once; callers share it via *r0.
		p = appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
		*r0 = 1
	}

	if cnt < int64(4*gc.Widthptr) {
		// Small range: one store per word.
		for i = 0; i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, int32(4+frame+lo+i))
		}
	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
		// Medium range: jump into the duffzero routine at the
		// offset that clears exactly cnt/Widthptr words.
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f = gc.Sysfunc("duffzero")
		gc.Naddr(f, &p.To, 1)
		gc.Afunclit(&p.To, f)
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		// Large range: explicit store loop with post-increment (P bit),
		// comparing R1 against the end address in R2.
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(cnt), obj.TYPE_REG, arm.REG_R2, 0)
		p.Reg = arm.REG_R1
		p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
		p1 = p
		p.Scond |= arm.C_PBIT
		p = appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
		p.Reg = arm.REG_R2
		p = appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}

	return p
}
// appendpp allocates a fresh instruction with opcode as and the given
// from/to operands, splices it into the chain immediately after p,
// and returns it.
func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int32, ttype int, treg int, toffset int32) *obj.Prog {
	q := gc.Ctxt.NewProg()
	gc.Clearp(q)
	q.As = int16(as)
	q.Lineno = p.Lineno

	// Source operand.
	q.From.Type = int16(ftype)
	q.From.Offset = int64(foffset)
	q.From.Reg = int16(freg)

	// Destination operand.
	q.To.Type = int16(ttype)
	q.To.Offset = int64(toffset)
	q.To.Reg = int16(treg)

	// Link q in right after p.
	q.Link = p.Link
	p.Link = q
	return q
}
/*
 * generate:
 *	call f
 *	proc=-1	normal call but no return
 *	proc=0	normal call
 *	proc=1	goroutine run in new proc
 *	proc=2	defer call save away stack
 *	proc=3	normal call to C pointer (not Go func value)
 */
func ginscall(f *gc.Node, proc int) {
	var p *obj.Prog
	var r gc.Node
	var r1 gc.Node
	var con gc.Node
	var extra int32

	if f.Type != nil {
		extra = 0
		// go/defer calls pass two extra words (size and fn) on the stack.
		if proc == 1 || proc == 2 {
			extra = 2 * int32(gc.Widthptr)
		}
		gc.Setmaxarg(f.Type, extra)
	}

	switch proc {
	default:
		gc.Fatal("ginscall: bad proc %d", proc)

	case 0, // normal call
		-1: // normal call but no return
		if f.Op == gc.ONAME && f.Class == gc.PFUNC {
			if f == gc.Deferreturn {
				// Deferred calls will appear to be returning to
				// the BL deferreturn(SB) that we are about to emit.
				// However, the stack trace code will show the line
				// of the instruction before that return PC.
				// To avoid that instruction being an unrelated instruction,
				// insert a NOP so that we will have the right line number.
				// ARM NOP 0x00000000 is really AND.EQ R0, R0, R0.
				// Use the latter form because the NOP pseudo-instruction
				// would be removed by the linker.
				gc.Nodreg(&r, gc.Types[gc.TINT], arm.REG_R0)

				p = gins(arm.AAND, &r, &r)
				p.Scond = arm.C_SCOND_EQ
			}

			p = gins(arm.ABL, nil, f)
			gc.Afunclit(&p.To, f)
			if proc == -1 || gc.Noreturn(p) != 0 {
				gins(obj.AUNDEF, nil, nil)
			}
			break
		}

		// Indirect call through a func value: load the closure
		// pointer into R7 and the code pointer from it, then call.
		gc.Nodreg(&r, gc.Types[gc.Tptr], arm.REG_R7)
		gc.Nodreg(&r1, gc.Types[gc.Tptr], arm.REG_R1)
		gmove(f, &r)
		r.Op = gc.OINDREG
		gmove(&r, &r1)
		r.Op = gc.OREGISTER
		r1.Op = gc.OINDREG
		gins(arm.ABL, &r, &r1)

	case 3: // normal call of c function pointer
		gins(arm.ABL, nil, f)

	case 1, // call in new proc (go)
		2: // deferred call (defer)
		// Store the argument size at 4(SP) and the function at 8(SP),
		// then call newproc/deferproc.
		regalloc(&r, gc.Types[gc.Tptr], nil)

		gc.Nodconst(&con, gc.Types[gc.TINT32], int64(gc.Argsize(f.Type)))
		gins(arm.AMOVW, &con, &r)
		p = gins(arm.AMOVW, &r, nil)
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = arm.REGSP
		p.To.Offset = 4

		gins(arm.AMOVW, f, &r)
		p = gins(arm.AMOVW, &r, nil)
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = arm.REGSP
		p.To.Offset = 8

		regfree(&r)

		if proc == 1 {
			ginscall(gc.Newproc, 0)
		} else {
			ginscall(gc.Deferproc, 0)
		}

		if proc == 2 {
			// If deferproc returned non-zero (in R0), return
			// from the function immediately.
			gc.Nodconst(&con, gc.Types[gc.TINT32], 0)

			p = gins(arm.ACMP, &con, nil)
			p.Reg = arm.REG_R0
			p = gc.Gbranch(arm.ABEQ, nil, +1)
			cgen_ret(nil)
			gc.Patch(p, gc.Pc)
		}
	}
}
/*
 * n is call to interface method.
 * generate res = n.
 * proc selects the call kind as in ginscall (0 plain, 1 go, 2 defer).
 */
func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
	var r int
	var i *gc.Node
	var f *gc.Node
	var tmpi gc.Node
	var nodo gc.Node
	var nodr gc.Node
	var nodsp gc.Node
	var p *obj.Prog

	i = n.Left
	if i.Op != gc.ODOTINTER {
		gc.Fatal("cgen_callinter: not ODOTINTER %v", gc.Oconv(int(i.Op), 0))
	}

	f = i.Right // field
	if f.Op != gc.ONAME {
		gc.Fatal("cgen_callinter: not ONAME %v", gc.Oconv(int(f.Op), 0))
	}

	i = i.Left // interface

	// Release res register during genlist and cgen,
	// which might have their own function calls.
	r = -1

	if res != nil && (res.Op == gc.OREGISTER || res.Op == gc.OINDREG) {
		r = int(res.Val.U.Reg)
		reg[r]--
	}

	// Was `!(i.Addable != 0)` (c2go artifact); same test, idiomatic form.
	if i.Addable == 0 {
		gc.Tempname(&tmpi, i.Type)
		cgen(i, &tmpi)
		i = &tmpi
	}

	gc.Genlist(n.List) // args

	if r >= 0 {
		reg[r]++
	}

	regalloc(&nodr, gc.Types[gc.Tptr], res)
	regalloc(&nodo, gc.Types[gc.Tptr], &nodr)
	nodo.Op = gc.OINDREG

	agen(i, &nodr) // REG = &inter

	gc.Nodindreg(&nodsp, gc.Types[gc.Tptr], arm.REGSP)

	nodsp.Xoffset = int64(gc.Widthptr)
	if proc != 0 {
		nodsp.Xoffset += 2 * int64(gc.Widthptr) // leave room for size & fn
	}
	nodo.Xoffset += int64(gc.Widthptr)
	cgen(&nodo, &nodsp) // {4 or 12}(SP) = 4(REG) -- i.data

	nodo.Xoffset -= int64(gc.Widthptr)

	cgen(&nodo, &nodr)      // REG = 0(REG) -- i.tab
	gc.Cgen_checknil(&nodr) // in case offset is huge

	nodo.Xoffset = n.Left.Xoffset + 3*int64(gc.Widthptr) + 8

	if proc == 0 {
		// plain call: use direct c function pointer - more efficient
		cgen(&nodo, &nodr) // REG = 20+offset(REG) -- i.tab->fun[f]
		nodr.Op = gc.OINDREG
		proc = 3
	} else {
		// go/defer. generate go func value.
		p = gins(arm.AMOVW, &nodo, &nodr)

		p.From.Type = obj.TYPE_ADDR // REG = &(20+offset(REG)) -- i.tab->fun[f]
	}

	nodr.Type = n.Left.Type
	ginscall(&nodr, proc)

	regfree(&nodr)
	regfree(&nodo)
}
/*
 * generate function call;
 *	proc=0	normal call
 *	proc=1	goroutine run in new proc
 *	proc=2	defer call save away stack
 *
 * The c2go `goto ret` / trailing `ret:` label has been replaced with
 * plain returns; behavior is unchanged.
 */
func cgen_call(n *gc.Node, proc int) {
	var t *gc.Type
	var nod gc.Node
	var afun gc.Node

	if n == nil {
		return
	}

	if n.Left.Ullman >= gc.UINF {
		// if name involves a fn call
		// precompute the address of the fn
		gc.Tempname(&afun, gc.Types[gc.Tptr])

		cgen(n.Left, &afun)
	}

	gc.Genlist(n.List) // assign the args
	t = n.Left.Type

	// call tempname pointer
	if n.Left.Ullman >= gc.UINF {
		regalloc(&nod, gc.Types[gc.Tptr], nil)
		gc.Cgen_as(&nod, &afun)
		nod.Type = t
		ginscall(&nod, proc)
		regfree(&nod)
		return
	}

	// call pointer
	if n.Left.Op != gc.ONAME || n.Left.Class != gc.PFUNC {
		regalloc(&nod, gc.Types[gc.Tptr], nil)
		gc.Cgen_as(&nod, n.Left)
		nod.Type = t
		ginscall(&nod, proc)
		regfree(&nod)
		return
	}

	// call direct
	n.Left.Method = 1

	ginscall(n.Left, proc)
}
/*
 * call to n has already been generated.
 * generate:
 *	res = return value from call.
 */
func cgen_callret(n *gc.Node, res *gc.Node) {
	t := n.Left.Type
	if t.Etype == gc.TPTR32 || t.Etype == gc.TPTR64 {
		t = t.Type
	}

	var flist gc.Iter
	fp := gc.Structfirst(&flist, gc.Getoutarg(t))
	if fp == nil {
		gc.Fatal("cgen_callret: nil")
	}

	// Address the first result slot directly off SP and copy it out.
	var nod gc.Node
	nod.Op = gc.OINDREG
	nod.Val.U.Reg = arm.REGSP
	nod.Addable = 1

	nod.Xoffset = fp.Width + 4 // +4: saved lr at 0(SP)
	nod.Type = fp.Type
	gc.Cgen_as(res, &nod)
}
/*
 * call to n has already been generated.
 * generate:
 *	res = &return value from call.
 */
func cgen_aret(n *gc.Node, res *gc.Node) {
	t := n.Left.Type
	if gc.Isptr[t.Etype] != 0 {
		t = t.Type
	}

	var flist gc.Iter
	fp := gc.Structfirst(&flist, gc.Getoutarg(t))
	if fp == nil {
		gc.Fatal("cgen_aret: nil")
	}

	// Build an addressable node for the result slot on the stack.
	var nod1 gc.Node
	nod1.Op = gc.OINDREG
	nod1.Val.U.Reg = arm.REGSP
	nod1.Addable = 1

	nod1.Xoffset = fp.Width + 4 // +4: saved lr at 0(SP)
	nod1.Type = fp.Type

	if res.Op == gc.OREGISTER {
		agen(&nod1, res)
		return
	}

	// res is not a register: compute the address into a scratch
	// register first, then move it into res.
	var nod2 gc.Node
	regalloc(&nod2, gc.Types[gc.Tptr], res)
	agen(&nod1, &nod2)
	gins(arm.AMOVW, &nod2, res)
	regfree(&nod2)
}
/*
 * generate return.
 * n->left is assignments to return values.
 */
func cgen_ret(n *gc.Node) {
	if n != nil {
		gc.Genlist(n.List) // copy out args
	}
	if gc.Hasdefer != 0 {
		ginscall(gc.Deferreturn, 0)
	}
	gc.Genlist(gc.Curfn.Exit)

	p := gins(obj.ARET, nil, nil)
	if n != nil && n.Op == gc.ORETJMP {
		// ORETJMP: turn the RET into a jump to the named function.
		p.To.Name = obj.NAME_EXTERN
		p.To.Type = obj.TYPE_ADDR
		p.To.Sym = gc.Linksym(n.Left.Sym)
	}
}
/*
 * generate high multiply:
 *	res = (nl * nr) >> wordsize
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	var w int
	var n1 gc.Node
	var n2 gc.Node
	var t *gc.Type
	var p *obj.Prog

	// Evaluate the more complex operand first.
	// (Was a three-line swap through a tmp variable — c2go artifact.)
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}

	t = nl.Type
	w = int(t.Width * 8)
	regalloc(&n1, t, res)
	cgen(nl, &n1)
	regalloc(&n2, t, nil)
	cgen(nr, &n2)
	switch gc.Simtype[t.Etype] {
	// 8/16-bit operands fit in one 32-bit multiply; the high part is
	// the product shifted down by the operand width (arithmetic for
	// signed, logical for unsigned).
	case gc.TINT8,
		gc.TINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)

	case gc.TUINT8,
		gc.TUINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)

	// perform a long multiplication.
	case gc.TINT32,
		gc.TUINT32:
		if gc.Issigned[t.Etype] != 0 {
			p = gins(arm.AMULL, &n2, nil)
		} else {
			p = gins(arm.AMULLU, &n2, nil)
		}

		// n2 * n1 -> (n1 n2)
		p.Reg = n1.Val.U.Reg

		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = n1.Val.U.Reg
		p.To.Offset = int64(n2.Val.U.Reg)

	default:
		gc.Fatal("cgen_hmul %v", gc.Tconv(t, 0))
	}

	cgen(&n1, res)
	regfree(&n1)
	regfree(&n2)
}
/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 * bounded is non-zero when the front end proved nr < width.
 */
func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	var n1 gc.Node
	var n2 gc.Node
	var n3 gc.Node
	var nt gc.Node
	var t gc.Node
	var lo gc.Node
	var hi gc.Node
	var w int
	var v int
	var p1 *obj.Prog
	var p2 *obj.Prog
	var p3 *obj.Prog
	var tr *gc.Type
	var sc uint64

	if nl.Type.Width > 4 {
		gc.Fatal("cgen_shift %v", gc.Tconv(nl.Type, 0))
	}

	w = int(nl.Type.Width * 8)

	// Rotate left by constant: one RR shift for full words, or a
	// shift/or pair for sub-word operands.
	if op == gc.OLROT {
		v = int(gc.Mpgetfix(nr.Val.U.Xval))
		regalloc(&n1, nl.Type, res)
		if w == 32 {
			cgen(nl, &n1)
			gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
		} else {
			regalloc(&n2, nl.Type, nil)
			cgen(nl, &n2)
			gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
			gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
			regfree(&n2)

			// Ensure sign/zero-extended result.
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}

		gmove(&n1, res)
		regfree(&n1)
		return
	}

	// Shift by constant.
	if nr.Op == gc.OLITERAL {
		regalloc(&n1, nl.Type, res)
		cgen(nl, &n1)
		sc = uint64(gc.Mpgetfix(nr.Val.U.Xval))
		if sc == 0 {
			// nothing to do
		} else if sc >= uint64(nl.Type.Width*8) {
			// Shift count >= width: all sign bits for signed right
			// shift, zero for everything else.
			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] != 0 {
				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
			} else {
				gins(arm.AEOR, &n1, &n1)
			}
		} else {
			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] != 0 {
				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
			} else if op == gc.ORSH {
				gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1)
			} else { // OLSH
				gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1)
			}
		}

		if w < 32 && op == gc.OLSH {
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}
		gmove(&n1, res)
		regfree(&n1)
		return
	}

	tr = nr.Type
	if tr.Width > 4 {
		// 64-bit shift count: use the low word as the count, but
		// force it to w when the high word is non-zero.
		gc.Tempname(&nt, nr.Type)
		if nl.Ullman >= nr.Ullman {
			regalloc(&n2, nl.Type, res)
			cgen(nl, &n2)
			cgen(nr, &nt)
			n1 = nt
		} else {
			cgen(nr, &nt)
			regalloc(&n2, nl.Type, res)
			cgen(nl, &n2)
		}

		split64(&nt, &lo, &hi)
		regalloc(&n1, gc.Types[gc.TUINT32], nil)
		regalloc(&n3, gc.Types[gc.TUINT32], nil)
		gmove(&lo, &n1)
		gmove(&hi, &n3)
		splitclean()
		gins(arm.ATST, &n3, nil)
		gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
		p1 = gins(arm.AMOVW, &t, &n1)
		p1.Scond = arm.C_SCOND_NE
		tr = gc.Types[gc.TUINT32]
		regfree(&n3)
	} else {
		// Evaluate the more complex side first.
		if nl.Ullman >= nr.Ullman {
			regalloc(&n2, nl.Type, res)
			cgen(nl, &n2)
			regalloc(&n1, nr.Type, nil)
			cgen(nr, &n1)
		} else {
			regalloc(&n1, nr.Type, nil)
			cgen(nr, &n1)
			regalloc(&n2, nl.Type, res)
			cgen(nl, &n2)
		}
	}

	// test for shift being 0
	gins(arm.ATST, &n1, nil)

	p3 = gc.Gbranch(arm.ABEQ, nil, -1)

	// test and fix up large shifts
	// TODO: if(!bounded), don't emit some of this.
	// Instructions below execute conditionally: HS (count >= w)
	// produces the saturated result, LO the normal register shift.
	regalloc(&n3, tr, nil)

	gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
	gmove(&t, &n3)
	gcmp(arm.ACMP, &n1, &n3)
	if op == gc.ORSH {
		if gc.Issigned[nl.Type.Etype] != 0 {
			// Signed: saturate by replicating the sign bit.
			p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2)
		} else {
			// Unsigned: saturate to zero.
			p1 = gins(arm.AEOR, &n2, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LR, &n1, &n2)
		}

		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	} else {
		p1 = gins(arm.AEOR, &n2, &n2)
		p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	}

	regfree(&n3)

	gc.Patch(p3, gc.Pc)

	// Left-shift of smaller word must be sign/zero-extended.
	if w < 32 && op == gc.OLSH {
		gins(optoas(gc.OAS, nl.Type), &n2, &n2)
	}
	gmove(&n2, res)

	regfree(&n1)
	regfree(&n2)
}
// clearfat zeroes the "fat" (multiword) object nl in place, choosing
// between componentwise stores, an unrolled loop, duffzero, or an
// explicit loop depending on the object's size.
func clearfat(nl *gc.Node) {
	var w uint32
	var c uint32
	var q uint32
	var dst gc.Node
	var nc gc.Node
	var nz gc.Node
	var end gc.Node
	var r0 gc.Node
	var r1 gc.Node
	var f *gc.Node
	var p *obj.Prog
	var pl *obj.Prog

	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w = uint32(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if componentgen(nil, nl) != 0 {
		return
	}

	c = w % 4 // bytes
	q = w / 4 // quads

	// R0 holds the zero value, R1 the destination pointer.
	r0.Op = gc.OREGISTER

	r0.Val.U.Reg = REGALLOC_R0
	r1.Op = gc.OREGISTER
	r1.Val.U.Reg = REGALLOC_R0 + 1
	regalloc(&dst, gc.Types[gc.Tptr], &r1)
	agen(nl, &dst)
	gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
	regalloc(&nz, gc.Types[gc.TUINT32], &r0)
	cgen(&nc, &nz)

	if q > 128 {
		// Large object: post-increment store loop bounded by an
		// end pointer.
		regalloc(&end, gc.Types[gc.Tptr], nil)
		p = gins(arm.AMOVW, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q) * 4

		p = gins(arm.AMOVW, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 4
		p.Scond |= arm.C_PBIT
		pl = p

		p = gins(arm.ACMP, &dst, nil)
		raddr(&end, p)
		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)

		regfree(&end)
	} else if q >= 4 && !gc.Nacl {
		// Medium object: jump into duffzero.
		f = gc.Sysfunc("duffzero")

		p = gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 4 * (128 - int64(q))
	} else {
		// Small object: unrolled word stores with post-increment.
		for q > 0 {
			p = gins(arm.AMOVW, &nz, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = 4
			p.Scond |= arm.C_PBIT

			//print("1. %P\n", p);
			q--
		}
	}

	// Trailing bytes, one at a time.
	for c > 0 {
		p = gins(arm.AMOVB, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 1
		p.Scond |= arm.C_PBIT

		//print("2. %P\n", p);
		c--
	}

	regfree(&dst)
	regfree(&nz)
}
// expandchecks rewrites each CHECKNIL pseudo-op in the instruction
// list starting at firstp into an explicit nil check:
//	CMP arg, $0
//	MOV.EQ arg, 0(arg)
// The conditional store faults only when the register is nil.
// Called after regopt and peep have run.
func expandchecks(firstp *obj.Prog) {
	var reg int
	var p *obj.Prog
	var p1 *obj.Prog

	for p = firstp; p != nil; p = p.Link {
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatal("invalid nil check %v", p)
		}
		reg = int(p.From.Reg)

		// check is
		//	CMP arg, $0
		//	MOV.EQ arg, 0(arg)
		// p1 is the new MOV.EQ, spliced in right after p.
		p1 = gc.Ctxt.NewProg()

		gc.Clearp(p1)
		p1.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p1.Pc = 9999
		p1.As = arm.AMOVW
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = int16(reg)
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = int16(reg)
		p1.To.Offset = 0
		p1.Scond = arm.C_SCOND_EQ
		// Reuse p itself as the CMP.
		p.As = arm.ACMP
		p.From.Type = obj.TYPE_CONST
		p.From.Reg = 0
		p.From.Offset = 0
		p.Reg = int16(reg)
	}
}

1599
src/cmd/new5g/gsubr.go Normal file

File diff suppressed because it is too large Load diff

1868
src/cmd/new5g/peep.go Normal file

File diff suppressed because it is too large Load diff

163
src/cmd/new5g/prog.go Normal file
View file

@ -0,0 +1,163 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"cmd/internal/obj"
"cmd/internal/obj/arm"
)
import "cmd/internal/gc"
// RightRdwr marks instructions that both read and write their
// right (destination) operand.
const (
	RightRdwr = gc.RightRead | gc.RightWrite
)
// This table gives the basic information about instruction
// generated by the compiler and processed in the optimizer.
// See opt.h for bit definitions.
//
// Instructions not generated need not be listed.
// As an exception to that rule, we typically write down all the
// size variants of an operation even if we just use a subset.
//
// Entries name only the Flags field; the remaining register fields
// default to zero.
var progtable = [arm.ALAST]gc.ProgInfo{
	obj.ATYPE:     {Flags: gc.Pseudo | gc.Skip},
	obj.ATEXT:     {Flags: gc.Pseudo},
	obj.AFUNCDATA: {Flags: gc.Pseudo},
	obj.APCDATA:   {Flags: gc.Pseudo},
	obj.AUNDEF:    {Flags: gc.Break},
	obj.AUSEFIELD: {Flags: gc.OK},
	obj.ACHECKNIL: {Flags: gc.LeftRead},
	obj.AVARDEF:   {Flags: gc.Pseudo | gc.RightWrite},
	obj.AVARKILL:  {Flags: gc.Pseudo | gc.RightWrite},

	// NOP is an internal no-op that also stands
	// for USED and SET annotations, not the Intel opcode.
	obj.ANOP: {Flags: gc.LeftRead | gc.RightWrite},

	// Integer.
	arm.AADC:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.AADD:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.AAND:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.ABIC:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.ACMN:    {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
	arm.ACMP:    {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
	arm.ADIVU:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.ADIV:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.AEOR:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.AMODU:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.AMOD:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.AMULALU: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | RightRdwr},
	arm.AMULAL:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | RightRdwr},
	arm.AMULA:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | RightRdwr},
	arm.AMULU:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.AMUL:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.AMULL:   {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.AMULLU:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.AMVN:    {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite},
	arm.AORR:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.ARSB:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.ARSC:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.ASBC:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.ASLL:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.ASRA:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.ASRL:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.ASUB:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
	arm.ATEQ:    {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},
	arm.ATST:    {Flags: gc.SizeL | gc.LeftRead | gc.RightRead},

	// Floating point.
	arm.AADDD: {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
	arm.AADDF: {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
	arm.ACMPD: {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
	arm.ACMPF: {Flags: gc.SizeF | gc.LeftRead | gc.RightRead},
	arm.ADIVD: {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
	arm.ADIVF: {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
	arm.AMULD: {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
	arm.AMULF: {Flags: gc.SizeF | gc.LeftRead | RightRdwr},
	arm.ASUBD: {Flags: gc.SizeD | gc.LeftRead | RightRdwr},
	arm.ASUBF: {Flags: gc.SizeF | gc.LeftRead | RightRdwr},

	// Conversions.
	arm.AMOVWD: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
	arm.AMOVWF: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
	arm.AMOVDF: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
	arm.AMOVDW: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
	arm.AMOVFD: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
	arm.AMOVFW: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},

	// Moves.
	arm.AMOVB: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move},
	arm.AMOVD: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
	arm.AMOVF: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move},
	arm.AMOVH: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move},
	arm.AMOVW: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move},

	// In addtion, duffzero reads R0,R1 and writes R1.  This fact is
	// encoded in peep.c
	obj.ADUFFZERO: {Flags: gc.Call},

	// In addtion, duffcopy reads R1,R2 and writes R0,R1,R2.  This fact is
	// encoded in peep.c
	obj.ADUFFCOPY: {Flags: gc.Call},

	// These should be split into the two different conversions instead
	// of overloading the one.
	arm.AMOVBS: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Conv},
	arm.AMOVBU: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Conv},
	arm.AMOVHS: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv},
	arm.AMOVHU: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv},

	// Jumps.
	arm.AB:   {Flags: gc.Jump | gc.Break},
	arm.ABL:  {Flags: gc.Call},
	arm.ABEQ: {Flags: gc.Cjmp},
	arm.ABNE: {Flags: gc.Cjmp},
	arm.ABCS: {Flags: gc.Cjmp},
	arm.ABHS: {Flags: gc.Cjmp},
	arm.ABCC: {Flags: gc.Cjmp},
	arm.ABLO: {Flags: gc.Cjmp},
	arm.ABMI: {Flags: gc.Cjmp},
	arm.ABPL: {Flags: gc.Cjmp},
	arm.ABVS: {Flags: gc.Cjmp},
	arm.ABVC: {Flags: gc.Cjmp},
	arm.ABHI: {Flags: gc.Cjmp},
	arm.ABLS: {Flags: gc.Cjmp},
	arm.ABGE: {Flags: gc.Cjmp},
	arm.ABLT: {Flags: gc.Cjmp},
	arm.ABGT: {Flags: gc.Cjmp},
	arm.ABLE: {Flags: gc.Cjmp},

	obj.ARET: {Flags: gc.Break},
}
// proginfo fills *info with the optimizer's view of instruction p,
// starting from the progtable entry and adjusting the flags for
// addressing modes, implicit register operands, and conditional
// execution.
func proginfo(info *gc.ProgInfo, p *obj.Prog) {
	*info = progtable[p.As]
	if info.Flags == 0 {
		gc.Fatal("unknown instruction %v", p)
	}

	// Taking a symbol's address reads the address, not the value.
	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
		info.Flags &^= gc.LeftRead
		info.Flags |= gc.LeftAddr
	}

	// With no middle register operand, the destination doubles as a
	// source.
	if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
		info.Flags &^= gc.RegRead
		info.Flags |= gc.CanRegRead | gc.RightRead
	}

	// A conditionally executed write may leave the destination
	// unchanged, so its old value stays live (counts as a read).
	if (p.Scond&arm.C_SCOND != arm.C_SCOND_NONE) && (info.Flags&gc.RightWrite != 0) {
		info.Flags |= gc.RightRead
	}

	switch p.As {
	// Divide and modulus additionally set R12.
	case arm.ADIV,
		arm.ADIVU,
		arm.AMOD,
		arm.AMODU:
		info.Regset |= RtoB(arm.REG_R12)
	}
}

136
src/cmd/new5g/reg.go Normal file
View file

@ -0,0 +1,136 @@
// Inferno utils/5c/reg.c
// http://code.google.com/p/inferno-os/source/browse/utils/5c/reg.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package main
import "cmd/internal/obj/arm"
import "cmd/internal/gc"
// NREGVAR is the number of register variables the optimizer tracks:
// 16 integer plus 16 floating-point registers (see regname).
const (
	NREGVAR = 32
)
// regname lists the pseudo-variable names for the NREGVAR register
// variables: R0-R15 followed by F0-F15.
var regname = []string{
	".R0",
	".R1",
	".R2",
	".R3",
	".R4",
	".R5",
	".R6",
	".R7",
	".R8",
	".R9",
	".R10",
	".R11",
	".R12",
	".R13",
	".R14",
	".R15",
	".F0",
	".F1",
	".F2",
	".F3",
	".F4",
	".F5",
	".F6",
	".F7",
	".F8",
	".F9",
	".F10",
	".F11",
	".F12",
	".F13",
	".F14",
	".F15",
}
// regnames returns the register variable names and stores their
// count in *n.
func regnames(n *int) []string {
	*n = NREGVAR
	return regname
}
// excludedregs returns the register-bit mask for SP, LR, and PC,
// which are excluded from register allocation.
func excludedregs() uint64 {
	return RtoB(arm.REGSP) | RtoB(arm.REGLINK) | RtoB(arm.REGPC)
}
// doregbits returns the implicit register bits for r;
// there are none on ARM, so it is always zero.
func doregbits(r int) uint64 {
	return 0
}
/*
 * bit	reg
 * 0	R0
 * 1	R1
 * ...	...
 * 10	R10
 * 12	R12
 *
 * bit	reg
 * 18	F2
 * 19	F3
 * ...	...
 * 31	F15
 */
func RtoB(r int) uint64 {
	switch {
	case arm.REG_R0 <= r && r <= arm.REG_R15:
		// excluded R9 and R10 for m and g, but not R12
		if r >= arm.REGTMP-2 && r != arm.REG_R12 {
			return 0
		}
		return 1 << uint(r-arm.REG_R0)

	case arm.REG_F0 <= r && r <= arm.REG_F15:
		// Only F2..F(NFREG-1) participate in allocation.
		if r < arm.REG_F2 || r > arm.REG_F0+arm.NFREG-1 {
			return 0
		}
		return 1 << uint((r-arm.REG_F0)+16)
	}
	return 0
}
// BtoR maps the lowest allocatable general-register bit in b back to
// its hardware register number, or 0 when no such bit is set.
func BtoR(b uint64) int {
	// TODO Allow R0 and R1, but be careful with a 0 return
	// TODO Allow R9. Only R10 is reserved now (just g, not m).
	masked := b & 0x11fc // excluded R9 and R10 for m and g, but not R12
	if masked != 0 {
		return gc.Bitno(masked) + arm.REG_R0
	}
	return 0
}
// BtoF maps the lowest floating-point register bit in b back to its
// hardware register number, or 0 when no float bit is set.
func BtoF(b uint64) int {
	fbits := b & 0xfffc0000
	if fbits != 0 {
		return gc.Bitno(fbits) - 16 + arm.REG_F0
	}
	return 0
}

12
src/cmd/new5g/util.go Normal file
View file

@ -0,0 +1,12 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
// bool2int returns 1 when b is true and 0 when b is false.
func bool2int(b bool) int {
	var n int
	if b {
		n = 1
	}
	return n
}

1889
src/cmd/new6g/cgen.go Normal file

File diff suppressed because it is too large Load diff

109
src/cmd/new6g/galign.go Normal file
View file

@ -0,0 +1,109 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"cmd/internal/obj"
"cmd/internal/obj/x86"
)
import "cmd/internal/gc"
// thechar, thestring, and thelinkarch identify the amd64 ('6')
// architecture; linkarchinit may rewrite them for the amd64p32 variant.
var thechar int = '6'

var thestring string = "amd64"

var thelinkarch *obj.LinkArch = &x86.Linkamd64
// linkarchinit adjusts the architecture description when building for
// GOARCH=amd64p32 (same instruction set as amd64, 32-bit pointers),
// updating both this package's globals and the already-registered
// gc.Thearch fields.
func linkarchinit() {
	if obj.Getgoarch() == "amd64p32" {
		thelinkarch = &x86.Linkamd64p32
		gc.Thearch.Thelinkarch = thelinkarch
		thestring = "amd64p32"
		gc.Thearch.Thestring = "amd64p32"
	}
}
// MAXWIDTH is the maximum width of any type on amd64.
var MAXWIDTH int64 = 1 << 50

// Pointer-sized opcodes; betypeinit narrows these to the 32-bit
// forms when compiling for amd64p32.
var addptr int = x86.AADDQ

var movptr int = x86.AMOVQ

var leaptr int = x86.ALEAQ

var cmpptr int = x86.ACMPQ
/*
 * go declares several platform-specific type aliases:
 * int, uint, float, and uintptr
 */

// typedefs maps the platform-dependent Go types to their fixed-size
// equivalents on amd64; betypeinit rewrites the Sameas fields for
// amd64p32.
var typedefs = []gc.Typedef{
	gc.Typedef{"int", gc.TINT, gc.TINT64},
	gc.Typedef{"uint", gc.TUINT, gc.TUINT64},
	gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT64},
}
// betypeinit sets the target-specific widths (pointer, int, register).
// For amd64p32 it narrows pointers and ints to 4 bytes, switches the
// pointer-sized opcodes to their 32-bit forms, and remaps the
// platform type aliases to 32-bit types.
func betypeinit() {
	gc.Widthptr = 8
	gc.Widthint = 8
	gc.Widthreg = 8
	if obj.Getgoarch() == "amd64p32" {
		gc.Widthptr = 4
		gc.Widthint = 4
		addptr = x86.AADDL
		movptr = x86.AMOVL
		leaptr = x86.ALEAL
		cmpptr = x86.ACMPL
		typedefs[0].Sameas = gc.TINT32
		typedefs[1].Sameas = gc.TUINT32
		typedefs[2].Sameas = gc.TUINT32
	}
}
// main registers the amd64 back end's implementation — architecture
// constants, type layout hooks, code-generation and register-allocation
// functions — in gc.Thearch, then hands control to the portable
// compiler driver gc.Main.
func main() {
	gc.Thearch.Thechar = thechar
	gc.Thearch.Thestring = thestring
	gc.Thearch.Thelinkarch = thelinkarch
	gc.Thearch.Typedefs = typedefs
	gc.Thearch.REGSP = x86.REGSP
	gc.Thearch.REGCTXT = x86.REGCTXT
	gc.Thearch.MAXWIDTH = MAXWIDTH
	gc.Thearch.Anyregalloc = anyregalloc
	gc.Thearch.Betypeinit = betypeinit
	gc.Thearch.Bgen = bgen
	gc.Thearch.Cgen = cgen
	gc.Thearch.Cgen_call = cgen_call
	gc.Thearch.Cgen_callinter = cgen_callinter
	gc.Thearch.Cgen_ret = cgen_ret
	gc.Thearch.Clearfat = clearfat
	gc.Thearch.Defframe = defframe
	gc.Thearch.Excise = excise
	gc.Thearch.Expandchecks = expandchecks
	gc.Thearch.Gclean = gclean
	gc.Thearch.Ginit = ginit
	gc.Thearch.Gins = gins
	gc.Thearch.Ginscall = ginscall
	gc.Thearch.Igen = igen
	gc.Thearch.Linkarchinit = linkarchinit
	gc.Thearch.Peep = peep
	gc.Thearch.Proginfo = proginfo
	gc.Thearch.Regalloc = regalloc
	gc.Thearch.Regfree = regfree
	gc.Thearch.Regtyp = regtyp
	gc.Thearch.Sameaddr = sameaddr
	gc.Thearch.Smallindir = smallindir
	gc.Thearch.Stackaddr = stackaddr
	gc.Thearch.Excludedregs = excludedregs
	gc.Thearch.RtoB = RtoB
	gc.Thearch.FtoB = FtoB
	gc.Thearch.BtoR = BtoR
	gc.Thearch.BtoF = BtoF
	gc.Thearch.Optoas = optoas
	gc.Thearch.Doregbits = doregbits
	gc.Thearch.Regnames = regnames

	gc.Main()
}

24
src/cmd/new6g/gg.go Normal file
View file

@ -0,0 +1,24 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "cmd/internal/obj/x86"
import "cmd/internal/gc"
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// reg tracks the allocation state of each machine register.
var reg [x86.MAXREG]uint8

// panicdiv is the node for the runtime divide-panic call.
// NOTE(review): presumably initialized lazily by the code generator —
// confirm at the use sites.
var panicdiv *gc.Node

/*
 * cgen.c
 */

/*
 * list.c
 */

1169
src/cmd/new6g/ggen.go Normal file

File diff suppressed because it is too large Load diff

1755
src/cmd/new6g/gsubr.go Normal file

File diff suppressed because it is too large Load diff

1077
src/cmd/new6g/peep.go Normal file

File diff suppressed because it is too large Load diff

272
src/cmd/new6g/prog.go Normal file
View file

@ -0,0 +1,272 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"cmd/internal/obj"
"cmd/internal/obj/x86"
)
import "cmd/internal/gc"
var (
	// Register bitmasks used in the ProgInfo reguse/regset/regindex fields.
	AX = RtoB(x86.REG_AX)
	BX = RtoB(x86.REG_BX)
	CX = RtoB(x86.REG_CX)
	DX = RtoB(x86.REG_DX)
	DI = RtoB(x86.REG_DI)
	SI = RtoB(x86.REG_SI)

	// Shorthand flag combinations for operands that are both read and written.
	LeftRdwr  uint32 = gc.LeftRead | gc.LeftWrite
	RightRdwr uint32 = gc.RightRead | gc.RightWrite
)
// This table gives the basic information about the instructions
// generated by the compiler and processed in the optimizer.
// See opt.h for bit definitions.
// Each entry is {flags, reguse, regset, regindex}.
//
// Instructions not generated need not be listed.
// As an exception to that rule, we typically write down all the
// size variants of an operation even if we just use a subset.
//
// The table is formatted for 8-space tabs.
var progtable = [x86.ALAST]gc.ProgInfo{
	obj.ATYPE:     gc.ProgInfo{gc.Pseudo | gc.Skip, 0, 0, 0},
	obj.ATEXT:     gc.ProgInfo{gc.Pseudo, 0, 0, 0},
	obj.AFUNCDATA: gc.ProgInfo{gc.Pseudo, 0, 0, 0},
	obj.APCDATA:   gc.ProgInfo{gc.Pseudo, 0, 0, 0},
	obj.AUNDEF:    gc.ProgInfo{gc.Break, 0, 0, 0},
	obj.AUSEFIELD: gc.ProgInfo{gc.OK, 0, 0, 0},
	obj.ACHECKNIL: gc.ProgInfo{gc.LeftRead, 0, 0, 0},
	obj.AVARDEF:   gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},
	obj.AVARKILL:  gc.ProgInfo{gc.Pseudo | gc.RightWrite, 0, 0, 0},

	// NOP is an internal no-op that also stands
	// for USED and SET annotations, not the Intel opcode.
	obj.ANOP:      gc.ProgInfo{gc.LeftRead | gc.RightWrite, 0, 0, 0},
	x86.AADCL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
	x86.AADCQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
	x86.AADCW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
	x86.AADDB:     gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.AADDL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.AADDW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.AADDQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.AADDSD:    gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
	x86.AADDSS:    gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
	x86.AANDB:     gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.AANDL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.AANDQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.AANDW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	obj.ACALL:     gc.ProgInfo{gc.RightAddr | gc.Call | gc.KillCarry, 0, 0, 0},
	x86.ACDQ:      gc.ProgInfo{gc.OK, AX, AX | DX, 0},
	x86.ACQO:      gc.ProgInfo{gc.OK, AX, AX | DX, 0},
	x86.ACWD:      gc.ProgInfo{gc.OK, AX, AX | DX, 0},
	x86.ACLD:      gc.ProgInfo{gc.OK, 0, 0, 0},
	x86.ASTD:      gc.ProgInfo{gc.OK, 0, 0, 0},
	x86.ACMPB:     gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
	x86.ACMPL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
	x86.ACMPQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
	x86.ACMPW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
	x86.ACOMISD:   gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
	x86.ACOMISS:   gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
	x86.ACVTSD2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.ACVTSD2SQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.ACVTSD2SS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.ACVTSL2SD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.ACVTSL2SS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.ACVTSQ2SD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.ACVTSQ2SS: gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.ACVTSS2SD: gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.ACVTSS2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.ACVTSS2SQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.ACVTTSD2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.ACVTTSD2SQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.ACVTTSS2SL: gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.ACVTTSS2SQ: gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.ADECB:     gc.ProgInfo{gc.SizeB | RightRdwr, 0, 0, 0},
	x86.ADECL:     gc.ProgInfo{gc.SizeL | RightRdwr, 0, 0, 0},
	x86.ADECQ:     gc.ProgInfo{gc.SizeQ | RightRdwr, 0, 0, 0},
	x86.ADECW:     gc.ProgInfo{gc.SizeW | RightRdwr, 0, 0, 0},
	x86.ADIVB:     gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
	x86.ADIVL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
	x86.ADIVQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
	x86.ADIVW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
	x86.ADIVSD:    gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
	x86.ADIVSS:    gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
	x86.AIDIVB:    gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
	x86.AIDIVL:    gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
	x86.AIDIVQ:    gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
	x86.AIDIVW:    gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.SetCarry, AX | DX, AX | DX, 0},
	x86.AIMULB:    gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
	x86.AIMULL:    gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
	x86.AIMULQ:    gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
	x86.AIMULW:    gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.ImulAXDX | gc.SetCarry, 0, 0, 0},
	x86.AINCB:     gc.ProgInfo{gc.SizeB | RightRdwr, 0, 0, 0},
	x86.AINCL:     gc.ProgInfo{gc.SizeL | RightRdwr, 0, 0, 0},
	x86.AINCQ:     gc.ProgInfo{gc.SizeQ | RightRdwr, 0, 0, 0},
	x86.AINCW:     gc.ProgInfo{gc.SizeW | RightRdwr, 0, 0, 0},
	x86.AJCC:      gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
	x86.AJCS:      gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
	x86.AJEQ:      gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
	x86.AJGE:      gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
	x86.AJGT:      gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
	x86.AJHI:      gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
	x86.AJLE:      gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
	x86.AJLS:      gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
	x86.AJLT:      gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
	x86.AJMI:      gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
	x86.AJNE:      gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
	x86.AJOC:      gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
	x86.AJOS:      gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
	x86.AJPC:      gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
	x86.AJPL:      gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
	x86.AJPS:      gc.ProgInfo{gc.Cjmp | gc.UseCarry, 0, 0, 0},
	obj.AJMP:      gc.ProgInfo{gc.Jump | gc.Break | gc.KillCarry, 0, 0, 0},
	x86.ALEAL:     gc.ProgInfo{gc.LeftAddr | gc.RightWrite, 0, 0, 0},
	x86.ALEAQ:     gc.ProgInfo{gc.LeftAddr | gc.RightWrite, 0, 0, 0},
	x86.AMOVBLSX:  gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.AMOVBLZX:  gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.AMOVBQSX:  gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.AMOVBQZX:  gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.AMOVBWSX:  gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.AMOVBWZX:  gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.AMOVLQSX:  gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.AMOVLQZX:  gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.AMOVWLSX:  gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.AMOVWLZX:  gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.AMOVWQSX:  gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.AMOVWQZX:  gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.AMOVQL:    gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv, 0, 0, 0},
	x86.AMOVB:     gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
	x86.AMOVL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
	x86.AMOVQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
	x86.AMOVW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
	x86.AMOVSB:    gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
	x86.AMOVSL:    gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
	x86.AMOVSQ:    gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
	x86.AMOVSW:    gc.ProgInfo{gc.OK, DI | SI, DI | SI, 0},
	obj.ADUFFCOPY: gc.ProgInfo{gc.OK, DI | SI, DI | SI | CX, 0},
	x86.AMOVSD:    gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
	x86.AMOVSS:    gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},

	// We use MOVAPD as a faster synonym for MOVSD.
	x86.AMOVAPD:   gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move, 0, 0, 0},
	x86.AMULB:     gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.SetCarry, AX, AX, 0},
	x86.AMULL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
	x86.AMULQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
	x86.AMULW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.SetCarry, AX, AX | DX, 0},
	x86.AMULSD:    gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
	x86.AMULSS:    gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
	x86.ANEGB:     gc.ProgInfo{gc.SizeB | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.ANEGL:     gc.ProgInfo{gc.SizeL | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.ANEGQ:     gc.ProgInfo{gc.SizeQ | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.ANEGW:     gc.ProgInfo{gc.SizeW | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.ANOTB:     gc.ProgInfo{gc.SizeB | RightRdwr, 0, 0, 0},
	x86.ANOTL:     gc.ProgInfo{gc.SizeL | RightRdwr, 0, 0, 0},
	x86.ANOTQ:     gc.ProgInfo{gc.SizeQ | RightRdwr, 0, 0, 0},
	x86.ANOTW:     gc.ProgInfo{gc.SizeW | RightRdwr, 0, 0, 0},
	x86.AORB:      gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.AORL:      gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.AORQ:      gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.AORW:      gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.APOPQ:     gc.ProgInfo{gc.SizeQ | gc.RightWrite, 0, 0, 0},
	x86.APUSHQ:    gc.ProgInfo{gc.SizeQ | gc.LeftRead, 0, 0, 0},
	x86.ARCLB:     gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
	x86.ARCLL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
	x86.ARCLQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
	x86.ARCLW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
	x86.ARCRB:     gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
	x86.ARCRL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
	x86.ARCRQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
	x86.ARCRW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry | gc.UseCarry, 0, 0, 0},
	x86.AREP:      gc.ProgInfo{gc.OK, CX, CX, 0},
	x86.AREPN:     gc.ProgInfo{gc.OK, CX, CX, 0},
	obj.ARET:      gc.ProgInfo{gc.Break | gc.KillCarry, 0, 0, 0},
	x86.AROLB:     gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.AROLL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.AROLQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.AROLW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ARORB:     gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ARORL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ARORQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ARORW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ASALB:     gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ASALL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ASALQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ASALW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ASARB:     gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ASARL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ASARQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ASARW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ASBBB:     gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
	x86.ASBBL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
	x86.ASBBQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
	x86.ASBBW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry | gc.UseCarry, 0, 0, 0},
	x86.ASHLB:     gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ASHLL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ASHLQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ASHLW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ASHRB:     gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ASHRL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ASHRQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ASHRW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.ShiftCX | gc.SetCarry, 0, 0, 0},
	x86.ASTOSB:    gc.ProgInfo{gc.OK, AX | DI, DI, 0},
	x86.ASTOSL:    gc.ProgInfo{gc.OK, AX | DI, DI, 0},
	x86.ASTOSQ:    gc.ProgInfo{gc.OK, AX | DI, DI, 0},
	x86.ASTOSW:    gc.ProgInfo{gc.OK, AX | DI, DI, 0},
	obj.ADUFFZERO: gc.ProgInfo{gc.OK, AX | DI, DI, 0},
	x86.ASUBB:     gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.ASUBL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.ASUBQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.ASUBW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.ASUBSD:    gc.ProgInfo{gc.SizeD | gc.LeftRead | RightRdwr, 0, 0, 0},
	x86.ASUBSS:    gc.ProgInfo{gc.SizeF | gc.LeftRead | RightRdwr, 0, 0, 0},
	x86.ATESTB:    gc.ProgInfo{gc.SizeB | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
	x86.ATESTL:    gc.ProgInfo{gc.SizeL | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
	x86.ATESTQ:    gc.ProgInfo{gc.SizeQ | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
	x86.ATESTW:    gc.ProgInfo{gc.SizeW | gc.LeftRead | gc.RightRead | gc.SetCarry, 0, 0, 0},
	x86.AUCOMISD:  gc.ProgInfo{gc.SizeD | gc.LeftRead | gc.RightRead, 0, 0, 0},
	x86.AUCOMISS:  gc.ProgInfo{gc.SizeF | gc.LeftRead | gc.RightRead, 0, 0, 0},
	x86.AXCHGB:    gc.ProgInfo{gc.SizeB | LeftRdwr | RightRdwr, 0, 0, 0},
	x86.AXCHGL:    gc.ProgInfo{gc.SizeL | LeftRdwr | RightRdwr, 0, 0, 0},
	x86.AXCHGQ:    gc.ProgInfo{gc.SizeQ | LeftRdwr | RightRdwr, 0, 0, 0},
	x86.AXCHGW:    gc.ProgInfo{gc.SizeW | LeftRdwr | RightRdwr, 0, 0, 0},
	x86.AXORB:     gc.ProgInfo{gc.SizeB | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.AXORL:     gc.ProgInfo{gc.SizeL | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.AXORQ:     gc.ProgInfo{gc.SizeQ | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
	x86.AXORW:     gc.ProgInfo{gc.SizeW | gc.LeftRead | RightRdwr | gc.SetCarry, 0, 0, 0},
}
// proginfo fills in *info with the optimizer's description of p,
// starting from the static progtable entry for p.As and then
// refining the register-use information based on p's operands.
// It aborts the compilation for instructions missing from progtable.
func proginfo(info *gc.ProgInfo, p *obj.Prog) {
	*info = progtable[p.As]
	if info.Flags == 0 {
		gc.Fatal("unknown instruction %v", p)
	}

	// A shift by a non-constant count implicitly uses CX.
	if (info.Flags&gc.ShiftCX != 0) && p.From.Type != obj.TYPE_CONST {
		info.Reguse |= CX
	}

	// One-operand IMUL implicitly reads AX and writes AX,DX;
	// the two-operand form reads and writes its right operand instead.
	if info.Flags&gc.ImulAXDX != 0 {
		if p.To.Type == obj.TYPE_NONE {
			info.Reguse |= AX
			info.Regset |= AX | DX
		} else {
			info.Flags |= RightRdwr
		}
	}

	// Addressing makes some registers used.
	if p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_NONE {
		info.Regindex |= RtoB(int(p.From.Reg))
	}
	if p.From.Index != x86.REG_NONE {
		info.Regindex |= RtoB(int(p.From.Index))
	}
	if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE {
		info.Regindex |= RtoB(int(p.To.Reg))
	}
	if p.To.Index != x86.REG_NONE {
		info.Regindex |= RtoB(int(p.To.Index))
	}
}

144
src/cmd/new6g/reg.go Normal file
View file

@ -0,0 +1,144 @@
// Derived from Inferno utils/6c/reg.c
// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package main
import (
"cmd/internal/obj"
"cmd/internal/obj/x86"
)
import "cmd/internal/gc"
// NREGVAR is the number of registers tracked as variables by the
// optimizer: 16 general-purpose plus 16 SSE register names in the
// regname table below.
const (
	NREGVAR = 32
)
// regname lists the register-variable names used by the optimizer:
// the 16 general registers followed by the 16 SSE registers, in the
// same order as the bits produced by RtoB (bits 0-15) and FtoB
// (bits 16-31).
var regname = []string{
	".AX",
	".CX",
	".DX",
	".BX",
	".SP",
	".BP",
	".SI",
	".DI",
	".R8",
	".R9",
	".R10",
	".R11",
	".R12",
	".R13",
	".R14",
	".R15",
	".X0",
	".X1",
	".X2",
	".X3",
	".X4",
	".X5",
	".X6",
	".X7",
	".X8",
	".X9",
	".X10",
	".X11",
	".X12",
	".X13",
	".X14",
	".X15",
}
func regnames(n *int) []string {
*n = NREGVAR
return regname
}
// excludedregs reports the registers the allocator must never use;
// on amd64 that is only the stack pointer.
func excludedregs() uint64 {
	return RtoB(x86.REG_SP)
}
// doregbits reports the register bits touched by using register r,
// folding the 8-bit (AL..R15B, AH..BH) aliases onto their full-width
// registers and mapping X0-X15 to the float bits.
func doregbits(r int) uint64 {
	switch {
	case r >= x86.REG_AX && r <= x86.REG_R15:
		return RtoB(r)
	case r >= x86.REG_AL && r <= x86.REG_R15B:
		return RtoB(r - x86.REG_AL + x86.REG_AX)
	case r >= x86.REG_AH && r <= x86.REG_BH:
		return RtoB(r - x86.REG_AH + x86.REG_AX)
	case r >= x86.REG_X0 && r <= x86.REG_X0+15:
		return FtoB(r)
	}
	return 0
}
// RtoB maps general register r to its allocator bit (bits 0-15),
// or 0 if r is not a general register.
func RtoB(r int) uint64 {
	if x86.REG_AX <= r && r <= x86.REG_R15 {
		return 1 << uint(r-x86.REG_AX)
	}
	return 0
}
// BtoR maps the lowest set general-register bit in b back to its
// register number, after masking out registers reserved by the
// platform (BP and R15 on NaCl; BP when the frame pointer is in use).
// It returns 0 when no usable bit remains.
func BtoR(b uint64) int {
	b &= 0xffff
	switch {
	case gc.Nacl:
		b &^= (1<<(x86.REG_BP-x86.REG_AX) | 1<<(x86.REG_R15-x86.REG_AX))
	case obj.Framepointer_enabled != 0:
		// BP is part of the calling convention if framepointer_enabled.
		b &^= (1 << (x86.REG_BP - x86.REG_AX))
	}
	if b == 0 {
		return 0
	}
	return gc.Bitno(b) + x86.REG_AX
}
/*
 * bit reg
 * 16 X0
 * ...
 * 31 X15
 */

// FtoB maps SSE register f to its allocator bit (bits 16-31),
// or 0 if f is not X0-X15.
func FtoB(f int) uint64 {
	if x86.REG_X0 <= f && f <= x86.REG_X15 {
		return 1 << uint(f-x86.REG_X0+16)
	}
	return 0
}
// BtoF maps the lowest set SSE-register bit in b back to its register
// number, or 0 when no float bit (bits 16-31) is set.
func BtoF(b uint64) int {
	fbits := b & 0xFFFF0000
	if fbits != 0 {
		return gc.Bitno(fbits) - 16 + x86.REG_X0
	}
	return 0
}

12
src/cmd/new6g/util.go Normal file
View file

@ -0,0 +1,12 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

// bool2int converts b to its integer form: 1 for true, 0 for false.
func bool2int(b bool) int {
	if !b {
		return 0
	}
	return 1
}

1731
src/cmd/new8g/cgen.go Normal file

File diff suppressed because it is too large Load diff

609
src/cmd/new8g/cgen64.go Normal file
View file

@ -0,0 +1,609 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"cmd/internal/obj"
"cmd/internal/obj/i386"
)
import "cmd/internal/gc"
/*
* attempt to generate 64-bit
* res = n
* return 1 on success, 0 if op not handled.
*/
func cgen64(n *gc.Node, res *gc.Node) {
var t1 gc.Node
var t2 gc.Node
var ax gc.Node
var dx gc.Node
var cx gc.Node
var ex gc.Node
var fx gc.Node
var l *gc.Node
var r *gc.Node
var lo1 gc.Node
var lo2 gc.Node
var hi1 gc.Node
var hi2 gc.Node
var p1 *obj.Prog
var p2 *obj.Prog
var v uint64
var lv uint32
var hv uint32
if res.Op != gc.OINDREG && res.Op != gc.ONAME {
gc.Dump("n", n)
gc.Dump("res", res)
gc.Fatal("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
}
switch n.Op {
default:
gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
fallthrough
case gc.OMINUS:
cgen(n.Left, res)
split64(res, &lo1, &hi1)
gins(i386.ANEGL, nil, &lo1)
gins(i386.AADCL, ncon(0), &hi1)
gins(i386.ANEGL, nil, &hi1)
splitclean()
return
case gc.OCOM:
cgen(n.Left, res)
split64(res, &lo1, &hi1)
gins(i386.ANOTL, nil, &lo1)
gins(i386.ANOTL, nil, &hi1)
splitclean()
return
// binary operators.
// common setup below.
case gc.OADD,
gc.OSUB,
gc.OMUL,
gc.OLROT,
gc.OLSH,
gc.ORSH,
gc.OAND,
gc.OOR,
gc.OXOR:
break
}
l = n.Left
r = n.Right
if !(l.Addable != 0) {
gc.Tempname(&t1, l.Type)
cgen(l, &t1)
l = &t1
}
if r != nil && !(r.Addable != 0) {
gc.Tempname(&t2, r.Type)
cgen(r, &t2)
r = &t2
}
gc.Nodreg(&ax, gc.Types[gc.TINT32], i386.REG_AX)
gc.Nodreg(&cx, gc.Types[gc.TINT32], i386.REG_CX)
gc.Nodreg(&dx, gc.Types[gc.TINT32], i386.REG_DX)
// Setup for binary operation.
split64(l, &lo1, &hi1)
if gc.Is64(r.Type) != 0 {
split64(r, &lo2, &hi2)
}
// Do op. Leave result in DX:AX.
switch n.Op {
// TODO: Constants
case gc.OADD:
gins(i386.AMOVL, &lo1, &ax)
gins(i386.AMOVL, &hi1, &dx)
gins(i386.AADDL, &lo2, &ax)
gins(i386.AADCL, &hi2, &dx)
// TODO: Constants.
case gc.OSUB:
gins(i386.AMOVL, &lo1, &ax)
gins(i386.AMOVL, &hi1, &dx)
gins(i386.ASUBL, &lo2, &ax)
gins(i386.ASBBL, &hi2, &dx)
// let's call the next two EX and FX.
case gc.OMUL:
regalloc(&ex, gc.Types[gc.TPTR32], nil)
regalloc(&fx, gc.Types[gc.TPTR32], nil)
// load args into DX:AX and EX:CX.
gins(i386.AMOVL, &lo1, &ax)
gins(i386.AMOVL, &hi1, &dx)
gins(i386.AMOVL, &lo2, &cx)
gins(i386.AMOVL, &hi2, &ex)
// if DX and EX are zero, use 32 x 32 -> 64 unsigned multiply.
gins(i386.AMOVL, &dx, &fx)
gins(i386.AORL, &ex, &fx)
p1 = gc.Gbranch(i386.AJNE, nil, 0)
gins(i386.AMULL, &cx, nil) // implicit &ax
p2 = gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
// full 64x64 -> 64, from 32x32 -> 64.
gins(i386.AIMULL, &cx, &dx)
gins(i386.AMOVL, &ax, &fx)
gins(i386.AIMULL, &ex, &fx)
gins(i386.AADDL, &dx, &fx)
gins(i386.AMOVL, &cx, &dx)
gins(i386.AMULL, &dx, nil) // implicit &ax
gins(i386.AADDL, &fx, &dx)
gc.Patch(p2, gc.Pc)
regfree(&ex)
regfree(&fx)
// We only rotate by a constant c in [0,64).
// if c >= 32:
// lo, hi = hi, lo
// c -= 32
// if c == 0:
// no-op
// else:
// t = hi
// shld hi:lo, c
// shld lo:t, c
case gc.OLROT:
v = uint64(gc.Mpgetfix(r.Val.U.Xval))
if v >= 32 {
// reverse during load to do the first 32 bits of rotate
v -= 32
gins(i386.AMOVL, &lo1, &dx)
gins(i386.AMOVL, &hi1, &ax)
} else {
gins(i386.AMOVL, &lo1, &ax)
gins(i386.AMOVL, &hi1, &dx)
}
if v == 0 {
} else // done
{
gins(i386.AMOVL, &dx, &cx)
p1 = gins(i386.ASHLL, ncon(uint32(v)), &dx)
p1.From.Index = i386.REG_AX // double-width shift
p1.From.Scale = 0
p1 = gins(i386.ASHLL, ncon(uint32(v)), &ax)
p1.From.Index = i386.REG_CX // double-width shift
p1.From.Scale = 0
}
case gc.OLSH:
if r.Op == gc.OLITERAL {
v = uint64(gc.Mpgetfix(r.Val.U.Xval))
if v >= 64 {
if gc.Is64(r.Type) != 0 {
splitclean()
}
splitclean()
split64(res, &lo2, &hi2)
gins(i386.AMOVL, ncon(0), &lo2)
gins(i386.AMOVL, ncon(0), &hi2)
splitclean()
goto out
}
if v >= 32 {
if gc.Is64(r.Type) != 0 {
splitclean()
}
split64(res, &lo2, &hi2)
gmove(&lo1, &hi2)
if v > 32 {
gins(i386.ASHLL, ncon(uint32(v-32)), &hi2)
}
gins(i386.AMOVL, ncon(0), &lo2)
splitclean()
splitclean()
goto out
}
// general shift
gins(i386.AMOVL, &lo1, &ax)
gins(i386.AMOVL, &hi1, &dx)
p1 = gins(i386.ASHLL, ncon(uint32(v)), &dx)
p1.From.Index = i386.REG_AX // double-width shift
p1.From.Scale = 0
gins(i386.ASHLL, ncon(uint32(v)), &ax)
break
}
// load value into DX:AX.
gins(i386.AMOVL, &lo1, &ax)
gins(i386.AMOVL, &hi1, &dx)
// load shift value into register.
// if high bits are set, zero value.
p1 = nil
if gc.Is64(r.Type) != 0 {
gins(i386.ACMPL, &hi2, ncon(0))
p1 = gc.Gbranch(i386.AJNE, nil, +1)
gins(i386.AMOVL, &lo2, &cx)
} else {
cx.Type = gc.Types[gc.TUINT32]
gmove(r, &cx)
}
// if shift count is >=64, zero value
gins(i386.ACMPL, &cx, ncon(64))
p2 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
if p1 != nil {
gc.Patch(p1, gc.Pc)
}
gins(i386.AXORL, &dx, &dx)
gins(i386.AXORL, &ax, &ax)
gc.Patch(p2, gc.Pc)
// if shift count is >= 32, zero low.
gins(i386.ACMPL, &cx, ncon(32))
p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
gins(i386.AMOVL, &ax, &dx)
gins(i386.ASHLL, &cx, &dx) // SHLL only uses bottom 5 bits of count
gins(i386.AXORL, &ax, &ax)
p2 = gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
// general shift
p1 = gins(i386.ASHLL, &cx, &dx)
p1.From.Index = i386.REG_AX // double-width shift
p1.From.Scale = 0
gins(i386.ASHLL, &cx, &ax)
gc.Patch(p2, gc.Pc)
case gc.ORSH:
if r.Op == gc.OLITERAL {
v = uint64(gc.Mpgetfix(r.Val.U.Xval))
if v >= 64 {
if gc.Is64(r.Type) != 0 {
splitclean()
}
splitclean()
split64(res, &lo2, &hi2)
if hi1.Type.Etype == gc.TINT32 {
gmove(&hi1, &lo2)
gins(i386.ASARL, ncon(31), &lo2)
gmove(&hi1, &hi2)
gins(i386.ASARL, ncon(31), &hi2)
} else {
gins(i386.AMOVL, ncon(0), &lo2)
gins(i386.AMOVL, ncon(0), &hi2)
}
splitclean()
goto out
}
if v >= 32 {
if gc.Is64(r.Type) != 0 {
splitclean()
}
split64(res, &lo2, &hi2)
gmove(&hi1, &lo2)
if v > 32 {
gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v-32)), &lo2)
}
if hi1.Type.Etype == gc.TINT32 {
gmove(&hi1, &hi2)
gins(i386.ASARL, ncon(31), &hi2)
} else {
gins(i386.AMOVL, ncon(0), &hi2)
}
splitclean()
splitclean()
goto out
}
// general shift
gins(i386.AMOVL, &lo1, &ax)
gins(i386.AMOVL, &hi1, &dx)
p1 = gins(i386.ASHRL, ncon(uint32(v)), &ax)
p1.From.Index = i386.REG_DX // double-width shift
p1.From.Scale = 0
gins(optoas(gc.ORSH, hi1.Type), ncon(uint32(v)), &dx)
break
}
// load value into DX:AX.
gins(i386.AMOVL, &lo1, &ax)
gins(i386.AMOVL, &hi1, &dx)
// load shift value into register.
// if high bits are set, zero value.
p1 = nil
if gc.Is64(r.Type) != 0 {
gins(i386.ACMPL, &hi2, ncon(0))
p1 = gc.Gbranch(i386.AJNE, nil, +1)
gins(i386.AMOVL, &lo2, &cx)
} else {
cx.Type = gc.Types[gc.TUINT32]
gmove(r, &cx)
}
// if shift count is >=64, zero or sign-extend value
gins(i386.ACMPL, &cx, ncon(64))
p2 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
if p1 != nil {
gc.Patch(p1, gc.Pc)
}
if hi1.Type.Etype == gc.TINT32 {
gins(i386.ASARL, ncon(31), &dx)
gins(i386.AMOVL, &dx, &ax)
} else {
gins(i386.AXORL, &dx, &dx)
gins(i386.AXORL, &ax, &ax)
}
gc.Patch(p2, gc.Pc)
// if shift count is >= 32, sign-extend hi.
gins(i386.ACMPL, &cx, ncon(32))
p1 = gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT32]), nil, +1)
gins(i386.AMOVL, &dx, &ax)
if hi1.Type.Etype == gc.TINT32 {
gins(i386.ASARL, &cx, &ax) // SARL only uses bottom 5 bits of count
gins(i386.ASARL, ncon(31), &dx)
} else {
gins(i386.ASHRL, &cx, &ax)
gins(i386.AXORL, &dx, &dx)
}
p2 = gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
// general shift
p1 = gins(i386.ASHRL, &cx, &ax)
p1.From.Index = i386.REG_DX // double-width shift
p1.From.Scale = 0
gins(optoas(gc.ORSH, hi1.Type), &cx, &dx)
gc.Patch(p2, gc.Pc)
// make constant the right side (it usually is anyway).
case gc.OXOR,
gc.OAND,
gc.OOR:
if lo1.Op == gc.OLITERAL {
nswap(&lo1, &lo2)
nswap(&hi1, &hi2)
}
if lo2.Op == gc.OLITERAL {
// special cases for constants.
lv = uint32(gc.Mpgetfix(lo2.Val.U.Xval))
hv = uint32(gc.Mpgetfix(hi2.Val.U.Xval))
splitclean() // right side
split64(res, &lo2, &hi2)
switch n.Op {
case gc.OXOR:
gmove(&lo1, &lo2)
gmove(&hi1, &hi2)
switch lv {
case 0:
break
case 0xffffffff:
gins(i386.ANOTL, nil, &lo2)
default:
gins(i386.AXORL, ncon(lv), &lo2)
}
switch hv {
case 0:
break
case 0xffffffff:
gins(i386.ANOTL, nil, &hi2)
default:
gins(i386.AXORL, ncon(hv), &hi2)
}
case gc.OAND:
switch lv {
case 0:
gins(i386.AMOVL, ncon(0), &lo2)
default:
gmove(&lo1, &lo2)
if lv != 0xffffffff {
gins(i386.AANDL, ncon(lv), &lo2)
}
}
switch hv {
case 0:
gins(i386.AMOVL, ncon(0), &hi2)
default:
gmove(&hi1, &hi2)
if hv != 0xffffffff {
gins(i386.AANDL, ncon(hv), &hi2)
}
}
case gc.OOR:
switch lv {
case 0:
gmove(&lo1, &lo2)
case 0xffffffff:
gins(i386.AMOVL, ncon(0xffffffff), &lo2)
default:
gmove(&lo1, &lo2)
gins(i386.AORL, ncon(lv), &lo2)
}
switch hv {
case 0:
gmove(&hi1, &hi2)
case 0xffffffff:
gins(i386.AMOVL, ncon(0xffffffff), &hi2)
default:
gmove(&hi1, &hi2)
gins(i386.AORL, ncon(hv), &hi2)
}
}
splitclean()
splitclean()
goto out
}
gins(i386.AMOVL, &lo1, &ax)
gins(i386.AMOVL, &hi1, &dx)
gins(optoas(int(n.Op), lo1.Type), &lo2, &ax)
gins(optoas(int(n.Op), lo1.Type), &hi2, &dx)
}
if gc.Is64(r.Type) != 0 {
splitclean()
}
splitclean()
split64(res, &lo1, &hi1)
gins(i386.AMOVL, &ax, &lo1)
gins(i386.AMOVL, &dx, &hi1)
splitclean()
out:
}
/*
* generate comparison of nl, nr, both 64-bit.
* nl is memory; nr is constant or memory.
*/
/*
 * generate comparison of nl, nr, both 64-bit.
 * nl is memory; nr is constant or memory.
 */
func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
	var lo1 gc.Node
	var hi1 gc.Node
	var lo2 gc.Node
	var hi2 gc.Node

	split64(nl, &lo1, &hi1)
	split64(nr, &lo2, &hi2)

	// Emit CMPL a, b. When either original operand is a literal the
	// memory/constant compare is legal directly; otherwise go through
	// a scratch register, since CMPL cannot take two memory operands.
	compare := func(a *gc.Node, b *gc.Node) {
		if nl.Op == gc.OLITERAL || nr.Op == gc.OLITERAL {
			gins(i386.ACMPL, a, b)
			return
		}
		var scratch gc.Node
		regalloc(&scratch, gc.Types[gc.TINT32], nil)
		gins(i386.AMOVL, a, &scratch)
		gins(i386.ACMPL, &scratch, b)
		regfree(&scratch)
	}

	// Compare the most significant words first; if they differ,
	// the whole comparison is decided without the low words.
	t := hi1.Type
	compare(&hi1, &hi2)

	var br *obj.Prog
	switch op {
	default:
		gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
		fallthrough

	// cmp hi
	// jne L
	// cmp lo
	// jeq to
	// L:
	case gc.OEQ:
		br = gc.Gbranch(i386.AJNE, nil, -likely)

	// cmp hi
	// jne to
	// cmp lo
	// jne to
	case gc.ONE:
		gc.Patch(gc.Gbranch(i386.AJNE, nil, likely), to)

	// cmp hi
	// jgt to
	// jlt L
	// cmp lo
	// jge to (or jgt to)
	// L:
	case gc.OGE,
		gc.OGT:
		gc.Patch(gc.Gbranch(optoas(gc.OGT, t), nil, likely), to)
		br = gc.Gbranch(optoas(gc.OLT, t), nil, -likely)

	// cmp hi
	// jlt to
	// jgt L
	// cmp lo
	// jle to (or jlt to)
	// L:
	case gc.OLE,
		gc.OLT:
		gc.Patch(gc.Gbranch(optoas(gc.OLT, t), nil, likely), to)
		br = gc.Gbranch(optoas(gc.OGT, t), nil, -likely)
	}

	// High words were equal (or op is ONE): the low words,
	// compared unsigned, decide the result.
	t = lo1.Type
	compare(&lo1, &lo2)
	gc.Patch(gc.Gbranch(optoas(op, t), nil, likely), to)

	// The "not yet decided" branch from the high-word compare lands here.
	if br != nil {
		gc.Patch(br, gc.Pc)
	}

	splitclean()
	splitclean()
}

84
src/cmd/new8g/galign.go Normal file
View file

@ -0,0 +1,84 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"cmd/internal/obj"
"cmd/internal/obj/i386"
)
import "cmd/internal/gc"
// Architecture identity for this back end: the traditional
// single-character arch letter ('8' for 386), the arch name string,
// and the linker architecture from cmd/internal/obj/i386.
var thechar int = '8'

var thestring string = "386"

var thelinkarch *obj.LinkArch = &i386.Link386
// linkarchinit is the architecture-specific link initialization hook
// installed into gc.Thearch by main. The 386 port needs no extra
// setup, so it is a no-op.
func linkarchinit() {
}
var MAXWIDTH int64 = (1 << 32) - 1
/*
* go declares several platform-specific type aliases:
* int, uint, float, and uintptr
*/
// typedefs records the concrete 32-bit representations of the
// platform-dependent predeclared types on 386; the element type is
// elided from each literal since the slice type already names it.
var typedefs = []gc.Typedef{
	{"int", gc.TINT, gc.TINT32},
	{"uint", gc.TUINT, gc.TUINT32},
	{"uintptr", gc.TUINTPTR, gc.TUINT32},
}
// betypeinit records the 386 back end's fundamental widths:
// pointers, ints, and machine registers are all 4 bytes.
func betypeinit() {
	gc.Widthptr = 4
	gc.Widthint = 4
	gc.Widthreg = 4
}
// main is the entry point of the new8g (386) compiler binary.
// It fills gc.Thearch with the 386-specific constants and
// code-generation callbacks implemented in this package, then
// transfers control to the portable compiler driver gc.Main,
// which calls back through Thearch for all machine-dependent work.
func main() {
	// Architecture identity and limits.
	gc.Thearch.Thechar = thechar
	gc.Thearch.Thestring = thestring
	gc.Thearch.Thelinkarch = thelinkarch
	gc.Thearch.Typedefs = typedefs
	gc.Thearch.REGSP = i386.REGSP
	gc.Thearch.REGCTXT = i386.REGCTXT
	gc.Thearch.MAXWIDTH = MAXWIDTH
	// Code-generation hooks.
	gc.Thearch.Anyregalloc = anyregalloc
	gc.Thearch.Betypeinit = betypeinit
	gc.Thearch.Bgen = bgen
	gc.Thearch.Cgen = cgen
	gc.Thearch.Cgen_call = cgen_call
	gc.Thearch.Cgen_callinter = cgen_callinter
	gc.Thearch.Cgen_ret = cgen_ret
	gc.Thearch.Clearfat = clearfat
	gc.Thearch.Defframe = defframe
	gc.Thearch.Excise = excise
	gc.Thearch.Expandchecks = expandchecks
	gc.Thearch.Gclean = gclean
	gc.Thearch.Ginit = ginit
	gc.Thearch.Gins = gins
	gc.Thearch.Ginscall = ginscall
	gc.Thearch.Igen = igen
	gc.Thearch.Linkarchinit = linkarchinit
	gc.Thearch.Peep = peep
	gc.Thearch.Proginfo = proginfo
	// Register allocation and register/bitmask conversions.
	gc.Thearch.Regalloc = regalloc
	gc.Thearch.Regfree = regfree
	gc.Thearch.Regtyp = regtyp
	gc.Thearch.Sameaddr = sameaddr
	gc.Thearch.Smallindir = smallindir
	gc.Thearch.Stackaddr = stackaddr
	gc.Thearch.Excludedregs = excludedregs
	gc.Thearch.RtoB = RtoB
	gc.Thearch.FtoB = FtoB
	gc.Thearch.BtoR = BtoR
	gc.Thearch.BtoF = BtoF
	gc.Thearch.Optoas = optoas
	gc.Thearch.Doregbits = doregbits
	gc.Thearch.Regnames = regnames

	gc.Main()
}

34
src/cmd/new8g/gg.go Normal file
View file

@ -0,0 +1,34 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "cmd/internal/obj/i386"
import "cmd/internal/gc"
// TODO(rsc):
// assume CLD?
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// foptoas flags
// Bit flags consumed by foptoas when selecting x87 floating-point
// instruction forms. NOTE(review): the names suggest Frev selects the
// reversed-operand form and Fpop/Fpop2 the stack-popping forms —
// confirm against foptoas, whose body is elsewhere in this package.
const (
	Frev  = 1 << 0 // reversed-operand variant (presumed; see note above)
	Fpop  = 1 << 1 // pop the FP stack once (presumed)
	Fpop2 = 1 << 2 // pop the FP stack twice (presumed)
)
// reg holds per-machine-register state, indexed by register number.
// NOTE(review): presumably reference counts maintained by
// regalloc/regfree (installed in gc.Thearch) — confirm there.
var reg [i386.MAXREG]uint8

// panicdiv caches a node referring to the runtime divide-panic
// routine. NOTE(review): initialization happens elsewhere in this
// package — confirm it is set before first use.
var panicdiv *gc.Node
/*
* cgen.c
*/
/*
* list.c
*/

1297
src/cmd/new8g/ggen.go Normal file

File diff suppressed because it is too large Load diff

Some files were not shown because too many files have changed in this diff Show more