[dev.cc] cmd/internal/obj, cmd/internal/gc, new6g: reconvert

Reconvert using rsc.io/c2go rev 27b3f59.

Changes to converter:
 - fatal does not return, so no fallthrough after fatal in switch
 - many more function results and variables identified as bool
 - simplification of negated boolean expressions

Change-Id: I3bc67da5e46cb7ee613e230cf7e9533036cc870b
Reviewed-on: https://go-review.googlesource.com/5171
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
Russ Cox 2015-02-17 22:13:49 -05:00
parent 786825c5e8
commit dc7b54bed2
67 changed files with 2410 additions and 2509 deletions
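
The three converter changes account for most of the churn in the hunks below: int-valued predicates become bool, double-negated tests like !(x != 0) collapse to x == 0, bool2int wrappers disappear, and the unreachable fallthrough after Fatal (which never returns) is dropped from switch defaults. A condensed before/after sketch of these patterns, using a hypothetical node type rather than code taken from the diff:

    package main

    import "fmt"

    // hypothetical stand-in for gc's *Node; broke mirrors t.Broke below
    type node struct{ broke uint8 }

    // Before: c2go emitted int-valued predicates with tests spelled !(x != 0).
    func brokeOld(n *node) int {
        if !(n.broke != 0) {
            return 0
        }
        return 1
    }

    // After rev 27b3f59: the result is identified as bool, the negated
    // comparison simplifies to broke != 0, and no bool2int wrapper is
    // needed at the return.
    func brokeNew(n *node) bool {
        return n.broke != 0
    }

    func main() {
        n := &node{broke: 1}
        fmt.Println(brokeOld(n) != 0) // old call sites compare against 0
        fmt.Println(brokeNew(n))      // new call sites use the bool directly
    }

Call sites change to match; for example, for bany(&bits) != 0 becomes for bany(&bits) in the bany hunk below.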


@ -138,7 +138,7 @@ func dowidth(t *Type) {
if t.Width == -2 {
lno = int(lineno)
lineno = int32(t.Lineno)
if !(t.Broke != 0) {
if t.Broke == 0 {
t.Broke = 1
Yyerror("invalid recursive type %v", Tconv(t, 0))
}
@ -253,14 +253,14 @@ func dowidth(t *Type) {
checkwidth(t.Down)
case TFORW: // should have been filled in
if !(t.Broke != 0) {
if t.Broke == 0 {
Yyerror("invalid recursive type %v", Tconv(t, 0))
}
w = 1 // anything will do
// dummy type; should be replaced before use.
case TANY:
if !(Debug['A'] != 0) {
if Debug['A'] == 0 {
Fatal("dowidth any")
}
w = 1 // anything will do
@ -294,7 +294,7 @@ func dowidth(t *Type) {
checkwidth(t.Type)
t.Align = uint8(Widthptr)
} else if t.Bound == -100 {
if !(t.Broke != 0) {
if t.Broke == 0 {
Yyerror("use of [...] array outside of array literal")
t.Broke = 1
}
@ -394,7 +394,7 @@ func checkwidth(t *Type) {
Fatal("checkwidth %v", Tconv(t, 0))
}
if !(defercalc != 0) {
if defercalc == 0 {
dowidth(t)
return
}
@ -427,7 +427,7 @@ func defercheckwidth() {
func resumecheckwidth() {
var l *TypeList
if !(defercalc != 0) {
if defercalc == 0 {
Fatal("resumecheckwidth")
}
for l = tlq; l != nil; l = tlq {


@ -66,15 +66,15 @@ bnot(Bits a)
return c;
}
*/
func bany(a *Bits) int {
func bany(a *Bits) bool {
var i int
for i = 0; i < BITS; i++ {
if a.b[i] != 0 {
return 1
return true
}
}
return 0
return false
}
/*
@ -112,8 +112,8 @@ func blsh(n uint) Bits {
return c
}
func btest(a *Bits, n uint) int {
return bool2int(a.b[n/64]&(1<<(n%64)) != 0)
func btest(a *Bits, n uint) bool {
return a.b[n/64]&(1<<(n%64)) != 0
}
func biset(a *Bits, n uint) {
@ -144,7 +144,7 @@ func Qconv(bits Bits, flag int) string {
first = 1
for bany(&bits) != 0 {
for bany(&bits) {
i = bnum(bits)
if first != 0 {
first = 0


@ -120,15 +120,15 @@ func bvnext(bv *Bvec, i int32) int {
return int(i)
}
func bvisempty(bv *Bvec) int {
func bvisempty(bv *Bvec) bool {
var i int32
for i = 0; i < bv.n; i += WORDBITS {
if bv.b[i>>WORDSHIFT] != 0 {
return 0
return false
}
}
return 1
return true
}
func bvnot(bv *Bvec) {


@ -91,7 +91,7 @@ func typecheckclosure(func_ *Node, top int) {
for l = func_.Cvars; l != nil; l = l.Next {
n = l.N.Closure
if !(n.Captured != 0) {
if n.Captured == 0 {
n.Captured = 1
if n.Decldepth == 0 {
Fatal("typecheckclosure: var %v does not have decldepth assigned", Nconv(n, obj.FmtShort))
@ -218,7 +218,7 @@ func capturevars(xfunc *Node) {
v.Outerexpr = nil
// out parameters will be assigned to implicitly upon return.
if outer.Class != PPARAMOUT && !(v.Closure.Addrtaken != 0) && !(v.Closure.Assigned != 0) && v.Type.Width <= 128 {
if outer.Class != PPARAMOUT && v.Closure.Addrtaken == 0 && v.Closure.Assigned == 0 && v.Type.Width <= 128 {
v.Byval = 1
} else {
v.Closure.Addrtaken = 1
@ -351,7 +351,7 @@ func transformclosure(xfunc *Node) {
cv = Nod(OCLOSUREVAR, nil, nil)
cv.Type = v.Type
if !(v.Byval != 0) {
if v.Byval == 0 {
cv.Type = Ptrto(v.Type)
}
offset = Rnd(offset, int64(cv.Type.Align))
@ -389,7 +389,7 @@ func transformclosure(xfunc *Node) {
typechecklist(body, Etop)
walkstmtlist(body)
xfunc.Enter = body
xfunc.Needctxt = uint8(bool2int(nvar > 0))
xfunc.Needctxt = nvar > 0
}
lineno = int32(lno)
@ -430,7 +430,7 @@ func walkclosure(func_ *Node, init **NodeList) *Node {
continue
}
typ1 = typenod(v.Type)
if !(v.Byval != 0) {
if v.Byval == 0 {
typ1 = Nod(OIND, typ1, nil)
}
typ.List = list(typ.List, Nod(ODCLFIELD, newname(v.Sym), typ1))
@ -594,7 +594,7 @@ func makepartialcall(fn *Node, t0 *Type, meth *Node) *Node {
// Declare and initialize variable holding receiver.
body = nil
xfunc.Needctxt = 1
xfunc.Needctxt = true
cv = Nod(OCLOSUREVAR, nil, nil)
cv.Xoffset = int64(Widthptr)
cv.Type = rcvrtype
@ -609,7 +609,7 @@ func makepartialcall(fn *Node, t0 *Type, meth *Node) *Node {
ptr.Used = 1
ptr.Curfn = xfunc
xfunc.Dcl = list(xfunc.Dcl, ptr)
if Isptr[rcvrtype.Etype] != 0 || Isinter(rcvrtype) != 0 {
if Isptr[rcvrtype.Etype] != 0 || Isinter(rcvrtype) {
ptr.Ntype = typenod(rcvrtype)
body = list(body, Nod(OAS, ptr, cv))
} else {
@ -652,7 +652,7 @@ func walkpartialcall(n *Node, init **NodeList) *Node {
//
// Like walkclosure above.
if Isinter(n.Left.Type) != 0 {
if Isinter(n.Left.Type) {
// Trigger panic for method on nil interface now.
// Otherwise it happens in the wrapper and is confusing.
n.Left = cheapexpr(n.Left, init)


@ -47,7 +47,7 @@ func truncfltlit(oldv *Mpflt, t *Type) *Mpflt {
* implicit conversion.
*/
func Convlit(np **Node, t *Type) {
convlit1(np, t, 0)
convlit1(np, t, false)
}
/*
@ -55,17 +55,17 @@ func Convlit(np **Node, t *Type) {
* return a new node if necessary
* (if n is a named constant, can't edit n->type directly).
*/
func convlit1(np **Node, t *Type, explicit int) {
func convlit1(np **Node, t *Type, explicit bool) {
var ct int
var et int
var n *Node
var nn *Node
n = *np
if n == nil || t == nil || n.Type == nil || isideal(t) != 0 || n.Type == t {
if n == nil || t == nil || n.Type == nil || isideal(t) || n.Type == t {
return
}
if !(explicit != 0) && !(isideal(n.Type) != 0) {
if !explicit && !isideal(n.Type) {
return
}
@ -96,7 +96,7 @@ func convlit1(np **Node, t *Type, explicit int) {
// target is invalid type for a constant? leave alone.
case OLITERAL:
if !(okforconst[t.Etype] != 0) && n.Type.Etype != TNIL {
if okforconst[t.Etype] == 0 && n.Type.Etype != TNIL {
defaultlit(&n, nil)
*np = n
return
@ -104,12 +104,12 @@ func convlit1(np **Node, t *Type, explicit int) {
case OLSH,
ORSH:
convlit1(&n.Left, t, bool2int(explicit != 0 && isideal(n.Left.Type) != 0))
convlit1(&n.Left, t, explicit && isideal(n.Left.Type))
t = n.Left.Type
if t != nil && t.Etype == TIDEAL && n.Val.Ctype != CTINT {
n.Val = toint(n.Val)
}
if t != nil && !(Isint[t.Etype] != 0) {
if t != nil && Isint[t.Etype] == 0 {
Yyerror("invalid operation: %v (shift of type %v)", Nconv(n, 0), Tconv(t, 0))
t = nil
}
@ -179,7 +179,7 @@ func convlit1(np **Node, t *Type, explicit int) {
return
case TARRAY:
if !(Isslice(t) != 0) {
if !Isslice(t) {
goto bad
}
@ -258,7 +258,7 @@ func convlit1(np **Node, t *Type, explicit int) {
case CTCPLX:
overflow(n.Val, t)
}
} else if et == TSTRING && (ct == CTINT || ct == CTRUNE) && explicit != 0 {
} else if et == TSTRING && (ct == CTINT || ct == CTRUNE) && explicit {
n.Val = tostr(n.Val)
} else {
goto bad
@ -269,14 +269,14 @@ func convlit1(np **Node, t *Type, explicit int) {
return
bad:
if !(n.Diag != 0) {
if !(t.Broke != 0) {
if n.Diag == 0 {
if t.Broke == 0 {
Yyerror("cannot convert %v to type %v", Nconv(n, 0), Tconv(t, 0))
}
n.Diag = 1
}
if isideal(n.Type) != 0 {
if isideal(n.Type) {
defaultlit(&n, nil)
*np = n
}
@ -388,35 +388,35 @@ func toint(v Val) Val {
return v
}
func doesoverflow(v Val, t *Type) int {
func doesoverflow(v Val, t *Type) bool {
switch v.Ctype {
case CTINT,
CTRUNE:
if !(Isint[t.Etype] != 0) {
if Isint[t.Etype] == 0 {
Fatal("overflow: %v integer constant", Tconv(t, 0))
}
if Mpcmpfixfix(v.U.Xval, Minintval[t.Etype]) < 0 || Mpcmpfixfix(v.U.Xval, Maxintval[t.Etype]) > 0 {
return 1
return true
}
case CTFLT:
if !(Isfloat[t.Etype] != 0) {
if Isfloat[t.Etype] == 0 {
Fatal("overflow: %v floating-point constant", Tconv(t, 0))
}
if mpcmpfltflt(v.U.Fval, minfltval[t.Etype]) <= 0 || mpcmpfltflt(v.U.Fval, maxfltval[t.Etype]) >= 0 {
return 1
return true
}
case CTCPLX:
if !(Iscomplex[t.Etype] != 0) {
if Iscomplex[t.Etype] == 0 {
Fatal("overflow: %v complex constant", Tconv(t, 0))
}
if mpcmpfltflt(&v.U.Cval.Real, minfltval[t.Etype]) <= 0 || mpcmpfltflt(&v.U.Cval.Real, maxfltval[t.Etype]) >= 0 || mpcmpfltflt(&v.U.Cval.Imag, minfltval[t.Etype]) <= 0 || mpcmpfltflt(&v.U.Cval.Imag, maxfltval[t.Etype]) >= 0 {
return 1
return true
}
}
return 0
return false
}
func overflow(v Val, t *Type) {
@ -426,7 +426,7 @@ func overflow(v Val, t *Type) {
return
}
if !(doesoverflow(v, t) != 0) {
if !doesoverflow(v, t) {
return
}
@ -479,14 +479,14 @@ func consttype(n *Node) int {
return int(n.Val.Ctype)
}
func Isconst(n *Node, ct int) int {
func Isconst(n *Node, ct int) bool {
var t int
t = consttype(n)
// If the caller is asking for CTINT, allow CTRUNE too.
// Makes life easier for back ends.
return bool2int(t == ct || (ct == CTINT && t == CTRUNE))
return t == ct || (ct == CTINT && t == CTRUNE)
}
func saveorig(n *Node) *Node {
@ -557,18 +557,18 @@ func evconst(n *Node) {
if n.Type == nil {
return
}
if !(okforconst[n.Type.Etype] != 0) && n.Type.Etype != TNIL {
if okforconst[n.Type.Etype] == 0 && n.Type.Etype != TNIL {
return
}
// merge adjacent constants in the argument list.
case OADDSTR:
for l1 = n.List; l1 != nil; l1 = l1.Next {
if Isconst(l1.N, CTSTR) != 0 && l1.Next != nil && Isconst(l1.Next.N, CTSTR) != 0 {
if Isconst(l1.N, CTSTR) && l1.Next != nil && Isconst(l1.Next.N, CTSTR) {
// merge from l1 up to but not including l2
str = new(Strlit)
l2 = l1
for l2 != nil && Isconst(l2.N, CTSTR) != 0 {
for l2 != nil && Isconst(l2.N, CTSTR) {
nr = l2.N
str.S += nr.Val.U.Sval.S
l2 = l2.Next
@ -590,7 +590,7 @@ func evconst(n *Node) {
}
// collapse single-constant list to single constant.
if count(n.List) == 1 && Isconst(n.List.N, CTSTR) != 0 {
if count(n.List) == 1 && Isconst(n.List.N, CTSTR) {
n.Op = OLITERAL
n.Val = n.List.N.Val
}
@ -655,7 +655,7 @@ func evconst(n *Node) {
defaultlit(&nr, Types[TUINT])
n.Right = nr
if nr.Type != nil && (Issigned[nr.Type.Etype] != 0 || !(Isint[nr.Type.Etype] != 0)) {
if nr.Type != nil && (Issigned[nr.Type.Etype] != 0 || Isint[nr.Type.Etype] == 0) {
goto illegal
}
if nl.Val.Ctype != CTRUNE {
@ -787,7 +787,7 @@ func evconst(n *Node) {
// The default case above would print 'ideal % ideal',
// which is not quite an ideal error.
case OMOD<<16 | CTFLT:
if !(n.Diag != 0) {
if n.Diag == 0 {
Yyerror("illegal constant expression: floating-point % operation")
n.Diag = 1
}
@ -985,7 +985,7 @@ unary:
switch uint32(n.Op)<<16 | uint32(v.Ctype) {
default:
if !(n.Diag != 0) {
if n.Diag == 0 {
Yyerror("illegal constant expression %v %v", Oconv(int(n.Op), 0), Tconv(nl.Type, 0))
n.Diag = 1
}
@ -1006,7 +1006,7 @@ unary:
OCONV<<16 | CTRUNE,
OCONV<<16 | CTFLT,
OCONV<<16 | CTSTR:
convlit1(&nl, n.Type, 1)
convlit1(&nl, n.Type, true)
v = nl.Val
@ -1058,7 +1058,7 @@ unary:
mpnegflt(&v.U.Cval.Imag)
case ONOT<<16 | CTBOOL:
if !(v.U.Bval != 0) {
if v.U.Bval == 0 {
goto settrue
}
goto setfalse
@ -1087,18 +1087,18 @@ ret:
settrue:
norig = saveorig(n)
*n = *Nodbool(1)
*n = *Nodbool(true)
n.Orig = norig
return
setfalse:
norig = saveorig(n)
*n = *Nodbool(0)
*n = *Nodbool(false)
n.Orig = norig
return
illegal:
if !(n.Diag != 0) {
if n.Diag == 0 {
Yyerror("illegal constant expression: %v %v %v", Tconv(nl.Type, 0), Oconv(int(n.Op), 0), Tconv(nr.Type, 0))
n.Diag = 1
}
@ -1114,7 +1114,6 @@ func nodlit(v Val) *Node {
switch v.Ctype {
default:
Fatal("nodlit ctype %d", v.Ctype)
fallthrough
case CTSTR:
n.Type = idealstring
@ -1163,7 +1162,7 @@ func idealkind(n *Node) int {
var k1 int
var k2 int
if n == nil || !(isideal(n.Type) != 0) {
if n == nil || !isideal(n.Type) {
return CTxxx
}
@ -1235,7 +1234,7 @@ func defaultlit(np **Node, t *Type) {
var t1 *Type
n = *np
if n == nil || !(isideal(n.Type) != 0) {
if n == nil || !isideal(n.Type) {
return
}
@ -1257,7 +1256,7 @@ func defaultlit(np **Node, t *Type) {
if n.Val.Ctype == CTNIL {
lineno = int32(lno)
if !(n.Diag != 0) {
if n.Diag == 0 {
Yyerror("use of untyped nil")
n.Diag = 1
}
@ -1341,17 +1340,17 @@ func defaultlit2(lp **Node, rp **Node, force int) {
if l.Type == nil || r.Type == nil {
return
}
if !(isideal(l.Type) != 0) {
if !isideal(l.Type) {
Convlit(rp, l.Type)
return
}
if !(isideal(r.Type) != 0) {
if !isideal(r.Type) {
Convlit(lp, r.Type)
return
}
if !(force != 0) {
if force == 0 {
return
}
if l.Type.Etype == TBOOL {
@ -1387,8 +1386,8 @@ func cmpslit(l, r *Node) int {
return stringsCompare(l.Val.U.Sval.S, r.Val.U.Sval.S)
}
func Smallintconst(n *Node) int {
if n.Op == OLITERAL && Isconst(n, CTINT) != 0 && n.Type != nil {
func Smallintconst(n *Node) bool {
if n.Op == OLITERAL && Isconst(n, CTINT) && n.Type != nil {
switch Simtype[n.Type.Etype] {
case TINT8,
TUINT8,
@ -1398,7 +1397,7 @@ func Smallintconst(n *Node) int {
TUINT32,
TBOOL,
TPTR32:
return 1
return true
case TIDEAL,
TINT64,
@ -1407,11 +1406,11 @@ func Smallintconst(n *Node) int {
if Mpcmpfixfix(n.Val.U.Xval, Minintval[TINT32]) < 0 || Mpcmpfixfix(n.Val.U.Xval, Maxintval[TINT32]) > 0 {
break
}
return 1
return true
}
}
return 0
return false
}
func nonnegconst(n *Node) int {
@ -1491,7 +1490,6 @@ func Convconst(con *Node, t *Type, val *Val) {
switch val.Ctype {
default:
Fatal("convconst ctype=%d %v", val.Ctype, Tconv(t, obj.FmtLong))
fallthrough
case CTINT,
CTRUNE:
@ -1615,7 +1613,7 @@ func cmplxdiv(v *Mpcplx, rv *Mpcplx) {
// may be known at compile time, are not Go language constants.
// Only called for expressions known to evaluated to compile-time
// constants.
func isgoconst(n *Node) int {
func isgoconst(n *Node) bool {
var l *Node
var t *Type
@ -1652,20 +1650,20 @@ func isgoconst(n *Node) int {
OCOMPLEX,
OREAL,
OIMAG:
if isgoconst(n.Left) != 0 && (n.Right == nil || isgoconst(n.Right) != 0) {
return 1
if isgoconst(n.Left) && (n.Right == nil || isgoconst(n.Right)) {
return true
}
case OCONV:
if okforconst[n.Type.Etype] != 0 && isgoconst(n.Left) != 0 {
return 1
if okforconst[n.Type.Etype] != 0 && isgoconst(n.Left) {
return true
}
case OLEN,
OCAP:
l = n.Left
if isgoconst(l) != 0 {
return 1
if isgoconst(l) {
return true
}
// Special case: len/cap is constant when applied to array or
@ -1676,24 +1674,24 @@ func isgoconst(n *Node) int {
if t != nil && Isptr[t.Etype] != 0 {
t = t.Type
}
if Isfixedarray(t) != 0 && !(hascallchan(l) != 0) {
return 1
if Isfixedarray(t) && !hascallchan(l) {
return true
}
case OLITERAL:
if n.Val.Ctype != CTNIL {
return 1
return true
}
case ONAME:
l = n.Sym.Def
if l != nil && l.Op == OLITERAL && n.Val.Ctype != CTNIL {
return 1
return true
}
case ONONAME:
if n.Sym.Def != nil && n.Sym.Def.Op == OIOTA {
return 1
return true
}
// Only constant calls are unsafe.Alignof, Offsetof, and Sizeof.
@ -1707,19 +1705,19 @@ func isgoconst(n *Node) int {
break
}
if l.Sym.Name == "Alignof" || l.Sym.Name == "Offsetof" || l.Sym.Name == "Sizeof" {
return 1
return true
}
}
//dump("nonconst", n);
return 0
return false
}
func hascallchan(n *Node) int {
func hascallchan(n *Node) bool {
var l *NodeList
if n == nil {
return 0
return false
}
switch n.Op {
case OAPPEND,
@ -1742,23 +1740,23 @@ func hascallchan(n *Node) int {
OREAL,
ORECOVER,
ORECV:
return 1
return true
}
if hascallchan(n.Left) != 0 || hascallchan(n.Right) != 0 {
return 1
if hascallchan(n.Left) || hascallchan(n.Right) {
return true
}
for l = n.List; l != nil; l = l.Next {
if hascallchan(l.N) != 0 {
return 1
if hascallchan(l.N) {
return true
}
}
for l = n.Rlist; l != nil; l = l.Next {
if hascallchan(l.N) != 0 {
return 1
if hascallchan(l.N) {
return true
}
}
return 0
return false
}


@ -10,12 +10,12 @@ func CASE(a int, b int) int {
return a<<16 | b
}
func overlap_cplx(f *Node, t *Node) int {
func overlap_cplx(f *Node, t *Node) bool {
// check whether f and t could be overlapping stack references.
// not exact, because it's hard to check for the stack register
// in portable code. close enough: worst case we will allocate
// an extra temporary and the registerizer will clean it up.
return bool2int(f.Op == OINDREG && t.Op == OINDREG && f.Xoffset+f.Type.Width >= t.Xoffset && t.Xoffset+t.Type.Width >= f.Xoffset)
return f.Op == OINDREG && t.Op == OINDREG && f.Xoffset+f.Type.Width >= t.Xoffset && t.Xoffset+t.Type.Width >= f.Xoffset
}
func Complexbool(op int, nl *Node, nr *Node, true_ bool, likely int, to *obj.Prog) {
@ -31,20 +31,20 @@ func Complexbool(op int, nl *Node, nr *Node, true_ bool, likely int, to *obj.Pro
// make both sides addable in ullman order
if nr != nil {
if nl.Ullman > nr.Ullman && !(nl.Addable != 0) {
if nl.Ullman > nr.Ullman && nl.Addable == 0 {
Tempname(&tnl, nl.Type)
Thearch.Cgen(nl, &tnl)
nl = &tnl
}
if !(nr.Addable != 0) {
if nr.Addable == 0 {
Tempname(&tnr, nr.Type)
Thearch.Cgen(nr, &tnr)
nr = &tnr
}
}
if !(nl.Addable != 0) {
if nl.Addable == 0 {
Tempname(&tnl, nl.Type)
Thearch.Cgen(nl, &tnl)
nl = &tnl
@ -87,7 +87,7 @@ func subnode(nr *Node, ni *Node, nc *Node) {
var tc int
var t *Type
if !(nc.Addable != 0) {
if nc.Addable == 0 {
Fatal("subnode not addable")
}
@ -243,7 +243,7 @@ func nodfconst(n *Node, t *Type, fval *Mpflt) {
n.Val.Ctype = CTFLT
n.Type = t
if !(Isfloat[t.Etype] != 0) {
if Isfloat[t.Etype] == 0 {
Fatal("nodfconst: bad type %v", Tconv(t, 0))
}
}
@ -251,7 +251,7 @@ func nodfconst(n *Node, t *Type, fval *Mpflt) {
/*
* cplx.c
*/
func Complexop(n *Node, res *Node) int {
func Complexop(n *Node, res *Node) bool {
if n != nil && n.Type != nil {
if Iscomplex[n.Type.Etype] != 0 {
goto maybe
@ -292,11 +292,11 @@ maybe:
//dump("\ncomplex-no", n);
no:
return 0
return false
//dump("\ncomplex-yes", n);
yes:
return 1
return true
}
func Complexmove(f *Node, t *Node) {
@ -313,7 +313,7 @@ func Complexmove(f *Node, t *Node) {
Dump("complexmove-t", t)
}
if !(t.Addable != 0) {
if t.Addable == 0 {
Fatal("complexmove: to not addable")
}
@ -322,7 +322,6 @@ func Complexmove(f *Node, t *Node) {
switch uint32(ft)<<16 | uint32(tt) {
default:
Fatal("complexmove: unknown conversion: %v -> %v\n", Tconv(f.Type, 0), Tconv(t.Type, 0))
fallthrough
// complex to complex move/convert.
// make f addable.
@ -331,7 +330,7 @@ func Complexmove(f *Node, t *Node) {
TCOMPLEX64<<16 | TCOMPLEX128,
TCOMPLEX128<<16 | TCOMPLEX64,
TCOMPLEX128<<16 | TCOMPLEX128:
if !(f.Addable != 0) || overlap_cplx(f, t) != 0 {
if f.Addable == 0 || overlap_cplx(f, t) {
Tempname(&tmp, f.Type)
Complexmove(f, &tmp)
f = &tmp
@ -380,7 +379,7 @@ func Complexgen(n *Node, res *Node) {
case OREAL,
OIMAG:
nl = n.Left
if !(nl.Addable != 0) {
if nl.Addable == 0 {
Tempname(&tmp, nl.Type)
Complexgen(nl, &tmp)
nl = &tmp
@ -403,7 +402,7 @@ func Complexgen(n *Node, res *Node) {
tr = Simsimtype(n.Type)
tr = cplxsubtype(tr)
if tl != tr {
if !(n.Addable != 0) {
if n.Addable == 0 {
Tempname(&n1, n.Type)
Complexmove(n, &n1)
n = &n1
@ -413,7 +412,7 @@ func Complexgen(n *Node, res *Node) {
return
}
if !(res.Addable != 0) {
if res.Addable == 0 {
Thearch.Igen(res, &n1, nil)
Thearch.Cgen(n, &n1)
Thearch.Regfree(&n1)
@ -429,7 +428,6 @@ func Complexgen(n *Node, res *Node) {
default:
Dump("complexgen: unknown op", n)
Fatal("complexgen: unknown op %v", Oconv(int(n.Op), 0))
fallthrough
case ODOT,
ODOTPTR,
@ -464,20 +462,20 @@ func Complexgen(n *Node, res *Node) {
// make both sides addable in ullman order
if nr != nil {
if nl.Ullman > nr.Ullman && !(nl.Addable != 0) {
if nl.Ullman > nr.Ullman && nl.Addable == 0 {
Tempname(&tnl, nl.Type)
Thearch.Cgen(nl, &tnl)
nl = &tnl
}
if !(nr.Addable != 0) {
if nr.Addable == 0 {
Tempname(&tnr, nr.Type)
Thearch.Cgen(nr, &tnr)
nr = &tnr
}
}
if !(nl.Addable != 0) {
if nl.Addable == 0 {
Tempname(&tnl, nl.Type)
Thearch.Cgen(nl, &tnl)
nl = &tnl


@ -10,17 +10,17 @@ import (
"strings"
)
func dflag() int {
if !(Debug['d'] != 0) {
return 0
func dflag() bool {
if Debug['d'] == 0 {
return false
}
if Debug['y'] != 0 {
return 1
return true
}
if incannedimport != 0 {
return 0
return false
}
return 1
return true
}
/*
@ -49,7 +49,7 @@ func pushdcl(s *Sym) *Sym {
d = push()
dcopy(d, s)
if dflag() != 0 {
if dflag() {
fmt.Printf("\t%v push %v %p\n", Ctxt.Line(int(lineno)), Sconv(s, 0), s.Def)
}
return d
@ -71,7 +71,7 @@ func popdcl() {
lno = int(s.Lastlineno)
dcopy(s, d)
d.Lastlineno = int32(lno)
if dflag() != 0 {
if dflag() {
fmt.Printf("\t%v pop %v %p\n", Ctxt.Line(int(lineno)), Sconv(s, 0), s.Def)
}
}
@ -195,7 +195,7 @@ func declare(n *Node, ctxt int) {
s = n.Sym
// kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later.
if importpkg == nil && !(typecheckok != 0) && s.Pkg != localpkg {
if importpkg == nil && typecheckok == 0 && s.Pkg != localpkg {
Yyerror("cannot declare name %v", Sconv(s, 0))
}
@ -206,7 +206,7 @@ func declare(n *Node, ctxt int) {
gen = 0
if ctxt == PEXTERN {
externdcl = list(externdcl, n)
if dflag() != 0 {
if dflag() {
fmt.Printf("\t%v global decl %v %p\n", Ctxt.Line(int(lineno)), Sconv(s, 0), n)
}
} else {
@ -264,14 +264,14 @@ func addvar(n *Node, t *Type, ctxt int) {
* new_name_list (type | [type] = expr_list)
*/
func variter(vl *NodeList, t *Node, el *NodeList) *NodeList {
var doexpr int
var doexpr bool
var v *Node
var e *Node
var as2 *Node
var init *NodeList
init = nil
doexpr = bool2int(el != nil)
doexpr = el != nil
if count(el) == 1 && count(vl) > 1 {
e = el.N
@ -293,7 +293,7 @@ func variter(vl *NodeList, t *Node, el *NodeList) *NodeList {
}
for ; vl != nil; vl = vl.Next {
if doexpr != 0 {
if doexpr {
if el == nil {
Yyerror("missing expression in var declaration")
break
@ -479,17 +479,17 @@ func oldname(s *Sym) *Node {
/*
* := declarations
*/
func colasname(n *Node) int {
func colasname(n *Node) bool {
switch n.Op {
case ONAME,
ONONAME,
OPACK,
OTYPE,
OLITERAL:
return bool2int(n.Sym != nil)
return n.Sym != nil
}
return 0
return false
}
func colasdefn(left *NodeList, defn *Node) {
@ -511,7 +511,7 @@ func colasdefn(left *NodeList, defn *Node) {
if isblank(n) {
continue
}
if !(colasname(n) != 0) {
if !colasname(n) {
yyerrorl(int(defn.Lineno), "non-name %v on left side of :=", Nconv(n, 0))
nerr++
continue
@ -735,7 +735,7 @@ func funcargs2(t *Type) {
if t.Thistuple != 0 {
for ft = getthisx(t).Type; ft != nil; ft = ft.Down {
if !(ft.Nname != nil) || !(ft.Nname.Sym != nil) {
if ft.Nname == nil || ft.Nname.Sym == nil {
continue
}
n = ft.Nname // no need for newname(ft->nname->sym)
@ -746,7 +746,7 @@ func funcargs2(t *Type) {
if t.Intuple != 0 {
for ft = getinargx(t).Type; ft != nil; ft = ft.Down {
if !(ft.Nname != nil) || !(ft.Nname.Sym != nil) {
if ft.Nname == nil || ft.Nname.Sym == nil {
continue
}
n = ft.Nname
@ -757,7 +757,7 @@ func funcargs2(t *Type) {
if t.Outtuple != 0 {
for ft = getoutargx(t).Type; ft != nil; ft = ft.Down {
if !(ft.Nname != nil) || !(ft.Nname.Sym != nil) {
if ft.Nname == nil || ft.Nname.Sym == nil {
continue
}
n = ft.Nname
@ -925,7 +925,7 @@ func tostruct(l *NodeList) *Type {
tp = &f.Down
}
for f = t.Type; f != nil && !(t.Broke != 0); f = f.Down {
for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
if f.Broke != 0 {
t.Broke = 1
}
@ -934,7 +934,7 @@ func tostruct(l *NodeList) *Type {
uniqgen++
checkdupfields(t.Type, "field")
if !(t.Broke != 0) {
if t.Broke == 0 {
checkwidth(t)
}
@ -962,7 +962,7 @@ func tofunargs(l *NodeList) *Type {
tp = &f.Down
}
for f = t.Type; f != nil && !(t.Broke != 0); f = f.Down {
for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
if f.Broke != 0 {
t.Broke = 1
}
@ -1072,7 +1072,7 @@ func tointerface(l *NodeList) *Type {
}
}
for f = t.Type; f != nil && !(t.Broke != 0); f = f.Down {
for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
if f.Broke != 0 {
t.Broke = 1
}
@ -1199,7 +1199,7 @@ func checkarglist(all *NodeList, input int) *NodeList {
}
n = Nod(ODCLFIELD, n, t)
if n.Right != nil && n.Right.Op == ODDD {
if !(input != 0) {
if input == 0 {
Yyerror("cannot use ... in output argument list")
} else if l.Next != nil {
Yyerror("can only use ... as final argument in list")
@ -1232,23 +1232,23 @@ func fakethis() *Node {
* *struct{} as the receiver.
* (See fakethis above.)
*/
func isifacemethod(f *Type) int {
func isifacemethod(f *Type) bool {
var rcvr *Type
var t *Type
rcvr = getthisx(f).Type
if rcvr.Sym != nil {
return 0
return false
}
t = rcvr.Type
if !(Isptr[t.Etype] != 0) {
return 0
if Isptr[t.Etype] == 0 {
return false
}
t = t.Type
if t.Sym != nil || t.Etype != TSTRUCT || t.Type != nil {
return 0
return false
}
return 1
return true
}
/*
@ -1480,7 +1480,7 @@ func addmethod(sf *Sym, t *Type, local bool, nointerface bool) {
}
}
if local && !(pa.Local != 0) {
if local && pa.Local == 0 {
// defining method on non-local type.
Yyerror("cannot define new methods on non-local type %v", Tconv(pa, 0))
@ -1506,7 +1506,7 @@ func addmethod(sf *Sym, t *Type, local bool, nointerface bool) {
}
f = structfield(n)
f.Nointerface = uint8(bool2int(nointerface))
f.Nointerface = nointerface
// during import unexported method names should be in the type's package
if importpkg != nil && f.Sym != nil && !exportname(f.Sym.Name) && f.Sym.Pkg != structpkg {


@ -69,7 +69,7 @@ func escapes(all *NodeList) {
func visit(n *Node) uint32 {
var min uint32
var recursive uint32
var recursive bool
var l *NodeList
var block *NodeList
@ -95,7 +95,7 @@ func visit(n *Node) uint32 {
// If visitcodelist found its way back to n->walkgen, then this
// block is a set of mutually recursive functions.
// Otherwise it's just a lone function that does not recurse.
recursive = uint32(bool2int(min == n.Walkgen))
recursive = min == n.Walkgen
// Remove connected component from stack.
// Mark walkgen so that future visits return a large number
@ -110,7 +110,7 @@ func visit(n *Node) uint32 {
l.Next = nil
// Run escape analysis on this set of functions.
analyze(block, int(recursive))
analyze(block, recursive)
}
return min
@ -199,7 +199,7 @@ type EscState struct {
dstcount int
edgecount int
noesc *NodeList
recursive int
recursive bool
}
var tags [16]*Strlit
@ -247,7 +247,7 @@ func parsetag(note *Strlit) int {
return EscReturn | em<<EscBits
}
func analyze(all *NodeList, recursive int) {
func analyze(all *NodeList, recursive bool) {
var l *NodeList
var es EscState
var e *EscState
@ -351,7 +351,7 @@ func escfunc(e *EscState, func_ *Node) {
}
// in a mutually recursive group we lose track of the return values
if e.recursive != 0 {
if e.recursive {
for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
if ll.N.Op == ONAME && ll.N.Class == PPARAMOUT {
escflows(e, &e.theSink, ll.N)
@ -387,7 +387,7 @@ func escloopdepth(e *EscState, n *Node) {
switch n.Op {
case OLABEL:
if !(n.Left != nil) || !(n.Left.Sym != nil) {
if n.Left == nil || n.Left.Sym == nil {
Fatal("esc:label without label: %v", Nconv(n, obj.FmtSign))
}
@ -398,7 +398,7 @@ func escloopdepth(e *EscState, n *Node) {
n.Left.Sym.Label = &nonlooping
case OGOTO:
if !(n.Left != nil) || !(n.Left.Sym != nil) {
if n.Left == nil || n.Left.Sym == nil {
Fatal("esc:goto without label: %v", Nconv(n, obj.FmtSign))
}
@ -509,7 +509,7 @@ func esc(e *EscState, n *Node, up *Node) {
// Everything but fixed array is a dereference.
case ORANGE:
if Isfixedarray(n.Type) != 0 && n.List != nil && n.List.Next != nil {
if Isfixedarray(n.Type) && n.List != nil && n.List.Next != nil {
escassign(e, n.List.Next.N, n.Right)
}
@ -639,7 +639,7 @@ func esc(e *EscState, n *Node, up *Node) {
escassign(e, &e.theSink, n.Left)
case OAPPEND:
if !(n.Isddd != 0) {
if n.Isddd == 0 {
for ll = n.List.Next; ll != nil; ll = ll.Next {
escassign(e, &e.theSink, ll.N) // lose track of assign to dereference
}
@ -651,7 +651,7 @@ func esc(e *EscState, n *Node, up *Node) {
escassign(e, n, n.Left)
case OARRAYLIT:
if Isslice(n.Type) != 0 {
if Isslice(n.Type) {
n.Esc = EscNone // until proven otherwise
e.noesc = list(e.noesc, n)
n.Escloopdepth = e.loopdepth
@ -708,7 +708,7 @@ func esc(e *EscState, n *Node, up *Node) {
continue
}
a = v.Closure
if !(v.Byval != 0) {
if v.Byval == 0 {
a = Nod(OADDR, a, nil)
a.Lineno = v.Lineno
a.Escloopdepth = e.loopdepth
@ -805,7 +805,6 @@ func escassign(e *EscState, dst *Node, src *Node) {
default:
Dump("dst", dst)
Fatal("escassign: unexpected dst")
fallthrough
case OARRAYLIT,
OCLOSURE,
@ -829,7 +828,7 @@ func escassign(e *EscState, dst *Node, src *Node) {
return
case OINDEX:
if Isfixedarray(dst.Left.Type) != 0 {
if Isfixedarray(dst.Left.Type) {
escassign(e, dst.Left, src)
return
}
@ -914,7 +913,7 @@ func escassign(e *EscState, dst *Node, src *Node) {
// Index of array preserves input value.
case OINDEX:
if Isfixedarray(src.Left.Type) != 0 {
if Isfixedarray(src.Left.Type) {
escassign(e, dst, src.Left)
}
@ -999,7 +998,6 @@ func esccall(e *EscState, n *Node, up *Node) {
switch n.Op {
default:
Fatal("esccall")
fallthrough
case OCALLFUNC:
fn = n.Left
@ -1044,7 +1042,7 @@ func esccall(e *EscState, n *Node, up *Node) {
for lr = fn.Ntype.List; ll != nil && lr != nil; (func() { ll = ll.Next; lr = lr.Next })() {
src = ll.N
if lr.N.Isddd != 0 && !(n.Isddd != 0) {
if lr.N.Isddd != 0 && n.Isddd == 0 {
// Introduce ODDDARG node to represent ... allocation.
src = Nod(ODDDARG, nil, nil)
@ -1110,7 +1108,7 @@ func esccall(e *EscState, n *Node, up *Node) {
for t = getinargx(fntype).Type; ll != nil; ll = ll.Next {
src = ll.N
if t.Isddd != 0 && !(n.Isddd != 0) {
if t.Isddd != 0 && n.Isddd == 0 {
// Introduce ODDDARG node to represent ... allocation.
src = Nod(ODDDARG, nil, nil)
@ -1243,7 +1241,7 @@ const (
func escwalk(e *EscState, level int, dst *Node, src *Node) {
var ll *NodeList
var leaks int
var leaks bool
var newlevel int
if src.Walkgen == walkgen && src.Esclevel <= int32(level) {
@ -1292,11 +1290,11 @@ func escwalk(e *EscState, level int, dst *Node, src *Node) {
// The second clause is for values pointed at by an object passed to a call
// that returns something reached via indirect from the object.
// We don't know which result it is or how many indirects, so we treat it as leaking.
leaks = bool2int(level <= 0 && dst.Escloopdepth < src.Escloopdepth || level < 0 && dst == &e.funcParam && haspointers(src.Type))
leaks = level <= 0 && dst.Escloopdepth < src.Escloopdepth || level < 0 && dst == &e.funcParam && haspointers(src.Type)
switch src.Op {
case ONAME:
if src.Class == PPARAM && (leaks != 0 || dst.Escloopdepth < 0) && src.Esc != EscHeap {
if src.Class == PPARAM && (leaks || dst.Escloopdepth < 0) && src.Esc != EscHeap {
src.Esc = EscScope
if Debug['m'] != 0 {
Warnl(int(src.Lineno), "leaking param: %v", Nconv(src, obj.FmtShort))
@ -1306,7 +1304,7 @@ func escwalk(e *EscState, level int, dst *Node, src *Node) {
// Treat a PPARAMREF closure variable as equivalent to the
// original variable.
if src.Class == PPARAMREF {
if leaks != 0 && Debug['m'] != 0 {
if leaks && Debug['m'] != 0 {
Warnl(int(src.Lineno), "leaking closure reference %v", Nconv(src, obj.FmtShort))
}
escwalk(e, level, dst, src.Closure)
@ -1314,7 +1312,7 @@ func escwalk(e *EscState, level int, dst *Node, src *Node) {
case OPTRLIT,
OADDR:
if leaks != 0 {
if leaks {
src.Esc = EscHeap
addrescapes(src.Left)
if Debug['m'] != 0 {
@ -1329,7 +1327,7 @@ func escwalk(e *EscState, level int, dst *Node, src *Node) {
escwalk(e, newlevel, dst, src.Left)
case OARRAYLIT:
if Isfixedarray(src.Type) != 0 {
if Isfixedarray(src.Type) {
break
}
fallthrough
@ -1349,7 +1347,7 @@ func escwalk(e *EscState, level int, dst *Node, src *Node) {
OCLOSURE,
OCALLPART,
ORUNESTR:
if leaks != 0 {
if leaks {
src.Esc = EscHeap
if Debug['m'] != 0 {
Warnl(int(src.Lineno), "%v escapes to heap", Nconv(src, obj.FmtShort))
@ -1365,7 +1363,7 @@ func escwalk(e *EscState, level int, dst *Node, src *Node) {
escwalk(e, level, dst, src.Left)
case OINDEX:
if Isfixedarray(src.Left.Type) != 0 {
if Isfixedarray(src.Left.Type) {
escwalk(e, level, dst, src.Left)
break
}


@ -42,19 +42,19 @@ func exportname(s string) bool {
return unicode.IsUpper(r)
}
func initname(s string) int {
return bool2int(s == "init")
func initname(s string) bool {
return s == "init"
}
// exportedsym reports whether a symbol will be visible
// to files that import our package.
func exportedsym(sym *Sym) int {
func exportedsym(sym *Sym) bool {
// Builtins are visible everywhere.
if sym.Pkg == builtinpkg || sym.Origpkg == builtinpkg {
return 1
return true
}
return bool2int(sym.Pkg == localpkg && exportname(sym.Name))
return sym.Pkg == localpkg && exportname(sym.Name)
}
func autoexport(n *Node, ctxt int) {
@ -69,10 +69,10 @@ func autoexport(n *Node, ctxt int) {
}
// -A is for cmd/gc/mkbuiltin script, so export everything
if Debug['A'] != 0 || exportname(n.Sym.Name) || initname(n.Sym.Name) != 0 {
if Debug['A'] != 0 || exportname(n.Sym.Name) || initname(n.Sym.Name) {
exportsym(n)
}
if asmhdr != "" && n.Sym.Pkg == localpkg && !(n.Sym.Flags&SymAsm != 0) {
if asmhdr != "" && n.Sym.Pkg == localpkg && n.Sym.Flags&SymAsm == 0 {
n.Sym.Flags |= SymAsm
asmlist = list(asmlist, n)
}
@ -86,7 +86,7 @@ func dumppkg(p *Pkg) {
}
p.Exported = 1
suffix = ""
if !(p.Direct != 0) {
if p.Direct == 0 {
suffix = " // indirect"
}
fmt.Fprintf(bout, "\timport %s \"%v\"%s\n", p.Name, Zconv(p.Path, 0), suffix)
@ -102,7 +102,7 @@ func reexportdeplist(ll *NodeList) {
func reexportdep(n *Node) {
var t *Type
if !(n != nil) {
if n == nil {
return
}
@ -118,14 +118,14 @@ func reexportdep(n *Node) {
}
// nodes for method calls.
if !(n.Type != nil) || n.Type.Thistuple > 0 {
if n.Type == nil || n.Type.Thistuple > 0 {
break
}
fallthrough
// fallthrough
case PEXTERN:
if n.Sym != nil && !(exportedsym(n.Sym) != 0) {
if n.Sym != nil && !exportedsym(n.Sym) {
if Debug['E'] != 0 {
fmt.Printf("reexport name %v\n", Sconv(n.Sym, 0))
}
@ -141,7 +141,7 @@ func reexportdep(n *Node) {
if Isptr[t.Etype] != 0 {
t = t.Type
}
if t != nil && t.Sym != nil && t.Sym.Def != nil && !(exportedsym(t.Sym) != 0) {
if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
if Debug['E'] != 0 {
fmt.Printf("reexport type %v from declaration\n", Sconv(t.Sym, 0))
}
@ -155,7 +155,7 @@ func reexportdep(n *Node) {
if Isptr[t.Etype] != 0 {
t = t.Type
}
if t != nil && t.Sym != nil && t.Sym.Def != nil && !(exportedsym(t.Sym) != 0) {
if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
if Debug['E'] != 0 {
fmt.Printf("reexport literal type %v\n", Sconv(t.Sym, 0))
}
@ -166,7 +166,7 @@ func reexportdep(n *Node) {
// fallthrough
case OTYPE:
if n.Sym != nil && !(exportedsym(n.Sym) != 0) {
if n.Sym != nil && !exportedsym(n.Sym) {
if Debug['E'] != 0 {
fmt.Printf("reexport literal/type %v\n", Sconv(n.Sym, 0))
}
@ -192,10 +192,10 @@ func reexportdep(n *Node) {
OMAKECHAN:
t = n.Type
if !(t.Sym != nil) && t.Type != nil {
if t.Sym == nil && t.Type != nil {
t = t.Type
}
if t != nil && t.Sym != nil && t.Sym.Def != nil && !(exportedsym(t.Sym) != 0) {
if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
if Debug['E'] != 0 {
fmt.Printf("reexport type for expression %v\n", Sconv(t.Sym, 0))
}
@ -227,7 +227,7 @@ func dumpexportconst(s *Sym) {
t = n.Type // may or may not be specified
dumpexporttype(t)
if t != nil && !(isideal(t) != 0) {
if t != nil && !isideal(t) {
fmt.Fprintf(bout, "\tconst %v %v = %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp), Vconv(&n.Val, obj.FmtSharp))
} else {
fmt.Fprintf(bout, "\tconst %v = %v\n", Sconv(s, obj.FmtSharp), Vconv(&n.Val, obj.FmtSharp))
@ -329,7 +329,7 @@ func dumpexporttype(t *Type) {
fmt.Fprintf(bout, "\ttype %v %v\n", Sconv(t.Sym, obj.FmtSharp), Tconv(t, obj.FmtSharp|obj.FmtLong))
for i = 0; i < n; i++ {
f = m[i]
if f.Nointerface != 0 {
if f.Nointerface {
fmt.Fprintf(bout, "\t//go:nointerface\n")
}
if f.Type.Nname != nil && f.Type.Nname.Inl != nil { // nname was set by caninl
@ -428,7 +428,7 @@ func importsym(s *Sym, op int) *Sym {
// mark the symbol so it is not reexported
if s.Def == nil {
if exportname(s.Name) || initname(s.Name) != 0 {
if exportname(s.Name) || initname(s.Name) {
s.Flags |= SymExport
} else {
s.Flags |= SymPackage // package scope
@ -474,7 +474,7 @@ func importimport(s *Sym, z *Strlit) {
Yyerror("conflicting names %s and %s for package \"%v\"", p.Name, s.Name, Zconv(p.Path, 0))
}
if !(incannedimport != 0) && myimportpath != "" && z.S == myimportpath {
if incannedimport == 0 && myimportpath != "" && z.S == myimportpath {
Yyerror("import \"%v\": package depends on \"%v\" (import cycle)", Zconv(importpkg.Path, 0), Zconv(z, 0))
errorexit()
}


@ -207,15 +207,15 @@ func Jconv(n *Node, flag int) string {
c = flag & obj.FmtShort
if !(c != 0) && n.Ullman != 0 {
if c == 0 && n.Ullman != 0 {
fp += fmt.Sprintf(" u(%d)", n.Ullman)
}
if !(c != 0) && n.Addable != 0 {
if c == 0 && n.Addable != 0 {
fp += fmt.Sprintf(" a(%d)", n.Addable)
}
if !(c != 0) && n.Vargen != 0 {
if c == 0 && n.Vargen != 0 {
fp += fmt.Sprintf(" g(%d)", n.Vargen)
}
@ -223,7 +223,7 @@ func Jconv(n *Node, flag int) string {
fp += fmt.Sprintf(" l(%d)", n.Lineno)
}
if !(c != 0) && n.Xoffset != BADWIDTH {
if c == 0 && n.Xoffset != BADWIDTH {
fp += fmt.Sprintf(" x(%d%+d)", n.Xoffset, n.Stkdelta)
}
@ -261,7 +261,7 @@ func Jconv(n *Node, flag int) string {
fp += fmt.Sprintf(" esc(no)")
case EscNever:
if !(c != 0) {
if c == 0 {
fp += fmt.Sprintf(" esc(N)")
}
@ -273,11 +273,11 @@ func Jconv(n *Node, flag int) string {
fp += fmt.Sprintf(" ld(%d)", n.Escloopdepth)
}
if !(c != 0) && n.Typecheck != 0 {
if c == 0 && n.Typecheck != 0 {
fp += fmt.Sprintf(" tc(%d)", n.Typecheck)
}
if !(c != 0) && n.Dodata != 0 {
if c == 0 && n.Dodata != 0 {
fp += fmt.Sprintf(" dd(%d)", n.Dodata)
}
@ -301,7 +301,7 @@ func Jconv(n *Node, flag int) string {
fp += fmt.Sprintf(" assigned")
}
if !(c != 0) && n.Used != 0 {
if c == 0 && n.Used != 0 {
fp += fmt.Sprintf(" used(%d)", n.Used)
}
return fp
@ -497,7 +497,7 @@ func symfmt(s *Sym, flag int) string {
var p string
if s.Pkg != nil && !(flag&obj.FmtShort != 0 /*untyped*/) {
if s.Pkg != nil && flag&obj.FmtShort == 0 /*untyped*/ {
switch fmtmode {
case FErr: // This is for the user
if s.Pkg == localpkg {
@ -608,7 +608,7 @@ func typefmt(t *Type, flag int) string {
}
// Unless the 'l' flag was specified, if the type has a name, just print that name.
if !(flag&obj.FmtLong != 0 /*untyped*/) && t.Sym != nil && t.Etype != TFIELD && t != Types[t.Etype] {
if flag&obj.FmtLong == 0 /*untyped*/ && t.Sym != nil && t.Etype != TFIELD && t != Types[t.Etype] {
switch fmtmode {
case FTypeId:
if flag&obj.FmtShort != 0 /*untyped*/ {
@ -802,7 +802,7 @@ func typefmt(t *Type, flag int) string {
return fp
case TFIELD:
if !(flag&obj.FmtShort != 0 /*untyped*/) {
if flag&obj.FmtShort == 0 /*untyped*/ {
s = t.Sym
// Take the name from the original, lest we substituted it with ~r%d or ~b%d.
@ -822,7 +822,7 @@ func typefmt(t *Type, flag int) string {
}
}
if s != nil && !(t.Embedded != 0) {
if s != nil && t.Embedded == 0 {
if t.Funarg != 0 {
fp += fmt.Sprintf("%v ", Nconv(t.Nname, 0))
} else if flag&obj.FmtLong != 0 /*untyped*/ {
@ -850,7 +850,7 @@ func typefmt(t *Type, flag int) string {
fp += fmt.Sprintf("%v", Tconv(t.Type, 0))
}
if !(flag&obj.FmtShort != 0 /*untyped*/) && t.Note != nil {
if flag&obj.FmtShort == 0 /*untyped*/ && t.Note != nil {
fp += fmt.Sprintf(" \"%v\"", Zconv(t.Note, 0))
}
return fp
@ -882,23 +882,23 @@ func typefmt(t *Type, flag int) string {
}
// Statements which may be rendered with a simplestmt as init.
func stmtwithinit(op int) int {
func stmtwithinit(op int) bool {
switch op {
case OIF,
OFOR,
OSWITCH:
return 1
return true
}
return 0
return false
}
func stmtfmt(n *Node) string {
var f string
var complexinit int
var simpleinit int
var extrablock int
var complexinit bool
var simpleinit bool
var extrablock bool
// some statements allow for an init, but at most one,
// but we may have an arbitrary number added, eg by typecheck
@ -906,19 +906,19 @@ func stmtfmt(n *Node) string {
// block starting with the init statements.
// if we can just say "for" n->ninit; ... then do so
simpleinit = bool2int(n.Ninit != nil && !(n.Ninit.Next != nil) && !(n.Ninit.N.Ninit != nil) && stmtwithinit(int(n.Op)) != 0)
simpleinit = n.Ninit != nil && n.Ninit.Next == nil && n.Ninit.N.Ninit == nil && stmtwithinit(int(n.Op))
// otherwise, print the inits as separate statements
complexinit = bool2int(n.Ninit != nil && !(simpleinit != 0) && (fmtmode != FErr))
complexinit = n.Ninit != nil && !simpleinit && (fmtmode != FErr)
// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
extrablock = bool2int(complexinit != 0 && stmtwithinit(int(n.Op)) != 0)
extrablock = complexinit && stmtwithinit(int(n.Op))
if extrablock != 0 {
if extrablock {
f += "{"
}
if complexinit != 0 {
if complexinit {
f += fmt.Sprintf(" %v; ", Hconv(n.Ninit, 0))
}
@ -951,7 +951,7 @@ func stmtfmt(n *Node) string {
break
}
if n.Colas != 0 && !(complexinit != 0) {
if n.Colas != 0 && !complexinit {
f += fmt.Sprintf("%v := %v", Nconv(n.Left, 0), Nconv(n.Right, 0))
} else {
f += fmt.Sprintf("%v = %v", Nconv(n.Left, 0), Nconv(n.Right, 0))
@ -970,7 +970,7 @@ func stmtfmt(n *Node) string {
f += fmt.Sprintf("%v %v= %v", Nconv(n.Left, 0), Oconv(int(n.Etype), obj.FmtSharp), Nconv(n.Right, 0))
case OAS2:
if n.Colas != 0 && !(complexinit != 0) {
if n.Colas != 0 && !complexinit {
f += fmt.Sprintf("%v := %v", Hconv(n.List, obj.FmtComma), Hconv(n.Rlist, obj.FmtComma))
break
}
@ -996,7 +996,7 @@ func stmtfmt(n *Node) string {
f += fmt.Sprintf("defer %v", Nconv(n.Left, 0))
case OIF:
if simpleinit != 0 {
if simpleinit {
f += fmt.Sprintf("if %v; %v { %v }", Nconv(n.Ninit.N, 0), Nconv(n.Ntest, 0), Hconv(n.Nbody, 0))
} else {
f += fmt.Sprintf("if %v { %v }", Nconv(n.Ntest, 0), Hconv(n.Nbody, 0))
@ -1012,7 +1012,7 @@ func stmtfmt(n *Node) string {
}
f += "for"
if simpleinit != 0 {
if simpleinit {
f += fmt.Sprintf(" %v;", Nconv(n.Ninit.N, 0))
} else if n.Nincr != nil {
f += " ;"
@ -1024,7 +1024,7 @@ func stmtfmt(n *Node) string {
if n.Nincr != nil {
f += fmt.Sprintf("; %v", Nconv(n.Nincr, 0))
} else if simpleinit != 0 {
} else if simpleinit {
f += ";"
}
@ -1051,7 +1051,7 @@ func stmtfmt(n *Node) string {
}
f += fmt.Sprintf("%v", Oconv(int(n.Op), obj.FmtSharp))
if simpleinit != 0 {
if simpleinit {
f += fmt.Sprintf(" %v;", Nconv(n.Ninit.N, 0))
}
if n.Ntest != nil {
@ -1087,7 +1087,7 @@ func stmtfmt(n *Node) string {
}
ret:
if extrablock != 0 {
if extrablock {
f += "}"
}
@ -1211,7 +1211,7 @@ func exprfmt(n *Node, prec int) string {
var f string
var nprec int
var ptrlit int
var ptrlit bool
var l *NodeList
for n != nil && n.Implicit != 0 && (n.Op == OIND || n.Op == OADDR) {
@ -1368,10 +1368,10 @@ func exprfmt(n *Node, prec int) string {
return f
case OCOMPLIT:
ptrlit = bool2int(n.Right != nil && n.Right.Implicit != 0 && n.Right.Type != nil && Isptr[n.Right.Type.Etype] != 0)
ptrlit = n.Right != nil && n.Right.Implicit != 0 && n.Right.Type != nil && Isptr[n.Right.Type.Etype] != 0
if fmtmode == FErr {
if n.Right != nil && n.Right.Type != nil && !(n.Implicit != 0) {
if ptrlit != 0 {
if n.Right != nil && n.Right.Type != nil && n.Implicit == 0 {
if ptrlit {
f += fmt.Sprintf("&%v literal", Tconv(n.Right.Type.Type, 0))
return f
} else {
@ -1384,7 +1384,7 @@ func exprfmt(n *Node, prec int) string {
return f
}
if fmtmode == FExp && ptrlit != 0 {
if fmtmode == FExp && ptrlit {
// typecheck has overwritten OIND by OTYPE with pointer type.
f += fmt.Sprintf("(&%v{ %v })", Tconv(n.Right.Type.Type, 0), Hconv(n.List, obj.FmtComma))
return f
@ -1418,7 +1418,7 @@ func exprfmt(n *Node, prec int) string {
}
}
if !(n.Implicit != 0) {
if n.Implicit == 0 {
f += "})"
return f
}
@ -1454,11 +1454,11 @@ func exprfmt(n *Node, prec int) string {
}
}
if !(n.Left != nil) && n.Right != nil {
if n.Left == nil && n.Right != nil {
f += fmt.Sprintf(":%v", Nconv(n.Right, 0))
return f
}
if n.Left != nil && !(n.Right != nil) {
if n.Left != nil && n.Right == nil {
f += fmt.Sprintf("%v:", Nconv(n.Left, 0))
return f
}
@ -1686,15 +1686,15 @@ func indent(s string) string {
func nodedump(n *Node, flag int) string {
var fp string
var recur int
var recur bool
if n == nil {
return fp
}
recur = bool2int(!(flag&obj.FmtShort != 0 /*untyped*/))
recur = flag&obj.FmtShort == 0 /*untyped*/
if recur != 0 {
if recur {
fp = indent(fp)
if dumpdepth > 10 {
fp += "..."
@ -1727,7 +1727,7 @@ func nodedump(n *Node, flag int) string {
} else {
fp += fmt.Sprintf("%v%v", Oconv(int(n.Op), 0), Jconv(n, 0))
}
if recur != 0 && n.Type == nil && n.Ntype != nil {
if recur && n.Type == nil && n.Ntype != nil {
fp = indent(fp)
fp += fmt.Sprintf("%v-ntype%v", Oconv(int(n.Op), 0), Nconv(n.Ntype, 0))
}
@ -1737,7 +1737,7 @@ func nodedump(n *Node, flag int) string {
case OTYPE:
fp += fmt.Sprintf("%v %v%v type=%v", Oconv(int(n.Op), 0), Sconv(n.Sym, 0), Jconv(n, 0), Tconv(n.Type, 0))
if recur != 0 && n.Type == nil && n.Ntype != nil {
if recur && n.Type == nil && n.Ntype != nil {
fp = indent(fp)
fp += fmt.Sprintf("%v-ntype%v", Oconv(int(n.Op), 0), Nconv(n.Ntype, 0))
}
@ -1751,7 +1751,7 @@ func nodedump(n *Node, flag int) string {
fp += fmt.Sprintf(" %v", Tconv(n.Type, 0))
}
if recur != 0 {
if recur {
if n.Left != nil {
fp += fmt.Sprintf("%v", Nconv(n.Left, 0))
}


@ -109,7 +109,7 @@ func addrescapes(n *Node) {
// is always a heap pointer anyway.
case ODOT,
OINDEX:
if !(Isslice(n.Left.Type) != 0) {
if !Isslice(n.Left.Type) {
addrescapes(n.Left)
}
}
@ -253,7 +253,6 @@ func cgen_proc(n *Node, proc int) {
switch n.Left.Op {
default:
Fatal("cgen_proc: unknown call %v", Oconv(int(n.Left.Op), 0))
fallthrough
case OCALLMETH:
Cgen_callmeth(n.Left, proc)
@ -280,7 +279,7 @@ func cgen_dcl(n *Node) {
Fatal("cgen_dcl")
}
if !(n.Class&PHEAP != 0) {
if n.Class&PHEAP == 0 {
return
}
if compiling_runtime != 0 {
@ -304,7 +303,7 @@ func cgen_discard(nr *Node) {
switch nr.Op {
case ONAME:
if !(nr.Class&PHEAP != 0) && nr.Class != PEXTERN && nr.Class != PFUNC && nr.Class != PPARAMREF {
if nr.Class&PHEAP == 0 && nr.Class != PEXTERN && nr.Class != PFUNC && nr.Class != PPARAMREF {
gused(nr)
}
@ -480,7 +479,7 @@ func Cgen_slice(n *Node, res *Node) {
tmpcap = tmplen
}
if isnil(n.Left) != 0 {
if isnil(n.Left) {
Tempname(&src, n.Left.Type)
Thearch.Cgen(n.Left, &src)
} else {
@ -491,7 +490,7 @@ func Cgen_slice(n *Node, res *Node) {
}
if n.Op == OSLICEARR || n.Op == OSLICE3ARR {
if !(Isptr[n.Left.Type.Etype] != 0) {
if Isptr[n.Left.Type.Etype] == 0 {
Fatal("slicearr is supposed to work on pointer: %v\n", Nconv(n, obj.FmtSign))
}
Thearch.Cgen(&src, base)
@ -668,13 +667,12 @@ func gen(n *Node) {
var p2 *obj.Prog
var p3 *obj.Prog
var lab *Label
var wasregalloc int32
//dump("gen", n);
lno = setlineno(n)
wasregalloc = int32(Thearch.Anyregalloc())
wasregalloc := Thearch.Anyregalloc()
if n == nil {
goto ret
@ -879,7 +877,7 @@ func gen(n *Node) {
cgen_dcl(n.Left)
case OAS:
if gen_as_init(n) != 0 {
if gen_as_init(n) {
break
}
Cgen_as(n.Left, n.Right)
@ -911,7 +909,7 @@ func gen(n *Node) {
}
ret:
if int32(Thearch.Anyregalloc()) != wasregalloc {
if Thearch.Anyregalloc() != wasregalloc {
Dump("node", n)
Fatal("registers left allocated")
}
@ -936,7 +934,7 @@ func Cgen_as(nl *Node, nr *Node) {
return
}
if nr == nil || iszero(nr) != 0 {
if nr == nil || iszero(nr) {
// heaps should already be clear
if nr == nil && (nl.Class&PHEAP != 0) {
return
@ -946,7 +944,7 @@ func Cgen_as(nl *Node, nr *Node) {
if tl == nil {
return
}
if Isfat(tl) != 0 {
if Isfat(tl) {
if nl.Op == ONAME {
Gvardef(nl)
}
@ -1002,7 +1000,7 @@ func checklabels() {
continue
}
if lab.Use == nil && !(lab.Used != 0) {
if lab.Use == nil && lab.Used == 0 {
yyerrorl(int(lab.Def.Lineno), "label %v defined and not used", Sconv(lab.Sym, 0))
continue
}


@ -7,7 +7,6 @@ package gc
import (
"bytes"
"cmd/internal/obj"
"encoding/binary"
)
// Copyright 2009 The Go Authors. All rights reserved.
@ -162,7 +161,7 @@ type Node struct {
Addable uint8
Trecur uint8
Etype uint8
Bounded uint8
Bounded bool
Class uint8
Method uint8
Embedded uint8
@ -191,7 +190,7 @@ type Node struct {
Likely int8
Hasbreak uint8
Needzero uint8
Needctxt uint8
Needctxt bool
Esc uint
Funcdepth int
Type *Type
@ -245,7 +244,7 @@ type NodeList struct {
type Type struct {
Etype uint8
Nointerface uint8
Nointerface bool
Noalg uint8
Chan uint8
Trecur uint8
@ -918,6 +917,10 @@ var nblank *Node
var Use_sse int
var hunk string
var nhunk int32
var thunk int32
var Funcdepth int
@ -1119,7 +1122,6 @@ const (
)
type Arch struct {
ByteOrder binary.ByteOrder
Thechar int
Thestring string
Thelinkarch *obj.LinkArch
@ -1127,7 +1129,7 @@ type Arch struct {
REGSP int
REGCTXT int
MAXWIDTH int64
Anyregalloc func() int
Anyregalloc func() bool
Betypeinit func()
Bgen func(*Node, bool, int, *obj.Prog)
Cgen func(*Node, *Node)
@ -1148,10 +1150,10 @@ type Arch struct {
Proginfo func(*ProgInfo, *obj.Prog)
Regalloc func(*Node, *Type, *Node)
Regfree func(*Node)
Regtyp func(*obj.Addr) int
Sameaddr func(*obj.Addr, *obj.Addr) int
Smallindir func(*obj.Addr, *obj.Addr) int
Stackaddr func(*obj.Addr) int
Regtyp func(*obj.Addr) bool
Sameaddr func(*obj.Addr, *obj.Addr) bool
Smallindir func(*obj.Addr, *obj.Addr) bool
Stackaddr func(*obj.Addr) bool
Excludedregs func() uint64
RtoB func(int) uint64
FtoB func(int) uint64
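
The Arch struct hunk above changes the backend hooks that portable code calls through Thearch (for example the Anyregalloc check in the gen hunk earlier) from int-returning to bool-returning. A minimal sketch of a conforming hook, with hypothetical register state; the real backends supply their own implementations:

    package main

    var regused [16]bool // hypothetical per-register allocation flags

    // Before this commit the hook was func() int, returning 0 or 1;
    // now it matches the Anyregalloc func() bool field directly.
    func anyregalloc() bool {
        for _, u := range regused {
            if u {
                return true
            }
        }
        return false
    }

    func main() {
        arch := struct{ Anyregalloc func() bool }{Anyregalloc: anyregalloc}
        if arch.Anyregalloc() {
            println("registers left allocated") // mirrors the check in gen
        }
    }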


@ -41,7 +41,7 @@ var dpc *obj.Prog
/*
* Is this node a memory operand?
*/
func Ismem(n *Node) int {
func Ismem(n *Node) bool {
switch n.Op {
case OITAB,
OSPTR,
@ -51,29 +51,29 @@ func Ismem(n *Node) int {
ONAME,
OPARAM,
OCLOSUREVAR:
return 1
return true
case OADDR:
return bool2int(Thearch.Thechar == '6' || Thearch.Thechar == '9') // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too
return Thearch.Thechar == '6' || Thearch.Thechar == '9' // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too
}
return 0
return false
}
func Samereg(a *Node, b *Node) int {
func Samereg(a *Node, b *Node) bool {
if a == nil || b == nil {
return 0
return false
}
if a.Op != OREGISTER {
return 0
return false
}
if b.Op != OREGISTER {
return 0
return false
}
if a.Val.U.Reg != b.Val.U.Reg {
return 0
return false
}
return 1
return true
}
/*
@ -174,15 +174,15 @@ func fixautoused(p *obj.Prog) {
for lp = &p; ; {
p = *lp
if !(p != nil) {
if p == nil {
break
}
if p.As == obj.ATYPE && p.From.Node != nil && p.From.Name == obj.NAME_AUTO && !(((p.From.Node).(*Node)).Used != 0) {
if p.As == obj.ATYPE && p.From.Node != nil && p.From.Name == obj.NAME_AUTO && ((p.From.Node).(*Node)).Used == 0 {
*lp = p.Link
continue
}
if (p.As == obj.AVARDEF || p.As == obj.AVARKILL) && p.To.Node != nil && !(((p.To.Node).(*Node)).Used != 0) {
if (p.As == obj.AVARDEF || p.As == obj.AVARKILL) && p.To.Node != nil && ((p.To.Node).(*Node)).Used == 0 {
// Cannot remove VARDEF instruction, because - unlike TYPE handled above -
// VARDEFs are interspersed with other code, and a jump might be using the
// VARDEF as a target. Replace with a no-op instead. A later pass will remove
@ -256,18 +256,18 @@ func gused(n *Node) {
Thearch.Gins(obj.ANOP, n, nil) // used
}
func Isfat(t *Type) int {
func Isfat(t *Type) bool {
if t != nil {
switch t.Etype {
case TSTRUCT,
TARRAY,
TSTRING,
TINTER: // maybe remove later
return 1
return true
}
}
return 0
return false
}
func markautoused(p *obj.Prog) {
@ -289,7 +289,7 @@ func markautoused(p *obj.Prog) {
func Naddr(n *Node, a *obj.Addr, canemitcode int) {
var s *Sym
*a = obj.Zprog.From
*a = obj.Addr{}
if n == nil {
return
}
@ -343,7 +343,7 @@ func Naddr(n *Node, a *obj.Addr, canemitcode int) {
a.Node = n.Left.Orig
case OCLOSUREVAR:
if !(Curfn.Needctxt != 0) {
if !Curfn.Needctxt {
Fatal("closurevar without needctxt")
}
a.Type = obj.TYPE_MEM
@ -383,7 +383,6 @@ func Naddr(n *Node, a *obj.Addr, canemitcode int) {
switch n.Class {
default:
Fatal("naddr: ONAME class %v %d\n", Sconv(n.Sym, 0), n.Class)
fallthrough
case PEXTERN:
a.Name = obj.NAME_EXTERN


@ -53,7 +53,7 @@ func renameinit() *Sym {
* return (11)
* }
*/
func anyinit(n *NodeList) int {
func anyinit(n *NodeList) bool {
var h uint32
var s *Sym
var l *NodeList
@ -68,20 +68,20 @@ func anyinit(n *NodeList) int {
break
case OAS:
if isblank(l.N.Left) && candiscard(l.N.Right) != 0 {
if isblank(l.N.Left) && candiscard(l.N.Right) {
break
}
fallthrough
// fall through
default:
return 1
return true
}
}
// is this main
if localpkg.Name == "main" {
return 1
return true
}
// is there an explicit init function
@ -89,7 +89,7 @@ func anyinit(n *NodeList) int {
s = Lookup(namebuf)
if s.Def != nil {
return 1
return true
}
// are there any imported init functions
@ -101,12 +101,12 @@ func anyinit(n *NodeList) int {
if s.Def == nil {
continue
}
return 1
return true
}
}
// then none
return 0
return false
}
func fninit(n *NodeList) {
@ -126,7 +126,7 @@ func fninit(n *NodeList) {
}
n = initfix(n)
if !(anyinit(n) != 0) {
if !anyinit(n) {
return
}


@ -56,7 +56,7 @@ func fnpkg(fn *Node) *Pkg {
if Isptr[rcvr.Etype] != 0 {
rcvr = rcvr.Type
}
if !(rcvr.Sym != nil) {
if rcvr.Sym == nil {
Fatal("receiver with no sym: [%v] %v (%v)", Sconv(fn.Sym, 0), Nconv(fn, obj.FmtLong), Tconv(rcvr, 0))
}
return rcvr.Sym.Pkg
@ -114,7 +114,7 @@ func caninl(fn *Node) {
if fn.Op != ODCLFUNC {
Fatal("caninl %v", Nconv(fn, 0))
}
if !(fn.Nname != nil) {
if fn.Nname == nil {
Fatal("caninl no nname %v", Nconv(fn, obj.FmtSign))
}
@ -137,7 +137,7 @@ func caninl(fn *Node) {
}
budget = 40 // allowed hairyness
if ishairylist(fn.Nbody, &budget) != 0 {
if ishairylist(fn.Nbody, &budget) {
return
}
@ -162,18 +162,18 @@ func caninl(fn *Node) {
}
// Look for anything we want to punt on.
func ishairylist(ll *NodeList, budget *int) int {
func ishairylist(ll *NodeList, budget *int) bool {
for ; ll != nil; ll = ll.Next {
if ishairy(ll.N, budget) != 0 {
return 1
if ishairy(ll.N, budget) {
return true
}
}
return 0
return false
}
func ishairy(n *Node, budget *int) int {
if !(n != nil) {
return 0
func ishairy(n *Node, budget *int) bool {
if n == nil {
return false
}
// Things that are too hairy, irrespective of the budget
@ -185,7 +185,7 @@ func ishairy(n *Node, budget *int) int {
OPANIC,
ORECOVER:
if Debug['l'] < 4 {
return 1
return true
}
case OCLOSURE,
@ -199,12 +199,12 @@ func ishairy(n *Node, budget *int) int {
ODCLTYPE, // can't print yet
ODCLCONST, // can't print yet
ORETJMP:
return 1
return true
}
(*budget)--
return bool2int(*budget < 0 || ishairy(n.Left, budget) != 0 || ishairy(n.Right, budget) != 0 || ishairylist(n.List, budget) != 0 || ishairylist(n.Rlist, budget) != 0 || ishairylist(n.Ninit, budget) != 0 || ishairy(n.Ntest, budget) != 0 || ishairy(n.Nincr, budget) != 0 || ishairylist(n.Nbody, budget) != 0 || ishairylist(n.Nelse, budget) != 0)
return *budget < 0 || ishairy(n.Left, budget) || ishairy(n.Right, budget) || ishairylist(n.List, budget) || ishairylist(n.Rlist, budget) || ishairylist(n.Ninit, budget) || ishairy(n.Ntest, budget) || ishairy(n.Nincr, budget) || ishairylist(n.Nbody, budget) || ishairylist(n.Nelse, budget)
}
// Inlcopy and inlcopylist recursively copy the body of a function.
@ -506,7 +506,7 @@ func mkinlcall(np **Node, fn *Node, isddd int) {
func tinlvar(t *Type) *Node {
if t.Nname != nil && !isblank(t.Nname) {
if !(t.Nname.Inlvar != nil) {
if t.Nname.Inlvar == nil {
Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
}
return t.Nname.Inlvar
@ -524,7 +524,7 @@ var inlgen int
// parameters.
func mkinlcall1(np **Node, fn *Node, isddd int) {
var i int
var chkargcount int
var chkargcount bool
var n *Node
var call *Node
var saveinlfn *Node
@ -535,7 +535,7 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
var ninit *NodeList
var body *NodeList
var t *Type
var variadic int
var variadic bool
var varargcount int
var multiret int
var vararg *Node
@ -623,10 +623,10 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
// method call with a receiver.
t = getthisx(fn.Type).Type
if t != nil && t.Nname != nil && !isblank(t.Nname) && !(t.Nname.Inlvar != nil) {
if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Inlvar == nil {
Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
}
if !(n.Left.Left != nil) {
if n.Left.Left == nil {
Fatal("method call without receiver: %v", Nconv(n, obj.FmtSign))
}
if t == nil {
@ -640,26 +640,26 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
}
// check if inlined function is variadic.
variadic = 0
variadic = false
varargtype = nil
varargcount = 0
for t = fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
if t.Isddd != 0 {
variadic = 1
variadic = true
varargtype = t.Type
}
}
// but if argument is dotted too forget about variadicity.
if variadic != 0 && isddd != 0 {
variadic = 0
if variadic && isddd != 0 {
variadic = false
}
// check if argument is actually a returned tuple from call.
multiret = 0
if n.List != nil && !(n.List.Next != nil) {
if n.List != nil && n.List.Next == nil {
switch n.List.N.Op {
case OCALL,
OCALLFUNC,
@ -671,7 +671,7 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
}
}
if variadic != 0 {
if variadic {
varargcount = count(n.List) + multiret
if n.Left.Op != ODOTMETH {
varargcount -= fn.Type.Thistuple
@ -688,14 +688,14 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
// TODO: if len(nlist) == 1 but multiple args, check that n->list->n is a call?
if fn.Type.Thistuple != 0 && n.Left.Op != ODOTMETH {
// non-method call to method
if !(n.List != nil) {
if n.List == nil {
Fatal("non-method call to method without first arg: %v", Nconv(n, obj.FmtSign))
}
// append receiver inlvar to LHS.
t = getthisx(fn.Type).Type
if t != nil && t.Nname != nil && !isblank(t.Nname) && !(t.Nname.Inlvar != nil) {
if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Inlvar == nil {
Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
}
if t == nil {
@ -706,14 +706,14 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
}
// append ordinary arguments to LHS.
chkargcount = bool2int(n.List != nil && n.List.Next != nil)
chkargcount = n.List != nil && n.List.Next != nil
vararg = nil // the slice argument to a variadic call
varargs = nil // the list of LHS names to put in vararg.
if !(chkargcount != 0) {
if !chkargcount {
// 0 or 1 expression on RHS.
for t = getinargx(fn.Type).Type; t != nil; t = t.Down {
if variadic != 0 && t.Isddd != 0 {
if variadic && t.Isddd != 0 {
vararg = tinlvar(t)
for i = 0; i < varargcount && ll != nil; i++ {
m = argvar(varargtype, i)
@ -729,10 +729,10 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
} else {
// match arguments except final variadic (unless the call is dotted itself)
for t = getinargx(fn.Type).Type; t != nil; {
if !(ll != nil) {
if ll == nil {
break
}
if variadic != 0 && t.Isddd != 0 {
if variadic && t.Isddd != 0 {
break
}
as.List = list(as.List, tinlvar(t))
@ -741,7 +741,7 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
}
// match varargcount arguments with variadic parameters.
if variadic != 0 && t != nil && t.Isddd != 0 {
if variadic && t != nil && t.Isddd != 0 {
vararg = tinlvar(t)
for i = 0; i < varargcount && ll != nil; i++ {
m = argvar(varargtype, i)
@ -766,9 +766,9 @@ func mkinlcall1(np **Node, fn *Node, isddd int) {
}
// turn the variadic args into a slice.
if variadic != 0 {
if variadic {
as = Nod(OAS, vararg, nil)
if !(varargcount != 0) {
if varargcount == 0 {
as.Right = nodnil()
as.Right.Type = varargtype
} else {
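To make the variadic bookkeeping concrete: for an inlined func f(xs ...int) called as f(1, 2, 3), varargcount is 3 and the generated prologue should look roughly like the sketch below (illustrative only; the temp names are hypothetical):

// _x0, _x1, _x2 = 1, 2, 3      // one argvar temp per variadic argument
// _xs = []int{_x0, _x1, _x2}   // the vararg slice bound to the ... parameter
// With zero variadic arguments, _xs is instead assigned a typed nil,
// matching the nodnil() branch above.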
@ -1019,7 +1019,7 @@ func setlnolist(ll *NodeList, lno int) {
}
func setlno(n *Node, lno int) {
if !(n != nil) {
if n == nil {
return
}


@ -504,20 +504,20 @@ func arsize(b *obj.Biobuf, name string) int {
return i
}
func skiptopkgdef(b *obj.Biobuf) int {
func skiptopkgdef(b *obj.Biobuf) bool {
var p string
var sz int
/* archive header */
p = obj.Brdline(b, '\n')
if p == "" {
return 0
return false
}
if obj.Blinelen(b) != 8 {
return 0
return false
}
if p != "!<arch>\n" {
return 0
return false
}
/* symbol table may be first; skip it */
@ -533,9 +533,9 @@ func skiptopkgdef(b *obj.Biobuf) int {
sz = arsize(b, "__.PKGDEF")
if sz <= 0 {
return 0
return false
}
return 1
return true
}
func addidir(dir string) {
@ -560,7 +560,7 @@ func islocalname(name *Strlit) bool {
strings.HasPrefix(name.S, "../") || name.S == ".."
}
func findpkg(name *Strlit) int {
func findpkg(name *Strlit) bool {
var p *Idir
var q string
var suffix string
@ -568,7 +568,7 @@ func findpkg(name *Strlit) int {
if islocalname(name) {
if safemode != 0 || nolocalimports != 0 {
return 0
return false
}
// try .a before .6. important for building libraries:
@ -577,13 +577,13 @@ func findpkg(name *Strlit) int {
namebuf = fmt.Sprintf("%v.a", Zconv(name, 0))
if obj.Access(namebuf, 0) >= 0 {
return 1
return true
}
namebuf = fmt.Sprintf("%v.%c", Zconv(name, 0), Thearch.Thechar)
if obj.Access(namebuf, 0) >= 0 {
return 1
return true
}
return 0
return false
}
// local imports should be canonicalized already.
@ -592,17 +592,17 @@ func findpkg(name *Strlit) int {
_ = q
if path.Clean(name.S) != name.S {
Yyerror("non-canonical import path %v (should be %s)", Zconv(name, 0), q)
return 0
return false
}
for p = idirs; p != nil; p = p.link {
namebuf = fmt.Sprintf("%s/%v.a", p.dir, Zconv(name, 0))
if obj.Access(namebuf, 0) >= 0 {
return 1
return true
}
namebuf = fmt.Sprintf("%s/%v.%c", p.dir, Zconv(name, 0), Thearch.Thechar)
if obj.Access(namebuf, 0) >= 0 {
return 1
return true
}
}
@ -619,15 +619,15 @@ func findpkg(name *Strlit) int {
namebuf = fmt.Sprintf("%s/pkg/%s_%s%s%s/%v.a", goroot, goos, goarch, suffixsep, suffix, Zconv(name, 0))
if obj.Access(namebuf, 0) >= 0 {
return 1
return true
}
namebuf = fmt.Sprintf("%s/pkg/%s_%s%s%s/%v.%c", goroot, goos, goarch, suffixsep, suffix, Zconv(name, 0), Thearch.Thechar)
if obj.Access(namebuf, 0) >= 0 {
return 1
return true
}
}
return 0
return false
}
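Reading these hunks together, findpkg keeps its search order and now reports each probe as a bool. The order, as reconstructed from the code above (assuming the variables mean what their names suggest):

// findpkg probe order:
//   1. local paths ("./x", "../x"), refused when safemode or nolocalimports is set
//   2. each include dir p in idirs: p.dir/name.a, then p.dir/name.<Thearch.Thechar>
//   3. $GOROOT/pkg/<goos>_<goarch><suffixsep><suffix>/name.a, then the
//      .<Thearch.Thechar> form
// Every step prefers the .a archive to the compiler-suffixed object file.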
func fakeimport() {
@ -714,7 +714,7 @@ func importfile(f *Val, line int) {
}
}
if !(findpkg(path_) != 0) {
if !findpkg(path_) {
Yyerror("can't find import: \"%v\"", Zconv(f.U.Sval, 0))
errorexit()
}
@ -748,7 +748,7 @@ func importfile(f *Val, line int) {
n = len(namebuf)
if n > 2 && namebuf[n-2] == '.' && namebuf[n-1] == 'a' {
if !(skiptopkgdef(imp) != 0) {
if !skiptopkgdef(imp) {
Yyerror("import %s: not a package file", file)
errorexit()
}
@ -946,7 +946,7 @@ l0:
for {
if escchar('"', &escflag, &v) != 0 {
if escchar('"', &escflag, &v) {
break
}
if v < utf8.RuneSelf || escflag != 0 {
@ -988,12 +988,12 @@ l0:
/* '.' */
case '\'':
if escchar('\'', &escflag, &v) != 0 {
if escchar('\'', &escflag, &v) {
Yyerror("empty character literal or unescaped ' in character literal")
v = '\''
}
if !(escchar('\'', &escflag, &v) != 0) {
if !escchar('\'', &escflag, &v) {
Yyerror("missing '")
ungetc(int(v))
}
@ -1629,7 +1629,7 @@ go_:
}
if verb == "go:linkname" {
if !(imported_unsafe != 0) {
if imported_unsafe == 0 {
Yyerror("//go:linkname only allowed in Go files that import \"unsafe\"")
}
f := strings.Fields(cmd)
@ -1658,7 +1658,7 @@ go_:
}
if verb == "go:nowritebarrier" {
if !(compiling_runtime != 0) {
if compiling_runtime == 0 {
Yyerror("//go:nowritebarrier only allowed in runtime")
}
nowritebarrier = true
@ -1961,7 +1961,7 @@ func getr() int32 {
}
}
func escchar(e int, escflg *int, val *int64) int {
func escchar(e int, escflg *int, val *int64) bool {
var i int
var u int
var c int
@ -1973,21 +1973,21 @@ func escchar(e int, escflg *int, val *int64) int {
switch c {
case EOF:
Yyerror("eof in string")
return 1
return true
case '\n':
Yyerror("newline in string")
return 1
return true
case '\\':
break
default:
if c == e {
return 1
return true
}
*val = int64(c)
return 0
return false
}
u = 0
@ -2043,7 +2043,7 @@ func escchar(e int, escflg *int, val *int64) int {
}
*val = int64(c)
return 0
return false
hex:
l = 0
@ -2075,7 +2075,7 @@ hex:
}
*val = l
return 0
return false
oct:
l = int64(c) - '0'
@ -2095,7 +2095,7 @@ oct:
}
*val = l
return 0
return false
}
var syms = []struct {
@ -2530,12 +2530,12 @@ func lexinit() {
idealbool = typ(TBOOL)
s = Pkglookup("true", builtinpkg)
s.Def = Nodbool(1)
s.Def = Nodbool(true)
s.Def.Sym = Lookup("true")
s.Def.Type = idealbool
s = Pkglookup("false", builtinpkg)
s.Def = Nodbool(0)
s.Def = Nodbool(false)
s.Def.Sym = Lookup("false")
s.Def.Type = idealbool
@ -2704,14 +2704,14 @@ func lexfini() {
s = Lookup("true")
if s.Def == nil {
s.Def = Nodbool(1)
s.Def = Nodbool(true)
s.Def.Sym = s
s.Origpkg = builtinpkg
}
s = Lookup("false")
if s.Def == nil {
s.Def = Nodbool(0)
s.Def = Nodbool(false)
s.Def.Sym = s
s.Origpkg = builtinpkg
}
@ -3163,7 +3163,7 @@ func mkpackage(pkgname string) {
// leave s->block set to cause redeclaration
// errors if a conflicting top-level name is
// introduced by a different file.
if !(s.Def.Used != 0) && !(nsyntaxerrors != 0) {
if s.Def.Used == 0 && nsyntaxerrors == 0 {
pkgnotused(int(s.Def.Lineno), s.Def.Pkg.Path, s.Name)
}
s.Def = nil
@ -3173,7 +3173,7 @@ func mkpackage(pkgname string) {
if s.Def.Sym != s {
// throw away top-level name left over
// from previous import . "x"
if s.Def.Pack != nil && !(s.Def.Pack.Used != 0) && !(nsyntaxerrors != 0) {
if s.Def.Pack != nil && s.Def.Pack.Used == 0 && nsyntaxerrors == 0 {
pkgnotused(int(s.Def.Pack.Lineno), s.Def.Pack.Pkg.Path, "")
s.Def.Pack.Used = 1
}


@ -44,7 +44,7 @@ func mplsh(a *Mpint, quiet int) {
}
a.Ovf = uint8(c)
if a.Ovf != 0 && !(quiet != 0) {
if a.Ovf != 0 && quiet == 0 {
Yyerror("constant shift overflow")
}
}
@ -59,7 +59,7 @@ func mplshw(a *Mpint, quiet int) {
i = Mpprec - 1
if a.A[i] != 0 {
a.Ovf = 1
if !(quiet != 0) {
if quiet == 0 {
Yyerror("constant shift overflow")
}
}
@ -223,7 +223,7 @@ func mpaddfixfix(a *Mpint, b *Mpint, quiet int) {
}
a.Ovf = uint8(c)
if a.Ovf != 0 && !(quiet != 0) {
if a.Ovf != 0 && quiet == 0 {
Yyerror("constant addition overflow")
}
@ -663,15 +663,15 @@ func mpdivmodfixfix(q *Mpint, r *Mpint, n *Mpint, d *Mpint) {
q.Neg = uint8(ns ^ ds)
}
func mpiszero(a *Mpint) int {
func mpiszero(a *Mpint) bool {
var i int
for i = Mpprec - 1; i >= 0; i-- {
if a.A[i] != 0 {
return 0
return false
}
}
return 1
return true
}
func mpdivfract(a *Mpint, b *Mpint) {
@ -694,7 +694,7 @@ func mpdivfract(a *Mpint, b *Mpint) {
for j = 0; j < Mpscale; j++ {
x <<= 1
if mpcmp(&d, &n) <= 0 {
if !(mpiszero(&d) != 0) {
if !mpiszero(&d) {
x |= 1
}
mpsubfixfix(&n, &d)


@ -62,13 +62,13 @@ func order(fn *Node) {
// Ordertemp allocates a new temporary with the given type,
// pushes it onto the temp stack, and returns it.
// If clear is true, ordertemp emits code to zero the temporary.
func ordertemp(t *Type, order *Order, clear int) *Node {
func ordertemp(t *Type, order *Order, clear bool) *Node {
var var_ *Node
var a *Node
var l *NodeList
var_ = temp(t)
if clear != 0 {
if clear {
a = Nod(OAS, var_, nil)
typecheck(&a, Etop)
order.out = list(order.out, a)
@ -101,7 +101,7 @@ func ordercopyexpr(n *Node, t *Type, order *Order, clear int) *Node {
var a *Node
var var_ *Node
var_ = ordertemp(t, order, clear)
var_ = ordertemp(t, order, clear != 0)
a = Nod(OAS, var_, n)
typecheck(&a, Etop)
order.out = list(order.out, a)
@ -135,10 +135,6 @@ func ordersafeexpr(n *Node, order *Order) *Node {
var a *Node
switch n.Op {
default:
Fatal("ordersafeexpr %v", Oconv(int(n.Op), 0))
fallthrough
case ONAME,
OLITERAL:
return n
@ -170,7 +166,7 @@ func ordersafeexpr(n *Node, order *Order) *Node {
case OINDEX,
OINDEXMAP:
if Isfixedarray(n.Left.Type) != 0 {
if Isfixedarray(n.Left.Type) {
l = ordersafeexpr(n.Left, order)
} else {
l = ordercheapexpr(n.Left, order)
@ -187,14 +183,17 @@ func ordersafeexpr(n *Node, order *Order) *Node {
typecheck(&a, Erv)
return a
}
Fatal("ordersafeexpr %v", Oconv(int(n.Op), 0))
return nil // not reached
}
// Istemp reports whether n is a temporary variable.
func istemp(n *Node) int {
func istemp(n *Node) bool {
if n.Op != ONAME {
return 0
return false
}
return bool2int(strings.HasPrefix(n.Sym.Name, "autotmp_"))
return strings.HasPrefix(n.Sym.Name, "autotmp_")
}
// Isaddrokay reports whether it is okay to pass n's address to runtime routines.
@ -203,8 +202,8 @@ func istemp(n *Node) int {
// of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
// because we emit explicit VARKILL instructions marking the end of those
// temporaries' lifetimes.
func isaddrokay(n *Node) int {
return bool2int(islvalue(n) != 0 && (n.Op != ONAME || n.Class == PEXTERN || istemp(n) != 0))
func isaddrokay(n *Node) bool {
return islvalue(n) && (n.Op != ONAME || n.Class == PEXTERN || istemp(n))
}
// Orderaddrtemp ensures that *np is okay to pass by address to runtime routines.
@ -214,7 +213,7 @@ func orderaddrtemp(np **Node, order *Order) {
var n *Node
n = *np
if isaddrokay(n) != 0 {
if isaddrokay(n) {
return
}
*np = ordercopyexpr(n, n.Type, order, 0)
@ -232,7 +231,7 @@ func poptemp(mark *NodeList, order *Order) {
for {
l = order.temp
if !(l != mark) {
if l == mark {
break
}
order.temp = l.Next
@ -330,19 +329,19 @@ func orderinit(n *Node, order *Order) {
// Ismulticall reports whether the list l is f() for a multi-value function.
// Such an f() could appear as the lone argument to a multi-arg function.
func ismulticall(l *NodeList) int {
func ismulticall(l *NodeList) bool {
var n *Node
// one arg only
if l == nil || l.Next != nil {
return 0
return false
}
n = l.N
// must be call
switch n.Op {
default:
return 0
return false
case OCALLFUNC,
OCALLMETH,
@ -351,7 +350,7 @@ func ismulticall(l *NodeList) int {
}
// call must return multiple values
return bool2int(n.Left.Type.Outtuple > 1)
return n.Left.Type.Outtuple > 1
}
// Copyret emits t1, t2, ... = n, where n is a function call,
@ -364,7 +363,7 @@ func copyret(n *Node, order *Order) *NodeList {
var l2 *NodeList
var tl Iter
if n.Type.Etype != TSTRUCT || !(n.Type.Funarg != 0) {
if n.Type.Etype != TSTRUCT || n.Type.Funarg == 0 {
Fatal("copyret %v %d", Tconv(n.Type, 0), n.Left.Type.Outtuple)
}
@ -387,7 +386,7 @@ func copyret(n *Node, order *Order) *NodeList {
// Ordercallargs orders the list of call arguments *l.
func ordercallargs(l **NodeList, order *Order) {
if ismulticall(*l) != 0 {
if ismulticall(*l) {
// return f() where f() is multiple values.
*l = copyret((*l).N, order)
} else {
@ -435,15 +434,14 @@ func ordermapassign(n *Node, order *Order) {
switch n.Op {
default:
Fatal("ordermapassign %v", Oconv(int(n.Op), 0))
fallthrough
case OAS:
order.out = list(order.out, n)
// We call writebarrierfat only for values > 4 pointers long. See walk.c.
if (n.Left.Op == OINDEXMAP || (needwritebarrier(n.Left, n.Right) != 0 && n.Left.Type.Width > int64(4*Widthptr))) && !(isaddrokay(n.Right) != 0) {
if (n.Left.Op == OINDEXMAP || (needwritebarrier(n.Left, n.Right) && n.Left.Type.Width > int64(4*Widthptr))) && !isaddrokay(n.Right) {
m = n.Left
n.Left = ordertemp(m.Type, order, 0)
n.Left = ordertemp(m.Type, order, false)
a = Nod(OAS, m, n.Left)
typecheck(&a, Etop)
order.out = list(order.out, a)
@ -457,13 +455,13 @@ func ordermapassign(n *Node, order *Order) {
for l = n.List; l != nil; l = l.Next {
if l.N.Op == OINDEXMAP {
m = l.N
if !(istemp(m.Left) != 0) {
if !istemp(m.Left) {
m.Left = ordercopyexpr(m.Left, m.Left.Type, order, 0)
}
if !(istemp(m.Right) != 0) {
if !istemp(m.Right) {
m.Right = ordercopyexpr(m.Right, m.Right.Type, order, 0)
}
l.N = ordertemp(m.Type, order, 0)
l.N = ordertemp(m.Type, order, false)
a = Nod(OAS, m, l.N)
typecheck(&a, Etop)
post = list(post, a)
@ -501,7 +499,6 @@ func orderstmt(n *Node, order *Order) {
switch n.Op {
default:
Fatal("orderstmt %v", Oconv(int(n.Op), 0))
fallthrough
case OVARKILL:
order.out = list(order.out, n)
@ -593,7 +590,7 @@ func orderstmt(n *Node, order *Order) {
order.out = list(order.out, n)
} else {
typ = n.Rlist.N.Type
tmp1 = ordertemp(typ, order, bool2int(haspointers(typ)))
tmp1 = ordertemp(typ, order, haspointers(typ))
order.out = list(order.out, n)
r = Nod(OAS, n.List.N, tmp1)
typecheck(&r, Etop)
@ -611,11 +608,11 @@ func orderstmt(n *Node, order *Order) {
orderexprlist(n.List, order)
orderexpr(&n.Rlist.N.Left, order) // arg to recv
ch = n.Rlist.N.Left.Type
tmp1 = ordertemp(ch.Type, order, bool2int(haspointers(ch.Type)))
tmp1 = ordertemp(ch.Type, order, haspointers(ch.Type))
if !isblank(n.List.Next.N) {
tmp2 = ordertemp(n.List.Next.N.Type, order, 0)
tmp2 = ordertemp(n.List.Next.N.Type, order, false)
} else {
tmp2 = ordertemp(Types[TBOOL], order, 0)
tmp2 = ordertemp(Types[TBOOL], order, false)
}
order.out = list(order.out, n)
r = Nod(OAS, n.List.N, tmp1)
@ -724,7 +721,7 @@ func orderstmt(n *Node, order *Order) {
t = marktemp(order)
orderexpr(&n.Left, order)
if !(Isinter(n.Left.Type) != 0) {
if !Isinter(n.Left.Type) {
orderaddrtemp(&n.Left, order)
}
order.out = list(order.out, n)
@ -745,7 +742,6 @@ func orderstmt(n *Node, order *Order) {
switch n.Type.Etype {
default:
Fatal("orderstmt range %v", Tconv(n.Type, 0))
fallthrough
// Mark []byte(str) range expression to reuse string backing storage.
// It is safe because the storage cannot be mutated.
@ -784,7 +780,7 @@ func orderstmt(n *Node, order *Order) {
n.Right = ordercopyexpr(r, r.Type, order, 0)
// n->alloc is the temp for the iterator.
n.Alloc = ordertemp(Types[TUINT8], order, 1)
n.Alloc = ordertemp(Types[TUINT8], order, true)
}
for l = n.List; l != nil; l = l.Next {
@ -884,7 +880,7 @@ func orderstmt(n *Node, order *Order) {
l.N.Ninit = list(l.N.Ninit, tmp2)
}
r.Left = ordertemp(r.Right.Left.Type.Type, order, bool2int(haspointers(r.Right.Left.Type.Type)))
r.Left = ordertemp(r.Right.Left.Type.Type, order, haspointers(r.Right.Left.Type.Type))
tmp2 = Nod(OAS, tmp1, r.Left)
typecheck(&tmp2, Etop)
l.N.Ninit = list(l.N.Ninit, tmp2)
@ -901,7 +897,7 @@ func orderstmt(n *Node, order *Order) {
l.N.Ninit = list(l.N.Ninit, tmp2)
}
r.Ntest = ordertemp(tmp1.Type, order, 0)
r.Ntest = ordertemp(tmp1.Type, order, false)
tmp2 = Nod(OAS, tmp1, r.Ntest)
typecheck(&tmp2, Etop)
l.N.Ninit = list(l.N.Ninit, tmp2)
@ -919,11 +915,11 @@ func orderstmt(n *Node, order *Order) {
// r->left is c, r->right is x, both are always evaluated.
orderexpr(&r.Left, order)
if !(istemp(r.Left) != 0) {
if !istemp(r.Left) {
r.Left = ordercopyexpr(r.Left, r.Left.Type, order, 0)
}
orderexpr(&r.Right, order)
if !(istemp(r.Right) != 0) {
if !istemp(r.Right) {
r.Right = ordercopyexpr(r.Right, r.Right.Type, order, 0)
}
}
@ -1003,8 +999,8 @@ func orderexpr(np **Node, order *Order) {
var l *NodeList
var t *Type
var lno int
var haslit int
var hasbyte int
var haslit bool
var hasbyte bool
n = *np
if n == nil {
@ -1031,7 +1027,7 @@ func orderexpr(np **Node, order *Order) {
t = typ(TARRAY)
t.Bound = int64(count(n.List))
t.Type = Types[TSTRING]
n.Alloc = ordertemp(t, order, 0)
n.Alloc = ordertemp(t, order, false)
}
// Mark string(byteSlice) arguments to reuse byteSlice backing
@ -1041,15 +1037,15 @@ func orderexpr(np **Node, order *Order) {
// Otherwise if all other arguments are empty strings,
// concatstrings will return the reference to the temp string
// to the caller.
hasbyte = 0
hasbyte = false
haslit = 0
haslit = false
for l = n.List; l != nil; l = l.Next {
hasbyte |= bool2int(l.N.Op == OARRAYBYTESTR)
haslit |= bool2int(l.N.Op == OLITERAL && len(l.N.Val.U.Sval.S) != 0)
hasbyte = hasbyte || l.N.Op == OARRAYBYTESTR
haslit = haslit || l.N.Op == OLITERAL && len(l.N.Val.U.Sval.S) != 0
}
if haslit != 0 && hasbyte != 0 {
if haslit && hasbyte {
for l = n.List; l != nil; l = l.Next {
if l.N.Op == OARRAYBYTESTR {
l.N.Op = OARRAYBYTESTRTMP
@ -1103,7 +1099,7 @@ func orderexpr(np **Node, order *Order) {
case OCONVIFACE:
orderexpr(&n.Left, order)
if !(Isinter(n.Left.Type) != 0) {
if !Isinter(n.Left.Type) {
orderaddrtemp(&n.Left, order)
}
@ -1141,7 +1137,7 @@ func orderexpr(np **Node, order *Order) {
case OCLOSURE:
if n.Noescape && n.Cvars != nil {
n.Alloc = ordertemp(Types[TUINT8], order, 0) // walk will fill in correct type
n.Alloc = ordertemp(Types[TUINT8], order, false) // walk will fill in correct type
}
case OARRAYLIT,
@ -1151,7 +1147,7 @@ func orderexpr(np **Node, order *Order) {
orderexprlist(n.List, order)
orderexprlist(n.Rlist, order)
if n.Noescape {
n.Alloc = ordertemp(Types[TUINT8], order, 0) // walk will fill in correct type
n.Alloc = ordertemp(Types[TUINT8], order, false) // walk will fill in correct type
}
case ODDDARG:
@ -1160,7 +1156,7 @@ func orderexpr(np **Node, order *Order) {
// Allocate a temporary that will be cleaned up when this statement
// completes. We could be more aggressive and try to arrange for it
// to be cleaned up when the call completes.
n.Alloc = ordertemp(n.Type.Type, order, 0)
n.Alloc = ordertemp(n.Type.Type, order, false)
}
case ORECV,
@ -1173,7 +1169,7 @@ func orderexpr(np **Node, order *Order) {
orderexpr(&n.Left, order)
orderexpr(&n.Right, order)
t = n.Left.Type
if t.Etype == TSTRUCT || Isfixedarray(t) != 0 {
if t.Etype == TSTRUCT || Isfixedarray(t) {
// for complex comparisons, we need both args to be
// addressable so we can pass them to the runtime.
orderaddrtemp(&n.Left, order)


@ -272,7 +272,7 @@ func allocauto(ptxt *obj.Prog) {
ll = Curfn.Dcl
n = ll.N
if n.Class == PAUTO && n.Op == ONAME && !(n.Used != 0) {
if n.Class == PAUTO && n.Op == ONAME && n.Used == 0 {
// No locals used at all
Curfn.Dcl = nil
@ -282,7 +282,7 @@ func allocauto(ptxt *obj.Prog) {
for ll = Curfn.Dcl; ll.Next != nil; ll = ll.Next {
n = ll.Next.N
if n.Class == PAUTO && n.Op == ONAME && !(n.Used != 0) {
if n.Class == PAUTO && n.Op == ONAME && n.Used == 0 {
ll.Next = nil
Curfn.Dcl.End = ll
break
@ -360,12 +360,12 @@ func Cgen_checknil(n *Node) {
}
// Ideally we wouldn't see any integer types here, but we do.
if n.Type == nil || (!(Isptr[n.Type.Etype] != 0) && !(Isint[n.Type.Etype] != 0) && n.Type.Etype != TUNSAFEPTR) {
if n.Type == nil || (Isptr[n.Type.Etype] == 0 && Isint[n.Type.Etype] == 0 && n.Type.Etype != TUNSAFEPTR) {
Dump("checknil", n)
Fatal("bad checknil")
}
if ((Thearch.Thechar == '5' || Thearch.Thechar == '9') && n.Op != OREGISTER) || !(n.Addable != 0) || n.Op == OLITERAL {
if ((Thearch.Thechar == '5' || Thearch.Thechar == '9') && n.Op != OREGISTER) || n.Addable == 0 || n.Op == OLITERAL {
Thearch.Regalloc(&reg, Types[Tptr], n)
Thearch.Cgen(n, &reg)
Thearch.Gins(obj.ACHECKNIL, &reg, nil)
@ -478,7 +478,7 @@ func compile(fn *Node) {
if fn.Wrapper != 0 {
ptxt.From3.Offset |= obj.WRAPPER
}
if fn.Needctxt != 0 {
if fn.Needctxt {
ptxt.From3.Offset |= obj.NEEDCTXT
}
if fn.Nosplit {
@ -557,7 +557,7 @@ func compile(fn *Node) {
Pc.Lineno = lineno
fixjmp(ptxt)
if !(Debug['N'] != 0) || Debug['R'] != 0 || Debug['P'] != 0 {
if Debug['N'] == 0 || Debug['R'] != 0 || Debug['P'] != 0 {
regopt(ptxt)
nilopt(ptxt)
}


@ -183,18 +183,13 @@ func printblock(bb *BasicBlock) {
// are two criteria for termination. If the end of the basic block is reached,
// false is returned. If the callback returns true, the iteration is stopped
// and true is returned.
func blockany(bb *BasicBlock, callback func(*obj.Prog) int) int {
var p *obj.Prog
var result int
for p = bb.last; p != nil; p = p.Opt.(*obj.Prog) {
result = callback(p)
if result != 0 {
return result
func blockany(bb *BasicBlock, f func(*obj.Prog) bool) bool {
for p := bb.last; p != nil; p = p.Opt.(*obj.Prog) {
if f(p) {
return true
}
}
return 0
return false
}
// Collects and returns an array of Node*s for a function's arguments and local
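Since blockany now takes a func(*obj.Prog) bool, the predicate helpers below can be passed to it directly, and one-off scans read naturally as closures. A small hypothetical usage:

// Does this basic block contain any CALL instruction?
hasCall := blockany(bb, func(p *obj.Prog) bool {
	return p.As == obj.ACALL
})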
@ -303,7 +298,7 @@ func iscall(prog *obj.Prog, name *obj.LSym) bool {
var isselectcommcasecall_names [5]*obj.LSym
func isselectcommcasecall(prog *obj.Prog) int {
func isselectcommcasecall(prog *obj.Prog) bool {
var i int32
if isselectcommcasecall_names[0] == nil {
@ -315,41 +310,41 @@ func isselectcommcasecall(prog *obj.Prog) int {
for i = 0; isselectcommcasecall_names[i] != nil; i++ {
if iscall(prog, isselectcommcasecall_names[i]) {
return 1
return true
}
}
return 0
return false
}
// Returns true for call instructions that target runtime·newselect.
var isnewselect_sym *obj.LSym
func isnewselect(prog *obj.Prog) int {
func isnewselect(prog *obj.Prog) bool {
if isnewselect_sym == nil {
isnewselect_sym = Linksym(Pkglookup("newselect", Runtimepkg))
}
return bool2int(iscall(prog, isnewselect_sym))
return iscall(prog, isnewselect_sym)
}
// Returns true for call instructions that target runtime·selectgo.
var isselectgocall_sym *obj.LSym
func isselectgocall(prog *obj.Prog) int {
func isselectgocall(prog *obj.Prog) bool {
if isselectgocall_sym == nil {
isselectgocall_sym = Linksym(Pkglookup("selectgo", Runtimepkg))
}
return bool2int(iscall(prog, isselectgocall_sym))
return iscall(prog, isselectgocall_sym)
}
var isdeferreturn_sym *obj.LSym
func isdeferreturn(prog *obj.Prog) int {
func isdeferreturn(prog *obj.Prog) bool {
if isdeferreturn_sym == nil {
isdeferreturn_sym = Linksym(Pkglookup("deferreturn", Runtimepkg))
}
return bool2int(iscall(prog, isdeferreturn_sym))
return iscall(prog, isdeferreturn_sym)
}
// Walk backwards from a runtime·selectgo call up to its immediately dominating
@ -366,7 +361,7 @@ func addselectgosucc(selectgo *BasicBlock) {
Fatal("selectgo does not have a newselect")
}
pred = pred.pred[0]
if blockany(pred, isselectcommcasecall) != 0 {
if blockany(pred, isselectcommcasecall) {
// A select comm case block should have exactly one
// successor.
if len(pred.succ) != 1 {
@ -386,7 +381,7 @@ func addselectgosucc(selectgo *BasicBlock) {
addedge(selectgo, succ)
}
if blockany(pred, isnewselect) != 0 {
if blockany(pred, isnewselect) {
// Reached the matching newselect.
break
}
@ -451,7 +446,7 @@ func newcfg(firstp *obj.Prog) []*BasicBlock {
p.Link.Opt = newblock(p.Link)
cfg = append(cfg, p.Link.Opt.(*BasicBlock))
}
} else if isselectcommcasecall(p) != 0 || isselectgocall(p) != 0 {
} else if isselectcommcasecall(p) || isselectgocall(p) {
// Accommodate implicit selectgo control flow.
if p.Link.Opt == nil {
p.Link.Opt = newblock(p.Link)
@ -478,7 +473,7 @@ func newcfg(firstp *obj.Prog) []*BasicBlock {
}
// Collect basic blocks with selectgo calls.
if isselectgocall(p) != 0 {
if isselectgocall(p) {
selectgo = append(selectgo, bb)
}
}
@ -627,7 +622,7 @@ func progeffects(prog *obj.Prog, vars []*Node, uevar *Bvec, varkill *Bvec, avari
// non-tail-call return instructions; see note above
// the for loop for details.
case PPARAMOUT:
if !(node.Addrtaken != 0) && prog.To.Type == obj.TYPE_NONE {
if node.Addrtaken == 0 && prog.To.Type == obj.TYPE_NONE {
bvset(uevar, i)
}
}
@ -674,7 +669,7 @@ func progeffects(prog *obj.Prog, vars []*Node, uevar *Bvec, varkill *Bvec, avari
bvset(uevar, pos)
}
if info.Flags&LeftWrite != 0 {
if from.Node != nil && !(Isfat(((from.Node).(*Node)).Type) != 0) {
if from.Node != nil && !Isfat(((from.Node).(*Node)).Type) {
bvset(varkill, pos)
}
}
@ -718,7 +713,7 @@ Next:
bvset(uevar, pos)
}
if info.Flags&RightWrite != 0 {
if to.Node != nil && (!(Isfat(((to.Node).(*Node)).Type) != 0) || prog.As == obj.AVARDEF) {
if to.Node != nil && (!Isfat(((to.Node).(*Node)).Type) || prog.As == obj.AVARDEF) {
bvset(varkill, pos)
}
}
@ -1050,7 +1045,7 @@ func twobitwalktype1(t *Type, xoffset *int64, bv *Bvec) {
if t.Bound < -1 {
Fatal("twobitwalktype1: invalid bound, %v", Tconv(t, 0))
}
if Isslice(t) != 0 {
if Isslice(t) {
// struct { byte *array; uintgo len; uintgo cap; }
if *xoffset&int64(Widthptr-1) != 0 {
Fatal("twobitwalktype1: invalid TARRAY alignment, %v", Tconv(t, 0))
@ -1101,7 +1096,7 @@ func twobitlivepointermap(lv *Liveness, liveout *Bvec, vars []*Node, args *Bvec,
for i = 0; ; i++ {
i = int32(bvnext(liveout, i))
if !(i >= 0) {
if i < 0 {
break
}
node = vars[i]
@ -1163,8 +1158,8 @@ func newpcdataprog(prog *obj.Prog, index int32) *obj.Prog {
// Returns true for instructions that are safe points that must be annotated
// with liveness information.
func issafepoint(prog *obj.Prog) int {
return bool2int(prog.As == obj.ATEXT || prog.As == obj.ACALL)
func issafepoint(prog *obj.Prog) bool {
return prog.As == obj.ATEXT || prog.As == obj.ACALL
}
// Initializes the sets for solving the live variables. Visits all the
@ -1332,7 +1327,7 @@ func livenesssolve(lv *Liveness) {
// This function is slow but it is only used for generating debug prints.
// Check whether n is marked live in args/locals.
func islive(n *Node, args *Bvec, locals *Bvec) int {
func islive(n *Node, args *Bvec, locals *Bvec) bool {
var i int
switch n.Class {
@ -1340,19 +1335,19 @@ func islive(n *Node, args *Bvec, locals *Bvec) int {
PPARAMOUT:
for i = 0; int64(i) < n.Type.Width/int64(Widthptr)*obj.BitsPerPointer; i++ {
if bvget(args, int32(n.Xoffset/int64(Widthptr)*obj.BitsPerPointer+int64(i))) != 0 {
return 1
return true
}
}
case PAUTO:
for i = 0; int64(i) < n.Type.Width/int64(Widthptr)*obj.BitsPerPointer; i++ {
if bvget(locals, int32((n.Xoffset+stkptrsize)/int64(Widthptr)*obj.BitsPerPointer+int64(i))) != 0 {
return 1
return true
}
}
}
return 0
return false
}
// Visits all instructions in a basic block and computes a bit vector of live
@ -1427,21 +1422,21 @@ func livenessepilogue(lv *Liveness) {
bvor(any, any, avarinit)
bvor(all, all, avarinit)
if issafepoint(p) != 0 {
if issafepoint(p) {
// Annotate ambiguously live variables so that they can
// be zeroed at function entry.
// livein and liveout are dead here and used as temporaries.
bvresetall(livein)
bvandnot(liveout, any, all)
if !(bvisempty(liveout) != 0) {
if !bvisempty(liveout) {
for pos = 0; pos < liveout.n; pos++ {
if !(bvget(liveout, pos) != 0) {
if bvget(liveout, pos) == 0 {
continue
}
bvset(all, pos) // silence future warnings in this block
n = lv.vars[pos]
if !(n.Needzero != 0) {
if n.Needzero == 0 {
n.Needzero = 1
if debuglive >= 1 {
Warnl(int(p.Lineno), "%v: %v is ambiguously live", Nconv(Curfn.Nname, 0), Nconv(n, obj.FmtLong))
@ -1517,7 +1512,7 @@ func livenessepilogue(lv *Liveness) {
bvcopy(liveout, livein)
bvandnot(livein, liveout, varkill)
bvor(livein, livein, uevar)
if debuglive >= 3 && issafepoint(p) != 0 {
if debuglive >= 3 && issafepoint(p) {
fmt.Printf("%v\n", p)
printvars("uevar", uevar, lv.vars)
printvars("varkill", varkill, lv.vars)
@ -1525,7 +1520,7 @@ func livenessepilogue(lv *Liveness) {
printvars("liveout", liveout, lv.vars)
}
if issafepoint(p) != 0 {
if issafepoint(p) {
// Found an interesting instruction, record the
// corresponding liveness information.
@ -1534,7 +1529,7 @@ func livenessepilogue(lv *Liveness) {
// input parameters.
if p.As == obj.ATEXT {
for j = 0; j < liveout.n; j++ {
if !(bvget(liveout, j) != 0) {
if bvget(liveout, j) == 0 {
continue
}
n = lv.vars[j]
@ -1574,7 +1569,7 @@ func livenessepilogue(lv *Liveness) {
numlive = 0
for j = 0; j < int32(len(lv.vars)); j++ {
n = lv.vars[j]
if islive(n, args, locals) != 0 {
if islive(n, args, locals) {
fmt_ += fmt.Sprintf(" %v", Nconv(n, 0))
numlive++
}
@ -1592,7 +1587,7 @@ func livenessepilogue(lv *Liveness) {
// Only CALL instructions need a PCDATA annotation.
// The TEXT instruction annotation is implicit.
if p.As == obj.ACALL {
if isdeferreturn(p) != 0 {
if isdeferreturn(p) {
// runtime.deferreturn modifies its return address to return
// back to the CALL, not to the subsequent instruction.
// Because the return comes back one instruction early,
@ -1760,11 +1755,11 @@ func printbitset(printed int, name string, vars []*Node, bits *Bvec) int {
started = 0
for i = 0; i < len(vars); i++ {
if !(bvget(bits, int32(i)) != 0) {
if bvget(bits, int32(i)) == 0 {
continue
}
if !(started != 0) {
if !(printed != 0) {
if started == 0 {
if printed == 0 {
fmt.Printf("\t")
} else {
fmt.Printf(" ")
@ -1856,14 +1851,14 @@ func livenessprintdebug(lv *Liveness) {
if printed != 0 {
fmt.Printf("\n")
}
if issafepoint(p) != 0 {
if issafepoint(p) {
args = lv.argslivepointers[pcdata]
locals = lv.livepointers[pcdata]
fmt.Printf("\tlive=")
printed = 0
for j = 0; j < len(lv.vars); j++ {
n = lv.vars[j]
if islive(n, args, locals) != 0 {
if islive(n, args, locals) {
tmp9 := printed
printed++
if tmp9 != 0 {


@ -179,7 +179,7 @@ void proginfo(ProgInfo*, Prog*);
var noreturn_symlist [10]*Sym
func Noreturn(p *obj.Prog) int {
func Noreturn(p *obj.Prog) bool {
var s *Sym
var i int
@ -195,18 +195,18 @@ func Noreturn(p *obj.Prog) int {
}
if p.To.Node == nil {
return 0
return false
}
s = ((p.To.Node).(*Node)).Sym
if s == nil {
return 0
return false
}
for i = 0; noreturn_symlist[i] != nil; i++ {
if s == noreturn_symlist[i] {
return 1
return true
}
}
return 0
return false
}
// JMP chasing and removal.
@ -325,7 +325,7 @@ func fixjmp(firstp *obj.Prog) {
// pass 4: elide JMP to next instruction.
// only safe if there are no jumps to JMPs anymore.
if !(jmploop != 0) {
if jmploop == 0 {
last = nil
for p = firstp; p != nil; p = p.Link {
if p.As == obj.AJMP && p.To.Type == obj.TYPE_BRANCH && p.To.U.Branch == p.Link {
@ -434,7 +434,7 @@ func Flowstart(firstp *obj.Prog, newData func() interface{}) *Graph {
for f = start; f != nil; f = f.Link {
p = f.Prog
Thearch.Proginfo(&info, p)
if !(info.Flags&Break != 0) {
if info.Flags&Break == 0 {
f1 = f.Link
f.S1 = f1
f1.P1 = f
@ -492,11 +492,11 @@ func postorder(r *Flow, rpo2r []*Flow, n int32) int32 {
r.Rpo = 1
r1 = r.S1
if r1 != nil && !(r1.Rpo != 0) {
if r1 != nil && r1.Rpo == 0 {
n = postorder(r1, rpo2r, n)
}
r1 = r.S2
if r1 != nil && !(r1.Rpo != 0) {
if r1 != nil && r1.Rpo == 0 {
n = postorder(r1, rpo2r, n)
}
rpo2r[n] = r
@ -529,26 +529,26 @@ func rpolca(idom []int32, rpo1 int32, rpo2 int32) int32 {
return rpo1
}
func doms(idom []int32, r int32, s int32) int {
func doms(idom []int32, r int32, s int32) bool {
for s > r {
s = idom[s]
}
return bool2int(s == r)
return s == r
}
func loophead(idom []int32, r *Flow) int {
func loophead(idom []int32, r *Flow) bool {
var src int32
src = r.Rpo
if r.P1 != nil && doms(idom, src, r.P1.Rpo) != 0 {
return 1
if r.P1 != nil && doms(idom, src, r.P1.Rpo) {
return true
}
for r = r.P2; r != nil; r = r.P2link {
if doms(idom, src, r.Rpo) != 0 {
return 1
if doms(idom, src, r.Rpo) {
return true
}
}
return 0
return false
}
func loopmark(rpo2r **Flow, head int32, r *Flow) {
@ -620,7 +620,7 @@ func flowrpo(g *Graph) {
for i = 0; i < nr; i++ {
r1 = rpo2r[i]
r1.Loop++
if r1.P2 != nil && loophead(idom, r1) != 0 {
if r1.P2 != nil && loophead(idom, r1) {
loopmark(&rpo2r[0], i, r1)
}
}
@ -718,8 +718,8 @@ func (x startcmp) Less(i, j int) bool {
}
// Is n available for merging?
func canmerge(n *Node) int {
return bool2int(n.Class == PAUTO && strings.HasPrefix(n.Sym.Name, "autotmp"))
func canmerge(n *Node) bool {
return n.Class == PAUTO && strings.HasPrefix(n.Sym.Name, "autotmp")
}
func mergetemp(firstp *obj.Prog) {
@ -757,7 +757,7 @@ func mergetemp(firstp *obj.Prog) {
// Build list of all mergeable variables.
nvar = 0
for l = Curfn.Dcl; l != nil; l = l.Next {
if canmerge(l.N) != 0 {
if canmerge(l.N) {
nvar++
}
}
@ -766,7 +766,7 @@ func mergetemp(firstp *obj.Prog) {
nvar = 0
for l = Curfn.Dcl; l != nil; l = l.Next {
n = l.N
if canmerge(n) != 0 {
if canmerge(n) {
v = &var_[nvar]
nvar++
n.Opt = v
@ -826,9 +826,9 @@ func mergetemp(firstp *obj.Prog) {
if f != nil && f.Data.(*Flow) == nil {
p = f.Prog
Thearch.Proginfo(&info, p)
if p.To.Node == v.node && (info.Flags&RightWrite != 0) && !(info.Flags&RightRead != 0) {
if p.To.Node == v.node && (info.Flags&RightWrite != 0) && info.Flags&RightRead == 0 {
p.As = obj.ANOP
p.To = obj.Zprog.To
p.To = obj.Addr{}
v.removed = 1
if debugmerge > 0 && Debug['v'] != 0 {
fmt.Printf("drop write-only %v\n", Sconv(v.node.Sym, 0))
@ -851,7 +851,7 @@ func mergetemp(firstp *obj.Prog) {
const (
SizeAny = SizeB | SizeW | SizeL | SizeQ | SizeF | SizeD
)
if p.From.Node == v.node && p1.To.Node == v.node && (info.Flags&Move != 0) && !((info.Flags|info1.Flags)&(LeftAddr|RightAddr) != 0) && info.Flags&SizeAny == info1.Flags&SizeAny {
if p.From.Node == v.node && p1.To.Node == v.node && (info.Flags&Move != 0) && (info.Flags|info1.Flags)&(LeftAddr|RightAddr) == 0 && info.Flags&SizeAny == info1.Flags&SizeAny {
p1.From = p.From
Thearch.Excise(f)
v.removed = 1
@ -1010,7 +1010,7 @@ func mergetemp(firstp *obj.Prog) {
// Delete merged nodes from declaration list.
for lp = &Curfn.Dcl; ; {
l = *lp
if !(l != nil) {
if l == nil {
break
}
@ -1126,11 +1126,11 @@ func nilopt(firstp *obj.Prog) {
nkill = 0
for f = g.Start; f != nil; f = f.Link {
p = f.Prog
if p.As != obj.ACHECKNIL || !(Thearch.Regtyp(&p.From) != 0) {
if p.As != obj.ACHECKNIL || !Thearch.Regtyp(&p.From) {
continue
}
ncheck++
if Thearch.Stackaddr(&p.From) != 0 {
if Thearch.Stackaddr(&p.From) {
if Debug_checknil != 0 && p.Lineno > 1 {
Warnl(int(p.Lineno), "removed nil check of SP address")
}
@ -1177,13 +1177,13 @@ func nilwalkback(fcheck *Flow) {
for f = fcheck; f != nil; f = Uniqp(f) {
p = f.Prog
Thearch.Proginfo(&info, p)
if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) != 0 {
if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) {
// Found initialization of value we're checking for nil,
// without first finding the check, so this one is unchecked.
return
}
if f != fcheck && p.As == obj.ACHECKNIL && Thearch.Sameaddr(&p.From, &fcheck.Prog.From) != 0 {
if f != fcheck && p.As == obj.ACHECKNIL && Thearch.Sameaddr(&p.From, &fcheck.Prog.From) {
fcheck.Data = &killed
return
}
@ -1249,12 +1249,12 @@ func nilwalkfwd(fcheck *Flow) {
p = f.Prog
Thearch.Proginfo(&info, p)
if (info.Flags&LeftRead != 0) && Thearch.Smallindir(&p.From, &fcheck.Prog.From) != 0 {
if (info.Flags&LeftRead != 0) && Thearch.Smallindir(&p.From, &fcheck.Prog.From) {
fcheck.Data = &killed
return
}
if (info.Flags&(RightRead|RightWrite) != 0) && Thearch.Smallindir(&p.To, &fcheck.Prog.From) != 0 {
if (info.Flags&(RightRead|RightWrite) != 0) && Thearch.Smallindir(&p.To, &fcheck.Prog.From) {
fcheck.Data = &killed
return
}
@ -1265,12 +1265,12 @@ func nilwalkfwd(fcheck *Flow) {
}
// Stop if value is lost.
if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) != 0 {
if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) {
return
}
// Stop if memory write.
if (info.Flags&RightWrite != 0) && !(Thearch.Regtyp(&p.To) != 0) {
if (info.Flags&RightWrite != 0) && !Thearch.Regtyp(&p.To) {
return
}


@ -30,27 +30,27 @@ var omit_pkgs = []string{"runtime", "runtime/race"}
// Memory accesses in the packages are either uninteresting or will cause false positives.
var noinst_pkgs = []string{"sync", "sync/atomic"}
func ispkgin(pkgs []string) int {
func ispkgin(pkgs []string) bool {
var i int
if myimportpath != "" {
for i = 0; i < len(pkgs); i++ {
if myimportpath == pkgs[i] {
return 1
return true
}
}
}
return 0
return false
}
func isforkfunc(fn *Node) int {
func isforkfunc(fn *Node) bool {
// Special case for syscall.forkAndExecInChild.
// In the child, this function must not acquire any locks, because
// they might have been locked at the time of the fork. This means
// no rescheduling, no malloc calls, and no new stack segments.
// Race instrumentation does all of the above.
return bool2int(myimportpath != "" && myimportpath == "syscall" && fn.Nname.Sym.Name == "forkAndExecInChild")
return myimportpath != "" && myimportpath == "syscall" && fn.Nname.Sym.Name == "forkAndExecInChild"
}
func racewalk(fn *Node) {
@ -58,11 +58,11 @@ func racewalk(fn *Node) {
var nodpc *Node
var s string
if ispkgin(omit_pkgs) != 0 || isforkfunc(fn) != 0 {
if ispkgin(omit_pkgs) || isforkfunc(fn) {
return
}
if !(ispkgin(noinst_pkgs) != 0) {
if !ispkgin(noinst_pkgs) {
racewalklist(fn.Nbody, nil)
// nothing interesting for race detector in fn->enter
@ -147,7 +147,6 @@ func racewalknode(np **Node, init **NodeList, wr int, skip int) {
switch n.Op {
default:
Fatal("racewalk: unknown node type %v", Oconv(int(n.Op), 0))
fallthrough
case OAS,
OAS2FUNC:
@ -263,7 +262,7 @@ func racewalknode(np **Node, init **NodeList, wr int, skip int) {
OLEN,
OCAP:
racewalknode(&n.Left, init, 0, 0)
if Istype(n.Left.Type, TMAP) != 0 {
if Istype(n.Left.Type, TMAP) {
n1 = Nod(OCONVNOP, n.Left, nil)
n1.Type = Ptrto(Types[TUINT8])
n1 = Nod(OIND, n1, nil)
@ -326,9 +325,9 @@ func racewalknode(np **Node, init **NodeList, wr int, skip int) {
goto ret
case OINDEX:
if !(Isfixedarray(n.Left.Type) != 0) {
if !Isfixedarray(n.Left.Type) {
racewalknode(&n.Left, init, 0, 0)
} else if !(islvalue(n.Left) != 0) {
} else if !islvalue(n.Left) {
// index of unaddressable array, like Map[k][i].
racewalknode(&n.Left, init, wr, 0)
@ -468,34 +467,34 @@ ret:
*np = n
}
func isartificial(n *Node) int {
func isartificial(n *Node) bool {
// compiler-emitted artificial things that we do not want to instrument,
// can't possibly participate in a data race.
if n.Op == ONAME && n.Sym != nil && n.Sym.Name != "" {
if n.Sym.Name == "_" {
return 1
return true
}
// autotmp's are always local
if strings.HasPrefix(n.Sym.Name, "autotmp_") {
return 1
return true
}
// statictmp's are read-only
if strings.HasPrefix(n.Sym.Name, "statictmp_") {
return 1
return true
}
// go.itab is accessed only by the compiler and runtime (assume safe)
if n.Sym.Pkg != nil && n.Sym.Pkg.Name != "" && n.Sym.Pkg.Name == "go.itab" {
return 1
return true
}
}
return 0
return false
}
func callinstr(np **Node, init **NodeList, wr int, skip int) int {
func callinstr(np **Node, init **NodeList, wr int, skip int) bool {
var name string
var f *Node
var b *Node
@ -510,18 +509,18 @@ func callinstr(np **Node, init **NodeList, wr int, skip int) int {
// n, n->op, n->type ? n->type->etype : -1, n->class);
if skip != 0 || n.Type == nil || n.Type.Etype >= TIDEAL {
return 0
return false
}
t = n.Type
if isartificial(n) != 0 {
return 0
if isartificial(n) {
return false
}
b = outervalue(n)
// it skips e.g. stores to ... parameter array
if isartificial(b) != 0 {
return 0
if isartificial(b) {
return false
}
class = int(b.Class)
@ -539,7 +538,7 @@ func callinstr(np **Node, init **NodeList, wr int, skip int) int {
n = treecopy(n)
makeaddable(n)
if t.Etype == TSTRUCT || Isfixedarray(t) != 0 {
if t.Etype == TSTRUCT || Isfixedarray(t) {
name = "racereadrange"
if wr != 0 {
name = "racewriterange"
@ -554,10 +553,10 @@ func callinstr(np **Node, init **NodeList, wr int, skip int) int {
}
*init = list(*init, f)
return 1
return true
}
return 0
return false
}
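For orientation, callinstr's effect is to prepend a runtime call chosen by the width of the access. Roughly (the ranged names appear in the diff; the word-sized raceread/racewrite names are assumed):

// x = 1 becomes, for a word-sized write:
//   racewrite(uintptr(unsafe.Pointer(&x)))
//   x = 1
// while struct and fixed-array targets take the ranged variant:
//   racewriterange(uintptr(unsafe.Pointer(&s)), unsafe.Sizeof(s))
// with raceread/racereadrange used symmetrically for reads.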
// makeaddable returns a node whose memory location is the
@ -572,7 +571,7 @@ func makeaddable(n *Node) {
// an addressable value.
switch n.Op {
case OINDEX:
if Isfixedarray(n.Left.Type) != 0 {
if Isfixedarray(n.Left.Type) {
makeaddable(n.Left)
}
@ -596,7 +595,7 @@ func uintptraddr(n *Node) *Node {
var r *Node
r = Nod(OADDR, n, nil)
r.Bounded = 1
r.Bounded = true
r = conv(r, Types[TUNSAFEPTR])
r = conv(r, Types[TUINTPTR])
return r


@ -43,7 +43,7 @@ func typecheckrange(n *Node) {
}
}
if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) != 0 {
if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) {
t = t.Type
}
n.Type = t
@ -63,7 +63,7 @@ func typecheckrange(n *Node) {
t2 = t.Type
case TCHAN:
if !(t.Chan&Crecv != 0) {
if t.Chan&Crecv == 0 {
Yyerror("invalid operation: range %v (receive from send-only type %v)", Nconv(n.Right, 0), Tconv(n.Right.Type, 0))
goto out
}
@ -184,7 +184,6 @@ func walkrange(n *Node) {
switch t.Etype {
default:
Fatal("walkrange")
fallthrough
// Lower n into runtime·memclr if possible, for
// fast zeroing of slices and arrays (issue 5373).
@ -196,8 +195,8 @@ func walkrange(n *Node) {
//
// in which the evaluation of a is side-effect-free.
case TARRAY:
if !(Debug['N'] != 0) {
if !(flag_race != 0) {
if Debug['N'] == 0 {
if flag_race == 0 {
if v1 != nil {
if v2 == nil {
if n.Nbody != nil {
@ -206,10 +205,10 @@ func walkrange(n *Node) {
tmp = n.Nbody.N // first statement of body
if tmp.Op == OAS {
if tmp.Left.Op == OINDEX {
if samesafeexpr(tmp.Left.Left, a) != 0 {
if samesafeexpr(tmp.Left.Right, v1) != 0 {
if samesafeexpr(tmp.Left.Left, a) {
if samesafeexpr(tmp.Left.Right, v1) {
if t.Type.Width > 0 {
if iszero(tmp.Right) != 0 {
if iszero(tmp.Right) {
// Convert to
// if len(a) != 0 {
// hp = &a[0]
@ -227,7 +226,7 @@ func walkrange(n *Node) {
hp = temp(Ptrto(Types[TUINT8]))
tmp = Nod(OINDEX, a, Nodintconst(0))
tmp.Bounded = 1
tmp.Bounded = true
tmp = Nod(OADDR, tmp, nil)
tmp = Nod(OCONVNOP, tmp, nil)
tmp.Type = Ptrto(Types[TUINT8])
@ -282,7 +281,7 @@ func walkrange(n *Node) {
if v2 != nil {
hp = temp(Ptrto(n.Type.Type))
tmp = Nod(OINDEX, ha, Nodintconst(0))
tmp.Bounded = 1
tmp.Bounded = true
init = list(init, Nod(OAS, hp, Nod(OADDR, tmp, nil)))
}
@ -369,7 +368,7 @@ func walkrange(n *Node) {
}
hb = temp(Types[TBOOL])
n.Ntest = Nod(ONE, hb, Nodbool(0))
n.Ntest = Nod(ONE, hb, Nodbool(false))
a = Nod(OAS2RECV, nil, nil)
a.Typecheck = 1
a.List = list(list1(hv1), hb)


@ -355,7 +355,7 @@ func methods(t *Type) *Sig {
// type stored in interface word
it = t
if !(isdirectiface(it) != 0) {
if !isdirectiface(it) {
it = Ptrto(t)
}
@ -370,10 +370,10 @@ func methods(t *Type) *Sig {
if f.Type.Etype != TFUNC || f.Type.Thistuple == 0 {
Fatal("non-method on %v method %v %v\n", Tconv(mt, 0), Sconv(f.Sym, 0), Tconv(f, 0))
}
if !(getthisx(f.Type).Type != nil) {
if getthisx(f.Type).Type == nil {
Fatal("receiver with no type on %v method %v %v\n", Tconv(mt, 0), Sconv(f.Sym, 0), Tconv(f, 0))
}
if f.Nointerface != 0 {
if f.Nointerface {
continue
}
@ -391,7 +391,7 @@ func methods(t *Type) *Sig {
if Isptr[this.Etype] != 0 && this.Type == t {
continue
}
if Isptr[this.Etype] != 0 && !(Isptr[t.Etype] != 0) && f.Embedded != 2 && !(isifacemethod(f.Type) != 0) {
if Isptr[this.Etype] != 0 && Isptr[t.Etype] == 0 && f.Embedded != 2 && !isifacemethod(f.Type) {
continue
}
@ -412,7 +412,7 @@ func methods(t *Type) *Sig {
a.type_ = methodfunc(f.Type, t)
a.mtype = methodfunc(f.Type, nil)
if !(a.isym.Flags&SymSiggen != 0) {
if a.isym.Flags&SymSiggen == 0 {
a.isym.Flags |= SymSiggen
if !Eqtype(this, it) || this.Width < Types[Tptr].Width {
compiling_wrappers = 1
@ -421,7 +421,7 @@ func methods(t *Type) *Sig {
}
}
if !(a.tsym.Flags&SymSiggen != 0) {
if a.tsym.Flags&SymSiggen == 0 {
a.tsym.Flags |= SymSiggen
if !Eqtype(this, t) {
compiling_wrappers = 1
@ -489,7 +489,7 @@ func imethods(t *Type) *Sig {
// code can refer to it.
isym = methodsym(method, t, 0)
if !(isym.Flags&SymSiggen != 0) {
if isym.Flags&SymSiggen == 0 {
isym.Flags |= SymSiggen
genwrapper(t, f, isym, 0)
}
@ -649,7 +649,7 @@ var kinds = []int{
func haspointers(t *Type) bool {
var t1 *Type
var ret int
var ret bool
if t.Haspointers != 0 {
return t.Haspointers-1 != 0
@ -672,26 +672,26 @@ func haspointers(t *Type) bool {
TCOMPLEX64,
TCOMPLEX128,
TBOOL:
ret = 0
ret = false
case TARRAY:
if t.Bound < 0 { // slice
ret = 1
ret = true
break
}
if t.Bound == 0 { // empty array
ret = 0
ret = false
break
}
ret = bool2int(haspointers(t.Type))
ret = haspointers(t.Type)
case TSTRUCT:
ret = 0
ret = false
for t1 = t.Type; t1 != nil; t1 = t1.Down {
if haspointers(t1.Type) {
ret = 1
ret = true
break
}
}
@ -706,11 +706,11 @@ func haspointers(t *Type) bool {
TFUNC:
fallthrough
default:
ret = 1
ret = true
}
t.Haspointers = uint8(1 + ret)
return ret != 0
t.Haspointers = 1 + uint8(bool2int(ret))
return ret
}
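The bool2int call that survives here is deliberate: Type.Haspointers is a uint8 tri-state cache, not a bool, so the freshly computed answer still has to be packed into it. The encoding, implied by the t.Haspointers-1 != 0 fast path at the top of the function:

// Haspointers cache encoding:
//   0 means not yet computed
//   1 means no pointers  (1 + bool2int(false))
//   2 means has pointers (1 + bool2int(true))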
/*
@ -724,7 +724,7 @@ func dcommontype(s *Sym, ot int, t *Type) int {
var i int
var alg int
var sizeofAlg int
var gcprog int
var gcprog bool
var sptr *Sym
var algsym *Sym
var zero *Sym
@ -751,7 +751,7 @@ func dcommontype(s *Sym, ot int, t *Type) int {
algsym = dalgsym(t)
}
if t.Sym != nil && !(Isptr[t.Etype] != 0) {
if t.Sym != nil && Isptr[t.Etype] == 0 {
sptr = dtypesym(Ptrto(t))
} else {
sptr = weaktypesym(Ptrto(t))
@ -811,10 +811,10 @@ func dcommontype(s *Sym, ot int, t *Type) int {
if !haspointers(t) {
i |= obj.KindNoPointers
}
if isdirectiface(t) != 0 {
if isdirectiface(t) {
i |= obj.KindDirectIface
}
if gcprog != 0 {
if gcprog {
i |= obj.KindGCProg
}
ot = duint8(s, ot, uint8(i)) // kind
@ -825,7 +825,7 @@ func dcommontype(s *Sym, ot int, t *Type) int {
}
// gc
if gcprog != 0 {
if gcprog {
gengcprog(t, &gcprog0, &gcprog1)
if gcprog0 != nil {
ot = dsymptr(s, ot, gcprog0, 0)
@ -937,7 +937,7 @@ func typenamesym(t *Type) *Sym {
var s *Sym
var n *Node
if t == nil || (Isptr[t.Etype] != 0 && t.Type == nil) || isideal(t) != 0 {
if t == nil || (Isptr[t.Etype] != 0 && t.Type == nil) || isideal(t) {
Fatal("typename %v", Tconv(t, 0))
}
s = typesym(t)
@ -987,7 +987,7 @@ func weaktypesym(t *Type) *Sym {
* Returns true if t has a reflexive equality operator.
* That is, if x==x for all x of type t.
*/
func isreflexive(t *Type) int {
func isreflexive(t *Type) bool {
var t1 *Type
switch t.Etype {
case TBOOL,
@ -1007,33 +1007,33 @@ func isreflexive(t *Type) int {
TUNSAFEPTR,
TSTRING,
TCHAN:
return 1
return true
case TFLOAT32,
TFLOAT64,
TCOMPLEX64,
TCOMPLEX128,
TINTER:
return 0
return false
case TARRAY:
if Isslice(t) != 0 {
if Isslice(t) {
Fatal("slice can't be a map key: %v", Tconv(t, 0))
}
return isreflexive(t.Type)
case TSTRUCT:
for t1 = t.Type; t1 != nil; t1 = t1.Down {
if !(isreflexive(t1.Type) != 0) {
return 0
if !isreflexive(t1.Type) {
return false
}
}
return 1
return true
default:
Fatal("bad type for map key: %v", Tconv(t, 0))
return 0
return false
}
}
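The float and complex cases return false because floating-point equality is not reflexive: NaN compares unequal to itself, so map-key code cannot assume x == x for those element types. A self-contained demonstration:

package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	fmt.Println(nan == nan) // false: NaN breaks reflexivity for float keys
}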
@ -1062,7 +1062,7 @@ func dtypesym(t *Type) *Sym {
t = Types[t.Etype]
}
if isideal(t) != 0 {
if isideal(t) {
Fatal("dtypesym %v", Tconv(t, 0))
}
@ -1090,7 +1090,7 @@ func dtypesym(t *Type) *Sym {
}
// named types from other files are defined only by those files
if tbase.Sym != nil && !(tbase.Local != 0) {
if tbase.Sym != nil && tbase.Local == 0 {
return s
}
if isforw[tbase.Etype] != 0 {
@ -1230,7 +1230,7 @@ ok:
}
ot = duint16(s, ot, uint16(mapbucket(t).Width))
ot = duint8(s, ot, uint8(isreflexive(t.Down)))
ot = duint8(s, ot, uint8(bool2int(isreflexive(t.Down))))
case TPTR32,
TPTR64:
@ -1265,7 +1265,7 @@ ok:
ot = duintxx(s, ot, uint64(n), Widthint)
for t1 = t.Type; t1 != nil; t1 = t1.Down {
// ../../runtime/type.go:/structField
if t1.Sym != nil && !(t1.Embedded != 0) {
if t1.Sym != nil && t1.Embedded == 0 {
ot = dgostringptr(s, ot, t1.Sym.Name)
if exportname(t1.Sym.Name) {
ot = dgostringptr(s, ot, "")
@ -1447,12 +1447,12 @@ func dalgsym(t *Type) *Sym {
return s
}
func usegcprog(t *Type) int {
func usegcprog(t *Type) bool {
var size int64
var nptr int64
if !haspointers(t) {
return 0
return false
}
if t.Width == BADWIDTH {
dowidth(t)
@ -1473,7 +1473,7 @@ func usegcprog(t *Type) int {
// While large objects usually contain arrays; and even if they don't,
// the program uses 2 bits per word while the mask uses 4 bits per word,
// so the program is still smaller.
return bool2int(size > int64(2*Widthptr))
return size > int64(2*Widthptr)
}
// Generates sparse GC bitmask (4 bits per word).
@ -1483,7 +1483,7 @@ func gengcmask(t *Type, gcmask []byte) {
var nptr int64
var i int64
var j int64
var half int
var half bool
var bits uint8
var pos []byte
@ -1505,7 +1505,7 @@ func gengcmask(t *Type, gcmask []byte) {
pos = gcmask
nptr = (t.Width + int64(Widthptr) - 1) / int64(Widthptr)
half = 0
half = false
// If number of words is odd, repeat the mask.
// This makes simpler handling of arrays in runtime.
@ -1520,12 +1520,12 @@ func gengcmask(t *Type, gcmask []byte) {
bits = obj.BitsScalar
}
bits <<= 2
if half != 0 {
if half {
bits <<= 4
}
pos[0] |= byte(bits)
half = bool2int(!(half != 0))
if !(half != 0) {
half = !half
if !half {
pos = pos[1:]
}
}
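The half toggle packs two 4-bit per-word categories into each mask byte. The layout it maintains, as read from the code (low nibble filled first):

// byte k of gcmask:
//   [ bits for word 2k+1 | bits for word 2k ]
//       high nibble           low nibble
// half == true means the low nibble is occupied, so the next entry is
// shifted up by 4; once the high nibble is written the byte is full
// and pos advances.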
@ -1699,7 +1699,7 @@ func gengcprog1(g *ProgGen, t *Type, xoffset *int64) {
*xoffset += t.Width
case TARRAY:
if Isslice(t) != 0 {
if Isslice(t) {
proggendata(g, obj.BitsPointer)
proggendata(g, obj.BitsScalar)
proggendata(g, obj.BitsScalar)


@ -74,7 +74,7 @@ func setaddrs(bit Bits) {
var v *Var
var node *Node
for bany(&bit) != 0 {
for bany(&bit) {
// convert each bit to a variable
i = bnum(bit)
@ -169,9 +169,9 @@ func addmove(r *Flow, bn int, rn int, f int) {
p1.From.Type = obj.TYPE_REG
p1.From.Reg = int16(rn)
p1.From.Name = obj.NAME_NONE
if !(f != 0) {
if f == 0 {
p1.From = *a
*a = obj.Zprog.From
*a = obj.Addr{}
a.Type = obj.TYPE_REG
a.Reg = int16(rn)
}
@ -182,18 +182,18 @@ func addmove(r *Flow, bn int, rn int, f int) {
Ostats.Nspill++
}
func overlap_reg(o1 int64, w1 int, o2 int64, w2 int) int {
func overlap_reg(o1 int64, w1 int, o2 int64, w2 int) bool {
var t1 int64
var t2 int64
t1 = o1 + int64(w1)
t2 = o2 + int64(w2)
if !(t1 > o2 && t2 > o1) {
return 0
if t1 <= o2 || t2 <= o1 {
return false
}
return 1
return true
}
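The rewritten guard is De Morgan's law plus inverted comparisons: !(t1 > o2 && t2 > o1) is exactly t1 <= o2 || t2 <= o1, i.e. the half-open extents [o1,t1) and [o2,t2) are disjoint precisely when one ends before the other begins. A self-contained spot check of the equivalence:

package main

func main() {
	cases := [][4]int64{{0, 8, 4, 12}, {0, 4, 4, 8}, {0, 2, 4, 6}, {4, 6, 0, 8}}
	for _, c := range cases {
		o1, t1, o2, t2 := c[0], c[1], c[2], c[3]
		oldForm := !(t1 > o2 && t2 > o1) // original negated conjunction
		newForm := t1 <= o2 || t2 <= o1  // simplified form above
		if oldForm != newForm {
			panic("rewrite not equivalent")
		}
	}
}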
func mkvar(f *Flow, a *obj.Addr) Bits {
@ -292,7 +292,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
if int(v.etype) == et {
if int64(v.width) == w {
// TODO(rsc): Remove special case for arm here.
if !(flag != 0) || Thearch.Thechar != '5' {
if flag == 0 || Thearch.Thechar != '5' {
return blsh(uint(i))
}
}
@ -300,7 +300,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
}
// if they overlap, disable both
if overlap_reg(v.offset, v.width, o, int(w)) != 0 {
if overlap_reg(v.offset, v.width, o, int(w)) {
// print("disable overlap %s %d %d %d %d, %E != %E\n", s->name, v->offset, v->width, o, w, v->etype, et);
v.addr = 1
@ -446,7 +446,7 @@ func prop(f *Flow, ref Bits, cal Bits) {
switch f1.Prog.As {
case obj.ACALL:
if Noreturn(f1.Prog) != 0 {
if Noreturn(f1.Prog) {
break
}
@ -499,7 +499,7 @@ func prop(f *Flow, ref Bits, cal Bits) {
// This will set the bits at most twice, keeping the overall loop linear.
v1, _ = v.node.Opt.(*Var)
if v == v1 || !(btest(&cal, uint(v1.id)) != 0) {
if v == v1 || !btest(&cal, uint(v1.id)) {
for ; v1 != nil; v1 = v1.nextinnode {
biset(&cal, uint(v1.id))
}
@ -633,7 +633,7 @@ func paint1(f *Flow, bn int) {
return
}
for {
if !(r.refbehind.b[z]&bb != 0) {
if r.refbehind.b[z]&bb == 0 {
break
}
f1 = f.P1
@ -641,7 +641,7 @@ func paint1(f *Flow, bn int) {
break
}
r1 = f1.Data.(*Reg)
if !(r1.refahead.b[z]&bb != 0) {
if r1.refahead.b[z]&bb == 0 {
break
}
if r1.act.b[z]&bb != 0 {
@ -679,7 +679,7 @@ func paint1(f *Flow, bn int) {
}
}
if !(r.refahead.b[z]&bb != 0) {
if r.refahead.b[z]&bb == 0 {
break
}
f1 = f.S2
@ -696,7 +696,7 @@ func paint1(f *Flow, bn int) {
if r.act.b[z]&bb != 0 {
break
}
if !(r.refbehind.b[z]&bb != 0) {
if r.refbehind.b[z]&bb == 0 {
break
}
}
@ -714,11 +714,11 @@ func paint2(f *Flow, bn int, depth int) uint64 {
bb = 1 << uint(bn%64)
vreg = regbits
r = f.Data.(*Reg)
if !(r.act.b[z]&bb != 0) {
if r.act.b[z]&bb == 0 {
return vreg
}
for {
if !(r.refbehind.b[z]&bb != 0) {
if r.refbehind.b[z]&bb == 0 {
break
}
f1 = f.P1
@ -726,10 +726,10 @@ func paint2(f *Flow, bn int, depth int) uint64 {
break
}
r1 = f1.Data.(*Reg)
if !(r1.refahead.b[z]&bb != 0) {
if r1.refahead.b[z]&bb == 0 {
break
}
if !(r1.act.b[z]&bb != 0) {
if r1.act.b[z]&bb == 0 {
break
}
f = f1
@ -753,7 +753,7 @@ func paint2(f *Flow, bn int, depth int) uint64 {
}
}
if !(r.refahead.b[z]&bb != 0) {
if r.refahead.b[z]&bb == 0 {
break
}
f1 = f.S2
@ -767,10 +767,10 @@ func paint2(f *Flow, bn int, depth int) uint64 {
break
}
r = f.Data.(*Reg)
if !(r.act.b[z]&bb != 0) {
if r.act.b[z]&bb == 0 {
break
}
if !(r.refbehind.b[z]&bb != 0) {
if r.refbehind.b[z]&bb == 0 {
break
}
}
@ -793,7 +793,7 @@ func paint3(f *Flow, bn int, rb uint64, rn int) {
return
}
for {
if !(r.refbehind.b[z]&bb != 0) {
if r.refbehind.b[z]&bb == 0 {
break
}
f1 = f.P1
@ -801,7 +801,7 @@ func paint3(f *Flow, bn int, rb uint64, rn int) {
break
}
r1 = f1.Data.(*Reg)
if !(r1.refahead.b[z]&bb != 0) {
if r1.refahead.b[z]&bb == 0 {
break
}
if r1.act.b[z]&bb != 0 {
@ -851,7 +851,7 @@ func paint3(f *Flow, bn int, rb uint64, rn int) {
}
}
if !(r.refahead.b[z]&bb != 0) {
if r.refahead.b[z]&bb == 0 {
break
}
f1 = f.S2
@ -868,7 +868,7 @@ func paint3(f *Flow, bn int, rb uint64, rn int) {
if r.act.b[z]&bb != 0 {
break
}
if !(r.refbehind.b[z]&bb != 0) {
if r.refbehind.b[z]&bb == 0 {
break
}
}
@ -896,33 +896,33 @@ func dumpone(f *Flow, isreg int) {
for z = 0; z < BITS; z++ {
bit.b[z] = r.set.b[z] | r.use1.b[z] | r.use2.b[z] | r.refbehind.b[z] | r.refahead.b[z] | r.calbehind.b[z] | r.calahead.b[z] | r.regdiff.b[z] | r.act.b[z] | 0
}
if bany(&bit) != 0 {
if bany(&bit) {
fmt.Printf("\t")
if bany(&r.set) != 0 {
if bany(&r.set) {
fmt.Printf(" s:%v", Qconv(r.set, 0))
}
if bany(&r.use1) != 0 {
if bany(&r.use1) {
fmt.Printf(" u1:%v", Qconv(r.use1, 0))
}
if bany(&r.use2) != 0 {
if bany(&r.use2) {
fmt.Printf(" u2:%v", Qconv(r.use2, 0))
}
if bany(&r.refbehind) != 0 {
if bany(&r.refbehind) {
fmt.Printf(" rb:%v ", Qconv(r.refbehind, 0))
}
if bany(&r.refahead) != 0 {
if bany(&r.refahead) {
fmt.Printf(" ra:%v ", Qconv(r.refahead, 0))
}
if bany(&r.calbehind) != 0 {
if bany(&r.calbehind) {
fmt.Printf(" cb:%v ", Qconv(r.calbehind, 0))
}
if bany(&r.calahead) != 0 {
if bany(&r.calahead) {
fmt.Printf(" ca:%v ", Qconv(r.calahead, 0))
}
if bany(&r.regdiff) != 0 {
if bany(&r.regdiff) {
fmt.Printf(" d:%v ", Qconv(r.regdiff, 0))
}
if bany(&r.act) != 0 {
if bany(&r.act) {
fmt.Printf(" a:%v ", Qconv(r.act, 0))
}
}
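A legend for the single-letter tags printed above, mapping each Reg set to its label:

// set: s   use1: u1   use2: u2   refbehind: rb   refahead: ra
// calbehind: cb   calahead: ca   regdiff: d   act: a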
@ -1052,7 +1052,7 @@ func regopt(firstp *obj.Prog) {
r.set.b[0] |= info.Regset
bit = mkvar(f, &p.From)
if bany(&bit) != 0 {
if bany(&bit) {
if info.Flags&LeftAddr != 0 {
setaddrs(bit)
}
@ -1080,7 +1080,7 @@ func regopt(firstp *obj.Prog) {
}
bit = mkvar(f, &p.To)
if bany(&bit) != 0 {
if bany(&bit) {
if info.Flags&RightAddr != 0 {
setaddrs(bit)
}
@ -1143,7 +1143,7 @@ func regopt(firstp *obj.Prog) {
for f = firstf; f != nil; f = f.Link {
p = f.Prog
if p.As == obj.AVARDEF && Isfat(((p.To.Node).(*Node)).Type) != 0 && ((p.To.Node).(*Node)).Opt != nil {
if p.As == obj.AVARDEF && Isfat(((p.To.Node).(*Node)).Type) && ((p.To.Node).(*Node)).Opt != nil {
active++
walkvardef(p.To.Node.(*Node), f, active)
}
@ -1172,7 +1172,7 @@ loop11:
for f = firstf; f != nil; f = f1 {
f1 = f.Link
if f1 != nil && f1.Active != 0 && !(f.Active != 0) {
if f1 != nil && f1.Active != 0 && f.Active == 0 {
prop(f, zbits, zbits)
i = 1
}
@ -1244,7 +1244,7 @@ loop2:
for z = 0; z < BITS; z++ {
bit.b[z] = (r.refahead.b[z] | r.calahead.b[z]) &^ (externs.b[z] | params.b[z] | addrs.b[z] | consts.b[z])
}
if bany(&bit) != 0 && !(f.Refset != 0) {
if bany(&bit) && f.Refset == 0 {
// should never happen - all variables are preset
if Debug['w'] != 0 {
fmt.Printf("%v: used and not set: %v\n", f.Prog.Line(), Qconv(bit, 0))
@ -1262,7 +1262,7 @@ loop2:
for z = 0; z < BITS; z++ {
bit.b[z] = r.set.b[z] &^ (r.refahead.b[z] | r.calahead.b[z] | addrs.b[z])
}
if bany(&bit) != 0 && !(f.Refset != 0) {
if bany(&bit) && f.Refset == 0 {
if Debug['w'] != 0 {
fmt.Printf("%v: set and not used: %v\n", f.Prog.Line(), Qconv(bit, 0))
}
@ -1273,7 +1273,7 @@ loop2:
for z = 0; z < BITS; z++ {
bit.b[z] = LOAD(r, z) &^ (r.act.b[z] | addrs.b[z])
}
for bany(&bit) != 0 {
for bany(&bit) {
i = bnum(bit)
change = 0
paint1(f, i)
@ -1354,7 +1354,7 @@ brk:
* pass 7
* peep-hole on basic block
*/
if !(Debug['R'] != 0) || Debug['P'] != 0 {
if Debug['R'] == 0 || Debug['P'] != 0 {
Thearch.Peep(firstp)
}


@ -134,7 +134,6 @@ func walkselect(sel *Node) {
switch n.Op {
default:
Fatal("select %v", Oconv(int(n.Op), 0))
fallthrough
// ok already
case OSEND:
@ -232,7 +231,6 @@ func walkselect(sel *Node) {
switch n.Op {
default:
Fatal("select %v", Oconv(int(n.Op), 0))
fallthrough
// if selectnbsend(c, v) { body } else { default body }
case OSEND:
@ -299,7 +297,6 @@ func walkselect(sel *Node) {
switch n.Op {
default:
Fatal("select %v", Oconv(int(n.Op), 0))
fallthrough
// selectsend(sel *byte, hchan *chan any, elem *any) (selected bool);
case OSEND:


@ -155,7 +155,7 @@ func init1(n *Node, out **NodeList) {
if n.Defn.Left != n {
goto bad
}
if isblank(n.Defn.Left) && candiscard(n.Defn.Right) != 0 {
if isblank(n.Defn.Left) && candiscard(n.Defn.Right) {
n.Defn.Op = OEMPTY
n.Defn.Left = nil
n.Defn.Right = nil
@ -166,7 +166,7 @@ func init1(n *Node, out **NodeList) {
if Debug['j'] != 0 {
fmt.Printf("%v\n", Sconv(n.Sym, 0))
}
if isblank(n) || !(staticinit(n, out) != 0) {
if isblank(n) || !staticinit(n, out) {
if Debug['%'] != 0 {
Dump("nonstatic", n.Defn)
}
@ -275,7 +275,7 @@ func initfix(l *NodeList) *NodeList {
* compilation of top-level (static) assignments
* into DATA statements if at all possible.
*/
func staticinit(n *Node, out **NodeList) int {
func staticinit(n *Node, out **NodeList) bool {
var l *Node
var r *Node
@ -291,7 +291,7 @@ func staticinit(n *Node, out **NodeList) int {
// like staticassign but we are copying an already
// initialized value r.
func staticcopy(l *Node, r *Node, out **NodeList) int {
func staticcopy(l *Node, r *Node, out **NodeList) bool {
var i int
var e *InitEntry
var p *InitPlan
@ -302,37 +302,37 @@ func staticcopy(l *Node, r *Node, out **NodeList) int {
var n1 Node
if r.Op != ONAME || r.Class != PEXTERN || r.Sym.Pkg != localpkg {
return 0
return false
}
if r.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
return 0
return false
}
if r.Defn.Op != OAS {
return 0
return false
}
orig = r
r = r.Defn.Right
switch r.Op {
case ONAME:
if staticcopy(l, r, out) != 0 {
return 1
if staticcopy(l, r, out) {
return true
}
*out = list(*out, Nod(OAS, l, r))
return 1
return true
case OLITERAL:
if iszero(r) != 0 {
return 1
if iszero(r) {
return true
}
gdata(l, r, int(l.Type.Width))
return 1
return true
case OADDR:
switch r.Left.Op {
case ONAME:
gdata(l, r, int(l.Type.Width))
return 1
return true
}
case OPTRLIT:
@ -347,11 +347,11 @@ func staticcopy(l *Node, r *Node, out **NodeList) int {
OMAPLIT:
gdata(l, Nod(OADDR, r.Nname, nil), int(l.Type.Width))
return 1
return true
}
case OARRAYLIT:
if Isslice(r.Type) != 0 {
if Isslice(r.Type) {
// copy slice
a = r.Nname
@ -362,7 +362,7 @@ func staticcopy(l *Node, r *Node, out **NodeList) int {
gdata(&n1, r.Right, Widthint)
n1.Xoffset = l.Xoffset + int64(Array_cap)
gdata(&n1, r.Right, Widthint)
return 1
return true
}
fallthrough
@ -381,7 +381,7 @@ func staticcopy(l *Node, r *Node, out **NodeList) int {
ll = Nod(OXXX, nil, nil)
*ll = n1
ll.Orig = ll // completely separate copy
if !(staticassign(ll, e.Expr, out) != 0) {
if !staticassign(ll, e.Expr, out) {
// Requires computation, but we're
// copying someone else's computation.
rr = Nod(OXXX, nil, nil)
@ -395,13 +395,13 @@ func staticcopy(l *Node, r *Node, out **NodeList) int {
}
}
return 1
return true
}
return 0
return false
}
func staticassign(l *Node, r *Node, out **NodeList) int {
func staticassign(l *Node, r *Node, out **NodeList) bool {
var a *Node
var n1 Node
var nam Node
@ -422,18 +422,18 @@ func staticassign(l *Node, r *Node, out **NodeList) int {
}
case OLITERAL:
if iszero(r) != 0 {
return 1
if iszero(r) {
return true
}
gdata(l, r, int(l.Type.Width))
return 1
return true
case OADDR:
if stataddr(&nam, r.Left) != 0 {
if stataddr(&nam, r.Left) {
n1 = *r
n1.Left = &nam
gdata(l, &n1, int(l.Type.Width))
return 1
return true
}
fallthrough
@ -453,22 +453,22 @@ func staticassign(l *Node, r *Node, out **NodeList) int {
gdata(l, Nod(OADDR, a, nil), int(l.Type.Width))
// Init underlying literal.
if !(staticassign(a, r.Left, out) != 0) {
if !staticassign(a, r.Left, out) {
*out = list(*out, Nod(OAS, a, r.Left))
}
return 1
return true
}
case OSTRARRAYBYTE:
if l.Class == PEXTERN && r.Left.Op == OLITERAL {
sval = r.Left.Val.U.Sval
slicebytes(l, sval.S, len(sval.S))
return 1
return true
}
case OARRAYLIT:
initplan(r)
if Isslice(r.Type) != 0 {
if Isslice(r.Type) {
// Init slice.
ta = typ(TARRAY)
@ -505,20 +505,20 @@ func staticassign(l *Node, r *Node, out **NodeList) int {
a = Nod(OXXX, nil, nil)
*a = n1
a.Orig = a // completely separate copy
if !(staticassign(a, e.Expr, out) != 0) {
if !staticassign(a, e.Expr, out) {
*out = list(*out, Nod(OAS, a, e.Expr))
}
}
}
return 1
return true
// TODO: Table-driven map insert.
case OMAPLIT:
break
}
return 0
return false
}
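
staticassign and staticcopy now report success as bool, and callers fall back to emitting a runtime assignment when static initialization fails. A toy sketch of that try-static-else-dynamic shape; the types and the "negatives need runtime work" rule are invented, only the fallback structure mirrors the hunks above:

package main

import "fmt"

// tryStatic attempts to record v as static data; on failure the
// caller appends a runtime assignment instead, the same shape as
// the if !staticassign(...) { *out = list(*out, ...) } calls above.
func tryStatic(v int, data *[]int) bool {
	if v < 0 { // pretend negatives need runtime computation
		return false
	}
	*data = append(*data, v)
	return true
}

func main() {
	var data []int
	var runtimeInit []string
	for _, v := range []int{1, -2, 3} {
		if !tryStatic(v, &data) {
			runtimeInit = append(runtimeInit, fmt.Sprintf("x = %d", v))
		}
	}
	fmt.Println(data, runtimeInit) // [1 3] [x = -2]
}
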
/*
@ -534,27 +534,27 @@ func staticname(t *Type, ctxt int) *Node {
namebuf = fmt.Sprintf("statictmp_%.4d", statuniqgen)
statuniqgen++
n = newname(Lookup(namebuf))
if !(ctxt != 0) {
if ctxt == 0 {
n.Readonly = 1
}
addvar(n, t, PEXTERN)
return n
}
func isliteral(n *Node) int {
func isliteral(n *Node) bool {
if n.Op == OLITERAL {
if n.Val.Ctype != CTNIL {
return 1
return true
}
}
return 0
return false
}
func simplename(n *Node) int {
func simplename(n *Node) bool {
if n.Op != ONAME {
goto no
}
if !(n.Addable != 0) {
if n.Addable == 0 {
goto no
}
if n.Class&PHEAP != 0 {
@ -563,10 +563,10 @@ func simplename(n *Node) int {
if n.Class == PPARAMREF {
goto no
}
return 1
return true
no:
return 0
return false
}
func litas(l *Node, r *Node, init **NodeList) {
@ -591,13 +591,13 @@ func getdyn(n *Node, top int) int {
mode = 0
switch n.Op {
default:
if isliteral(n) != 0 {
if isliteral(n) {
return MODECONST
}
return MODEDYNAM
case OARRAYLIT:
if !(top != 0) && n.Type.Bound < 0 {
if top == 0 && n.Type.Bound < 0 {
return MODEDYNAM
}
fallthrough
@ -657,7 +657,7 @@ func structlit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
continue
}
if isliteral(value) != 0 {
if isliteral(value) {
if pass == 2 {
continue
}
@ -725,7 +725,7 @@ func arraylit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
continue
}
if isliteral(index) != 0 && isliteral(value) != 0 {
if isliteral(index) && isliteral(value) {
if pass == 2 {
continue
}
@ -881,7 +881,7 @@ func slicelit(ctxt int, n *Node, var_ *Node, init **NodeList) {
index = r.Left
value = r.Right
a = Nod(OINDEX, var_, index)
a.Bounded = 1
a.Bounded = true
// TODO need to check bounds?
@ -898,7 +898,7 @@ func slicelit(ctxt int, n *Node, var_ *Node, init **NodeList) {
continue
}
if isliteral(index) != 0 && isliteral(value) != 0 {
if isliteral(index) && isliteral(value) {
continue
}
@ -951,7 +951,7 @@ func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
index = r.Left
value = r.Right
if isliteral(index) != 0 && isliteral(value) != 0 {
if isliteral(index) && isliteral(value) {
b++
}
}
@ -999,7 +999,7 @@ func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
index = r.Left
value = r.Right
if isliteral(index) != 0 && isliteral(value) != 0 {
if isliteral(index) && isliteral(value) {
// build vstat[b].a = key;
a = Nodintconst(b)
@ -1033,11 +1033,11 @@ func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
index = temp(Types[TINT])
a = Nod(OINDEX, vstat, index)
a.Bounded = 1
a.Bounded = true
a = Nod(ODOT, a, newname(symb))
r = Nod(OINDEX, vstat, index)
r.Bounded = 1
r.Bounded = true
r = Nod(ODOT, r, newname(syma))
r = Nod(OINDEX, var_, r)
@ -1068,7 +1068,7 @@ func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
index = r.Left
value = r.Right
if isliteral(index) != 0 && isliteral(value) != 0 {
if isliteral(index) && isliteral(value) {
continue
}
@ -1118,10 +1118,9 @@ func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
switch n.Op {
default:
Fatal("anylit: not lit")
fallthrough
case OPTRLIT:
if !(Isptr[t.Etype] != 0) {
if Isptr[t.Etype] == 0 {
Fatal("anylit: not ptr")
}
@ -1150,7 +1149,7 @@ func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
Fatal("anylit: not struct")
}
if simplename(var_) != 0 && count(n.List) > 4 {
if simplename(var_) && count(n.List) > 4 {
if ctxt == 0 {
// lay out static data
vstat = staticname(t, ctxt)
@ -1176,7 +1175,7 @@ func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
}
// initialize of not completely specified
if simplename(var_) != 0 || count(n.List) < structcount(t) {
if simplename(var_) || count(n.List) < structcount(t) {
a = Nod(OAS, var_, nil)
typecheck(&a, Etop)
walkexpr(&a, init)
@ -1194,7 +1193,7 @@ func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
break
}
if simplename(var_) != 0 && count(n.List) > 4 {
if simplename(var_) && count(n.List) > 4 {
if ctxt == 0 {
// lay out static data
vstat = staticname(t, ctxt)
@ -1220,7 +1219,7 @@ func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
}
// initialize of not completely specified
if simplename(var_) != 0 || int64(count(n.List)) < t.Bound {
if simplename(var_) || int64(count(n.List)) < t.Bound {
a = Nod(OAS, var_, nil)
typecheck(&a, Etop)
walkexpr(&a, init)
@ -1237,7 +1236,7 @@ func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
}
}
func oaslit(n *Node, init **NodeList) int {
func oaslit(n *Node, init **NodeList) bool {
var ctxt int
if n.Left == nil || n.Right == nil {
@ -1246,7 +1245,7 @@ func oaslit(n *Node, init **NodeList) int {
if n.Left.Type == nil || n.Right.Type == nil {
goto no
}
if !(simplename(n.Left) != 0) {
if !simplename(n.Left) {
goto no
}
if !Eqtype(n.Left.Type, n.Right.Type) {
@ -1268,28 +1267,28 @@ func oaslit(n *Node, init **NodeList) int {
case OSTRUCTLIT,
OARRAYLIT,
OMAPLIT:
if vmatch1(n.Left, n.Right) != 0 {
if vmatch1(n.Left, n.Right) {
goto no
}
anylit(ctxt, n.Right, n.Left, init)
}
n.Op = OEMPTY
return 1
return true
// not a special composite literal assignment
no:
return 0
return false
}
func getlit(lit *Node) int {
if Smallintconst(lit) != 0 {
if Smallintconst(lit) {
return int(Mpgetfix(lit.Val.U.Xval))
}
return -1
}
func stataddr(nam *Node, n *Node) int {
func stataddr(nam *Node, n *Node) bool {
var l int
if n == nil {
@ -1299,21 +1298,21 @@ func stataddr(nam *Node, n *Node) int {
switch n.Op {
case ONAME:
*nam = *n
return int(n.Addable)
return n.Addable != 0
case ODOT:
if !(stataddr(nam, n.Left) != 0) {
if !stataddr(nam, n.Left) {
break
}
nam.Xoffset += n.Xoffset
nam.Type = n.Type
return 1
return true
case OINDEX:
if n.Left.Type.Bound < 0 {
break
}
if !(stataddr(nam, n.Left) != 0) {
if !stataddr(nam, n.Left) {
break
}
l = getlit(n.Right)
@ -1327,11 +1326,11 @@ func stataddr(nam *Node, n *Node) int {
}
nam.Xoffset += int64(l) * n.Type.Width
nam.Type = n.Type
return 1
return true
}
no:
return 0
return false
}
func initplan(n *Node) {
@ -1347,12 +1346,11 @@ func initplan(n *Node) {
switch n.Op {
default:
Fatal("initplan")
fallthrough
case OARRAYLIT:
for l = n.List; l != nil; l = l.Next {
a = l.N
if a.Op != OKEY || !(Smallintconst(a.Left) != 0) {
if a.Op != OKEY || !Smallintconst(a.Left) {
Fatal("initplan arraylit")
}
addvalue(p, n.Type.Type.Width*Mpgetfix(a.Left.Val.U.Xval), nil, a.Right)
@ -1384,13 +1382,13 @@ func addvalue(p *InitPlan, xoffset int64, key *Node, n *Node) {
var e *InitEntry
// special case: zero can be dropped entirely
if iszero(n) != 0 {
if iszero(n) {
p.Zero += n.Type.Width
return
}
// special case: inline struct and array (not slice) literals
if isvaluelit(n) != 0 {
if isvaluelit(n) {
initplan(n)
q = n.Initplan
for i = 0; i < len(q.E); i++ {
@ -1414,7 +1412,7 @@ func addvalue(p *InitPlan, xoffset int64, key *Node, n *Node) {
e.Expr = n
}
func iszero(n *Node) int {
func iszero(n *Node) bool {
var l *NodeList
switch n.Op {
@ -1423,30 +1421,29 @@ func iszero(n *Node) int {
default:
Dump("unexpected literal", n)
Fatal("iszero")
fallthrough
case CTNIL:
return 1
return true
case CTSTR:
return bool2int(n.Val.U.Sval == nil || len(n.Val.U.Sval.S) == 0)
return n.Val.U.Sval == nil || len(n.Val.U.Sval.S) == 0
case CTBOOL:
return bool2int(n.Val.U.Bval == 0)
return n.Val.U.Bval == 0
case CTINT,
CTRUNE:
return bool2int(mpcmpfixc(n.Val.U.Xval, 0) == 0)
return mpcmpfixc(n.Val.U.Xval, 0) == 0
case CTFLT:
return bool2int(mpcmpfltc(n.Val.U.Fval, 0) == 0)
return mpcmpfltc(n.Val.U.Fval, 0) == 0
case CTCPLX:
return bool2int(mpcmpfltc(&n.Val.U.Cval.Real, 0) == 0 && mpcmpfltc(&n.Val.U.Cval.Imag, 0) == 0)
return mpcmpfltc(&n.Val.U.Cval.Real, 0) == 0 && mpcmpfltc(&n.Val.U.Cval.Imag, 0) == 0
}
case OARRAYLIT:
if Isslice(n.Type) != 0 {
if Isslice(n.Type) {
break
}
fallthrough
@ -1454,18 +1451,18 @@ func iszero(n *Node) int {
// fall through
case OSTRUCTLIT:
for l = n.List; l != nil; l = l.Next {
if !(iszero(l.N.Right) != 0) {
return 0
if !iszero(l.N.Right) {
return false
}
}
return 1
return true
}
return 0
return false
}
func isvaluelit(n *Node) int {
return bool2int((n.Op == OARRAYLIT && Isfixedarray(n.Type) != 0) || n.Op == OSTRUCTLIT)
func isvaluelit(n *Node) bool {
return (n.Op == OARRAYLIT && Isfixedarray(n.Type)) || n.Op == OSTRUCTLIT
}
func entry(p *InitPlan) *InitEntry {
@ -1473,7 +1470,7 @@ func entry(p *InitPlan) *InitEntry {
return &p.E[len(p.E)-1]
}
func gen_as_init(n *Node) int {
func gen_as_init(n *Node) bool {
var nr *Node
var nl *Node
var nam Node
@ -1486,7 +1483,7 @@ func gen_as_init(n *Node) int {
nr = n.Right
nl = n.Left
if nr == nil {
if !(stataddr(&nam, nl) != 0) {
if !stataddr(&nam, nl) {
goto no
}
if nam.Class != PEXTERN {
@ -1499,7 +1496,7 @@ func gen_as_init(n *Node) int {
goto no
}
if !(stataddr(&nam, nl) != 0) {
if !stataddr(&nam, nl) {
goto no
}
@ -1562,7 +1559,7 @@ func gen_as_init(n *Node) int {
}
yes:
return 1
return true
slice:
gused(nil) // in case the data is the dest of a goto
@ -1598,5 +1595,5 @@ no:
Fatal("gen_as_init couldnt make data statement")
}
return 0
return false
}


@ -119,7 +119,7 @@ func yyerrorl(line int, fmt_ string, args ...interface{}) {
hcrash()
nerrors++
if nsavederrors+nerrors >= 10 && !(Debug['e'] != 0) {
if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
Flusherrors()
fmt.Printf("%v: too many errors\n", Ctxt.Line(line))
errorexit()
@ -192,7 +192,7 @@ func Yyerror(fmt_ string, args ...interface{}) {
hcrash()
nerrors++
if nsavederrors+nerrors >= 10 && !(Debug['e'] != 0) {
if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
Flusherrors()
fmt.Printf("%v: too many errors\n", Ctxt.Line(parserline()))
errorexit()
@ -248,7 +248,7 @@ func linehist(file string, off int32, relative int) {
fmt.Printf(" at line %v\n", Ctxt.Line(int(lexlineno)))
}
if off < 0 && file[0] != '/' && !(relative != 0) {
if off < 0 && file[0] != '/' && relative == 0 {
file = fmt.Sprintf("%s/%s", Ctxt.Pathname, file)
}
obj.Linklinehist(Ctxt, int(lexlineno), file, int(off))
@ -384,6 +384,26 @@ func importdot(opkg *Pkg, pack *Node) {
}
}
func gethunk() {
var h string
var nh int32
nh = NHUNK
if thunk >= 10*NHUNK {
nh = 10 * NHUNK
}
h = string(make([]byte, nh))
if h == "" {
Flusherrors()
Yyerror("out of memory")
errorexit()
}
hunk = h
nhunk = nh
thunk += nh
}
func Nod(op int, nleft *Node, nright *Node) *Node {
var n *Node
@ -412,14 +432,14 @@ func saveorignode(n *Node) {
// ispaddedfield reports whether the given field
// is followed by padding. For the case where t is
// the last field, total gives the size of the enclosing struct.
func ispaddedfield(t *Type, total int64) int {
func ispaddedfield(t *Type, total int64) bool {
if t.Etype != TFIELD {
Fatal("ispaddedfield called non-field %v", Tconv(t, 0))
}
if t.Down == nil {
return bool2int(t.Width+t.Type.Width != total)
return t.Width+t.Type.Width != total
}
return bool2int(t.Width+t.Type.Width != t.Down.Width)
return t.Width+t.Type.Width != t.Down.Width
}
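
Per the comment above, a field is followed by padding when its offset plus size does not reach the next field's offset (or the struct's total size for the last field). A worked example of that test, with simplified stand-ins for the compiler's Width bookkeeping:

package main

import "fmt"

// field is a stand-in for the compiler's TFIELD type: the field's
// offset within the struct plus the size of its type.
type field struct {
	off, size int64
}

// padded reports whether f is followed by padding, given the offset
// of the next field (or the total struct size for the last field),
// the same comparison ispaddedfield performs on Width values.
func padded(f field, next int64) bool {
	return f.off+f.size != next
}

func main() {
	// struct { a int8; b int64 }: a occupies [0,1), b starts at 8,
	// so a is followed by 7 bytes of padding.
	a := field{off: 0, size: 1}
	fmt.Println(padded(a, 8)) // true
}
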
func algtype1(t *Type, bad **Type) int {
@ -486,13 +506,13 @@ func algtype1(t *Type, bad **Type) int {
return ASTRING
case TINTER:
if isnilinter(t) != 0 {
if isnilinter(t) {
return ANILINTER
}
return AINTER
case TARRAY:
if Isslice(t) != 0 {
if Isslice(t) {
if bad != nil {
*bad = t
}
@ -526,7 +546,7 @@ func algtype1(t *Type, bad **Type) int {
// Blank fields, padded fields, fields with non-memory
// equality need special compare.
if a != AMEM || isblanksym(t1.Sym) || ispaddedfield(t1, t.Width) != 0 {
if a != AMEM || isblanksym(t1.Sym) || ispaddedfield(t1, t.Width) {
ret = -1
continue
}
@ -544,7 +564,7 @@ func algtype(t *Type) int {
a = algtype1(t, nil)
if a == AMEM || a == ANOEQ {
if Isslice(t) != 0 {
if Isslice(t) {
return ASLICE
}
switch t.Width {
@ -687,7 +707,7 @@ func sortinter(t *Type) *Type {
for {
tmp11 := i
i--
if !(tmp11 > 0) {
if tmp11 <= 0 {
break
}
a[i].Down = f
@ -748,12 +768,12 @@ func nodnil() *Node {
return c
}
func Nodbool(b int) *Node {
func Nodbool(b bool) *Node {
var c *Node
c = Nodintconst(0)
c.Val.Ctype = CTBOOL
c.Val.U.Bval = int16(b)
c.Val.U.Bval = int16(bool2int(b))
c.Type = idealbool
return c
}
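
Nodbool now takes a bool, but the Val union still stores an int16, so the value crosses that boundary through bool2int. A minimal sketch of the helper as the converted code appears to assume it:

package main

import "fmt"

// bool2int maps false/true to 0/1; the converted code keeps it at
// boundaries where an integer representation is still required, as
// in int16(bool2int(b)) above.
func bool2int(b bool) int {
	if b {
		return 1
	}
	return 0
}

func main() {
	fmt.Println(bool2int(true), bool2int(false)) // 1 0
}
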
@ -829,46 +849,46 @@ func treecopy(n *Node) *Node {
return m
}
func isnil(n *Node) int {
func isnil(n *Node) bool {
if n == nil {
return 0
return false
}
if n.Op != OLITERAL {
return 0
return false
}
if n.Val.Ctype != CTNIL {
return 0
return false
}
return 1
return true
}
func isptrto(t *Type, et int) int {
func isptrto(t *Type, et int) bool {
if t == nil {
return 0
return false
}
if !(Isptr[t.Etype] != 0) {
return 0
if Isptr[t.Etype] == 0 {
return false
}
t = t.Type
if t == nil {
return 0
return false
}
if int(t.Etype) != et {
return 0
return false
}
return 1
return true
}
func Istype(t *Type, et int) int {
return bool2int(t != nil && int(t.Etype) == et)
func Istype(t *Type, et int) bool {
return t != nil && int(t.Etype) == et
}
func Isfixedarray(t *Type) int {
return bool2int(t != nil && t.Etype == TARRAY && t.Bound >= 0)
func Isfixedarray(t *Type) bool {
return t != nil && t.Etype == TARRAY && t.Bound >= 0
}
func Isslice(t *Type) int {
return bool2int(t != nil && t.Etype == TARRAY && t.Bound < 0)
func Isslice(t *Type) bool {
return t != nil && t.Etype == TARRAY && t.Bound < 0
}
func isblank(n *Node) bool {
@ -882,34 +902,34 @@ func isblanksym(s *Sym) bool {
return s != nil && s.Name == "_"
}
func Isinter(t *Type) int {
return bool2int(t != nil && t.Etype == TINTER)
func Isinter(t *Type) bool {
return t != nil && t.Etype == TINTER
}
func isnilinter(t *Type) int {
if !(Isinter(t) != 0) {
return 0
func isnilinter(t *Type) bool {
if !Isinter(t) {
return false
}
if t.Type != nil {
return 0
return false
}
return 1
return true
}
func isideal(t *Type) int {
func isideal(t *Type) bool {
if t == nil {
return 0
return false
}
if t == idealstring || t == idealbool {
return 1
return true
}
switch t.Etype {
case TNIL,
TIDEAL:
return 1
return true
}
return 0
return false
}
/*
@ -938,7 +958,7 @@ func methtype(t *Type, mustname int) *Type {
}
// check types
if !(issimple[t.Etype] != 0) {
if issimple[t.Etype] == 0 {
switch t.Etype {
default:
return nil
@ -979,13 +999,13 @@ type TypePairList struct {
next *TypePairList
}
func onlist(l *TypePairList, t1 *Type, t2 *Type) int {
func onlist(l *TypePairList, t1 *Type, t2 *Type) bool {
for ; l != nil; l = l.next {
if (l.t1 == t1 && l.t2 == t2) || (l.t1 == t2 && l.t2 == t1) {
return 1
return true
}
}
return 0
return false
}
// Return 1 if t1 and t2 are identical, following the spec rules.
@ -995,17 +1015,17 @@ func onlist(l *TypePairList, t1 *Type, t2 *Type) int {
// pointer (t1 == t2), so there's no chance of chasing cycles
// ad infinitum, so no need for a depth counter.
func Eqtype(t1 *Type, t2 *Type) bool {
return eqtype1(t1, t2, nil) != 0
return eqtype1(t1, t2, nil)
}
func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) int {
func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) bool {
var l TypePairList
if t1 == t2 {
return 1
return true
}
if t1 == nil || t2 == nil || t1.Etype != t2.Etype {
return 0
return false
}
if t1.Sym != nil || t2.Sym != nil {
// Special case: we keep byte and uint8 separate
@ -1013,21 +1033,21 @@ func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) int {
switch t1.Etype {
case TUINT8:
if (t1 == Types[TUINT8] || t1 == bytetype) && (t2 == Types[TUINT8] || t2 == bytetype) {
return 1
return true
}
case TINT,
TINT32:
if (t1 == Types[runetype.Etype] || t1 == runetype) && (t2 == Types[runetype.Etype] || t2 == runetype) {
return 1
return true
}
}
return 0
return false
}
if onlist(assumed_equal, t1, t2) != 0 {
return 1
if onlist(assumed_equal, t1, t2) {
return true
}
l.next = assumed_equal
l.t1 = t1
@ -1042,7 +1062,7 @@ func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) int {
if t1.Etype != TFIELD || t2.Etype != TFIELD {
Fatal("struct/interface missing field: %v %v", Tconv(t1, 0), Tconv(t2, 0))
}
if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !(eqtype1(t1.Type, t2.Type, &l) != 0) || !eqnote(t1.Note, t2.Note) {
if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !eqtype1(t1.Type, t2.Type, &l) || !eqnote(t1.Note, t2.Note) {
goto no
}
}
@ -1071,7 +1091,7 @@ func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) int {
if ta.Etype != TFIELD || tb.Etype != TFIELD {
Fatal("func struct missing field: %v %v", Tconv(ta, 0), Tconv(tb, 0))
}
if ta.Isddd != tb.Isddd || !(eqtype1(ta.Type, tb.Type, &l) != 0) {
if ta.Isddd != tb.Isddd || !eqtype1(ta.Type, tb.Type, &l) {
goto no
}
}
@ -1097,34 +1117,34 @@ func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) int {
}
}
if eqtype1(t1.Down, t2.Down, &l) != 0 && eqtype1(t1.Type, t2.Type, &l) != 0 {
if eqtype1(t1.Down, t2.Down, &l) && eqtype1(t1.Type, t2.Type, &l) {
goto yes
}
goto no
yes:
return 1
return true
no:
return 0
return false
}
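
eqtype1 threads an assumed_equal list through its recursion and onlist consults it, so identity on recursive named types is decided by assuming the pair equal rather than recursing forever. A self-contained sketch of that cycle-breaking pattern on an invented single-field type:

package main

import "fmt"

type node struct {
	name string
	elem *node
}

type pair struct{ a, b *node }

// equal structurally compares t1 and t2, carrying the pairs already
// assumed equal, the same trick eqtype1 plays with its TypePairList,
// so comparison of cyclic types terminates.
func equal(t1, t2 *node, assumed []pair) bool {
	if t1 == t2 {
		return true
	}
	if t1 == nil || t2 == nil {
		return false
	}
	for _, p := range assumed {
		if (p.a == t1 && p.b == t2) || (p.a == t2 && p.b == t1) {
			return true
		}
	}
	return equal(t1.elem, t2.elem, append(assumed, pair{t1, t2}))
}

func main() {
	a := &node{name: "a"}
	a.elem = a // self-referential type
	b := &node{name: "b"}
	b.elem = b
	fmt.Println(equal(a, b, nil)) // true: the cycle is assumed closed
}
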
// Are t1 and t2 equal struct types when field names are ignored?
// For deciding whether the result struct from g can be copied
// directly when compiling f(g()).
func eqtypenoname(t1 *Type, t2 *Type) int {
func eqtypenoname(t1 *Type, t2 *Type) bool {
if t1 == nil || t2 == nil || t1.Etype != TSTRUCT || t2.Etype != TSTRUCT {
return 0
return false
}
t1 = t1.Type
t2 = t2.Type
for {
if !Eqtype(t1, t2) {
return 0
return false
}
if t1 == nil {
return 1
return true
}
t1 = t1.Down
t2 = t2.Down
@ -1167,13 +1187,13 @@ func assignop(src *Type, dst *Type, why *string) int {
// both are empty interface types.
// For assignable but different non-empty interface types,
// we want to recompute the itab.
if Eqtype(src.Orig, dst.Orig) && (src.Sym == nil || dst.Sym == nil || isnilinter(src) != 0) {
if Eqtype(src.Orig, dst.Orig) && (src.Sym == nil || dst.Sym == nil || isnilinter(src)) {
return OCONVNOP
}
// 3. dst is an interface type and src implements dst.
if dst.Etype == TINTER && src.Etype != TNIL {
if implements(src, dst, &missing, &have, &ptr) != 0 {
if implements(src, dst, &missing, &have, &ptr) {
return OCONVIFACE
}
@ -1183,9 +1203,9 @@ func assignop(src *Type, dst *Type, why *string) int {
}
if why != nil {
if isptrto(src, TINTER) != 0 {
if isptrto(src, TINTER) {
*why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", Tconv(src, 0))
} else if have != nil && have.Sym == missing.Sym && have.Nointerface != 0 {
} else if have != nil && have.Sym == missing.Sym && have.Nointerface {
*why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", Tconv(src, 0), Tconv(dst, 0), Sconv(missing.Sym, 0))
} else if have != nil && have.Sym == missing.Sym {
*why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", Tconv(src, 0), Tconv(dst, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort|obj.FmtByte), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
@ -1201,7 +1221,7 @@ func assignop(src *Type, dst *Type, why *string) int {
return 0
}
if isptrto(dst, TINTER) != 0 {
if isptrto(dst, TINTER) {
if why != nil {
*why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", Tconv(dst, 0))
}
@ -1209,7 +1229,7 @@ func assignop(src *Type, dst *Type, why *string) int {
}
if src.Etype == TINTER && dst.Etype != TBLANK {
if why != nil && implements(dst, src, &missing, &have, &ptr) != 0 {
if why != nil && implements(dst, src, &missing, &have, &ptr) {
*why = ": need type assertion"
}
return 0
@ -1322,7 +1342,7 @@ func convertop(src *Type, dst *Type, why *string) int {
return ORUNESTR
}
if Isslice(src) != 0 && dst.Etype == TSTRING {
if Isslice(src) && dst.Etype == TSTRING {
if src.Type.Etype == bytetype.Etype {
return OARRAYBYTESTR
}
@ -1333,7 +1353,7 @@ func convertop(src *Type, dst *Type, why *string) int {
// 7. src is a string and dst is []byte or []rune.
// String to slice.
if src.Etype == TSTRING && Isslice(dst) != 0 {
if src.Etype == TSTRING && Isslice(dst) {
if dst.Type.Etype == bytetype.Etype {
return OSTRARRAYBYTE
}
@ -1408,23 +1428,23 @@ func assignconv(n *Node, t *Type, context string) *Node {
return r
}
func subtype(stp **Type, t *Type, d int) int {
func subtype(stp **Type, t *Type, d int) bool {
var st *Type
loop:
st = *stp
if st == nil {
return 0
return false
}
d++
if d >= 10 {
return 0
return false
}
switch st.Etype {
default:
return 0
return false
case TPTR32,
TPTR64,
@ -1434,13 +1454,13 @@ loop:
goto loop
case TANY:
if !(st.Copyany != 0) {
return 0
if st.Copyany == 0 {
return false
}
*stp = t
case TMAP:
if subtype(&st.Down, t, d) != 0 {
if subtype(&st.Down, t, d) {
break
}
stp = &st.Type
@ -1448,51 +1468,51 @@ loop:
case TFUNC:
for {
if subtype(&st.Type, t, d) != 0 {
if subtype(&st.Type, t, d) {
break
}
if subtype(&st.Type.Down.Down, t, d) != 0 {
if subtype(&st.Type.Down.Down, t, d) {
break
}
if subtype(&st.Type.Down, t, d) != 0 {
if subtype(&st.Type.Down, t, d) {
break
}
return 0
return false
}
case TSTRUCT:
for st = st.Type; st != nil; st = st.Down {
if subtype(&st.Type, t, d) != 0 {
return 1
if subtype(&st.Type, t, d) {
return true
}
}
return 0
return false
}
return 1
return true
}
/*
* Is this a 64-bit type?
*/
func Is64(t *Type) int {
func Is64(t *Type) bool {
if t == nil {
return 0
return false
}
switch Simtype[t.Etype] {
case TINT64,
TUINT64,
TPTR64:
return 1
return true
}
return 0
return false
}
/*
* Is a conversion between t1 and t2 a no-op?
*/
func Noconv(t1 *Type, t2 *Type) int {
func Noconv(t1 *Type, t2 *Type) bool {
var e1 int
var e2 int
@ -1502,35 +1522,35 @@ func Noconv(t1 *Type, t2 *Type) int {
switch e1 {
case TINT8,
TUINT8:
return bool2int(e2 == TINT8 || e2 == TUINT8)
return e2 == TINT8 || e2 == TUINT8
case TINT16,
TUINT16:
return bool2int(e2 == TINT16 || e2 == TUINT16)
return e2 == TINT16 || e2 == TUINT16
case TINT32,
TUINT32,
TPTR32:
return bool2int(e2 == TINT32 || e2 == TUINT32 || e2 == TPTR32)
return e2 == TINT32 || e2 == TUINT32 || e2 == TPTR32
case TINT64,
TUINT64,
TPTR64:
return bool2int(e2 == TINT64 || e2 == TUINT64 || e2 == TPTR64)
return e2 == TINT64 || e2 == TUINT64 || e2 == TPTR64
case TFLOAT32:
return bool2int(e2 == TFLOAT32)
return e2 == TFLOAT32
case TFLOAT64:
return bool2int(e2 == TFLOAT64)
return e2 == TFLOAT64
}
return 0
return false
}
func argtype(on *Node, t *Type) {
dowidth(t)
if !(subtype(&on.Type, t, 0) != 0) {
if !subtype(&on.Type, t, 0) {
Fatal("argtype: failed %v %v\n", Nconv(on, 0), Tconv(t, 0))
}
}
@ -1607,7 +1627,7 @@ func syslook(name string, copy int) *Node {
Fatal("syslook: can't find runtime.%s", name)
}
if !(copy != 0) {
if copy == 0 {
return s.Def
}
@ -1886,7 +1906,7 @@ func funcnext(s *Iter) *Type {
var fp *Type
fp = structnext(s)
if fp == nil && !(s.Done != 0) {
if fp == nil && s.Done == 0 {
s.Done = 1
fp = Structfirst(s, getinarg(s.Tfunc))
}
@ -2039,7 +2059,7 @@ func safeexpr(n *Node, init **NodeList) *Node {
}
// make a copy; must not be used as an lvalue
if islvalue(n) != 0 {
if islvalue(n) {
Fatal("missing lvalue case in safeexpr: %v", Nconv(n, 0))
}
return cheapexpr(n, init)
@ -2077,7 +2097,7 @@ func cheapexpr(n *Node, init **NodeList) *Node {
* assignment to it.
*/
func localexpr(n *Node, t *Type, init **NodeList) *Node {
if n.Op == ONAME && (!(n.Addrtaken != 0) || strings.HasPrefix(n.Sym.Name, "autotmp_")) && (n.Class == PAUTO || n.Class == PPARAM || n.Class == PPARAMOUT) && convertop(n.Type, t, nil) == OCONVNOP {
if n.Op == ONAME && (n.Addrtaken == 0 || strings.HasPrefix(n.Sym.Name, "autotmp_")) && (n.Class == PAUTO || n.Class == PPARAM || n.Class == PPARAMOUT) && convertop(n.Type, t, nil) == OCONVNOP {
return n
}
@ -2182,7 +2202,7 @@ func adddot1(s *Sym, t *Type, d int, save **Type, ignorecase int) int {
d--
for f = u.Type; f != nil; f = f.Down {
if !(f.Embedded != 0) {
if f.Embedded == 0 {
continue
}
if f.Sym == nil {
@ -2343,7 +2363,7 @@ func expand1(t *Type, d int, followptr int) {
}
for f = u.Type; f != nil; f = f.Down {
if !(f.Embedded != 0) {
if f.Embedded == 0 {
continue
}
if f.Sym == nil {
@ -2583,10 +2603,10 @@ func genwrapper(rcvr *Type, method *Type, newnam *Sym, iface int) {
dot = adddot(Nod(OXDOT, this.Left, newname(method.Sym)))
// generate call
if !(flag_race != 0) && Isptr[rcvr.Etype] != 0 && Isptr[methodrcvr.Etype] != 0 && method.Embedded != 0 && !(isifacemethod(method.Type) != 0) {
if flag_race == 0 && Isptr[rcvr.Etype] != 0 && Isptr[methodrcvr.Etype] != 0 && method.Embedded != 0 && !isifacemethod(method.Type) {
// generate tail call: adjust pointer receiver and jump to embedded method.
dot = dot.Left // skip final .M
if !(Isptr[dotlist[0].field.Type.Etype] != 0) {
if Isptr[dotlist[0].field.Type.Etype] == 0 {
dot = Nod(OADDR, dot, nil)
}
as = Nod(OAS, this.Left, Nod(OCONVNOP, dot, nil))
@ -2625,7 +2645,7 @@ func genwrapper(rcvr *Type, method *Type, newnam *Sym, iface int) {
// Set inl_nonlocal to whether we are calling a method on a
// type defined in a different package. Checked in inlvar.
if !(methodrcvr.Local != 0) {
if methodrcvr.Local == 0 {
inl_nonlocal = 1
}
@ -2666,7 +2686,6 @@ func hashfor(t *Type) *Node {
switch a {
case AMEM:
Fatal("hashfor with AMEM type")
fallthrough
case AINTER:
sym = Pkglookup("interhash", Runtimepkg)
@ -2760,10 +2779,9 @@ func genhash(sym *Sym, t *Type) {
switch t.Etype {
default:
Fatal("genhash %v", Tconv(t, 0))
fallthrough
case TARRAY:
if Isslice(t) != 0 {
if Isslice(t) {
Fatal("genhash %v", Tconv(t, 0))
}
@ -2798,7 +2816,7 @@ func genhash(sym *Sym, t *Type) {
call = Nod(OCALL, hashel, nil)
nx = Nod(OINDEX, np, ni)
nx.Bounded = 1
nx.Bounded = true
na = Nod(OADDR, nx, nil)
na.Etype = 1 // no escape to heap
call.List = list(call.List, na)
@ -2821,7 +2839,7 @@ func genhash(sym *Sym, t *Type) {
}
// If it's a memory field but it's padded, stop here.
if ispaddedfield(t1, t.Width) != 0 {
if ispaddedfield(t1, t.Width) {
t1 = t1.Down
} else {
continue
@ -2924,7 +2942,7 @@ func eqfield(p *Node, q *Node, field *Node) *Node {
nif = Nod(OIF, nil, nil)
nif.Ntest = Nod(ONE, nx, ny)
r = Nod(ORETURN, nil, nil)
r.List = list(r.List, Nodbool(0))
r.List = list(r.List, Nodbool(false))
nif.Nbody = list(nif.Nbody, r)
return nif
}
@ -2981,7 +2999,7 @@ func eqmem(p *Node, q *Node, field *Node, size int64) *Node {
nif.Ninit = list(nif.Ninit, call)
nif.Ntest = Nod(ONOT, call, nil)
r = Nod(ORETURN, nil, nil)
r.List = list(r.List, Nodbool(0))
r.List = list(r.List, Nodbool(false))
nif.Nbody = list(nif.Nbody, r)
return nif
}
@ -3040,10 +3058,9 @@ func geneq(sym *Sym, t *Type) {
switch t.Etype {
default:
Fatal("geneq %v", Tconv(t, 0))
fallthrough
case TARRAY:
if Isslice(t) != 0 {
if Isslice(t) {
Fatal("geneq %v", Tconv(t, 0))
}
@ -3064,14 +3081,14 @@ func geneq(sym *Sym, t *Type) {
// if p[i] != q[i] { return false }
nx = Nod(OINDEX, np, ni)
nx.Bounded = 1
nx.Bounded = true
ny = Nod(OINDEX, nq, ni)
ny.Bounded = 1
ny.Bounded = true
nif = Nod(OIF, nil, nil)
nif.Ntest = Nod(ONE, nx, ny)
r = Nod(ORETURN, nil, nil)
r.List = list(r.List, Nodbool(0))
r.List = list(r.List, Nodbool(false))
nif.Nbody = list(nif.Nbody, r)
nrange.Nbody = list(nrange.Nbody, nif)
fn.Nbody = list(fn.Nbody, nrange)
@ -3091,7 +3108,7 @@ func geneq(sym *Sym, t *Type) {
}
// If it's a memory field but it's padded, stop here.
if ispaddedfield(t1, t.Width) != 0 {
if ispaddedfield(t1, t.Width) {
t1 = t1.Down
} else {
continue
@ -3134,7 +3151,7 @@ func geneq(sym *Sym, t *Type) {
// return true
r = Nod(ORETURN, nil, nil)
r.List = list(r.List, Nodbool(1))
r.List = list(r.List, Nodbool(true))
fn.Nbody = list(fn.Nbody, r)
if Debug['r'] != 0 {
@ -3199,7 +3216,7 @@ func ifacelookdot(s *Sym, t *Type, followptr *int, ignorecase int) *Type {
return nil
}
func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) int {
func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) bool {
var t0 *Type
var im *Type
var tm *Type
@ -3209,7 +3226,7 @@ func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) int {
t0 = t
if t == nil {
return 0
return false
}
// if this is too slow,
@ -3226,18 +3243,18 @@ func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) int {
*m = im
*samename = tm
*ptr = 0
return 0
return false
}
}
*m = im
*samename = nil
*ptr = 0
return 0
return false
found:
}
return 1
return true
}
t = methtype(t, 0)
@ -3247,21 +3264,21 @@ func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) int {
for im = iface.Type; im != nil; im = im.Down {
imtype = methodfunc(im.Type, nil)
tm = ifacelookdot(im.Sym, t, &followptr, 0)
if tm == nil || tm.Nointerface != 0 || !Eqtype(methodfunc(tm.Type, nil), imtype) {
if tm == nil || tm.Nointerface || !Eqtype(methodfunc(tm.Type, nil), imtype) {
if tm == nil {
tm = ifacelookdot(im.Sym, t, &followptr, 1)
}
*m = im
*samename = tm
*ptr = 0
return 0
return false
}
// if pointer receiver in method,
// the method does not exist for value types.
rcvr = getthisx(tm.Type).Type.Type
if Isptr[rcvr.Etype] != 0 && !(Isptr[t0.Etype] != 0) && !(followptr != 0) && !(isifacemethod(tm.Type) != 0) {
if Isptr[rcvr.Etype] != 0 && Isptr[t0.Etype] == 0 && followptr == 0 && !isifacemethod(tm.Type) {
if false && Debug['r'] != 0 {
Yyerror("interface pointer mismatch")
}
@ -3269,11 +3286,11 @@ func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) int {
*m = im
*samename = nil
*ptr = 1
return 0
return false
}
}
return 1
return true
}
/*
@ -3470,7 +3487,7 @@ func powtwo(n *Node) int {
if n == nil || n.Op != OLITERAL || n.Type == nil {
goto no
}
if !(Isint[n.Type.Etype] != 0) {
if Isint[n.Type.Etype] == 0 {
goto no
}
@ -3483,7 +3500,7 @@ func powtwo(n *Node) int {
b = b << 1
}
if !(Issigned[n.Type.Etype] != 0) {
if Issigned[n.Type.Etype] == 0 {
goto no
}
@ -3895,7 +3912,7 @@ func isbadimport(path_ *Strlit) bool {
func checknil(x *Node, init **NodeList) {
var n *Node
if Isinter(x.Type) != 0 {
if Isinter(x.Type) {
x = Nod(OITAB, x, nil)
typecheck(&x, Erv)
}
@ -3909,7 +3926,7 @@ func checknil(x *Node, init **NodeList) {
* Can this type be stored directly in an interface word?
* Yes, if the representation is a single pointer.
*/
func isdirectiface(t *Type) int {
func isdirectiface(t *Type) bool {
switch t.Etype {
case TPTR32,
TPTR64,
@ -3917,16 +3934,16 @@ func isdirectiface(t *Type) int {
TMAP,
TFUNC,
TUNSAFEPTR:
return 1
return true
// Array of 1 direct iface type can be direct.
case TARRAY:
return bool2int(t.Bound == 1 && isdirectiface(t.Type) != 0)
return t.Bound == 1 && isdirectiface(t.Type)
// Struct with 1 field of direct iface type can be direct.
case TSTRUCT:
return bool2int(t.Type != nil && t.Type.Down == nil && isdirectiface(t.Type.Type) != 0)
return t.Type != nil && t.Type.Down == nil && isdirectiface(t.Type.Type)
}
return 0
return false
}


@ -281,7 +281,7 @@ func casebody(sw *Node, typeswvar *Node) {
var go_ *Node
var br *Node
var lno int32
var needvar int32
var needvar bool
if sw.List == nil {
return
@ -301,7 +301,7 @@ func casebody(sw *Node, typeswvar *Node) {
Fatal("casebody %v", Oconv(int(n.Op), 0))
}
n.Op = OCASE
needvar = int32(bool2int(count(n.List) != 1 || n.List.N.Op == OLITERAL))
needvar = count(n.List) != 1 || n.List.N.Op == OLITERAL
go_ = Nod(OGOTO, newlabel_swt(), nil)
if n.List == nil {
@ -332,7 +332,7 @@ func casebody(sw *Node, typeswvar *Node) {
}
stat = list(stat, Nod(OLABEL, go_.Left, nil))
if typeswvar != nil && needvar != 0 && n.Nname != nil {
if typeswvar != nil && needvar && n.Nname != nil {
var l *NodeList
l = list1(Nod(ODCL, n.Nname, nil))
@ -410,7 +410,7 @@ func mkcaselist(sw *Node, arg int) *Case {
continue
}
if Istype(n.Left.Type, TINTER) != 0 {
if Istype(n.Left.Type, TINTER) {
c.type_ = Ttypevar
continue
}
@ -552,7 +552,7 @@ func exprswitch(sw *Node) {
casebody(sw, nil)
arg = Snorm
if Isconst(sw.Ntest, CTBOOL) != 0 {
if Isconst(sw.Ntest, CTBOOL) {
arg = Strue
if sw.Ntest.Val.U.Bval == 0 {
arg = Sfalse
@ -572,7 +572,7 @@ func exprswitch(sw *Node) {
cas = nil
if arg == Strue || arg == Sfalse {
exprname = Nodbool(bool2int(arg == Strue))
exprname = Nodbool(arg == Strue)
} else if consttype(sw.Ntest) >= 0 {
// leave constants to enable dead code elimination (issue 9608)
exprname = sw.Ntest
@ -600,7 +600,7 @@ loop:
}
// deal with the variables one-at-a-time
if !(okforcmp[t.Etype] != 0) || c0.type_ != Texprconst {
if okforcmp[t.Etype] == 0 || c0.type_ != Texprconst {
a = exprbsw(c0, 1, arg)
cas = list(cas, a)
c0 = c0.link
@ -738,7 +738,7 @@ func typeswitch(sw *Node) {
}
walkexpr(&sw.Ntest.Right, &sw.Ninit)
if !(Istype(sw.Ntest.Right.Type, TINTER) != 0) {
if !Istype(sw.Ntest.Right.Type, TINTER) {
Yyerror("type switch must be on an interface")
return
}
@ -764,7 +764,7 @@ func typeswitch(sw *Node) {
typecheck(&hashname, Erv)
t = sw.Ntest.Right.Type
if isnilinter(t) != 0 {
if isnilinter(t) {
a = syslook("efacethash", 1)
} else {
a = syslook("ifacethash", 1)
@ -871,7 +871,7 @@ func walkswitch(sw *Node) {
* both have inserted OBREAK statements
*/
if sw.Ntest == nil {
sw.Ntest = Nodbool(1)
sw.Ntest = Nodbool(true)
typecheck(&sw.Ntest, Erv)
}
@ -933,11 +933,11 @@ func typecheckswitch(n *Node) {
t = Types[TBOOL]
}
if t != nil {
if !(okforeq[t.Etype] != 0) {
if okforeq[t.Etype] == 0 {
Yyerror("cannot switch on %v", Nconv(n.Ntest, obj.FmtLong))
} else if t.Etype == TARRAY && !(Isfixedarray(t) != 0) {
} else if t.Etype == TARRAY && !Isfixedarray(t) {
nilonly = "slice"
} else if t.Etype == TARRAY && Isfixedarray(t) != 0 && algtype1(t, nil) == ANOEQ {
} else if t.Etype == TARRAY && Isfixedarray(t) && algtype1(t, nil) == ANOEQ {
Yyerror("cannot switch on %v", Nconv(n.Ntest, obj.FmtLong))
} else if t.Etype == TSTRUCT && algtype1(t, &badtype) == ANOEQ {
Yyerror("cannot switch on %v (struct containing %v cannot be compared)", Nconv(n.Ntest, obj.FmtLong), Tconv(badtype, 0))
@ -976,27 +976,27 @@ func typecheckswitch(n *Node) {
if ll.N.Op == OTYPE {
Yyerror("type %v is not an expression", Tconv(ll.N.Type, 0))
} else if ll.N.Type != nil && !(assignop(ll.N.Type, t, nil) != 0) && !(assignop(t, ll.N.Type, nil) != 0) {
} else if ll.N.Type != nil && assignop(ll.N.Type, t, nil) == 0 && assignop(t, ll.N.Type, nil) == 0 {
if n.Ntest != nil {
Yyerror("invalid case %v in switch on %v (mismatched types %v and %v)", Nconv(ll.N, 0), Nconv(n.Ntest, 0), Tconv(ll.N.Type, 0), Tconv(t, 0))
} else {
Yyerror("invalid case %v in switch (mismatched types %v and bool)", Nconv(ll.N, 0), Tconv(ll.N.Type, 0))
}
} else if nilonly != "" && !(Isconst(ll.N, CTNIL) != 0) {
} else if nilonly != "" && !Isconst(ll.N, CTNIL) {
Yyerror("invalid case %v in switch (can only compare %s %v to nil)", Nconv(ll.N, 0), nilonly, Nconv(n.Ntest, 0))
}
case Etype: // type switch
if ll.N.Op == OLITERAL && Istype(ll.N.Type, TNIL) != 0 {
if ll.N.Op == OLITERAL && Istype(ll.N.Type, TNIL) {
} else if ll.N.Op != OTYPE && ll.N.Type != nil { // should this be ||?
Yyerror("%v is not a type", Nconv(ll.N, obj.FmtLong))
// reset to original type
ll.N = n.Ntest.Right
} else if ll.N.Type.Etype != TINTER && t.Etype == TINTER && !(implements(ll.N.Type, t, &missing, &have, &ptr) != 0) {
if have != nil && !(missing.Broke != 0) && !(have.Broke != 0) {
} else if ll.N.Type.Etype != TINTER && t.Etype == TINTER && !implements(ll.N.Type, t, &missing, &have, &ptr) {
if have != nil && missing.Broke == 0 && have.Broke == 0 {
Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (wrong type for %v method)\n\thave %v%v\n\twant %v%v", Nconv(n.Ntest.Right, obj.FmtLong), Tconv(ll.N.Type, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort))
} else if !(missing.Broke != 0) {
} else if missing.Broke == 0 {
Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (missing %v method)", Nconv(n.Ntest.Right, obj.FmtLong), Tconv(ll.N.Type, 0), Sconv(missing.Sym, 0))
}
}
@ -1008,7 +1008,7 @@ func typecheckswitch(n *Node) {
ll = ncase.List
nvar = ncase.Nname
if nvar != nil {
if ll != nil && ll.Next == nil && ll.N.Type != nil && !(Istype(ll.N.Type, TNIL) != 0) {
if ll != nil && ll.Next == nil && ll.N.Type != nil && !Istype(ll.N.Type, TNIL) {
// single entry type switch
nvar.Ntype = typenod(ll.N.Type)
} else {

File diff suppressed because it is too large.


@ -161,18 +161,18 @@ ret:
return n
}
func isunsafebuiltin(n *Node) int {
func isunsafebuiltin(n *Node) bool {
if n == nil || n.Op != ONAME || n.Sym == nil || n.Sym.Pkg != unsafepkg {
return 0
return false
}
if n.Sym.Name == "Sizeof" {
return 1
return true
}
if n.Sym.Name == "Offsetof" {
return 1
return true
}
if n.Sym.Name == "Alignof" {
return 1
return true
}
return 0
return false
}

File diff suppressed because it is too large.


@ -851,6 +851,7 @@ type yyLexer interface {
}
type yyParser interface {
Parse(yyLexer) int
Lookahead() int
}
@ -862,6 +863,13 @@ func (p *yyParserImpl) Lookahead() int {
return p.lookahead()
}
func yyNewParser() yyParser {
p := &yyParserImpl{
lookahead: func() int { return -1 },
}
return p
}
const yyFlag = -1000
func yyTokname(c int) string {
@ -919,6 +927,10 @@ out:
}
func yyParse(yylex yyLexer) int {
return yyNewParser().Parse(yylex)
}
func (yyrcvr *yyParserImpl) Parse(yylex yyLexer) int {
var yyn int
var yylval yySymType
var yyVAL yySymType
@ -930,19 +942,12 @@ func yyParse(yylex yyLexer) int {
yystate := 0
yychar := -1
yytoken := -1 // yychar translated into internal numbering
if lx, ok := yylex.(interface {
SetParser(yyParser)
}); ok {
p := &yyParserImpl{
lookahead: func() int { return yychar },
}
lx.SetParser(p)
yyrcvr.lookahead = func() int { return yychar }
defer func() {
// Make sure we report no lookahead when not parsing.
yychar = -1
yytoken = -1
}()
}
yyp := -1
goto yystack
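
The regenerated parser exposes its lookahead through a closure over Parse's local yychar, replacing the old SetParser handshake. A stripped-down sketch of the closure wiring; the names are hypothetical, not goyacc's:

package main

import "fmt"

type parser struct {
	lookahead func() int
}

func newParser() *parser {
	return &parser{lookahead: func() int { return -1 }} // not parsing yet
}

func (p *parser) Lookahead() int { return p.lookahead() }

func (p *parser) Parse(tokens []int) {
	char := -1
	p.lookahead = func() int { return char }
	defer func() { char = -1 }() // report no lookahead when not parsing
	for _, t := range tokens {
		char = t
		fmt.Println("lookahead:", p.Lookahead())
	}
}

func main() {
	p := newParser()
	fmt.Println(p.Lookahead()) // -1
	p.Parse([]int{10, 20})
	fmt.Println(p.Lookahead()) // -1 again after Parse returns
}
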


@ -35,11 +35,11 @@ package obj
// THE SOFTWARE.
type ar_hdr struct {
Name string
Date string
Uid string
Gid string
Mode string
Size string
Fmag string
name string
date string
uid string
gid string
mode string
size string
fmag string
}


@ -437,7 +437,7 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
// split it into two instructions:
// ADD $-100004, R13
// MOVW R14, 0(R13)
q = new(obj.Prog)
q = ctxt.NewProg()
p.Scond &^= C_WBIT
*q = *p
@ -462,14 +462,14 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
p.From = *a
p.From.Reg = 0
p.From.Type = obj.TYPE_CONST
p.To = obj.Zprog.To
p.To = obj.Addr{}
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R13
// make q into p but load/store from 0(R13)
q.Spadj = 0
*a2 = obj.Zprog.From
*a2 = obj.Addr{}
a2.Type = obj.TYPE_MEM
a2.Reg = REG_R13
a2.Sym = nil
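
Throughout this file, new(obj.Prog) gives way to ctxt.NewProg(), which hands back a Prog already bound to its Link context (the later hunks also drop the now-redundant manual Ctxt assignments). A minimal sketch of that constructor-on-context pattern with stand-in types:

package main

import "fmt"

// Prog and Link are simplified stand-ins for obj.Prog and obj.Link.
type Prog struct {
	Ctxt *Link
	As   int16
}

type Link struct{ progs int }

// NewProg allocates a Prog already bound to its Link, the pattern
// this diff adopts in place of new(obj.Prog) plus q.Ctxt = ...
func (ctxt *Link) NewProg() *Prog {
	ctxt.progs++
	return &Prog{Ctxt: ctxt}
}

func main() {
	ctxt := &Link{}
	q := ctxt.NewProg() // was: q := new(Prog); q.Ctxt = ctxt
	q.As = 1
	fmt.Println(q.Ctxt == ctxt, ctxt.progs) // true 1
}
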
@ -514,7 +514,7 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
if p.Scond&(C_PBIT|C_WBIT) != 0 {
ctxt.Diag("unsupported instruction (.P/.W): %v", p)
}
q = new(obj.Prog)
q = ctxt.NewProg()
*q = *p
if p.To.Type == obj.TYPE_MEM {
a2 = &q.To
@ -535,12 +535,12 @@ func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint3
p.From = *a
p.From.Type = obj.TYPE_ADDR
p.To = obj.Zprog.To
p.To = obj.Addr{}
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R11
// make q into p but load/store from 0(R11)
*a2 = obj.Zprog.From
*a2 = obj.Addr{}
a2.Type = obj.TYPE_MEM
a2.Reg = REG_R11
@ -606,7 +606,7 @@ func span5(ctxt *obj.Link, cursym *obj.LSym) {
p = p.Link
for ; p != nil || ctxt.Blitrl != nil; (func() { op = p; p = p.Link })() {
if p == nil {
if checkpool(ctxt, op, 0) != 0 {
if checkpool(ctxt, op, 0) {
p = op
continue
}
@ -638,7 +638,7 @@ func span5(ctxt *obj.Link, cursym *obj.LSym) {
if p.As == ACASE {
i = int(casesz(ctxt, p))
}
if checkpool(ctxt, op, i) != 0 {
if checkpool(ctxt, op, i) {
p = op
continue
}
@ -749,7 +749,7 @@ func span5(ctxt *obj.Link, cursym *obj.LSym) {
}
cursym.Size = int64(c)
if !(bflag != 0) {
if bflag == 0 {
break
}
}
@ -834,16 +834,16 @@ func span5(ctxt *obj.Link, cursym *obj.LSym) {
* drop the pool now, and branch round it.
* this happens only in extended basic blocks that exceed 4k.
*/
func checkpool(ctxt *obj.Link, p *obj.Prog, sz int) int {
func checkpool(ctxt *obj.Link, p *obj.Prog, sz int) bool {
if pool.size >= 0xff0 || immaddr(int32((p.Pc+int64(sz)+4)+4+int64(12+pool.size)-int64(pool.start+8))) == 0 {
return flushpool(ctxt, p, 1, 0)
} else if p.Link == nil {
return flushpool(ctxt, p, 2, 0)
}
return 0
return false
}
func flushpool(ctxt *obj.Link, p *obj.Prog, skip int, force int) int {
func flushpool(ctxt *obj.Link, p *obj.Prog, skip int, force int) bool {
var q *obj.Prog
if ctxt.Blitrl != nil {
@ -851,23 +851,21 @@ func flushpool(ctxt *obj.Link, p *obj.Prog, skip int, force int) int {
if false && skip == 1 {
fmt.Printf("note: flush literal pool at %x: len=%d ref=%x\n", uint64(p.Pc+4), pool.size, pool.start)
}
q = new(obj.Prog)
q = ctxt.NewProg()
q.As = AB
q.To.Type = obj.TYPE_BRANCH
q.Pcond = p.Link
q.Link = ctxt.Blitrl
q.Lineno = p.Lineno
q.Ctxt = p.Ctxt
ctxt.Blitrl = q
} else if !(force != 0) && (p.Pc+int64(12+pool.size)-int64(pool.start) < 2048) { // 12 take into account the maximum nacl literal pool alignment padding size
return 0
} else if force == 0 && (p.Pc+int64(12+pool.size)-int64(pool.start) < 2048) { // 12 take into account the maximum nacl literal pool alignment padding size
return false
}
if ctxt.Headtype == obj.Hnacl && pool.size%16 != 0 {
// if pool is not multiple of 16 bytes, add an alignment marker
q = new(obj.Prog)
q = ctxt.NewProg()
q.As = ADATABUNDLEEND
q.Ctxt = p.Ctxt
ctxt.Elitrl.Link = q
ctxt.Elitrl = q
}
@ -888,10 +886,10 @@ func flushpool(ctxt *obj.Link, p *obj.Prog, skip int, force int) int {
pool.size = 0
pool.start = 0
pool.extra = 0
return 1
return true
}
return 0
return false
}
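
flushpool implements the comment above: ARM loads large constants PC-relative from a literal pool, so once the pool drifts out of the load's addressing range it must be emitted and branched around. A toy model of the range decision, mirroring only the 2048-byte test in this hunk; everything else is simplified:

package main

import "fmt"

const poolRange = 2048

// mustFlush reports whether the pending literal pool has to be
// emitted now: either the caller forces it, or the distance from
// the pool's start would exceed the PC-relative load range.
func mustFlush(pc, poolStart, poolSize int64, force bool) bool {
	return force || pc+12+poolSize-poolStart >= poolRange
}

func main() {
	fmt.Println(mustFlush(100, 0, 64, false))   // false: still in range
	fmt.Println(mustFlush(2000, 0, 512, false)) // true: emit and branch over
}
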
func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
@ -901,9 +899,8 @@ func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
c = aclass(ctxt, a)
t = obj.Zprog
t.Ctxt = ctxt
t.As = AWORD
t.Ctxt = p.Ctxt
switch c {
default:
@ -941,12 +938,9 @@ func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
if ctxt.Headtype == obj.Hnacl && pool.size%16 == 0 {
// start a new data bundle
q = new(obj.Prog)
*q = obj.Zprog
q = ctxt.NewProg()
q.As = ADATABUNDLE
q.Pc = int64(pool.size)
q.Ctxt = p.Ctxt
pool.size += 4
if ctxt.Blitrl == nil {
ctxt.Blitrl = q
@ -958,7 +952,7 @@ func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
ctxt.Elitrl = q
}
q = new(obj.Prog)
q = ctxt.NewProg()
*q = t
q.Pc = int64(pool.size)
@ -1740,7 +1734,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 13: /* op $lcon, [R], R */
o1 = omvl(ctxt, p, &p.From, REGTMP)
if !(o1 != 0) {
if o1 == 0 {
break
}
o2 = oprrr(ctxt, int(p.As), int(p.Scond))
@ -1836,7 +1830,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 30: /* mov/movb/movbu R,L(R) */
o1 = omvl(ctxt, p, &p.To, REGTMP)
if !(o1 != 0) {
if o1 == 0 {
break
}
r = int(p.To.Reg)
@ -1851,7 +1845,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 31: /* mov/movbu L(R),R -> lr[b] */
o1 = omvl(ctxt, p, &p.From, REGTMP)
if !(o1 != 0) {
if o1 == 0 {
break
}
r = int(p.From.Reg)
@ -1866,7 +1860,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 34: /* mov $lacon,R */
o1 = omvl(ctxt, p, &p.From, REGTMP)
if !(o1 != 0) {
if o1 == 0 {
break
}
@ -1984,7 +1978,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 52: /* floating point store, int32 offset UGLY */
o1 = omvl(ctxt, p, &p.To, REGTMP)
if !(o1 != 0) {
if o1 == 0 {
break
}
r = int(p.To.Reg)
@ -1997,7 +1991,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 53: /* floating point load, int32 offset UGLY */
o1 = omvl(ctxt, p, &p.From, REGTMP)
if !(o1 != 0) {
if o1 == 0 {
break
}
r = int(p.From.Reg)
@ -2122,7 +2116,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 64: /* mov/movb/movbu R,addr */
o1 = omvl(ctxt, p, &p.To, REGTMP)
if !(o1 != 0) {
if o1 == 0 {
break
}
o2 = osr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond))
@ -2134,7 +2128,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 65: /* mov/movbu addr,R */
o1 = omvl(ctxt, p, &p.From, REGTMP)
if !(o1 != 0) {
if o1 == 0 {
break
}
o2 = olr(ctxt, 0, REGTMP, int(p.To.Reg), int(p.Scond))
@ -2149,7 +2143,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 68: /* floating point store -> ADDR */
o1 = omvl(ctxt, p, &p.To, REGTMP)
if !(o1 != 0) {
if o1 == 0 {
break
}
o2 = ofsr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
@ -2161,7 +2155,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 69: /* floating point load <- ADDR */
o1 = omvl(ctxt, p, &p.From, REGTMP)
if !(o1 != 0) {
if o1 == 0 {
break
}
o2 = ofsr(ctxt, int(p.As), int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
@ -2197,7 +2191,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 72: /* movh/movhu R,L(R) -> strh */
o1 = omvl(ctxt, p, &p.To, REGTMP)
if !(o1 != 0) {
if o1 == 0 {
break
}
r = int(p.To.Reg)
@ -2209,7 +2203,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 73: /* movb/movh/movhu L(R),R -> ldrsb/ldrsh/ldrh */
o1 = omvl(ctxt, p, &p.From, REGTMP)
if !(o1 != 0) {
if o1 == 0 {
break
}
r = int(p.From.Reg)
@ -2394,7 +2388,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 93: /* movb/movh/movhu addr,R -> ldrsb/ldrsh/ldrh */
o1 = omvl(ctxt, p, &p.From, REGTMP)
if !(o1 != 0) {
if o1 == 0 {
break
}
o2 = olhr(ctxt, 0, REGTMP, int(p.To.Reg), int(p.Scond))
@ -2411,7 +2405,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
case 94: /* movh/movhu R,addr -> strh */
o1 = omvl(ctxt, p, &p.To, REGTMP)
if !(o1 != 0) {
if o1 == 0 {
break
}
o2 = oshr(ctxt, int(p.From.Reg), 0, REGTMP, int(p.Scond))
@ -2725,10 +2719,10 @@ func olr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
ctxt.Diag(".nil on LDR/STR instruction")
}
o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if !(sc&C_PBIT != 0) {
if sc&C_PBIT == 0 {
o |= 1 << 24
}
if !(sc&C_UBIT != 0) {
if sc&C_UBIT == 0 {
o |= 1 << 23
}
if sc&C_WBIT != 0 {
@ -2759,7 +2753,7 @@ func olhr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
ctxt.Diag(".nil on LDRH/STRH instruction")
}
o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if !(sc&C_PBIT != 0) {
if sc&C_PBIT == 0 {
o |= 1 << 24
}
if sc&C_WBIT != 0 {
@ -2820,7 +2814,7 @@ func ofsr(ctxt *obj.Link, a int, r int, v int32, b int, sc int, p *obj.Prog) uin
ctxt.Diag(".nil on FLDR/FSTR instruction")
}
o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if !(sc&C_PBIT != 0) {
if sc&C_PBIT == 0 {
o |= 1 << 24
}
if sc&C_WBIT != 0 {
@ -2860,7 +2854,7 @@ func ofsr(ctxt *obj.Link, a int, r int, v int32, b int, sc int, p *obj.Prog) uin
func omvl(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, dr int) uint32 {
var v int32
var o1 uint32
if !(p.Pcond != nil) {
if p.Pcond == nil {
aclass(ctxt, a)
v = immrot(^uint32(ctxt.Instoffset))
if v == 0 {


@ -225,7 +225,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
cursym.Args = p.To.U.Argsize
if ctxt.Debugzerostack != 0 {
if autoffset != 0 && !(p.From3.Offset&obj.NOSPLIT != 0) {
if autoffset != 0 && p.From3.Offset&obj.NOSPLIT == 0 {
// MOVW $4(R13), R1
p = obj.Appendp(ctxt, p)
@ -370,7 +370,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
}
}
if !(autosize != 0) && !(cursym.Text.Mark&LEAF != 0) {
if autosize == 0 && cursym.Text.Mark&LEAF == 0 {
if ctxt.Debugvlog != 0 {
fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Name)
obj.Bflush(ctxt.Bso)
@ -381,13 +381,13 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
if cursym.Text.Mark&LEAF != 0 {
cursym.Leaf = 1
if !(autosize != 0) {
if autosize == 0 {
break
}
}
if !(p.From3.Offset&obj.NOSPLIT != 0) {
p = stacksplit(ctxt, p, autosize, bool2int(!(cursym.Text.From3.Offset&obj.NEEDCTXT != 0))) // emit split check
if p.From3.Offset&obj.NOSPLIT == 0 {
p = stacksplit(ctxt, p, autosize, cursym.Text.From3.Offset&obj.NEEDCTXT == 0) // emit split check
}
// MOVW.W R14,$-autosize(SP)
@ -493,9 +493,9 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
case obj.ARET:
obj.Nocache(p)
if cursym.Text.Mark&LEAF != 0 {
if !(autosize != 0) {
if autosize == 0 {
p.As = AB
p.From = obj.Zprog.From
p.From = obj.Addr{}
if p.To.Sym != nil { // retjmp
p.To.Type = obj.TYPE_BRANCH
} else {
@ -662,8 +662,8 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
}
}
func isfloatreg(a *obj.Addr) int {
return bool2int(a.Type == obj.TYPE_REG && REG_F0 <= a.Reg && a.Reg <= REG_F15)
func isfloatreg(a *obj.Addr) bool {
return a.Type == obj.TYPE_REG && REG_F0 <= a.Reg && a.Reg <= REG_F15
}
func softfloat(ctxt *obj.Link, cursym *obj.LSym) {
@ -687,7 +687,7 @@ func softfloat(ctxt *obj.Link, cursym *obj.LSym) {
for p = cursym.Text; p != nil; p = p.Link {
switch p.As {
case AMOVW:
if isfloatreg(&p.To) != 0 || isfloatreg(&p.From) != 0 {
if isfloatreg(&p.To) || isfloatreg(&p.From) {
goto soft
}
goto notsoft
@ -721,13 +721,13 @@ func softfloat(ctxt *obj.Link, cursym *obj.LSym) {
}
soft:
if !(wasfloat != 0) || (p.Mark&LABEL != 0) {
next = new(obj.Prog)
if wasfloat == 0 || (p.Mark&LABEL != 0) {
next = ctxt.NewProg()
*next = *p
// BL _sfloat(SB)
*p = obj.Zprog
*p = obj.Prog{}
p.Ctxt = ctxt
p.Link = next
p.As = ABL
p.To.Type = obj.TYPE_BRANCH
@ -745,7 +745,7 @@ func softfloat(ctxt *obj.Link, cursym *obj.LSym) {
}
}
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt int) *obj.Prog {
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool) *obj.Prog {
// MOVW g_stackguard(g), R1
p = obj.Appendp(ctxt, p)
@ -856,7 +856,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt int) *obj.P
if ctxt.Cursym.Cfunc != 0 {
p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
} else {
p.To.Sym = ctxt.Symmorestack[noctxt]
p.To.Sym = ctxt.Symmorestack[bool2int(noctxt)]
}
// BLS start
@ -885,7 +885,7 @@ func follow(ctxt *obj.Link, s *obj.LSym) {
ctxt.Cursym = s
firstp = new(obj.Prog)
firstp = ctxt.NewProg()
lastp = firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
@ -948,7 +948,7 @@ loop:
if q != nil && q.As != obj.ATEXT {
p.Mark |= FOLL
p = q
if !(p.Mark&FOLL != 0) {
if p.Mark&FOLL == 0 {
goto loop
}
}
@ -979,9 +979,9 @@ loop:
copy:
for {
r = new(obj.Prog)
r = ctxt.NewProg()
*r = *p
if !(r.Mark&FOLL != 0) {
if r.Mark&FOLL == 0 {
fmt.Printf("can't happen 1\n")
}
r.Mark |= FOLL
@ -1003,10 +1003,10 @@ loop:
}
r.Pcond = p.Link
r.Link = p.Pcond
if !(r.Link.Mark&FOLL != 0) {
if r.Link.Mark&FOLL == 0 {
xfol(ctxt, r.Link, last)
}
if !(r.Pcond.Mark&FOLL != 0) {
if r.Pcond.Mark&FOLL == 0 {
fmt.Printf("can't happen 2\n")
}
return
@ -1014,13 +1014,12 @@ loop:
}
a = AB
q = new(obj.Prog)
q = ctxt.NewProg()
q.As = int16(a)
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_BRANCH
q.To.Offset = p.Pc
q.Pcond = p
q.Ctxt = p.Ctxt
p = q
}


@ -142,7 +142,7 @@ func Setuintxx(ctxt *Link, s *LSym, off int64, v uint64, wid int64) int64 {
return off + wid
}
func Adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
func adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
var off int64
off = s.Size
@ -150,23 +150,23 @@ func Adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
return off
}
func Adduint8(ctxt *Link, s *LSym, v uint8) int64 {
return Adduintxx(ctxt, s, uint64(v), 1)
func adduint8(ctxt *Link, s *LSym, v uint8) int64 {
return adduintxx(ctxt, s, uint64(v), 1)
}
func Adduint16(ctxt *Link, s *LSym, v uint16) int64 {
return Adduintxx(ctxt, s, uint64(v), 2)
func adduint16(ctxt *Link, s *LSym, v uint16) int64 {
return adduintxx(ctxt, s, uint64(v), 2)
}
func Adduint32(ctxt *Link, s *LSym, v uint32) int64 {
return Adduintxx(ctxt, s, uint64(v), 4)
return adduintxx(ctxt, s, uint64(v), 4)
}
func Adduint64(ctxt *Link, s *LSym, v uint64) int64 {
return Adduintxx(ctxt, s, v, 8)
return adduintxx(ctxt, s, v, 8)
}
func Setuint8(ctxt *Link, s *LSym, r int64, v uint8) int64 {
func setuint8(ctxt *Link, s *LSym, r int64, v uint8) int64 {
return Setuintxx(ctxt, s, r, uint64(v), 1)
}
@ -174,7 +174,7 @@ func setuint16(ctxt *Link, s *LSym, r int64, v uint16) int64 {
return Setuintxx(ctxt, s, r, uint64(v), 2)
}
func Setuint32(ctxt *Link, s *LSym, r int64, v uint32) int64 {
func setuint32(ctxt *Link, s *LSym, r int64, v uint32) int64 {
return Setuintxx(ctxt, s, r, uint64(v), 4)
}
@ -182,7 +182,7 @@ func setuint64(ctxt *Link, s *LSym, r int64, v uint64) int64 {
return Setuintxx(ctxt, s, r, v, 8)
}
func Addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
func addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
var i int64
var r *Reloc
@ -222,11 +222,11 @@ func addpcrelplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
return i + int64(r.Siz)
}
func Addaddr(ctxt *Link, s *LSym, t *LSym) int64 {
return Addaddrplus(ctxt, s, t, 0)
func addaddr(ctxt *Link, s *LSym, t *LSym) int64 {
return addaddrplus(ctxt, s, t, 0)
}
func Setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
func setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
var r *Reloc
if s.Type == 0 {
@ -247,11 +247,11 @@ func Setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
return off + int64(r.Siz)
}
func Setaddr(ctxt *Link, s *LSym, off int64, t *LSym) int64 {
return Setaddrplus(ctxt, s, off, t, 0)
func setaddr(ctxt *Link, s *LSym, off int64, t *LSym) int64 {
return setaddrplus(ctxt, s, off, t, 0)
}
func Addsize(ctxt *Link, s *LSym, t *LSym) int64 {
func addsize(ctxt *Link, s *LSym, t *LSym) int64 {
var i int64
var r *Reloc
@ -270,7 +270,7 @@ func Addsize(ctxt *Link, s *LSym, t *LSym) int64 {
return i + int64(r.Siz)
}
func Addaddrplus4(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
func addaddrplus4(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
var i int64
var r *Reloc

View file

@ -17,8 +17,6 @@ var Framepointer_enabled int
var Fieldtrack_enabled int
var Zprog Prog
// Toolchain experiments.
// These are controlled by the GOEXPERIMENT environment
// variable recorded when the toolchain is built.
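The experiment variables above (Framepointer_enabled, Fieldtrack_enabled) are set from the GOEXPERIMENT value recorded at toolchain build time. A hypothetical sketch of that parsing, not the actual code in this package:

	// Sketch only; the real parser and the experiment names may differ.
	for _, f := range strings.Split(goexperiment, ",") {
		switch f {
		case "framepointer":
			Framepointer_enabled = 1
		case "fieldtrack":
			Fieldtrack_enabled = 1
		}
	}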
@ -72,11 +70,11 @@ func double2ieee(ieee *uint64, f float64) {
func Nopout(p *Prog) {
p.As = ANOP
p.Scond = Zprog.Scond
p.From = Zprog.From
p.From3 = Zprog.From3
p.Reg = Zprog.Reg
p.To = Zprog.To
p.Scond = 0
p.From = Addr{}
p.From3 = Addr{}
p.Reg = 0
p.To = Addr{}
}
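Nopout now assigns zero values directly instead of copying fields out of the zero-valued Zprog, which this CL removes along with its other uses. The two forms are interchangeable because a composite literal with no fields is exactly the zero value:

	var zero Addr
	fmt.Println(zero == Addr{}) // true: Addr{} is the zero value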
func Nocache(p *Prog) {

View file

@ -1853,7 +1853,7 @@ func span8(ctxt *obj.Link, s *obj.LSym) {
ctxt.Diag("span must be looping")
log.Fatalf("bad code")
}
if !(loop != 0) {
if loop == 0 {
break
}
}
@ -3355,7 +3355,7 @@ found:
case Zlit:
for ; ; z++ {
op = int(o.op[z])
if !(op != 0) {
if op == 0 {
break
}
ctxt.Andptr[0] = byte(op)
@ -3365,7 +3365,7 @@ found:
case Zlitm_r:
for ; ; z++ {
op = int(o.op[z])
if !(op != 0) {
if op == 0 {
break
}
ctxt.Andptr[0] = byte(op)
@ -3400,7 +3400,7 @@ found:
tmp2 := z
z++
op = int(o.op[tmp2])
if !(op != 0) {
if op == 0 {
break
}
ctxt.Andptr[0] = byte(op)

View file

@ -38,16 +38,16 @@ import (
"math"
)
func canuselocaltls(ctxt *obj.Link) int {
func canuselocaltls(ctxt *obj.Link) bool {
switch ctxt.Headtype {
case obj.Hlinux,
obj.Hnacl,
obj.Hplan9,
obj.Hwindows:
return 0
return false
}
return 1
return true
}
func progedit(ctxt *obj.Link, p *obj.Prog) {
@ -56,7 +56,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
var q *obj.Prog
// See obj6.c for discussion of TLS.
if canuselocaltls(ctxt) != 0 {
if canuselocaltls(ctxt) {
// Reduce TLS initial exec model to TLS local exec model.
// Sequences like
// MOVL TLS, BX
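The rewrite that follows (cut off by this hunk) folds the two-instruction initial-exec sequence into a single local-exec access. Schematically, with hypothetical helper predicates (isTLSBaseLoad and isIndexedThroughTLS are illustrations, not liblink functions):

	// MOVL TLS, BX            (p)
	// MOVL 0(BX)(TLS*1), CX   (q)
	// becomes
	// MOVL TLS, CX
	if q := p.Link; q != nil && isTLSBaseLoad(p) && isIndexedThroughTLS(q, p.To.Reg) {
		p.To = q.To   // retarget the base load at the final destination
		obj.Nopout(q) // drop the second instruction
	}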
@ -261,13 +261,13 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
q = nil
if !(p.From3.Offset&obj.NOSPLIT != 0) || (p.From3.Offset&obj.WRAPPER != 0) {
if p.From3.Offset&obj.NOSPLIT == 0 || (p.From3.Offset&obj.WRAPPER != 0) {
p = obj.Appendp(ctxt, p)
p = load_g_cx(ctxt, p) // load g into CX
}
if !(cursym.Text.From3.Offset&obj.NOSPLIT != 0) {
p = stacksplit(ctxt, p, autoffset, bool2int(!(cursym.Text.From3.Offset&obj.NEEDCTXT != 0)), &q) // emit split check
if cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
p = stacksplit(ctxt, p, autoffset, cursym.Text.From3.Offset&obj.NEEDCTXT == 0, &q) // emit split check
}
if autoffset != 0 {
@ -367,7 +367,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
p2.Pcond = p
}
if ctxt.Debugzerostack != 0 && autoffset != 0 && !(cursym.Text.From3.Offset&obj.NOSPLIT != 0) {
if ctxt.Debugzerostack != 0 && autoffset != 0 && cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
// 8l -Z means zero the stack frame on entry.
// This slows down function calls but can help avoid
// false positives in garbage collection.
@ -507,7 +507,7 @@ func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
// Returns last new instruction.
// On return, *jmpok is the instruction that should jump
// to the stack frame allocation if no split is needed.
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt int, jmpok **obj.Prog) *obj.Prog {
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool, jmpok **obj.Prog) *obj.Prog {
var q *obj.Prog
var q1 *obj.Prog
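From the caller's side (the preprocess call above), the contract looks like:

	var q *obj.Prog // receives the jump-over instruction
	if cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
		p = stacksplit(ctxt, p, autoffset, cursym.Text.From3.Offset&obj.NEEDCTXT == 0, &q)
	}
	// p is the last emitted instruction of the check; q is the branch to
	// patch so the no-split path jumps straight to frame allocation.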
@ -659,7 +659,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt int, jmpok
if ctxt.Cursym.Cfunc != 0 {
p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
} else {
p.To.Sym = ctxt.Symmorestack[noctxt]
p.To.Sym = ctxt.Symmorestack[bool2int(noctxt)]
}
p = obj.Appendp(ctxt, p)
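bool2int is not part of this diff; presumably it is the obvious helper, used here to pick between the two morestack symbols:

	func bool2int(b bool) int {
		if b {
			return 1
		}
		return 0
	}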
@ -684,27 +684,27 @@ func follow(ctxt *obj.Link, s *obj.LSym) {
ctxt.Cursym = s
firstp = new(obj.Prog)
firstp = ctxt.NewProg()
lastp = firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
s.Text = firstp.Link
}
func nofollow(a int) int {
func nofollow(a int) bool {
switch a {
case obj.AJMP,
obj.ARET,
AIRETL,
AIRETW,
obj.AUNDEF:
return 1
return true
}
return 0
return false
}
func pushpop(a int) int {
func pushpop(a int) bool {
switch a {
case APUSHL,
APUSHFL,
@ -714,10 +714,10 @@ func pushpop(a int) int {
APOPFL,
APOPW,
APOPFW:
return 1
return true
}
return 0
return false
}
func relinv(a int) int {
@ -802,7 +802,7 @@ loop:
continue
}
if nofollow(a) != 0 || pushpop(a) != 0 {
if nofollow(a) || pushpop(a) {
break // NOTE(rsc): arm does goto copy
}
if q.Pcond == nil || q.Pcond.Mark != 0 {
@ -839,7 +839,7 @@ loop:
/* */
}
}
q = new(obj.Prog)
q = ctxt.NewProg()
q.As = obj.AJMP
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_BRANCH
@ -856,7 +856,7 @@ loop:
a = int(p.As)
/* continue loop with what comes after p */
if nofollow(a) != 0 {
if nofollow(a) {
return
}
if p.Pcond != nil && a != obj.ACALL {

View file

@ -31,50 +31,6 @@
package obj
import (
"fmt"
"os"
"path"
"strings"
)
func addlib(ctxt *Link, src, obj, pathname string) {
name := path.Clean(pathname)
// runtime.a -> runtime
short := strings.TrimSuffix(name, ".a")
// already loaded?
for i := range ctxt.Library {
if ctxt.Library[i].Pkg == short {
return
}
}
var pname string
// runtime -> runtime.a for search
if (!(ctxt.Windows != 0) && name[0] == '/') || (ctxt.Windows != 0 && name[1] == ':') {
pname = name
} else {
// try dot, -L "libdir", and then goroot.
for _, dir := range ctxt.Libdir {
pname = dir + "/" + name
if _, err := os.Stat(pname); !os.IsNotExist(err) {
break
}
}
}
pname = path.Clean(pname)
// runtime.a -> runtime
pname = strings.TrimSuffix(pname, ".a")
if ctxt.Debugvlog > 1 && ctxt.Bso != nil {
fmt.Fprintf(ctxt.Bso, "%5.2f addlib: %s %s pulls in %s\n", Cputime(), obj, src, pname)
}
Addlibpath(ctxt, src, obj, pname, name)
}
/*
* add library to library list.
* srcref: src file referring to package
@ -82,24 +38,6 @@ func addlib(ctxt *Link, src, obj, pathname string) {
* file: object file, e.g., /home/rsc/go/pkg/container/vector.a
* pkg: package import path, e.g. container/vector
*/
func Addlibpath(ctxt *Link, srcref, objref, file, pkg string) {
for _, lib := range ctxt.Library {
if lib.File == file {
return
}
}
if ctxt.Debugvlog > 1 && ctxt.Bso != nil {
fmt.Fprintf(ctxt.Bso, "%5.2f addlibpath: srcref: %s objref: %s file: %s pkg: %s\n", Cputime(), srcref, objref, file, pkg)
}
ctxt.Library = append(ctxt.Library, Library{
Objref: objref,
Srcref: srcref,
File: file,
Pkg: pkg,
})
}
const (
LOG = 5

View file

@ -280,14 +280,14 @@ type Pcdata struct {
}
type Pciter struct {
D Pcdata
P []byte
Pc uint32
Nextpc uint32
Pcscale uint32
Value int32
Start int
Done int
d Pcdata
p []byte
pc uint32
nextpc uint32
pcscale uint32
value int32
start int
done int
}
// An Addr is an argument to an instruction.

View file

@ -188,7 +188,7 @@ func Writeobjdirect(ctxt *Link, b *Biobuf) {
}
}
if !(found != 0) {
if found == 0 {
p = Appendp(ctxt, s.Text)
p.As = AFUNCDATA
p.From.Type = TYPE_CONST

View file

@ -307,7 +307,7 @@ func getvarint(pp *[]byte) uint32 {
v |= uint32(p[0]&0x7F) << uint(shift)
tmp7 := p
p = p[1:]
if !(tmp7[0]&0x80 != 0) {
if tmp7[0]&0x80 == 0 {
break
}
}
@ -316,45 +316,45 @@ func getvarint(pp *[]byte) uint32 {
return v
}
func Pciternext(it *Pciter) {
func pciternext(it *Pciter) {
var v uint32
var dv int32
it.Pc = it.Nextpc
if it.Done != 0 {
it.pc = it.nextpc
if it.done != 0 {
return
}
if -cap(it.P) >= -cap(it.D.P[len(it.D.P):]) {
it.Done = 1
if -cap(it.p) >= -cap(it.d.P[len(it.d.P):]) {
it.done = 1
return
}
// value delta
v = getvarint(&it.P)
v = getvarint(&it.p)
if v == 0 && !(it.Start != 0) {
it.Done = 1
if v == 0 && it.start == 0 {
it.done = 1
return
}
it.Start = 0
it.start = 0
dv = int32(v>>1) ^ (int32(v<<31) >> 31)
it.Value += dv
it.value += dv
// pc delta
v = getvarint(&it.P)
v = getvarint(&it.p)
it.Nextpc = it.Pc + v*it.Pcscale
it.nextpc = it.pc + v*it.pcscale
}
func Pciterinit(ctxt *Link, it *Pciter, d *Pcdata) {
it.D = *d
it.P = it.D.P
it.Pc = 0
it.Nextpc = 0
it.Value = -1
it.Start = 1
it.Done = 0
it.Pcscale = uint32(ctxt.Arch.Minlc)
Pciternext(it)
func pciterinit(ctxt *Link, it *Pciter, d *Pcdata) {
it.d = *d
it.p = it.d.P
it.pc = 0
it.nextpc = 0
it.value = -1
it.start = 1
it.done = 0
it.pcscale = uint32(ctxt.Arch.Minlc)
pciternext(it)
}
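Each pcdata entry is a (value delta, pc delta) pair of varints, with the value delta zig-zag encoded so that small negative deltas stay short. A standalone decoder for one value delta, mirroring the scheme above (sketch, not the toolchain code):

	package main

	import "fmt"

	// getvarint decodes one base-128 varint and advances *pp past it.
	func getvarint(pp *[]byte) uint32 {
		p := *pp
		var v uint32
		var shift uint
		for {
			b := p[0]
			p = p[1:]
			v |= uint32(b&0x7F) << shift
			shift += 7
			if b&0x80 == 0 {
				break
			}
		}
		*pp = p
		return v
	}

	func main() {
		buf := []byte{0x05}
		v := getvarint(&buf)
		dv := int32(v>>1) ^ (int32(v<<31) >> 31) // zig-zag: 0,1,2,3,... -> 0,-1,1,-2,...
		fmt.Println(dv)                          // -3
	}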

View file

@ -470,16 +470,14 @@ func span9(ctxt *obj.Link, cursym *obj.LSym) {
if (o.type_ == 16 || o.type_ == 17) && p.Pcond != nil {
otxt = p.Pcond.Pc - c
if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
q = new(obj.Prog)
q.Ctxt = p.Ctxt
q = ctxt.NewProg()
q.Link = p.Link
p.Link = q
q.As = ABR
q.To.Type = obj.TYPE_BRANCH
q.Pcond = p.Pcond
p.Pcond = q
q = new(obj.Prog)
q.Ctxt = p.Ctxt
q = ctxt.NewProg()
q.Link = p.Link
p.Link = q
q.As = ABR
@ -534,12 +532,12 @@ func span9(ctxt *obj.Link, cursym *obj.LSym) {
}
}
func isint32(v int64) int {
return bool2int(int64(int32(v)) == v)
func isint32(v int64) bool {
return int64(int32(v)) == v
}
func isuint32(v uint64) int {
return bool2int(uint64(uint32(v)) == v)
func isuint32(v uint64) bool {
return uint64(uint32(v)) == v
}
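These report whether a 64-bit constant survives a round trip through the narrower type; the boundary cases:

	isint32(1<<31 - 1)  // true:  max int32
	isint32(1 << 31)    // false: overflows int32
	isint32(-1 << 31)   // true:  min int32
	isuint32(1<<32 - 1) // true:  max uint32
	isuint32(1 << 32)   // false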
func aclass(ctxt *obj.Link, a *obj.Addr) int {
@ -637,7 +635,7 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
if -BIG <= ctxt.Instoffset && ctxt.Instoffset <= BIG {
return C_SACON
}
if isint32(ctxt.Instoffset) != 0 {
if isint32(ctxt.Instoffset) {
return C_LACON
}
return C_DACON
@ -689,10 +687,10 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
if ctxt.Instoffset <= 0xffff {
return C_ANDCON
}
if ctxt.Instoffset&0xffff == 0 && isuint32(uint64(ctxt.Instoffset)) != 0 { /* && (instoffset & (1<<31)) == 0) */
if ctxt.Instoffset&0xffff == 0 && isuint32(uint64(ctxt.Instoffset)) { /* && (instoffset & (1<<31)) == 0) */
return C_UCON
}
if isint32(ctxt.Instoffset) != 0 || isuint32(uint64(ctxt.Instoffset)) != 0 {
if isint32(ctxt.Instoffset) || isuint32(uint64(ctxt.Instoffset)) {
return C_LCON
}
return C_DCON
@ -701,10 +699,10 @@ func aclass(ctxt *obj.Link, a *obj.Addr) int {
if ctxt.Instoffset >= -0x8000 {
return C_ADDCON
}
if ctxt.Instoffset&0xffff == 0 && isint32(ctxt.Instoffset) != 0 {
if ctxt.Instoffset&0xffff == 0 && isint32(ctxt.Instoffset) {
return C_UCON
}
if isint32(ctxt.Instoffset) != 0 {
if isint32(ctxt.Instoffset) {
return C_LCON
}
return C_DCON
@ -1407,20 +1405,20 @@ func addaddrreloc(ctxt *obj.Link, s *obj.LSym, o1 *uint32, o2 *uint32) {
/*
* 32-bit masks
*/
func getmask(m []byte, v uint32) int {
func getmask(m []byte, v uint32) bool {
var i int
m[1] = 0
m[0] = m[1]
if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
if getmask(m, ^v) != 0 {
if getmask(m, ^v) {
i = int(m[0])
m[0] = m[1] + 1
m[1] = byte(i - 1)
return 1
return true
}
return 0
return false
}
for i = 0; i < 32; i++ {
@ -1429,25 +1427,25 @@ func getmask(m []byte, v uint32) int {
for {
m[1] = byte(i)
i++
if !(i < 32 && v&(1<<uint(31-i)) != 0) {
if i >= 32 || v&(1<<uint(31-i)) == 0 {
break
}
}
for ; i < 32; i++ {
if v&(1<<uint(31-i)) != 0 {
return 0
return false
}
}
return 1
return true
}
}
return 0
return false
}
func maskgen(ctxt *obj.Link, p *obj.Prog, m []byte, v uint32) {
if !(getmask(m, v) != 0) {
if !getmask(m, v) {
ctxt.Diag("cannot generate mask #%x\n%v", v, p)
}
}
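Concretely, getmask accepts a mask that is one contiguous run of set bits in PPC's numbering (bit 0 is the most significant), or the complement of such a run, and records the run's first and last bit:

	var m [2]byte
	getmask(m[:], 0x00ffff00) // true:  bits 8..23 set, m = [8, 23]
	getmask(m[:], 0xff0000ff) // true:  wrapped run (MB > ME), m = [24, 7]
	getmask(m[:], 0x00ff00ff) // false: two separate runs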
@ -1455,7 +1453,7 @@ func maskgen(ctxt *obj.Link, p *obj.Prog, m []byte, v uint32) {
/*
* 64-bit masks (rldic etc)
*/
func getmask64(m []byte, v uint64) int {
func getmask64(m []byte, v uint64) bool {
var i int
m[1] = 0
@ -1466,25 +1464,25 @@ func getmask64(m []byte, v uint64) int {
for {
m[1] = byte(i)
i++
if !(i < 64 && v&(uint64(1)<<uint(63-i)) != 0) {
if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
break
}
}
for ; i < 64; i++ {
if v&(uint64(1)<<uint(63-i)) != 0 {
return 0
return false
}
}
return 1
return true
}
}
return 0
return false
}
func maskgen64(ctxt *obj.Link, p *obj.Prog, m []byte, v uint64) {
if !(getmask64(m, v) != 0) {
if !getmask64(m, v) {
ctxt.Diag("cannot generate mask #%x\n%v", v, p)
}
}
@ -1493,7 +1491,7 @@ func loadu32(r int, d int64) uint32 {
var v int32
v = int32(d >> 16)
if isuint32(uint64(d)) != 0 {
if isuint32(uint64(d)) {
return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
}
return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
@ -1574,7 +1572,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
log.Fatalf("invalid handling of %v", p)
}
v >>= 16
if r == REGZERO && isuint32(uint64(d)) != 0 {
if r == REGZERO && isuint32(uint64(d)) {
o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
break
}
@ -1862,7 +1860,7 @@ func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
if r == 0 {
r = int(p.To.Reg)
}
if p.As == AADD && (!(r0iszero != 0 /*TypeKind(100016)*/) && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
ctxt.Diag("literal operation on R0\n%v", p)
}
o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As)+ALAST)), uint32(p.To.Reg), uint32(r), uint32(v)>>16)

View file

@ -289,7 +289,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
p.Pcond = q1
}
if !(q1.Mark&LEAF != 0) {
if q1.Mark&LEAF == 0 {
q1.Mark |= LABEL
}
} else {
@ -341,15 +341,15 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
}
p.To.Offset = int64(autosize) - 8
if !(p.From3.Offset&obj.NOSPLIT != 0) {
p = stacksplit(ctxt, p, autosize, bool2int(!(cursym.Text.From3.Offset&obj.NEEDCTXT != 0))) // emit split check
if p.From3.Offset&obj.NOSPLIT == 0 {
p = stacksplit(ctxt, p, autosize, cursym.Text.From3.Offset&obj.NEEDCTXT == 0) // emit split check
}
q = p
if autosize != 0 {
/* use MOVDU to adjust R1 when saving R31, if autosize is small */
if !(cursym.Text.Mark&LEAF != 0) && autosize >= -BIG && autosize <= BIG {
if cursym.Text.Mark&LEAF == 0 && autosize >= -BIG && autosize <= BIG {
mov = AMOVDU
aoffset = int(-autosize)
} else {
@ -362,7 +362,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
q.To.Reg = REGSP
q.Spadj = +autosize
}
} else if !(cursym.Text.Mark&LEAF != 0) {
} else if cursym.Text.Mark&LEAF == 0 {
if ctxt.Debugvlog != 0 {
fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Name)
obj.Bflush(ctxt.Bso)
@ -499,9 +499,9 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
}
if cursym.Text.Mark&LEAF != 0 {
if !(autosize != 0) {
if autosize == 0 {
p.As = ABR
p.From = obj.Zprog.From
p.From = obj.Addr{}
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_LR
p.Mark |= BRANCH
@ -515,7 +515,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
p.To.Reg = REGSP
p.Spadj = -autosize
q = p.Ctxt.NewProg()
q = ctxt.NewProg()
q.As = ABR
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_REG
@ -535,7 +535,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
p.To.Type = obj.TYPE_REG
p.To.Reg = REGTMP
q = p.Ctxt.NewProg()
q = ctxt.NewProg()
q.As = AMOVD
q.Lineno = p.Lineno
q.From.Type = obj.TYPE_REG
@ -549,7 +549,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
if false {
// Debug bad returns
q = p.Ctxt.NewProg()
q = ctxt.NewProg()
q.As = AMOVD
q.Lineno = p.Lineno
q.From.Type = obj.TYPE_MEM
@ -564,7 +564,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
}
if autosize != 0 {
q = p.Ctxt.NewProg()
q = ctxt.NewProg()
q.As = AADD
q.Lineno = p.Lineno
q.From.Type = obj.TYPE_CONST
@ -577,7 +577,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
p.Link = q
}
q1 = p.Ctxt.NewProg()
q1 = ctxt.NewProg()
q1.As = ABR
q1.Lineno = p.Lineno
q1.To.Type = obj.TYPE_REG
@ -641,7 +641,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
q = p;
}
*/
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt int) *obj.Prog {
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool) *obj.Prog {
var q *obj.Prog
var q1 *obj.Prog
@ -774,7 +774,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt int) *obj.P
if ctxt.Cursym.Cfunc != 0 {
p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
} else {
p.To.Sym = ctxt.Symmorestack[noctxt]
p.To.Sym = ctxt.Symmorestack[bool2int(noctxt)]
}
// BR start
@ -799,7 +799,7 @@ func follow(ctxt *obj.Link, s *obj.LSym) {
ctxt.Cursym = s
firstp = new(obj.Prog)
firstp = ctxt.NewProg()
lastp = firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
@ -853,7 +853,7 @@ loop:
p = p.Link
xfol(ctxt, p, last)
p = q
if p != nil && !(p.Mark&FOLL != 0) {
if p != nil && p.Mark&FOLL == 0 {
goto loop
}
return
@ -862,7 +862,7 @@ loop:
if q != nil {
p.Mark |= FOLL
p = q
if !(p.Mark&FOLL != 0) {
if p.Mark&FOLL == 0 {
goto loop
}
}
@ -885,19 +885,19 @@ loop:
if a == ABR || a == ARETURN || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID {
goto copy
}
if !(q.Pcond != nil) || (q.Pcond.Mark&FOLL != 0) {
if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
continue
}
b = relinv(a)
if !(b != 0) {
if b == 0 {
continue
}
copy:
for {
r = new(obj.Prog)
r = ctxt.NewProg()
*r = *p
if !(r.Mark&FOLL != 0) {
if r.Mark&FOLL == 0 {
fmt.Printf("can't happen 1\n")
}
r.Mark |= FOLL
@ -916,10 +916,10 @@ loop:
r.As = int16(b)
r.Pcond = p.Link
r.Link = p.Pcond
if !(r.Link.Mark&FOLL != 0) {
if r.Link.Mark&FOLL == 0 {
xfol(ctxt, r.Link, last)
}
if !(r.Pcond.Mark&FOLL != 0) {
if r.Pcond.Mark&FOLL == 0 {
fmt.Printf("can't happen 2\n")
}
return
@ -927,7 +927,7 @@ loop:
}
a = ABR
q = p.Ctxt.NewProg()
q = ctxt.NewProg()
q.As = int16(a)
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_BRANCH

View file

@ -100,7 +100,7 @@ var headers = []struct {
}{"windowsgui", Hwindows},
}
func Headtype(name string) int {
func headtype(name string) int {
var i int
for i = 0; i < len(headers); i++ {
@ -146,7 +146,7 @@ func Linknew(arch *LinkArch) *Link {
ctxt.Pathname = buf
ctxt.Headtype = Headtype(Getgoos())
ctxt.Headtype = headtype(Getgoos())
if ctxt.Headtype < 0 {
log.Fatalf("unknown goos %s", Getgoos())
}
@ -222,7 +222,7 @@ func Linknew(arch *LinkArch) *Link {
return ctxt
}
func Linknewsym(ctxt *Link, symb string, v int) *LSym {
func linknewsym(ctxt *Link, symb string, v int) *LSym {
var s *LSym
s = new(LSym)
@ -261,11 +261,11 @@ func _lookup(ctxt *Link, symb string, v int, creat int) *LSym {
return s
}
}
if !(creat != 0) {
if creat == 0 {
return nil
}
s = Linknewsym(ctxt, symb, v)
s = linknewsym(ctxt, symb, v)
s.Extname = s.Name
s.Hash = ctxt.Hash[h]
ctxt.Hash[h] = s
@ -278,7 +278,7 @@ func Linklookup(ctxt *Link, name string, v int) *LSym {
}
// read-only lookup
func Linkrlookup(ctxt *Link, name string, v int) *LSym {
func linkrlookup(ctxt *Link, name string, v int) *LSym {
return _lookup(ctxt, name, v, 0)
}
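The creat flag is the only difference between the two entry points: Linklookup interns a new LSym on a miss, while the read-only variant returns nil. A usage sketch (symbol names arbitrary):

	s := Linklookup(ctxt, "runtime.morestackc", 0) // created if missing
	if linkrlookup(ctxt, "maybe.defined", 0) == nil {
		// miss: nil, no symbol created
	}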

View file

@ -2118,9 +2118,9 @@ var opindex [ALAST + 1]*Optab
// It does not seem to be necessary for any other systems. This is probably working
// around a Solaris-specific bug that should be fixed differently, but we don't know
// what that bug is. And this does fix it.
func isextern(s *obj.LSym) int {
func isextern(s *obj.LSym) bool {
// All the Solaris dynamic imports from libc.so begin with "libc_".
return bool2int(strings.HasPrefix(s.Name, "libc_"))
return strings.HasPrefix(s.Name, "libc_")
}
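So, for example (names invented for illustration):

	isextern(&obj.LSym{Name: "libc_mmap"}) // true: treated as a libc import
	isextern(&obj.LSym{Name: "main.main"}) // false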
// single-instruction no-ops of various lengths.
@ -2348,7 +2348,7 @@ func span6(ctxt *obj.Link, s *obj.LSym) {
ctxt.Diag("span must be looping")
log.Fatalf("loop")
}
if !(loop != 0) {
if loop == 0 {
break
}
}
@ -2589,7 +2589,7 @@ func oclass(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int {
switch a.Name {
case obj.NAME_EXTERN,
obj.NAME_STATIC:
if a.Sym != nil && isextern(a.Sym) != 0 {
if a.Sym != nil && isextern(a.Sym) {
return Yi32
}
return Yiauto // use pc-relative addressing
@ -2997,7 +2997,7 @@ func vaddr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r *obj.Reloc) int64 {
log.Fatalf("reloc")
}
if isextern(s) != 0 {
if isextern(s) {
r.Siz = 4
r.Type = obj.R_ADDR
} else {
@ -3074,7 +3074,7 @@ func asmandsz(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int)
switch a.Name {
case obj.NAME_EXTERN,
obj.NAME_STATIC:
if !(isextern(a.Sym) != 0) {
if !isextern(a.Sym) {
goto bad
}
base = REG_NONE
@ -3136,7 +3136,7 @@ func asmandsz(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int)
ctxt.Rexflag |= regrex[base]&Rxb | rex
if base == REG_NONE || (REG_CS <= base && base <= REG_GS) || base == REG_TLS {
if (a.Sym == nil || !(isextern(a.Sym) != 0)) && base == REG_NONE && (a.Name == obj.NAME_STATIC || a.Name == obj.NAME_EXTERN) || ctxt.Asmode != 64 {
if (a.Sym == nil || !isextern(a.Sym)) && base == REG_NONE && (a.Name == obj.NAME_STATIC || a.Name == obj.NAME_EXTERN) || ctxt.Asmode != 64 {
ctxt.Andptr[0] = byte(0<<6 | 5<<0 | r<<3)
ctxt.Andptr = ctxt.Andptr[1:]
goto putrelv
@ -3370,18 +3370,18 @@ var ymovtab = []Movtab{
Movtab{0, 0, 0, 0, [4]uint8{}},
}
func isax(a *obj.Addr) int {
func isax(a *obj.Addr) bool {
switch a.Reg {
case REG_AX,
REG_AL,
REG_AH:
return 1
return true
}
if a.Index == REG_AX {
return 1
return true
}
return 0
return false
}
func subreg(p *obj.Prog, from int, to int) {
@ -3587,7 +3587,7 @@ found:
case Zlit:
for ; ; z++ {
op = int(o.op[z])
if !(op != 0) {
if op == 0 {
break
}
ctxt.Andptr[0] = byte(op)
@ -3597,7 +3597,7 @@ found:
case Zlitm_r:
for ; ; z++ {
op = int(o.op[z])
if !(op != 0) {
if op == 0 {
break
}
ctxt.Andptr[0] = byte(op)
@ -3652,7 +3652,7 @@ found:
tmp1 := z
z++
op = int(o.op[tmp1])
if !(op != 0) {
if op == 0 {
break
}
ctxt.Andptr[0] = byte(op)
@ -4097,7 +4097,7 @@ bad:
z = int(p.From.Reg)
if p.From.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
if isax(&p.To) != 0 || p.To.Type == obj.TYPE_NONE {
if isax(&p.To) || p.To.Type == obj.TYPE_NONE {
// We certainly don't want to exchange
// with AX if the op is MUL or DIV.
ctxt.Andptr[0] = 0x87
@ -4122,7 +4122,7 @@ bad:
z = int(p.To.Reg)
if p.To.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
if isax(&p.From) != 0 {
if isax(&p.From) {
ctxt.Andptr[0] = 0x87
ctxt.Andptr = ctxt.Andptr[1:] /* xchg rhs,bx */
asmando(ctxt, p, &p.To, reg[REG_BX])

View file

@ -38,14 +38,14 @@ import (
"math"
)
func canuselocaltls(ctxt *obj.Link) int {
func canuselocaltls(ctxt *obj.Link) bool {
switch ctxt.Headtype {
case obj.Hplan9,
obj.Hwindows:
return 0
return false
}
return 1
return true
}
func progedit(ctxt *obj.Link, p *obj.Prog) {
@ -86,7 +86,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog) {
// access TLS, and they are rewritten appropriately first here in
// liblink and then finally using relocations in the linker.
if canuselocaltls(ctxt) != 0 {
if canuselocaltls(ctxt) {
// Reduce TLS initial exec model to TLS local exec model.
// Sequences like
// MOVQ TLS, BX
@ -366,7 +366,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
cursym.Args = int32(textarg)
cursym.Locals = int32(p.To.Offset)
if autoffset < obj.StackSmall && !(p.From3.Offset&obj.NOSPLIT != 0) {
if autoffset < obj.StackSmall && p.From3.Offset&obj.NOSPLIT == 0 {
for q = p; q != nil; q = q.Link {
if q.As == obj.ACALL {
goto noleaf
@ -381,13 +381,13 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
}
q = nil
if !(p.From3.Offset&obj.NOSPLIT != 0) || (p.From3.Offset&obj.WRAPPER != 0) {
if p.From3.Offset&obj.NOSPLIT == 0 || (p.From3.Offset&obj.WRAPPER != 0) {
p = obj.Appendp(ctxt, p)
p = load_g_cx(ctxt, p) // load g into CX
}
if !(cursym.Text.From3.Offset&obj.NOSPLIT != 0) {
p = stacksplit(ctxt, p, autoffset, int32(textarg), bool2int(!(cursym.Text.From3.Offset&obj.NEEDCTXT != 0)), &q) // emit split check
if cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
p = stacksplit(ctxt, p, autoffset, int32(textarg), cursym.Text.From3.Offset&obj.NEEDCTXT == 0, &q) // emit split check
}
if autoffset != 0 {
@ -540,7 +540,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
p2.Pcond = p
}
if ctxt.Debugzerostack != 0 && autoffset != 0 && !(cursym.Text.From3.Offset&obj.NOSPLIT != 0) {
if ctxt.Debugzerostack != 0 && autoffset != 0 && cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
// 6l -Z means zero the stack frame on entry.
// This slows down function calls but can help avoid
// false positives in garbage collection.
@ -722,7 +722,7 @@ func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
// Returns last new instruction.
// On return, *jmpok is the instruction that should jump
// to the stack frame allocation if no split is needed.
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32, noctxt int, jmpok **obj.Prog) *obj.Prog {
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32, noctxt bool, jmpok **obj.Prog) *obj.Prog {
var q *obj.Prog
var q1 *obj.Prog
var cmp int
@ -853,7 +853,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32, noc
if ctxt.Cursym.Cfunc != 0 {
p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
} else {
p.To.Sym = ctxt.Symmorestack[noctxt]
p.To.Sym = ctxt.Symmorestack[bool2int(noctxt)]
}
p = obj.Appendp(ctxt, p)
@ -878,14 +878,14 @@ func follow(ctxt *obj.Link, s *obj.LSym) {
ctxt.Cursym = s
firstp = new(obj.Prog)
firstp = ctxt.NewProg()
lastp = firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
s.Text = firstp.Link
}
func nofollow(a int) int {
func nofollow(a int) bool {
switch a {
case obj.AJMP,
obj.ARET,
@ -896,13 +896,13 @@ func nofollow(a int) int {
ARETFQ,
ARETFW,
obj.AUNDEF:
return 1
return true
}
return 0
return false
}
func pushpop(a int) int {
func pushpop(a int) bool {
switch a {
case APUSHL,
APUSHFL,
@ -916,10 +916,10 @@ func pushpop(a int) int {
APOPFQ,
APOPW,
APOPFW:
return 1
return true
}
return 0
return false
}
func relinv(a int) int {
@ -1004,7 +1004,7 @@ loop:
continue
}
if nofollow(a) != 0 || pushpop(a) != 0 {
if nofollow(a) || pushpop(a) {
break // NOTE(rsc): arm does goto copy
}
if q.Pcond == nil || q.Pcond.Mark != 0 {
@ -1041,7 +1041,7 @@ loop:
/* */
}
}
q = new(obj.Prog)
q = ctxt.NewProg()
q.As = obj.AJMP
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_BRANCH
@ -1058,7 +1058,7 @@ loop:
a = int(p.As)
/* continue loop with what comes after p */
if nofollow(a) != 0 {
if nofollow(a) {
return
}
if p.Pcond != nil && a != obj.ACALL {

View file

@ -54,7 +54,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.OSLICESTR,
gc.OSLICE3,
gc.OSLICE3ARR:
if res.Op != gc.ONAME || !(res.Addable != 0) {
if res.Op != gc.ONAME || res.Addable == 0 {
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
cgen(&n1, res)
@ -64,7 +64,7 @@ func cgen(n *gc.Node, res *gc.Node) {
return
case gc.OEFACE:
if res.Op != gc.ONAME || !(res.Addable != 0) {
if res.Op != gc.ONAME || res.Addable == 0 {
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
cgen(&n1, res)
@ -90,7 +90,7 @@ func cgen(n *gc.Node, res *gc.Node) {
}
}
if gc.Isfat(n.Type) != 0 {
if gc.Isfat(n.Type) {
if n.Type.Width < 0 {
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
@ -104,12 +104,12 @@ func cgen(n *gc.Node, res *gc.Node) {
switch n.Op {
case gc.OSPTR,
gc.OLEN:
if gc.Isslice(n.Left.Type) != 0 || gc.Istype(n.Left.Type, gc.TSTRING) != 0 {
if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
n.Addable = n.Left.Addable
}
case gc.OCAP:
if gc.Isslice(n.Left.Type) != 0 {
if gc.Isslice(n.Left.Type) {
n.Addable = n.Left.Addable
}
@ -119,7 +119,7 @@ func cgen(n *gc.Node, res *gc.Node) {
// if both are addressable, move
if n.Addable != 0 && res.Addable != 0 {
if gc.Is64(n.Type) != 0 || gc.Is64(res.Type) != 0 || n.Op == gc.OREGISTER || res.Op == gc.OREGISTER || gc.Iscomplex[n.Type.Etype] != 0 || gc.Iscomplex[res.Type.Etype] != 0 {
if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Op == gc.OREGISTER || res.Op == gc.OREGISTER || gc.Iscomplex[n.Type.Etype] != 0 || gc.Iscomplex[res.Type.Etype] != 0 {
gmove(n, res)
} else {
regalloc(&n1, n.Type, nil)
@ -132,7 +132,7 @@ func cgen(n *gc.Node, res *gc.Node) {
}
// if both are not addressable, use a temporary.
if !(n.Addable != 0) && !(res.Addable != 0) {
if n.Addable == 0 && res.Addable == 0 {
// could use regalloc here sometimes,
// but have to check for ullman >= UINF.
gc.Tempname(&n1, n.Type)
@ -144,22 +144,22 @@ func cgen(n *gc.Node, res *gc.Node) {
// if result is not addressable directly but n is,
// compute its address and then store via the address.
if !(res.Addable != 0) {
if res.Addable == 0 {
igen(res, &n1, nil)
cgen(n, &n1)
regfree(&n1)
return
}
if gc.Complexop(n, res) != 0 {
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
return
}
// if n is sudoaddable generate addr and move
if !(gc.Is64(n.Type) != 0) && !(gc.Is64(res.Type) != 0) && !(gc.Iscomplex[n.Type.Etype] != 0) && !(gc.Iscomplex[res.Type.Etype] != 0) {
if !gc.Is64(n.Type) && !gc.Is64(res.Type) && gc.Iscomplex[n.Type.Etype] == 0 && gc.Iscomplex[res.Type.Etype] == 0 {
a = optoas(gc.OAS, n.Type)
if sudoaddable(a, n, &addr, &w) != 0 {
if sudoaddable(a, n, &addr, &w) {
if res.Op != gc.OREGISTER {
regalloc(&n2, res.Type, nil)
p1 = gins(a, nil, &n2)
@ -201,7 +201,7 @@ func cgen(n *gc.Node, res *gc.Node) {
}
// 64-bit ops are hard on 32-bit machine.
if gc.Is64(n.Type) != 0 || gc.Is64(res.Type) != 0 || n.Left != nil && gc.Is64(n.Left.Type) != 0 {
if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Left != nil && gc.Is64(n.Left.Type) {
switch n.Op {
// math goes to cgen64.
case gc.OMINUS,
@ -247,11 +247,11 @@ func cgen(n *gc.Node, res *gc.Node) {
p1 = gc.Gbranch(arm.AB, nil, 0)
p2 = gc.Pc
gmove(gc.Nodbool(1), res)
gmove(gc.Nodbool(true), res)
p3 = gc.Gbranch(arm.AB, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n, true, 0, p2)
gmove(gc.Nodbool(0), res)
gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
goto ret
@ -298,19 +298,19 @@ func cgen(n *gc.Node, res *gc.Node) {
case gc.OLROT,
gc.OLSH,
gc.ORSH:
cgen_shift(int(n.Op), int(n.Bounded), nl, nr, res)
cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
case gc.OCONV:
if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) != 0 {
if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
cgen(nl, res)
break
}
if nl.Addable != 0 && !(gc.Is64(nl.Type) != 0) {
if nl.Addable != 0 && !gc.Is64(nl.Type) {
regalloc(&n1, nl.Type, res)
gmove(nl, &n1)
} else {
if n.Type.Width > int64(gc.Widthptr) || gc.Is64(nl.Type) != 0 || gc.Isfloat[nl.Type.Etype] != 0 {
if n.Type.Width > int64(gc.Widthptr) || gc.Is64(nl.Type) || gc.Isfloat[nl.Type.Etype] != 0 {
gc.Tempname(&n1, nl.Type)
} else {
regalloc(&n1, nl.Type, res)
@ -318,7 +318,7 @@ func cgen(n *gc.Node, res *gc.Node) {
cgen(nl, &n1)
}
if n.Type.Width > int64(gc.Widthptr) || gc.Is64(n.Type) != 0 || gc.Isfloat[n.Type.Etype] != 0 {
if n.Type.Width > int64(gc.Widthptr) || gc.Is64(n.Type) || gc.Isfloat[n.Type.Etype] != 0 {
gc.Tempname(&n2, n.Type)
} else {
regalloc(&n2, n.Type, nil)
@ -352,7 +352,7 @@ func cgen(n *gc.Node, res *gc.Node) {
// pointer is the first word of string or slice.
case gc.OSPTR:
if gc.Isconst(nl, gc.CTSTR) != 0 {
if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n1, gc.Types[gc.Tptr], res)
p1 = gins(arm.AMOVW, nil, &n1)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
@ -367,7 +367,7 @@ func cgen(n *gc.Node, res *gc.Node) {
regfree(&n1)
case gc.OLEN:
if gc.Istype(nl.Type, gc.TMAP) != 0 || gc.Istype(nl.Type, gc.TCHAN) != 0 {
if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
// map has len in the first 32-bit word.
// a zero pointer means zero length
regalloc(&n1, gc.Types[gc.Tptr], res)
@ -390,7 +390,7 @@ func cgen(n *gc.Node, res *gc.Node) {
break
}
if gc.Istype(nl.Type, gc.TSTRING) != 0 || gc.Isslice(nl.Type) != 0 {
if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
// both slice and string have len one pointer into the struct.
igen(nl, &n1, res)
@ -404,7 +404,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
case gc.OCAP:
if gc.Istype(nl.Type, gc.TCHAN) != 0 {
if gc.Istype(nl.Type, gc.TCHAN) {
// chan has cap in the second 32-bit word.
// a zero pointer means zero length
regalloc(&n1, gc.Types[gc.Tptr], res)
@ -428,7 +428,7 @@ func cgen(n *gc.Node, res *gc.Node) {
break
}
if gc.Isslice(nl.Type) != 0 {
if gc.Isslice(nl.Type) {
igen(nl, &n1, res)
n1.Type = gc.Types[gc.TUINT32]
n1.Xoffset += int64(gc.Array_cap)
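The layouts these cases assume, written out for a 32-bit target (field names illustrative; the real offsets are the gc.Array_* constants, of which only Array_cap appears in this hunk):

	type slice struct {
		array uintptr // offset 0: element pointer, first word
		nel   uint32  // length
		cap   uint32  // capacity
	}
	type str struct {
		ptr uintptr // pointer is the first word of string or slice
		len uint32
	}
	// A map or chan value is one pointer; len lives in the first 32-bit
	// word and chan's cap in the second, with nil reading as zero.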
@ -495,7 +495,7 @@ abop: // asymmetric binary
gc.OAND,
gc.OOR,
gc.OXOR:
if gc.Smallintconst(nr) != 0 {
if gc.Smallintconst(nr) {
n2 = *nr
break
}
@ -512,7 +512,7 @@ abop: // asymmetric binary
gc.OAND,
gc.OOR,
gc.OXOR:
if gc.Smallintconst(nr) != 0 {
if gc.Smallintconst(nr) {
n2 = *nr
break
}
@ -600,7 +600,7 @@ ret:
* n might be any size; res is 32-bit.
* returns Prog* to patch to panic call.
*/
func cgenindex(n *gc.Node, res *gc.Node, bounded int) *obj.Prog {
func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
var tmp gc.Node
var lo gc.Node
var hi gc.Node
@ -608,7 +608,7 @@ func cgenindex(n *gc.Node, res *gc.Node, bounded int) *obj.Prog {
var n1 gc.Node
var n2 gc.Node
if !(gc.Is64(n.Type) != 0) {
if !gc.Is64(n.Type) {
cgen(n, res)
return nil
}
@ -617,7 +617,7 @@ func cgenindex(n *gc.Node, res *gc.Node, bounded int) *obj.Prog {
cgen(n, &tmp)
split64(&tmp, &lo, &hi)
gmove(&lo, res)
if bounded != 0 {
if bounded {
splitclean()
return nil
}
@ -659,7 +659,7 @@ func agen(n *gc.Node, res *gc.Node) {
n = n.Left
}
if gc.Isconst(n, gc.CTNIL) != 0 && n.Type.Width > int64(gc.Widthptr) {
if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
// The generated code is just going to panic, so it need not
@ -746,7 +746,7 @@ func agen(n *gc.Node, res *gc.Node) {
}
// should only get here for heap vars or paramref
if !(n.Class&gc.PHEAP != 0) && n.Class != gc.PPARAMREF {
if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
gc.Dump("bad agen", n)
gc.Fatal("agen: bad ONAME class %#x", n.Class)
}
@ -912,7 +912,7 @@ func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
gc.Dump("cgenr-n", n)
}
if gc.Isfat(n.Type) != 0 {
if gc.Isfat(n.Type) {
gc.Fatal("cgenr on fat node")
}
@ -960,7 +960,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
var p2 *obj.Prog
var w uint32
var v uint64
var bounded int
var bounded bool
if gc.Debug['g'] != 0 {
gc.Dump("agenr-n", n)
@ -987,35 +987,35 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
case gc.OINDEX:
p2 = nil // to be patched to panicindex.
w = uint32(n.Type.Width)
bounded = bool2int(gc.Debug['B'] != 0 || n.Bounded != 0)
bounded = gc.Debug['B'] != 0 || n.Bounded
if nr.Addable != 0 {
if !(gc.Isconst(nr, gc.CTINT) != 0) {
if !gc.Isconst(nr, gc.CTINT) {
gc.Tempname(&tmp, gc.Types[gc.TINT32])
}
if !(gc.Isconst(nl, gc.CTSTR) != 0) {
if !gc.Isconst(nl, gc.CTSTR) {
agenr(nl, &n3, res)
}
if !(gc.Isconst(nr, gc.CTINT) != 0) {
if !gc.Isconst(nr, gc.CTINT) {
p2 = cgenindex(nr, &tmp, bounded)
regalloc(&n1, tmp.Type, nil)
gmove(&tmp, &n1)
}
} else if nl.Addable != 0 {
if !(gc.Isconst(nr, gc.CTINT) != 0) {
if !gc.Isconst(nr, gc.CTINT) {
gc.Tempname(&tmp, gc.Types[gc.TINT32])
p2 = cgenindex(nr, &tmp, bounded)
regalloc(&n1, tmp.Type, nil)
gmove(&tmp, &n1)
}
if !(gc.Isconst(nl, gc.CTSTR) != 0) {
if !gc.Isconst(nl, gc.CTSTR) {
agenr(nl, &n3, res)
}
} else {
gc.Tempname(&tmp, gc.Types[gc.TINT32])
p2 = cgenindex(nr, &tmp, bounded)
nr = &tmp
if !(gc.Isconst(nl, gc.CTSTR) != 0) {
if !gc.Isconst(nl, gc.CTSTR) {
agenr(nl, &n3, res)
}
regalloc(&n1, tmp.Type, nil)
@ -1027,13 +1027,13 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
// w is width
// constant index
if gc.Isconst(nr, gc.CTINT) != 0 {
if gc.Isconst(nl, gc.CTSTR) != 0 {
if gc.Isconst(nr, gc.CTINT) {
if gc.Isconst(nl, gc.CTSTR) {
gc.Fatal("constant string constant index")
}
v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
if gc.Debug['B'] == 0 && !n.Bounded {
n1 = n3
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
@ -1065,11 +1065,11 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
gmove(&n1, &n2)
regfree(&n1)
if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
if gc.Debug['B'] == 0 && !n.Bounded {
// check bounds
if gc.Isconst(nl, gc.CTSTR) != 0 {
if gc.Isconst(nl, gc.CTSTR) {
gc.Nodconst(&n4, gc.Types[gc.TUINT32], int64(len(nl.Val.U.Sval.S)))
} else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
n1 = n3
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
@ -1092,12 +1092,12 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
gc.Patch(p1, gc.Pc)
}
if gc.Isconst(nl, gc.CTSTR) != 0 {
if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n3, gc.Types[gc.Tptr], res)
p1 = gins(arm.AMOVW, nil, &n3)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
p1.From.Type = obj.TYPE_ADDR
} else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
n1 = n3
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
@ -1185,7 +1185,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
}
if n == nil {
n = gc.Nodbool(1)
n = gc.Nodbool(true)
}
if n.Ninit != nil {
@ -1219,7 +1219,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
// need to ask if it is bool?
case gc.OLITERAL:
if !true_ == !(n.Val.U.Bval != 0) {
if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(arm.AB, nil, 0), to)
}
goto ret
@ -1302,7 +1302,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
nr = r
}
if gc.Isslice(nl.Type) != 0 {
if gc.Isslice(nl.Type) {
// only valid to cmp darray to literal nil
if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
gc.Yyerror("illegal array comparison")
@ -1317,7 +1317,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
break
}
if gc.Isinter(nl.Type) != 0 {
if gc.Isinter(nl.Type) {
// front end should only leave cmp to literal nil
if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
gc.Yyerror("illegal interface comparison")
@ -1337,14 +1337,14 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
break
}
if gc.Is64(nr.Type) != 0 {
if !(nl.Addable != 0) {
if gc.Is64(nr.Type) {
if nl.Addable == 0 {
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
}
if !(nr.Addable != 0) {
if nr.Addable == 0 {
gc.Tempname(&n2, nr.Type)
cgen(nr, &n2)
nr = &n2
@ -1355,7 +1355,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
}
if nr.Op == gc.OLITERAL {
if gc.Isconst(nr, gc.CTINT) != 0 && gc.Mpgetfix(nr.Val.U.Xval) == 0 {
if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) == 0 {
gencmp0(nl, nl.Type, a, likely, to)
break
}
@ -1453,14 +1453,14 @@ func stkof(n *gc.Node) int32 {
case gc.OINDEX:
t = n.Left.Type
if !(gc.Isfixedarray(t) != 0) {
if !gc.Isfixedarray(t) {
break
}
off = stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
if gc.Isconst(n.Right, gc.CTINT) != 0 {
if gc.Isconst(n.Right, gc.CTINT) {
return int32(int64(off) + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval))
}
return 1000
@ -1547,7 +1547,7 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
}
// Avoid taking the address for simple enough types.
if componentgen(n, res) != 0 {
if componentgen(n, res) {
return
}
@ -1560,7 +1560,6 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
switch align {
default:
gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))
fallthrough
case 1:
op = arm.AMOVB
@ -1712,7 +1711,7 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
for {
tmp14 := c
c--
if !(tmp14 > 0) {
if tmp14 <= 0 {
break
}
p = gins(op, &src, &tmp)
@ -1732,19 +1731,19 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
regfree(&tmp)
}
func cadable(n *gc.Node) int {
if !(n.Addable != 0) {
func cadable(n *gc.Node) bool {
if n.Addable == 0 {
// don't know how it happens,
// but it does
return 0
return false
}
switch n.Op {
case gc.ONAME:
return 1
return true
}
return 0
return false
}
/*
@ -1755,7 +1754,7 @@ func cadable(n *gc.Node) int {
* nr is N when assigning a zero value.
* return true if can do, false if can't.
*/
func componentgen(nr *gc.Node, nl *gc.Node) int {
func componentgen(nr *gc.Node, nl *gc.Node) bool {
var nodl gc.Node
var nodr gc.Node
var tmp gc.Node
@ -1777,12 +1776,12 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
t = nl.Type
// Slices are ok.
if gc.Isslice(t) != 0 {
if gc.Isslice(t) {
break
}
// Small arrays are ok.
if t.Bound > 0 && t.Bound <= 3 && !(gc.Isfat(t.Type) != 0) {
if t.Bound > 0 && t.Bound <= 3 && !gc.Isfat(t.Type) {
break
}
@ -1794,7 +1793,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
fldcount = 0
for t = nl.Type.Type; t != nil; t = t.Down {
if gc.Isfat(t.Type) != 0 {
if gc.Isfat(t.Type) {
goto no
}
if t.Etype != gc.TFIELD {
@ -1813,8 +1812,8 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
}
nodl = *nl
if !(cadable(nl) != 0) {
if nr != nil && !(cadable(nr) != 0) {
if !cadable(nl) {
if nr != nil && !cadable(nr) {
goto no
}
igen(nl, &nodl, nil)
@ -1823,7 +1822,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
if nr != nil {
nodr = *nr
if !(cadable(nr) != 0) {
if !cadable(nr) {
igen(nr, &nodr, nil)
freer = 1
}
@ -1851,7 +1850,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
gc.Gvardef(nl)
}
t = nl.Type
if !(gc.Isslice(t) != 0) {
if !gc.Isslice(t) {
nodl.Type = t.Type
nodr.Type = nodl.Type
for fldcount = 0; fldcount < t.Bound; fldcount++ {
@ -1991,7 +1990,7 @@ no:
if freel != 0 {
regfree(&nodl)
}
return 0
return false
yes:
if freer != 0 {
@ -2000,5 +1999,5 @@ yes:
if freel != 0 {
regfree(&nodl)
}
return 1
return true
}
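For reference, the kinds of values the checks above accept or reject (the exact struct field limit is outside this hunk):

	var (
		ok1 []byte               // slice: ok
		ok2 [3]int32             // small array: bound <= 3, element not fat
		no1 [8]int64             // too many elements: address and copy instead
		no2 struct{ a [10]byte } // fat field: rejected in the field walk
	)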

View file

@ -48,7 +48,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
}
l = n.Left
if !(l.Addable != 0) {
if l.Addable == 0 {
gc.Tempname(&t1, l.Type)
cgen(l, &t1)
l = &t1
@ -58,7 +58,6 @@ func cgen64(n *gc.Node, res *gc.Node) {
switch n.Op {
default:
gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
fallthrough
case gc.OMINUS:
split64(res, &lo2, &hi2)
@ -124,13 +123,13 @@ func cgen64(n *gc.Node, res *gc.Node) {
// setup for binary operators
r = n.Right
if r != nil && !(r.Addable != 0) {
if r != nil && r.Addable == 0 {
gc.Tempname(&t2, r.Type)
cgen(r, &t2)
r = &t2
}
if gc.Is64(r.Type) != 0 {
if gc.Is64(r.Type) {
split64(r, &lo2, &hi2)
}
@ -141,7 +140,6 @@ func cgen64(n *gc.Node, res *gc.Node) {
switch n.Op {
default:
gc.Fatal("cgen64: not implemented: %v\n", gc.Nconv(n, 0))
fallthrough
// TODO: Constants
case gc.OADD:
@ -316,7 +314,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
regalloc(&s, gc.Types[gc.TUINT32], nil)
regalloc(&creg, gc.Types[gc.TUINT32], nil)
if gc.Is64(r.Type) != 0 {
if gc.Is64(r.Type) {
// shift is >= 1<<32
split64(r, &cl, &ch)
@ -487,7 +485,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
regalloc(&s, gc.Types[gc.TUINT32], nil)
regalloc(&creg, gc.Types[gc.TUINT32], nil)
if gc.Is64(r.Type) != 0 {
if gc.Is64(r.Type) {
// shift is >= 1<<32
split64(r, &cl, &ch)
@ -721,7 +719,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
regfree(&n1)
}
if gc.Is64(r.Type) != 0 {
if gc.Is64(r.Type) {
splitclean()
}
splitclean()
@ -770,7 +768,6 @@ func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
switch op {
default:
gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
fallthrough
// cmp hi
// bne L

View file

@ -36,7 +36,7 @@ func defframe(ptxt *obj.Prog) {
r0 = 0
for l = gc.Curfn.Dcl; l != nil; l = l.Next {
n = l.N
if !(n.Needzero != 0) {
if n.Needzero == 0 {
continue
}
if n.Class != gc.PAUTO {
@ -176,7 +176,7 @@ func ginscall(f *gc.Node, proc int) {
p = gins(arm.ABL, nil, f)
gc.Afunclit(&p.To, f)
if proc == -1 || gc.Noreturn(p) != 0 {
if proc == -1 || gc.Noreturn(p) {
gins(obj.AUNDEF, nil, nil)
}
break
@ -265,7 +265,7 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
reg[r]--
}
if !(i.Addable != 0) {
if i.Addable == 0 {
gc.Tempname(&tmpi, i.Type)
cgen(i, &tmpi)
i = &tmpi
@ -529,7 +529,7 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
* res = nl << nr
* res = nl >> nr
*/
func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var n1 gc.Node
var n2 gc.Node
var n3 gc.Node
@ -709,7 +709,7 @@ func clearfat(nl *gc.Node) {
w = uint32(nl.Type.Width)
// Avoid taking the address for simple enough types.
if componentgen(nil, nl) != 0 {
if componentgen(nil, nl) {
return
}

View file

@ -73,7 +73,7 @@ func gclean() {
}
}
func anyregalloc() int {
func anyregalloc() bool {
var i int
var j int
@ -86,11 +86,11 @@ func anyregalloc() int {
goto ok
}
}
return 1
return true
ok:
}
return 0
return false
}
var regpc [REGALLOC_FMAX + 1]uint32
@ -126,7 +126,7 @@ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
gc.Fatal("regalloc: t nil")
}
et = int(gc.Simtype[t.Etype])
if gc.Is64(t) != 0 {
if gc.Is64(t) {
gc.Fatal("regalloc: 64 bit type %v")
}
@ -263,7 +263,7 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
var n1 gc.Node
var i int64
if !(gc.Is64(n.Type) != 0) {
if !gc.Is64(n.Type) {
gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
}
@ -276,7 +276,7 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
default:
switch n.Op {
default:
if !(dotaddable(n, &n1) != 0) {
if !dotaddable(n, &n1) {
igen(n, &n1, nil)
sclean[nsclean-1] = n1
}
@ -359,7 +359,7 @@ func gmove(f *gc.Node, t *gc.Node) {
// cannot have two memory operands;
// except 64-bit, which always copies via registers anyway.
if !(gc.Is64(f.Type) != 0) && !(gc.Is64(t.Type) != 0) && gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
@ -392,7 +392,7 @@ func gmove(f *gc.Node, t *gc.Node) {
ft = gc.Simsimtype(con.Type)
// constants can't move directly to memory
if gc.Ismem(t) != 0 && !(gc.Is64(t.Type) != 0) {
if gc.Ismem(t) && !gc.Is64(t.Type) {
goto hard
}
}
@ -412,7 +412,7 @@ func gmove(f *gc.Node, t *gc.Node) {
* integer copy and truncate
*/
case gc.TINT8<<16 | gc.TINT8: // same size
if !(gc.Ismem(f) != 0) {
if !gc.Ismem(f) {
a = arm.AMOVB
break
}
@ -426,7 +426,7 @@ func gmove(f *gc.Node, t *gc.Node) {
a = arm.AMOVBS
case gc.TUINT8<<16 | gc.TUINT8:
if !(gc.Ismem(f) != 0) {
if !gc.Ismem(f) {
a = arm.AMOVB
break
}
@ -451,7 +451,7 @@ func gmove(f *gc.Node, t *gc.Node) {
goto trunc64
case gc.TINT16<<16 | gc.TINT16: // same size
if !(gc.Ismem(f) != 0) {
if !gc.Ismem(f) {
a = arm.AMOVH
break
}
@ -463,7 +463,7 @@ func gmove(f *gc.Node, t *gc.Node) {
a = arm.AMOVHS
case gc.TUINT16<<16 | gc.TUINT16:
if !(gc.Ismem(f) != 0) {
if !gc.Ismem(f) {
a = arm.AMOVH
break
}
@ -795,9 +795,9 @@ fatal:
gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
}
func samaddr(f *gc.Node, t *gc.Node) int {
func samaddr(f *gc.Node, t *gc.Node) bool {
if f.Op != t.Op {
return 0
return false
}
switch f.Op {
@ -805,10 +805,10 @@ func samaddr(f *gc.Node, t *gc.Node) int {
if f.Val.U.Reg != t.Val.U.Reg {
break
}
return 1
return true
}
return 0
return false
}
/*
@ -1245,13 +1245,13 @@ func sudoclean() {
cleani -= 2
}
func dotaddable(n *gc.Node, n1 *gc.Node) int {
func dotaddable(n *gc.Node, n1 *gc.Node) bool {
var o int
var oary [10]int64
var nn *gc.Node
if n.Op != gc.ODOT {
return 0
return false
}
o = gc.Dotoffset(n, oary[:], &nn)
@ -1259,10 +1259,10 @@ func dotaddable(n *gc.Node, n1 *gc.Node) int {
*n1 = *nn
n1.Type = n.Type
n1.Xoffset += oary[0]
return 1
return true
}
return 0
return false
}
/*
@ -1276,7 +1276,7 @@ func dotaddable(n *gc.Node, n1 *gc.Node) int {
* after successful sudoaddable,
* to release the register used for a.
*/
func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) int {
func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) bool {
var o int
var i int
var oary [10]int64
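A call site follows the pattern visible in cgen earlier in this CL: probe the addressing mode, emit through the returned Addr, then release it:

	if sudoaddable(a, n, &addr, &w) {
		p1 := gins(a, nil, &n2)
		p1.From = addr
		// ... complete and emit the instruction ...
		sudoclean() // release the register held for addr
	}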
@ -1295,14 +1295,14 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) int {
var t *gc.Type
if n.Type == nil {
return 0
return false
}
*a = obj.Addr{}
switch n.Op {
case gc.OLITERAL:
if !(gc.Isconst(n, gc.CTINT) != 0) {
if !gc.Isconst(n, gc.CTINT) {
break
}
v = gc.Mpgetfix(n.Val.U.Xval)
@ -1321,12 +1321,12 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) int {
goto odot
case gc.OINDEX:
return 0
return false
// disabled: OINDEX case is now covered by agenr
// for a more suitable register allocation pattern.
if n.Left.Type.Etype == gc.TSTRING {
return 0
return false
}
cleani += 2
reg = &clean[cleani-1]
@ -1336,12 +1336,12 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) int {
goto oindex
}
return 0
return false
lit:
switch as {
default:
return 0
return false
case arm.AADD,
arm.ASUB,
@ -1437,7 +1437,7 @@ oindex:
}
*w = int(n.Type.Width)
if gc.Isconst(r, gc.CTINT) != 0 {
if gc.Isconst(r, gc.CTINT) {
goto oindex_const
}
@ -1471,7 +1471,7 @@ oindex:
}
regalloc(reg1, t, nil)
regalloc(&n3, gc.Types[gc.TINT32], reg1)
p2 = cgenindex(r, &n3, bool2int(gc.Debug['B'] != 0 || n.Bounded != 0))
p2 = cgenindex(r, &n3, gc.Debug['B'] != 0 || n.Bounded)
gmove(&n3, reg1)
regfree(&n3)
@ -1487,7 +1487,7 @@ oindex:
}
// check bounds
if !(gc.Debug['B'] != 0) {
if gc.Debug['B'] == 0 {
if o&ODynam != 0 {
n2 = *reg
n2.Op = gc.OINDREG
@ -1557,7 +1557,7 @@ oindex_const:
v = gc.Mpgetfix(r.Val.U.Xval)
if o&ODynam != 0 {
if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
if gc.Debug['B'] == 0 && !n.Bounded {
n1 = *reg
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
@ -1591,9 +1591,9 @@ oindex_const:
goto yes
yes:
return 1
return true
no:
sudoclean()
return 0
return false
}

View file

@ -79,16 +79,16 @@ loop1:
arm.AMOVW,
arm.AMOVF,
arm.AMOVD:
if regtyp(&p.From) != 0 {
if regtyp(&p.From) {
if p.From.Type == p.To.Type && isfloatreg(&p.From) == isfloatreg(&p.To) {
if p.Scond == arm.C_SCOND_NONE {
if copyprop(g, r) != 0 {
if copyprop(g, r) {
excise(r)
t++
break
}
if subprop(r) != 0 && copyprop(g, r) != 0 {
if subprop(r) && copyprop(g, r) {
excise(r)
t++
break
@ -102,7 +102,7 @@ loop1:
arm.AMOVBS,
arm.AMOVBU:
if p.From.Type == obj.TYPE_REG {
if shortprop(r) != 0 {
if shortprop(r) {
t++
}
}
@ -128,7 +128,7 @@ loop1:
* EOR -1,x,y => MVN x,y
*/
case arm.AEOR:
if isdconst(&p.From) != 0 && p.From.Offset == -1 {
if isdconst(&p.From) && p.From.Offset == -1 {
p.As = arm.AMVN
p.From.Type = obj.TYPE_REG
if p.Reg != 0 {
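The peephole rests on the identity x ^ -1 == ^x for any integer, so an exclusive-or against all ones is exactly a bitwise complement:

	package main

	import "fmt"

	func main() {
		for _, x := range []int32{0, 1, -7, 1 << 30} {
			fmt.Println(x^-1 == ^x) // always true
		}
	}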
@ -231,8 +231,8 @@ loop1:
gc.Flowend(g)
}
func regtyp(a *obj.Addr) int {
return bool2int(a.Type == obj.TYPE_REG && (arm.REG_R0 <= a.Reg && a.Reg <= arm.REG_R15 || arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15))
func regtyp(a *obj.Addr) bool {
return a.Type == obj.TYPE_REG && (arm.REG_R0 <= a.Reg && a.Reg <= arm.REG_R15 || arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15)
}
/*
@ -249,7 +249,7 @@ func regtyp(a *obj.Addr) int {
* hopefully, then the former or latter MOV
* will be eliminated by copy propagation.
*/
func subprop(r0 *gc.Flow) int {
func subprop(r0 *gc.Flow) bool {
var p *obj.Prog
var v1 *obj.Addr
var v2 *obj.Addr
@ -259,12 +259,12 @@ func subprop(r0 *gc.Flow) int {
p = r0.Prog
v1 = &p.From
if !(regtyp(v1) != 0) {
return 0
if !regtyp(v1) {
return false
}
v2 = &p.To
if !(regtyp(v2) != 0) {
return 0
if !regtyp(v2) {
return false
}
for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Uniqs(r) == nil {
@ -276,7 +276,7 @@ func subprop(r0 *gc.Flow) int {
}
proginfo(&info, p)
if info.Flags&gc.Call != 0 {
return 0
return false
}
if (info.Flags&gc.CanRegRead != 0) && p.To.Type == obj.TYPE_REG {
@ -289,7 +289,7 @@ func subprop(r0 *gc.Flow) int {
case arm.AMULLU,
arm.AMULA,
arm.AMVN:
return 0
return false
}
if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
@ -302,7 +302,7 @@ func subprop(r0 *gc.Flow) int {
}
}
if copyau(&p.From, v2) != 0 || copyau1(p, v2) != 0 || copyau(&p.To, v2) != 0 {
if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
break
}
if copysub(&p.From, v1, v2, 0) != 0 || copysub1(p, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
@ -310,7 +310,7 @@ func subprop(r0 *gc.Flow) int {
}
}
return 0
return false
gotit:
copysub(&p.To, v1, v2, 1)
@ -338,7 +338,7 @@ gotit:
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
return 1
return true
}
/*
@ -353,7 +353,7 @@ gotit:
* set v1 F=1
* set v2 return success
*/
func copyprop(g *gc.Graph, r0 *gc.Flow) int {
func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
var p *obj.Prog
var v1 *obj.Addr
var v2 *obj.Addr
@ -361,14 +361,14 @@ func copyprop(g *gc.Graph, r0 *gc.Flow) int {
p = r0.Prog
v1 = &p.From
v2 = &p.To
if copyas(v1, v2) != 0 {
return 1
if copyas(v1, v2) {
return true
}
gactive++
return copy1(v1, v2, r0.S1, 0)
}
func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
var t int
var p *obj.Prog
@ -376,7 +376,7 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
if gc.Debug['P'] != 0 {
fmt.Printf("act set; return 1\n")
}
return 1
return true
}
r.Active = int32(gactive)
@ -388,7 +388,7 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
if gc.Debug['P'] != 0 {
fmt.Printf("%v", p)
}
if !(f != 0) && gc.Uniqp(r) == nil {
if f == 0 && gc.Uniqp(r) == nil {
f = 1
if gc.Debug['P'] != 0 {
fmt.Printf("; merge; f=%d", f)
@ -401,33 +401,33 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
if gc.Debug['P'] != 0 {
fmt.Printf("; %vrar; return 0\n", gc.Ctxt.Dconv(v2))
}
return 0
return false
case 3: /* set */
if gc.Debug['P'] != 0 {
fmt.Printf("; %vset; return 1\n", gc.Ctxt.Dconv(v2))
}
return 1
return true
case 1, /* used, substitute */
4: /* use and set */
if f != 0 {
if !(gc.Debug['P'] != 0) {
return 0
if gc.Debug['P'] == 0 {
return false
}
if t == 4 {
fmt.Printf("; %vused+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
} else {
fmt.Printf("; %vused and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
}
return 0
return false
}
if copyu(p, v2, v1) != 0 {
if gc.Debug['P'] != 0 {
fmt.Printf("; sub fail; return 0\n")
}
return 0
return false
}
if gc.Debug['P'] != 0 {
@ -437,13 +437,13 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
if gc.Debug['P'] != 0 {
fmt.Printf("; %vused+set; return 1\n", gc.Ctxt.Dconv(v2))
}
return 1
return true
}
}
if !(f != 0) {
if f == 0 {
t = copyu(p, v1, nil)
if !(f != 0) && (t == 2 || t == 3 || t == 4) {
if f == 0 && (t == 2 || t == 3 || t == 4) {
f = 1
if gc.Debug['P'] != 0 {
fmt.Printf("; %vset and !f; f=%d", gc.Ctxt.Dconv(v1), f)
@ -455,13 +455,13 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
fmt.Printf("\n")
}
if r.S2 != nil {
if !(copy1(v1, v2, r.S2, f) != 0) {
return 0
if !copy1(v1, v2, r.S2, f) {
return false
}
}
}
return 1
return true
}
// UNUSED
@ -490,7 +490,7 @@ func constprop(c1 *obj.Addr, v1 *obj.Addr, r *gc.Flow) {
return
}
if p.As == arm.AMOVW && copyas(&p.From, c1) != 0 {
if p.As == arm.AMOVW && copyas(&p.From, c1) {
if gc.Debug['P'] != 0 {
fmt.Printf("; sub%v/%v", gc.Ctxt.Dconv(&p.From), gc.Ctxt.Dconv(v1))
}
@ -526,7 +526,7 @@ func constprop(c1 *obj.Addr, v1 *obj.Addr, r *gc.Flow) {
*
* MOVBS above can be a MOVBS, MOVBU, MOVHS or MOVHU.
*/
func shortprop(r *gc.Flow) int {
func shortprop(r *gc.Flow) bool {
var p *obj.Prog
var p1 *obj.Prog
var r1 *gc.Flow
@ -534,7 +534,7 @@ func shortprop(r *gc.Flow) int {
p = r.Prog
r1 = findpre(r, &p.From)
if r1 == nil {
return 0
return false
}
p1 = r1.Prog
@ -543,12 +543,12 @@ func shortprop(r *gc.Flow) int {
goto gotit
}
if p1.As == arm.AMOVW && isdconst(&p1.From) != 0 && p1.From.Offset >= 0 && p1.From.Offset < 128 {
if p1.As == arm.AMOVW && isdconst(&p1.From) && p1.From.Offset >= 0 && p1.From.Offset < 128 {
// Loaded an immediate.
goto gotit
}
return 0
return false
gotit:
if gc.Debug['P'] != 0 {
@ -567,7 +567,7 @@ gotit:
if gc.Debug['P'] != 0 {
fmt.Printf(" => %v\n", arm.Aconv(int(p.As)))
}
return 1
return true
}
// UNUSED
@ -582,7 +582,7 @@ gotit:
* AXXX (x<<y),a,b
* ..
*/
func shiftprop(r *gc.Flow) int {
func shiftprop(r *gc.Flow) bool {
var r1 *gc.Flow
var p *obj.Prog
var p1 *obj.Prog
@ -596,11 +596,11 @@ func shiftprop(r *gc.Flow) int {
if gc.Debug['P'] != 0 {
fmt.Printf("\tBOTCH: result not reg; FAILURE\n")
}
return 0
return false
}
n = int(p.To.Reg)
a = obj.Zprog.From
a = obj.Addr{}
if p.Reg != 0 && p.Reg != p.To.Reg {
a.Type = obj.TYPE_REG
a.Reg = p.Reg
@ -618,14 +618,14 @@ func shiftprop(r *gc.Flow) int {
if gc.Debug['P'] != 0 {
fmt.Printf("\tbranch; FAILURE\n")
}
return 0
return false
}
if gc.Uniqp(r1) == nil {
if gc.Debug['P'] != 0 {
fmt.Printf("\tmerge; FAILURE\n")
}
return 0
return false
}
p1 = r1.Prog
@ -638,7 +638,7 @@ func shiftprop(r *gc.Flow) int {
if gc.Debug['P'] != 0 {
fmt.Printf("\targs modified; FAILURE\n")
}
return 0
return false
}
continue
@ -647,7 +647,7 @@ func shiftprop(r *gc.Flow) int {
if gc.Debug['P'] != 0 {
fmt.Printf("\tBOTCH: noref; FAILURE\n")
}
return 0
return false
}
}
@ -660,7 +660,7 @@ func shiftprop(r *gc.Flow) int {
if gc.Debug['P'] != 0 {
fmt.Printf("\tnon-dpi; FAILURE\n")
}
return 0
return false
case arm.AAND,
arm.AEOR,
@ -676,7 +676,7 @@ func shiftprop(r *gc.Flow) int {
if gc.Debug['P'] != 0 {
fmt.Printf("\tcan't swap; FAILURE\n")
}
return 0
return false
}
p1.Reg = p1.From.Reg
@ -709,14 +709,14 @@ func shiftprop(r *gc.Flow) int {
if gc.Debug['P'] != 0 {
fmt.Printf("\tcan't swap; FAILURE\n")
}
return 0
return false
}
if p1.Reg == 0 && int(p1.To.Reg) == n {
if gc.Debug['P'] != 0 {
fmt.Printf("\tshift result used twice; FAILURE\n")
}
return 0
return false
}
// case AMVN:
@ -724,14 +724,14 @@ func shiftprop(r *gc.Flow) int {
if gc.Debug['P'] != 0 {
fmt.Printf("\tshift result used in shift; FAILURE\n")
}
return 0
return false
}
if p1.From.Type != obj.TYPE_REG || int(p1.From.Reg) != n {
if gc.Debug['P'] != 0 {
fmt.Printf("\tBOTCH: where is it used?; FAILURE\n")
}
return 0
return false
}
}
@ -745,7 +745,7 @@ func shiftprop(r *gc.Flow) int {
if gc.Debug['P'] != 0 {
fmt.Printf("\tinconclusive; FAILURE\n")
}
return 0
return false
}
p1 = r1.Prog
@ -763,7 +763,7 @@ func shiftprop(r *gc.Flow) int {
if gc.Debug['P'] != 0 {
fmt.Printf("\treused; FAILURE\n")
}
return 0
return false
}
break
@ -798,13 +798,13 @@ func shiftprop(r *gc.Flow) int {
o |= 2 << 5
}
p2.From = obj.Zprog.From
p2.From = obj.Addr{}
p2.From.Type = obj.TYPE_SHIFT
p2.From.Offset = int64(o)
if gc.Debug['P'] != 0 {
fmt.Printf("\t=>%v\tSUCCEED\n", p2)
}
return 1
return true
}
/*
@ -853,7 +853,7 @@ func findinc(r *gc.Flow, r2 *gc.Flow, v *obj.Addr) *gc.Flow {
p = r1.Prog
if p.As == arm.AADD {
if isdconst(&p.From) != 0 {
if isdconst(&p.From) {
if p.From.Offset > -4096 && p.From.Offset < 4096 {
return r1
}
@ -869,13 +869,13 @@ func findinc(r *gc.Flow, r2 *gc.Flow, v *obj.Addr) *gc.Flow {
return nil
}
func nochange(r *gc.Flow, r2 *gc.Flow, p *obj.Prog) int {
func nochange(r *gc.Flow, r2 *gc.Flow, p *obj.Prog) bool {
var a [3]obj.Addr
var i int
var n int
if r == r2 {
return 1
return true
}
n = 0
if p.Reg != 0 && p.Reg != p.To.Reg {
@ -898,47 +898,47 @@ func nochange(r *gc.Flow, r2 *gc.Flow, p *obj.Prog) int {
}
if n == 0 {
return 1
return true
}
for ; r != nil && r != r2; r = gc.Uniqs(r) {
p = r.Prog
for i = 0; i < n; i++ {
if copyu(p, &a[i], nil) > 1 {
return 0
return false
}
}
}
return 1
return true
}
func findu1(r *gc.Flow, v *obj.Addr) int {
func findu1(r *gc.Flow, v *obj.Addr) bool {
for ; r != nil; r = r.S1 {
if r.Active != 0 {
return 0
return false
}
r.Active = 1
switch copyu(r.Prog, v, nil) {
case 1, /* used */
2, /* read-alter-rewrite */
4: /* set and used */
return 1
return true
case 3: /* set */
return 0
return false
}
if r.S2 != nil {
if findu1(r.S2, v) != 0 {
return 1
if findu1(r.S2, v) {
return true
}
}
}
return 0
return false
}
func finduse(g *gc.Graph, r *gc.Flow, v *obj.Addr) int {
func finduse(g *gc.Graph, r *gc.Flow, v *obj.Addr) bool {
var r1 *gc.Flow
for r1 = g.Start; r1 != nil; r1 = r1.Link {
@ -960,7 +960,7 @@ func finduse(g *gc.Graph, r *gc.Flow, v *obj.Addr) int {
* into
* MOVBU R0<<0(R1),R0
*/
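xtramodes folds address arithmetic into the load or store's addressing mode; a sketch of the register-offset case (registers invented):
	ADD	R0, R1
	MOVBU	(R1), R0
	=>
	MOVBU	R0<<0(R1), R0
The post-increment variants below instead set C_PBIT on the memory op and excise the ADD when its result is otherwise unused.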
func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) int {
func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) bool {
var r1 *gc.Flow
var r2 *gc.Flow
var r3 *gc.Flow
@ -983,14 +983,14 @@ func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) int {
}
if p1.From.Type == obj.TYPE_REG || (p1.From.Type == obj.TYPE_SHIFT && p1.From.Offset&(1<<4) == 0 && ((p.As != arm.AMOVB && p.As != arm.AMOVBS) || (a == &p.From && p1.From.Offset&^0xf == 0))) || ((p1.From.Type == obj.TYPE_ADDR || p1.From.Type == obj.TYPE_CONST) && p1.From.Offset > -4096 && p1.From.Offset < 4096) {
if nochange(gc.Uniqs(r1), r, p1) != 0 {
if nochange(gc.Uniqs(r1), r, p1) {
if a != &p.From || v.Reg != p.To.Reg {
if finduse(g, r.S1, &v) != 0 {
if finduse(g, r.S1, &v) {
if p1.Reg == 0 || p1.Reg == v.Reg {
/* pre-indexing */
p.Scond |= arm.C_WBIT
} else {
return 0
return false
}
}
}
@ -999,18 +999,18 @@ func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) int {
/* register offset */
case obj.TYPE_REG:
if gc.Nacl {
return 0
return false
}
*a = obj.Zprog.From
*a = obj.Addr{}
a.Type = obj.TYPE_SHIFT
a.Offset = int64(p1.From.Reg) & 15
/* scaled register offset */
case obj.TYPE_SHIFT:
if gc.Nacl {
return 0
return false
}
*a = obj.Zprog.From
*a = obj.Addr{}
a.Type = obj.TYPE_SHIFT
fallthrough
@ -1024,7 +1024,7 @@ func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) int {
a.Reg = p1.Reg
}
excise(r1)
return 1
return true
}
}
@ -1041,11 +1041,11 @@ func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) int {
a.Reg = p1.To.Reg
a.Offset = p1.From.Offset
p.Scond |= arm.C_PBIT
if !(finduse(g, r, &r1.Prog.To) != 0) {
if !finduse(g, r, &r1.Prog.To) {
excise(r1)
}
excise(r2)
return 1
return true
}
}
}
@ -1062,11 +1062,11 @@ func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) int {
a.Offset = p1.From.Offset
p.Scond |= arm.C_PBIT
excise(r1)
return 1
return true
}
}
return 0
return false
}
/*
@ -1098,7 +1098,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 0
}
if copyau(&p.To, v) != 0 {
if copyau(&p.To, v) {
if p.Scond&arm.C_WBIT != 0 {
return 2
}
@ -1119,7 +1119,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 0
}
if copyau(&p.From, v) != 0 {
if copyau(&p.From, v) {
if p.Scond&arm.C_WBIT != 0 {
return 2
}
@ -1170,7 +1170,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
if copysub(&p.From, v, s, 1) != 0 {
return 1
}
if !(copyas(&p.To, v) != 0) {
if !copyas(&p.To, v) {
if copysub(&p.To, v, s, 1) != 0 {
return 1
}
@ -1178,20 +1178,20 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 0
}
if copyas(&p.To, v) != 0 {
if copyas(&p.To, v) {
if p.Scond != arm.C_SCOND_NONE {
return 2
}
if copyau(&p.From, v) != 0 {
if copyau(&p.From, v) {
return 4
}
return 3
}
if copyau(&p.From, v) != 0 {
if copyau(&p.From, v) {
return 1
}
if copyau(&p.To, v) != 0 {
if copyau(&p.To, v) {
return 1
}
return 0
@ -1243,7 +1243,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
if copysub1(p, v, s, 1) != 0 {
return 1
}
if !(copyas(&p.To, v) != 0) {
if !copyas(&p.To, v) {
if copysub(&p.To, v, s, 1) != 0 {
return 1
}
@ -1251,29 +1251,29 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 0
}
if copyas(&p.To, v) != 0 {
if copyas(&p.To, v) {
if p.Scond != arm.C_SCOND_NONE {
return 2
}
if p.Reg == 0 {
p.Reg = p.To.Reg
}
if copyau(&p.From, v) != 0 {
if copyau(&p.From, v) {
return 4
}
if copyau1(p, v) != 0 {
if copyau1(p, v) {
return 4
}
return 3
}
if copyau(&p.From, v) != 0 {
if copyau(&p.From, v) {
return 1
}
if copyau1(p, v) != 0 {
if copyau1(p, v) {
return 1
}
if copyau(&p.To, v) != 0 {
if copyau(&p.To, v) {
return 1
}
return 0
@ -1301,10 +1301,10 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return copysub1(p, v, s, 1)
}
if copyau(&p.From, v) != 0 {
if copyau(&p.From, v) {
return 1
}
if copyau1(p, v) != 0 {
if copyau1(p, v) {
return 1
}
return 0
@ -1317,7 +1317,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 0
}
if copyau(&p.To, v) != 0 {
if copyau(&p.To, v) {
return 1
}
return 0
@ -1357,7 +1357,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 0
}
if copyau(&p.To, v) != 0 {
if copyau(&p.To, v) {
return 4
}
return 3
@ -1411,11 +1411,11 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
* could be set/use depending on
* semantics
*/
func copyas(a *obj.Addr, v *obj.Addr) int {
if regtyp(v) != 0 {
func copyas(a *obj.Addr, v *obj.Addr) bool {
if regtyp(v) {
if a.Type == v.Type {
if a.Reg == v.Reg {
return 1
return true
}
}
} else if v.Type == obj.TYPE_CONST { /* for constprop */
@ -1424,7 +1424,7 @@ func copyas(a *obj.Addr, v *obj.Addr) int {
if a.Sym == v.Sym {
if a.Reg == v.Reg {
if a.Offset == v.Offset {
return 1
return true
}
}
}
@ -1432,15 +1432,15 @@ func copyas(a *obj.Addr, v *obj.Addr) int {
}
}
return 0
return false
}
func sameaddr(a *obj.Addr, v *obj.Addr) int {
func sameaddr(a *obj.Addr, v *obj.Addr) bool {
if a.Type != v.Type {
return 0
return false
}
if regtyp(v) != 0 && a.Reg == v.Reg {
return 1
if regtyp(v) && a.Reg == v.Reg {
return true
}
// TODO(rsc): Change v->type to v->name and enable.
@ -1448,54 +1448,54 @@ func sameaddr(a *obj.Addr, v *obj.Addr) int {
// if(v->offset == a->offset)
// return 1;
//}
return 0
return false
}
/*
* either direct or indirect
*/
func copyau(a *obj.Addr, v *obj.Addr) int {
if copyas(a, v) != 0 {
return 1
func copyau(a *obj.Addr, v *obj.Addr) bool {
if copyas(a, v) {
return true
}
if v.Type == obj.TYPE_REG {
if a.Type == obj.TYPE_ADDR && a.Reg != 0 {
if a.Reg == v.Reg {
return 1
return true
}
} else if a.Type == obj.TYPE_MEM {
if a.Reg == v.Reg {
return 1
return true
}
} else if a.Type == obj.TYPE_REGREG || a.Type == obj.TYPE_REGREG2 {
if a.Reg == v.Reg {
return 1
return true
}
if a.Offset == int64(v.Reg) {
return 1
return true
}
} else if a.Type == obj.TYPE_SHIFT {
if a.Offset&0xf == int64(v.Reg-arm.REG_R0) {
return 1
return true
}
if (a.Offset&(1<<4) != 0) && (a.Offset>>8)&0xf == int64(v.Reg-arm.REG_R0) {
return 1
return true
}
}
}
return 0
return false
}
/*
* compare v to the center
* register in p (p->reg)
*/
func copyau1(p *obj.Prog, v *obj.Addr) int {
func copyau1(p *obj.Prog, v *obj.Addr) bool {
if v.Type == obj.TYPE_REG && v.Reg == 0 {
return 0
return false
}
return bool2int(p.Reg == v.Reg)
return p.Reg == v.Reg
}
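As a rough summary of how the three predicates differ, with invented operands and v = R3:
	copyas(R3, v)      -> true    // a is exactly v
	copyau(4(R3), v)   -> true    // v used indirectly, as a base register
	copyau1(p, v)      -> true when p.Reg == R3 (the middle operand of p)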
/*
@ -1504,7 +1504,7 @@ func copyau1(p *obj.Prog, v *obj.Addr) int {
*/
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
if f != 0 {
if copyau(a, v) != 0 {
if copyau(a, v) {
if a.Type == obj.TYPE_SHIFT {
if a.Offset&0xf == int64(v.Reg-arm.REG_R0) {
a.Offset = a.Offset&^0xf | int64(s.Reg)&0xf
@ -1530,7 +1530,7 @@ func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
if f != 0 {
if copyau1(p1, v) != 0 {
if copyau1(p1, v) {
p1.Reg = s.Reg
}
}
@ -1664,11 +1664,11 @@ const (
Keepbranch
)
func isbranch(p *obj.Prog) int {
return bool2int((arm.ABEQ <= p.As) && (p.As <= arm.ABLE))
func isbranch(p *obj.Prog) bool {
return (arm.ABEQ <= p.As) && (p.As <= arm.ABLE)
}
func predicable(p *obj.Prog) int {
func predicable(p *obj.Prog) bool {
switch p.As {
case obj.ANOP,
obj.AXXX,
@ -1678,13 +1678,13 @@ func predicable(p *obj.Prog) int {
arm.AWORD,
arm.ABCASE,
arm.ACASE:
return 0
return false
}
if isbranch(p) != 0 {
return 0
if isbranch(p) {
return false
}
return 1
return true
}
/*
@ -1694,7 +1694,7 @@ func predicable(p *obj.Prog) int {
*
* C_SBIT may also have been set explicitly in p->scond.
*/
func modifiescpsr(p *obj.Prog) int {
func modifiescpsr(p *obj.Prog) bool {
switch p.As {
case arm.AMULLU,
arm.AMULA,
@ -1709,13 +1709,13 @@ func modifiescpsr(p *obj.Prog) int {
arm.AMOD,
arm.AMODU,
arm.ABL:
return 1
return true
}
if p.Scond&arm.C_SBIT != 0 {
return 1
return true
}
return 0
return false
}
/*
@ -1741,7 +1741,7 @@ func joinsplit(r *gc.Flow, j *Joininfo) int {
if r.Prog.As != obj.ANOP {
j.len++
}
if !(r.S1 != nil) && !(r.S2 != nil) {
if r.S1 == nil && r.S2 == nil {
j.end = r.Link
return End
}
@ -1751,13 +1751,13 @@ func joinsplit(r *gc.Flow, j *Joininfo) int {
return Branch
}
if modifiescpsr(r.Prog) != 0 {
if modifiescpsr(r.Prog) {
j.end = r.S1
return Setcond
}
r = r.S1
if !(j.len < 4) {
if j.len >= 4 {
break
}
}
@ -1798,7 +1798,7 @@ func applypred(rstart *gc.Flow, j *Joininfo, cond int, branch int) {
r.Prog.As = int16(predinfo[rstart.Prog.As-arm.ABEQ].notopcode)
}
}
} else if predicable(r.Prog) != 0 {
} else if predicable(r.Prog) {
r.Prog.Scond = uint8(int(r.Prog.Scond&^arm.C_SCOND) | pred)
}
if r.S1 != r.Link {
@ -1820,7 +1820,7 @@ func predicate(g *gc.Graph) {
var j2 Joininfo
for r = g.Start; r != nil; r = r.Link {
if isbranch(r.Prog) != 0 {
if isbranch(r.Prog) {
t1 = joinsplit(r.S1, &j1)
t2 = joinsplit(r.S2, &j2)
if j1.last.Link != j2.start {
@ -1844,20 +1844,20 @@ func predicate(g *gc.Graph) {
}
}
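predicate converts short, branch-free arms of a conditional into predicated ARM instructions; a minimal sketch (registers invented):
	CMP	R0, R1
	BNE	2(PC)
	MOVW	$1, R2
	=>
	CMP	R0, R1
	MOVW.EQ	$1, R2
applypred performs the per-instruction rewrite by patching the C_SCOND bits of each predicable instruction.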
func isdconst(a *obj.Addr) int {
return bool2int(a.Type == obj.TYPE_CONST)
func isdconst(a *obj.Addr) bool {
return a.Type == obj.TYPE_CONST
}
func isfloatreg(a *obj.Addr) int {
return bool2int(arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15)
func isfloatreg(a *obj.Addr) bool {
return arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15
}
func stackaddr(a *obj.Addr) int {
return bool2int(regtyp(a) != 0 && a.Reg == arm.REGSP)
func stackaddr(a *obj.Addr) bool {
return regtyp(a) && a.Reg == arm.REGSP
}
func smallindir(a *obj.Addr, reg *obj.Addr) int {
return bool2int(reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096)
func smallindir(a *obj.Addr, reg *obj.Addr) bool {
return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
}
func excise(r *gc.Flow) {

View file

@ -59,7 +59,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.OSLICESTR,
gc.OSLICE3,
gc.OSLICE3ARR:
if res.Op != gc.ONAME || !(res.Addable != 0) {
if res.Op != gc.ONAME || res.Addable == 0 {
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
cgen(&n1, res)
@ -69,7 +69,7 @@ func cgen(n *gc.Node, res *gc.Node) {
goto ret
case gc.OEFACE:
if res.Op != gc.ONAME || !(res.Addable != 0) {
if res.Op != gc.ONAME || res.Addable == 0 {
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
cgen(&n1, res)
@ -91,7 +91,7 @@ func cgen(n *gc.Node, res *gc.Node) {
}
}
if gc.Isfat(n.Type) != 0 {
if gc.Isfat(n.Type) {
if n.Type.Width < 0 {
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
@ -99,7 +99,7 @@ func cgen(n *gc.Node, res *gc.Node) {
goto ret
}
if !(res.Addable != 0) {
if res.Addable == 0 {
if n.Ullman > res.Ullman {
regalloc(&n1, n.Type, res)
cgen(n, &n1)
@ -118,7 +118,7 @@ func cgen(n *gc.Node, res *gc.Node) {
goto gen
}
if gc.Complexop(n, res) != 0 {
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
goto ret
}
@ -126,7 +126,7 @@ func cgen(n *gc.Node, res *gc.Node) {
f = 1 // gen thru register
switch n.Op {
case gc.OLITERAL:
if gc.Smallintconst(n) != 0 {
if gc.Smallintconst(n) {
f = 0
}
@ -134,9 +134,9 @@ func cgen(n *gc.Node, res *gc.Node) {
f = 0
}
if !(gc.Iscomplex[n.Type.Etype] != 0) {
if gc.Iscomplex[n.Type.Etype] == 0 {
a = optoas(gc.OAS, res.Type)
if sudoaddable(a, res, &addr) != 0 {
if sudoaddable(a, res, &addr) {
if f != 0 {
regalloc(&n2, res.Type, nil)
cgen(n, &n2)
@ -167,12 +167,12 @@ func cgen(n *gc.Node, res *gc.Node) {
switch n.Op {
case gc.OSPTR,
gc.OLEN:
if gc.Isslice(n.Left.Type) != 0 || gc.Istype(n.Left.Type, gc.TSTRING) != 0 {
if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
n.Addable = n.Left.Addable
}
case gc.OCAP:
if gc.Isslice(n.Left.Type) != 0 {
if gc.Isslice(n.Left.Type) {
n.Addable = n.Left.Addable
}
@ -180,7 +180,7 @@ func cgen(n *gc.Node, res *gc.Node) {
n.Addable = n.Left.Addable
}
if gc.Complexop(n, res) != 0 {
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
goto ret
}
@ -204,9 +204,9 @@ func cgen(n *gc.Node, res *gc.Node) {
}
}
if !(gc.Iscomplex[n.Type.Etype] != 0) {
if gc.Iscomplex[n.Type.Etype] == 0 {
a = optoas(gc.OAS, n.Type)
if sudoaddable(a, n, &addr) != 0 {
if sudoaddable(a, n, &addr) {
if res.Op == gc.OREGISTER {
p1 = gins(a, nil, res)
p1.From = addr
@ -241,11 +241,11 @@ func cgen(n *gc.Node, res *gc.Node) {
p1 = gc.Gbranch(obj.AJMP, nil, 0)
p2 = gc.Pc
gmove(gc.Nodbool(1), res)
gmove(gc.Nodbool(true), res)
p3 = gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n, true, 0, p2)
gmove(gc.Nodbool(0), res)
gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
goto ret
@ -353,7 +353,7 @@ func cgen(n *gc.Node, res *gc.Node) {
// pointer is the first word of string or slice.
case gc.OSPTR:
if gc.Isconst(nl, gc.CTSTR) != 0 {
if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n1, gc.Types[gc.Tptr], res)
p1 = gins(x86.ALEAQ, nil, &n1)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
@ -368,7 +368,7 @@ func cgen(n *gc.Node, res *gc.Node) {
regfree(&n1)
case gc.OLEN:
if gc.Istype(nl.Type, gc.TMAP) != 0 || gc.Istype(nl.Type, gc.TCHAN) != 0 {
if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
// map and chan have len in the first int-sized word.
// a zero pointer means zero length
regalloc(&n1, gc.Types[gc.Tptr], res)
@ -391,7 +391,7 @@ func cgen(n *gc.Node, res *gc.Node) {
break
}
if gc.Istype(nl.Type, gc.TSTRING) != 0 || gc.Isslice(nl.Type) != 0 {
if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
// both slice and string have len one pointer into the struct.
// a zero pointer means zero length
igen(nl, &n1, res)
@ -406,7 +406,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
case gc.OCAP:
if gc.Istype(nl.Type, gc.TCHAN) != 0 {
if gc.Istype(nl.Type, gc.TCHAN) {
// chan has cap in the second int-sized word.
// a zero pointer means zero length
regalloc(&n1, gc.Types[gc.Tptr], res)
@ -430,7 +430,7 @@ func cgen(n *gc.Node, res *gc.Node) {
break
}
if gc.Isslice(nl.Type) != 0 {
if gc.Isslice(nl.Type) {
igen(nl, &n1, res)
n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
n1.Xoffset += int64(gc.Array_cap)
@ -442,11 +442,11 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
case gc.OADDR:
if n.Bounded != 0 { // let race detector avoid nil checks
if n.Bounded { // let race detector avoid nil checks
gc.Disable_checknil++
}
agen(nl, res)
if n.Bounded != 0 {
if n.Bounded {
gc.Disable_checknil--
}
@ -475,7 +475,7 @@ func cgen(n *gc.Node, res *gc.Node) {
cgen_div(int(n.Op), &n1, nr, res)
regfree(&n1)
} else {
if !(gc.Smallintconst(nr) != 0) {
if !gc.Smallintconst(nr) {
regalloc(&n2, nr.Type, res)
cgen(nr, &n2)
} else {
@ -491,7 +491,7 @@ func cgen(n *gc.Node, res *gc.Node) {
case gc.OLSH,
gc.ORSH,
gc.OLROT:
cgen_shift(int(n.Op), int(n.Bounded), nl, nr, res)
cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
}
goto ret
@ -513,7 +513,7 @@ func cgen(n *gc.Node, res *gc.Node) {
* register for the computation.
*/
sbop: // symmetric binary
if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) != 0 || (nr.Op == gc.OLITERAL && !(gc.Smallintconst(nr) != 0)))) {
if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) || (nr.Op == gc.OLITERAL && !gc.Smallintconst(nr)))) {
r = nl
nl = nr
nr = r
@ -541,14 +541,14 @@ abop: // asymmetric binary
}
*
*/
if gc.Smallintconst(nr) != 0 {
if gc.Smallintconst(nr) {
n2 = *nr
} else {
regalloc(&n2, nr.Type, nil)
cgen(nr, &n2)
}
} else {
if gc.Smallintconst(nr) != 0 {
if gc.Smallintconst(nr) {
n2 = *nr
} else {
regalloc(&n2, nr.Type, res)
@ -591,7 +591,7 @@ func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
gc.Dump("cgenr-n", n)
}
if gc.Isfat(n.Type) != 0 {
if gc.Isfat(n.Type) {
gc.Fatal("cgenr on fat node")
}
@ -674,8 +674,8 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
}
if nl.Addable != 0 {
cgenr(nr, &n1, nil)
if !(gc.Isconst(nl, gc.CTSTR) != 0) {
if gc.Isfixedarray(nl.Type) != 0 {
if !gc.Isconst(nl, gc.CTSTR) {
if gc.Isfixedarray(nl.Type) {
agenr(nl, &n3, res)
} else {
igen(nl, &nlen, res)
@ -697,11 +697,11 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
nr = &tmp
irad:
if !(gc.Isconst(nl, gc.CTSTR) != 0) {
if gc.Isfixedarray(nl.Type) != 0 {
if !gc.Isconst(nl, gc.CTSTR) {
if gc.Isfixedarray(nl.Type) {
agenr(nl, &n3, res)
} else {
if !(nl.Addable != 0) {
if nl.Addable == 0 {
// igen will need an addressable node.
gc.Tempname(&tmp2, nl.Type)
@ -720,7 +720,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
}
}
if !(gc.Isconst(nr, gc.CTINT) != 0) {
if !gc.Isconst(nr, gc.CTINT) {
cgenr(nr, &n1, nil)
}
@ -733,15 +733,15 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
// constant index
index:
if gc.Isconst(nr, gc.CTINT) != 0 {
if gc.Isconst(nl, gc.CTSTR) != 0 {
if gc.Isconst(nr, gc.CTINT) {
if gc.Isconst(nl, gc.CTSTR) {
gc.Fatal("constant string constant index") // front end should handle
}
v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
if gc.Debug['B'] == 0 && !n.Bounded {
gc.Nodconst(&n2, gc.Types[gc.Simtype[gc.TUINT]], int64(v))
if gc.Smallintconst(nr) != 0 {
if gc.Smallintconst(nr) {
gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &nlen, &n2)
} else {
regalloc(&tmp, gc.Types[gc.Simtype[gc.TUINT]], nil)
@ -776,17 +776,17 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
gmove(&n1, &n2)
regfree(&n1)
if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
if gc.Debug['B'] == 0 && !n.Bounded {
// check bounds
t = gc.Types[gc.Simtype[gc.TUINT]]
if gc.Is64(nr.Type) != 0 {
if gc.Is64(nr.Type) {
t = gc.Types[gc.TUINT64]
}
if gc.Isconst(nl, gc.CTSTR) != 0 {
if gc.Isconst(nl, gc.CTSTR) {
gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval.S)))
} else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
if gc.Is64(nr.Type) != 0 {
} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
if gc.Is64(nr.Type) {
regalloc(&n5, t, nil)
gmove(&nlen, &n5)
regfree(&nlen)
@ -794,7 +794,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
}
} else {
gc.Nodconst(&nlen, t, nl.Type.Bound)
if !(gc.Smallintconst(&nlen) != 0) {
if !gc.Smallintconst(&nlen) {
regalloc(&n5, t, nil)
gmove(&nlen, &n5)
nlen = n5
@ -808,7 +808,7 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
gc.Patch(p1, gc.Pc)
}
if gc.Isconst(nl, gc.CTSTR) != 0 {
if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n3, gc.Types[gc.Tptr], res)
p1 = gins(x86.ALEAQ, nil, &n3)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
@ -865,7 +865,7 @@ func agen(n *gc.Node, res *gc.Node) {
n = n.Left
}
if gc.Isconst(n, gc.CTNIL) != 0 && n.Type.Width > int64(gc.Widthptr) {
if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
// The generated code is just going to panic, so it need not
@ -934,7 +934,7 @@ func agen(n *gc.Node, res *gc.Node) {
}
// should only get here for heap vars or paramref
if !(n.Class&gc.PHEAP != 0) && n.Class != gc.PPARAMREF {
if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
gc.Dump("bad agen", n)
gc.Fatal("agen: bad ONAME class %#x", n.Class)
}
@ -1044,10 +1044,10 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
// Could do the same for slice except that we need
// to use the real index for the bounds checking.
case gc.OINDEX:
if gc.Isfixedarray(n.Left.Type) != 0 || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type) != 0) {
if gc.Isconst(n.Right, gc.CTINT) != 0 {
if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type)) {
if gc.Isconst(n.Right, gc.CTINT) {
// Compute &a.
if !(gc.Isptr[n.Left.Type.Etype] != 0) {
if gc.Isptr[n.Left.Type.Etype] == 0 {
igen(n.Left, a, res)
} else {
igen(n.Left, &n1, res)
@ -1096,7 +1096,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
}
if n == nil {
n = gc.Nodbool(1)
n = gc.Nodbool(true)
}
if n.Ninit != nil {
@ -1132,7 +1132,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
// need to ask if it is bool?
case gc.OLITERAL:
if !true_ == !(n.Val.U.Bval != 0) {
if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(obj.AJMP, nil, likely), to)
}
goto ret
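The hunk above relies on the identity that, for an integer flag b, !(b != 0) and (b == 0) denote the same predicate, so the negated test collapses to a direct comparison.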
@ -1228,7 +1228,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
nr = r
}
if gc.Isslice(nl.Type) != 0 {
if gc.Isslice(nl.Type) {
// front end should only leave cmp to literal nil
if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
gc.Yyerror("illegal slice comparison")
@ -1246,7 +1246,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
break
}
if gc.Isinter(nl.Type) != 0 {
if gc.Isinter(nl.Type) {
// front end should only leave cmp to literal nil
if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
gc.Yyerror("illegal interface comparison")
@ -1288,7 +1288,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
regalloc(&n1, nl.Type, nil)
cgen(nl, &n1)
if gc.Smallintconst(nr) != 0 {
if gc.Smallintconst(nr) {
gins(optoas(gc.OCMP, nr.Type), &n1, nr)
gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
regfree(&n1)
@ -1378,14 +1378,14 @@ func stkof(n *gc.Node) int64 {
case gc.OINDEX:
t = n.Left.Type
if !(gc.Isfixedarray(t) != 0) {
if !gc.Isfixedarray(t) {
break
}
off = stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
if gc.Isconst(n.Right, gc.CTINT) != 0 {
if gc.Isconst(n.Right, gc.CTINT) {
return off + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval)
}
return 1000
@ -1453,7 +1453,7 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
}
// Avoid taking the address for simple enough types.
if componentgen(n, ns) != 0 {
if componentgen(n, ns) {
return
}
@ -1617,19 +1617,19 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
restx(&cx, &oldcx)
}
func cadable(n *gc.Node) int {
if !(n.Addable != 0) {
func cadable(n *gc.Node) bool {
if n.Addable == 0 {
// don't know how it happens,
// but it does
return 0
return false
}
switch n.Op {
case gc.ONAME:
return 1
return true
}
return 0
return false
}
/*
@ -1640,7 +1640,7 @@ func cadable(n *gc.Node) int {
* nr is N when assigning a zero value.
* return true if can do, false if can't.
*/
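As a sketch with an invented type: for s and d of type struct { p *byte; n int }, the assignment d = s becomes two word-sized gmoves, one per field, rather than falling into sgen's bulk copy path.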
func componentgen(nr *gc.Node, nl *gc.Node) int {
func componentgen(nr *gc.Node, nl *gc.Node) bool {
var nodl gc.Node
var nodr gc.Node
var tmp gc.Node
@ -1662,12 +1662,12 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
t = nl.Type
// Slices are ok.
if gc.Isslice(t) != 0 {
if gc.Isslice(t) {
break
}
// Small arrays are ok.
if t.Bound > 0 && t.Bound <= 3 && !(gc.Isfat(t.Type) != 0) {
if t.Bound > 0 && t.Bound <= 3 && !gc.Isfat(t.Type) {
break
}
@ -1679,7 +1679,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
fldcount = 0
for t = nl.Type.Type; t != nil; t = t.Down {
if gc.Isfat(t.Type) != 0 {
if gc.Isfat(t.Type) {
goto no
}
if t.Etype != gc.TFIELD {
@ -1698,8 +1698,8 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
}
nodl = *nl
if !(cadable(nl) != 0) {
if nr != nil && !(cadable(nr) != 0) {
if !cadable(nl) {
if nr != nil && !cadable(nr) {
goto no
}
igen(nl, &nodl, nil)
@ -1708,7 +1708,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
if nr != nil {
nodr = *nr
if !(cadable(nr) != 0) {
if !cadable(nr) {
igen(nr, &nodr, nil)
freer = 1
}
@ -1736,7 +1736,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
gc.Gvardef(nl)
}
t = nl.Type
if !(gc.Isslice(t) != 0) {
if !gc.Isslice(t) {
nodl.Type = t.Type
nodr.Type = nodl.Type
for fldcount = 0; fldcount < t.Bound; fldcount++ {
@ -1876,7 +1876,7 @@ no:
if freel != 0 {
regfree(&nodl)
}
return 0
return false
yes:
if freer != 0 {
@ -1885,5 +1885,5 @@ yes:
if freel != 0 {
regfree(&nodl)
}
return 1
return true
}

View file

@ -38,7 +38,7 @@ func defframe(ptxt *obj.Prog) {
// iterate through declarations - they are sorted in decreasing xoffset order.
for l = gc.Curfn.Dcl; l != nil; l = l.Next {
n = l.N
if !(n.Needzero != 0) {
if n.Needzero == 0 {
continue
}
if n.Class != gc.PAUTO {
@ -174,7 +174,7 @@ func ginscall(f *gc.Node, proc int) {
p = gins(obj.ACALL, nil, f)
gc.Afunclit(&p.To, f)
if proc == -1 || gc.Noreturn(p) != 0 {
if proc == -1 || gc.Noreturn(p) {
gins(obj.AUNDEF, nil, nil)
}
break
@ -224,7 +224,7 @@ func ginscall(f *gc.Node, proc int) {
if proc == 1 {
ginscall(gc.Newproc, 0)
} else {
if !(gc.Hasdefer != 0) {
if gc.Hasdefer == 0 {
gc.Fatal("hasdefer=0 but has defer")
}
ginscall(gc.Deferproc, 0)
@ -265,7 +265,7 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
i = i.Left // interface
if !(i.Addable != 0) {
if i.Addable == 0 {
gc.Tempname(&tmpi, i.Type)
cgen(i, &tmpi)
i = &tmpi
@ -499,9 +499,9 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
check = 0
if gc.Issigned[t.Etype] != 0 {
check = 1
if gc.Isconst(nl, gc.CTINT) != 0 && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
check = 0
} else if gc.Isconst(nr, gc.CTINT) != 0 && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
check = 0
}
}
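The guarded case is the single overflowing signed division, MinInt / -1; e.g. for int32:
	-2147483648 / -1 = +2147483648   // not representable in int32
so when either operand is a constant that rules this out, check stays 0 and no fixup code is emitted.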
@ -578,7 +578,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
}
savex(x86.REG_DX, &dx, &olddx, res, t)
if !(gc.Issigned[t.Etype] != 0) {
if gc.Issigned[t.Etype] == 0 {
gc.Nodconst(&n4, t, 0)
gmove(&n4, &dx)
} else {
@ -618,7 +618,7 @@ func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
*oldx = gc.Node{}
gc.Nodreg(x, t, dr)
if r > 1 && !(gc.Samereg(x, res) != 0) {
if r > 1 && !gc.Samereg(x, res) {
regalloc(oldx, gc.Types[gc.TINT64], nil)
x.Type = gc.Types[gc.TINT64]
gmove(x, oldx)
@ -760,7 +760,7 @@ longmod:
a = x86.AIMULW
}
if !(gc.Smallintconst(nr) != 0) {
if !gc.Smallintconst(nr) {
regalloc(&n3, nl.Type, nil)
cgen(nr, &n3)
gins(a, &n3, &n2)
@ -820,7 +820,7 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
* res = nl << nr
* res = nl >> nr
*/
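When bounded is false the shift count can reach or exceed the operand width, so the body below emits a guard; roughly, for a 64-bit left shift (registers invented):
	CMPQ	CX, $64
	JLT	2(PC)
	MOVQ	$0, AX     // oversized shift: result is 0 (or the sign, for signed right shifts)
	SHLQ	CX, AX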
func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var n1 gc.Node
var n2 gc.Node
var n3 gc.Node
@ -884,14 +884,14 @@ func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)
oldcx = gc.Node{}
if rcx > 0 && !(gc.Samereg(&cx, res) != 0) {
if rcx > 0 && !gc.Samereg(&cx, res) {
regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
gmove(&cx, &oldcx)
}
cx.Type = tcount
if gc.Samereg(&cx, res) != 0 {
if gc.Samereg(&cx, res) {
regalloc(&n2, nl.Type, nil)
} else {
regalloc(&n2, nl.Type, res)
@ -909,7 +909,7 @@ func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
regfree(&n3)
// test and fix up large shifts
if !(bounded != 0) {
if !bounded {
gc.Nodconst(&n3, tcount, nl.Type.Width*8)
gins(optoas(gc.OCMP, tcount), &n1, &n3)
p1 = gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
@ -1007,7 +1007,7 @@ func clearfat(nl *gc.Node) {
w = nl.Type.Width
// Avoid taking the address for simple enough types.
if componentgen(nil, nl) != 0 {
if componentgen(nil, nl) {
return
}
@ -1028,7 +1028,7 @@ func clearfat(nl *gc.Node) {
for {
tmp14 := q
q--
if !(tmp14 > 0) {
if tmp14 <= 0 {
break
}
n1.Type = z.Type
@ -1048,7 +1048,7 @@ func clearfat(nl *gc.Node) {
for {
tmp15 := c
c--
if !(tmp15 > 0) {
if tmp15 <= 0 {
break
}
n1.Type = z.Type
@ -1156,7 +1156,7 @@ func expandchecks(firstp *obj.Prog) {
p2.From.Type = obj.TYPE_REG
p2.From.Reg = x86.REG_AX
if regtyp(&p.From) != 0 {
if regtyp(&p.From) {
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = p.From.Reg
} else {

View file

@ -103,7 +103,7 @@ func gclean() {
}
}
func anyregalloc() int {
func anyregalloc() bool {
var i int
var j int
@ -116,11 +116,11 @@ func anyregalloc() int {
goto ok
}
}
return 1
return true
ok:
}
return 0
return false
}
var regpc [x86.REG_R15 + 1 - x86.REG_AX]uint32
@ -170,7 +170,6 @@ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
fmt.Printf("%d %p\n", i, regpc[i])
}
gc.Fatal("out of fixed registers")
fallthrough
case gc.TFLOAT32,
gc.TFLOAT64:
@ -187,7 +186,6 @@ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
}
}
gc.Fatal("out of floating registers")
fallthrough
case gc.TCOMPLEX64,
gc.TCOMPLEX128:
@ -339,7 +337,7 @@ func gmove(f *gc.Node, t *gc.Node) {
}
// cannot have two memory operands
if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
@ -350,7 +348,7 @@ func gmove(f *gc.Node, t *gc.Node) {
ft = tt // so big switch will choose a simple mov
// some constants can't move directly to memory.
if gc.Ismem(t) != 0 {
if gc.Ismem(t) {
// float constants come from memory.
if gc.Isfloat[tt] != 0 {
goto hard
@ -379,7 +377,6 @@ func gmove(f *gc.Node, t *gc.Node) {
switch uint32(ft)<<16 | uint32(tt) {
default:
gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
fallthrough
/*
* integer copy and truncate
@ -699,9 +696,9 @@ hard:
return
}
func samaddr(f *gc.Node, t *gc.Node) int {
func samaddr(f *gc.Node, t *gc.Node) bool {
if f.Op != t.Op {
return 0
return false
}
switch f.Op {
@ -709,10 +706,10 @@ func samaddr(f *gc.Node, t *gc.Node) int {
if f.Val.U.Reg != t.Val.U.Reg {
break
}
return 1
return true
}
return 0
return false
}
/*
@ -722,9 +719,9 @@ func samaddr(f *gc.Node, t *gc.Node) int {
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
var w int32
var p *obj.Prog
var af obj.Addr
// Node nod;
var af obj.Addr
var at obj.Addr
// if(f != N && f->op == OINDEX) {
@ -751,12 +748,12 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
x86.AMOVQ,
x86.AMOVSS,
x86.AMOVSD:
if f != nil && t != nil && samaddr(f, t) != 0 {
if f != nil && t != nil && samaddr(f, t) {
return nil
}
case x86.ALEAQ:
if f != nil && gc.Isconst(f, gc.CTNIL) != 0 {
if f != nil && gc.Isconst(f, gc.CTNIL) {
gc.Fatal("gins LEAQ nil %v", gc.Tconv(f.Type, 0))
}
}
@ -1326,21 +1323,21 @@ var clean [20]gc.Node
var cleani int = 0
func xgen(n *gc.Node, a *gc.Node, o int) int {
func xgen(n *gc.Node, a *gc.Node, o int) bool {
regalloc(a, gc.Types[gc.Tptr], nil)
if o&ODynam != 0 {
if n.Addable != 0 {
if n.Op != gc.OINDREG {
if n.Op != gc.OREGISTER {
return 1
return true
}
}
}
}
agen(n, a)
return 0
return false
}
func sudoclean() {
@ -1364,7 +1361,7 @@ func sudoclean() {
* after successful sudoaddable,
* to release the register used for a.
*/
func sudoaddable(as int, n *gc.Node, a *obj.Addr) int {
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
var o int
var i int
var oary [10]int64
@ -1383,14 +1380,14 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr) int {
var t *gc.Type
if n.Type == nil {
return 0
return false
}
*a = obj.Addr{}
switch n.Op {
case gc.OLITERAL:
if !(gc.Isconst(n, gc.CTINT) != 0) {
if !gc.Isconst(n, gc.CTINT) {
break
}
v = gc.Mpgetfix(n.Val.U.Xval)
@ -1409,22 +1406,22 @@ func sudoaddable(as int, n *gc.Node, a *obj.Addr) int {
goto odot
case gc.OINDEX:
return 0
return false
// disabled: OINDEX case is now covered by agenr
// for a more suitable register allocation pattern.
if n.Left.Type.Etype == gc.TSTRING {
return 0
return false
}
goto oindex
}
return 0
return false
lit:
switch as {
default:
return 0
return false
case x86.AADDB,
x86.AADDW,
@ -1516,7 +1513,7 @@ oindex:
l = n.Left
r = n.Right
if l.Ullman >= gc.UINF && r.Ullman >= gc.UINF {
return 0
return false
}
// set o to type of array
@ -1533,13 +1530,13 @@ oindex:
}
w = n.Type.Width
if gc.Isconst(r, gc.CTINT) != 0 {
if gc.Isconst(r, gc.CTINT) {
goto oindex_const
}
switch w {
default:
return 0
return false
case 1,
2,
@ -1556,7 +1553,7 @@ oindex:
// load the array (reg)
if l.Ullman > r.Ullman {
if xgen(l, reg, o) != 0 {
if xgen(l, reg, o) {
o |= OAddable
}
}
@ -1575,13 +1572,13 @@ oindex:
// load the array (reg)
if l.Ullman <= r.Ullman {
if xgen(l, reg, o) != 0 {
if xgen(l, reg, o) {
o |= OAddable
}
}
// check bounds
if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
if gc.Debug['B'] == 0 && !n.Bounded {
// check bounds
n4.Op = gc.OXXX
@ -1598,7 +1595,7 @@ oindex:
n2.Type = gc.Types[gc.Simtype[gc.TUINT]]
}
} else {
if gc.Is64(r.Type) != 0 {
if gc.Is64(r.Type) {
t = gc.Types[gc.TUINT64]
}
gc.Nodconst(&n2, gc.Types[gc.TUINT64], l.Type.Bound)
@ -1653,7 +1650,7 @@ oindex:
oindex_const:
v = gc.Mpgetfix(r.Val.U.Xval)
if sudoaddable(as, l, a) != 0 {
if sudoaddable(as, l, a) {
goto oindex_const_sudo
}
@ -1667,7 +1664,7 @@ oindex_const:
regalloc(reg, gc.Types[gc.Tptr], nil)
agen(l, reg)
if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
if gc.Debug['B'] == 0 && !n.Bounded {
n1 = *reg
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
@ -1717,7 +1714,7 @@ oindex_const_sudo:
}
// slice indexed by a constant
if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
if gc.Debug['B'] == 0 && !n.Bounded {
a.Offset += int64(gc.Array_nel)
gc.Nodconst(&n2, gc.Types[gc.TUINT64], v)
p1 = gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), nil, &n2)
@ -1747,9 +1744,9 @@ oindex_const_sudo:
goto yes
yes:
return 1
return true
no:
sudoclean()
return 0
return false
}

View file

@ -44,21 +44,21 @@ const (
)
// do we need the carry bit
func needc(p *obj.Prog) int {
func needc(p *obj.Prog) bool {
var info gc.ProgInfo
for p != nil {
proginfo(&info, p)
if info.Flags&gc.UseCarry != 0 {
return 1
return true
}
if info.Flags&(gc.SetCarry|gc.KillCarry) != 0 {
return 0
return false
}
p = p.Link
}
return 0
return false
}
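The distinction matters because INCQ/DECQ leave the carry flag untouched while ADDQ/SUBQ set it, so the $1/-1 peepholes below are legal only when no later instruction reads CF; with invented registers:
	ADDQ	$1, AX
	ADCQ	$0, DX    // consumes CF, so the ADDQ must not become INCQ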
func rnops(r *gc.Flow) *gc.Flow {
@ -108,7 +108,7 @@ func peep(firstp *obj.Prog) {
switch p.As {
case x86.ALEAL,
x86.ALEAQ:
if regtyp(&p.To) != 0 {
if regtyp(&p.To) {
if p.From.Sym != nil {
if p.From.Index == x86.REG_NONE {
conprop(r)
@ -122,7 +122,7 @@ func peep(firstp *obj.Prog) {
x86.AMOVQ,
x86.AMOVSS,
x86.AMOVSD:
if regtyp(&p.To) != 0 {
if regtyp(&p.To) {
if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST {
conprop(r)
}
@ -143,12 +143,12 @@ loop1:
x86.AMOVQ,
x86.AMOVSS,
x86.AMOVSD:
if regtyp(&p.To) != 0 {
if regtyp(&p.From) != 0 {
if copyprop(g, r) != 0 {
if regtyp(&p.To) {
if regtyp(&p.From) {
if copyprop(g, r) {
excise(r)
t++
} else if subprop(r) != 0 && copyprop(g, r) != 0 {
} else if subprop(r) && copyprop(g, r) {
excise(r)
t++
}
@ -159,7 +159,7 @@ loop1:
x86.AMOVWLZX,
x86.AMOVBLSX,
x86.AMOVWLSX:
if regtyp(&p.To) != 0 {
if regtyp(&p.To) {
r1 = rnops(gc.Uniqs(r))
if r1 != nil {
p1 = r1.Prog
@ -177,7 +177,7 @@ loop1:
x86.AMOVLQSX,
x86.AMOVLQZX,
x86.AMOVQL:
if regtyp(&p.To) != 0 {
if regtyp(&p.To) {
r1 = rnops(gc.Uniqs(r))
if r1 != nil {
p1 = r1.Prog
@ -191,7 +191,7 @@ loop1:
case x86.AADDL,
x86.AADDQ,
x86.AADDW:
if p.From.Type != obj.TYPE_CONST || needc(p.Link) != 0 {
if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
break
}
if p.From.Offset == -1 {
@ -202,7 +202,7 @@ loop1:
} else {
p.As = x86.ADECW
}
p.From = obj.Zprog.From
p.From = obj.Addr{}
break
}
@ -214,14 +214,14 @@ loop1:
} else {
p.As = x86.AINCW
}
p.From = obj.Zprog.From
p.From = obj.Addr{}
break
}
case x86.ASUBL,
x86.ASUBQ,
x86.ASUBW:
if p.From.Type != obj.TYPE_CONST || needc(p.Link) != 0 {
if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
break
}
if p.From.Offset == -1 {
@ -232,7 +232,7 @@ loop1:
} else {
p.As = x86.AINCW
}
p.From = obj.Zprog.From
p.From = obj.Addr{}
break
}
@ -244,7 +244,7 @@ loop1:
} else {
p.As = x86.ADECW
}
p.From = obj.Zprog.From
p.From = obj.Addr{}
break
}
}
@ -269,9 +269,9 @@ loop1:
for r = g.Start; r != nil; r = r.Link {
p = r.Prog
if p.As == x86.AMOVLQZX {
if regtyp(&p.From) != 0 {
if regtyp(&p.From) {
if p.From.Type == p.To.Type && p.From.Reg == p.To.Reg {
if prevl(r, int(p.From.Reg)) != 0 {
if prevl(r, int(p.From.Reg)) {
excise(r)
}
}
@ -279,8 +279,8 @@ loop1:
}
if p.As == x86.AMOVSD {
if regtyp(&p.From) != 0 {
if regtyp(&p.To) != 0 {
if regtyp(&p.From) {
if regtyp(&p.To) {
p.As = x86.AMOVAPD
}
}
@ -298,7 +298,7 @@ loop1:
x86.AMOVL,
x86.AMOVQ,
x86.AMOVLQZX:
if regtyp(&p.To) != 0 && !(regconsttyp(&p.From) != 0) {
if regtyp(&p.To) && !regconsttyp(&p.From) {
pushback(r)
}
}
@ -319,7 +319,7 @@ func pushback(r0 *gc.Flow) {
for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) {
p = r.Prog
if p.As != obj.ANOP {
if !(regconsttyp(&p.From) != 0) || !(regtyp(&p.To) != 0) {
if !regconsttyp(&p.From) || !regtyp(&p.To) {
break
}
if copyu(p, &p0.To, nil) != 0 || copyu(p0, &p.To, nil) != 0 {
@ -398,8 +398,8 @@ func excise(r *gc.Flow) {
gc.Ostats.Ndelmov++
}
func regtyp(a *obj.Addr) int {
return bool2int(a.Type == obj.TYPE_REG && (x86.REG_AX <= a.Reg && a.Reg <= x86.REG_R15 || x86.REG_X0 <= a.Reg && a.Reg <= x86.REG_X15))
func regtyp(a *obj.Addr) bool {
return a.Type == obj.TYPE_REG && (x86.REG_AX <= a.Reg && a.Reg <= x86.REG_R15 || x86.REG_X0 <= a.Reg && a.Reg <= x86.REG_X15)
}
// movb elimination.
@ -418,7 +418,7 @@ func elimshortmov(g *gc.Graph) {
for r = g.Start; r != nil; r = r.Link {
p = r.Prog
if regtyp(&p.To) != 0 {
if regtyp(&p.To) {
switch p.As {
case x86.AINCB,
x86.AINCW:
@ -437,7 +437,7 @@ func elimshortmov(g *gc.Graph) {
p.As = x86.ANOTQ
}
if regtyp(&p.From) != 0 || p.From.Type == obj.TYPE_CONST {
if regtyp(&p.From) || p.From.Type == obj.TYPE_CONST {
// move or arithmetic into partial register.
// from another register or constant can be movl.
// we don't switch to 64-bit arithmetic if it can
@ -449,13 +449,13 @@ func elimshortmov(g *gc.Graph) {
case x86.AADDB,
x86.AADDW:
if !(needc(p.Link) != 0) {
if !needc(p.Link) {
p.As = x86.AADDQ
}
case x86.ASUBB,
x86.ASUBW:
if !(needc(p.Link) != 0) {
if !needc(p.Link) {
p.As = x86.ASUBQ
}
@ -500,23 +500,23 @@ func elimshortmov(g *gc.Graph) {
}
// is 'a' a register or constant?
func regconsttyp(a *obj.Addr) int {
if regtyp(a) != 0 {
return 1
func regconsttyp(a *obj.Addr) bool {
if regtyp(a) {
return true
}
switch a.Type {
case obj.TYPE_CONST,
obj.TYPE_FCONST,
obj.TYPE_SCONST,
obj.TYPE_ADDR: // TODO(rsc): Not all TYPE_ADDRs are constants.
return 1
return true
}
return 0
return false
}
// is reg guaranteed to be truncated by a previous L instruction?
func prevl(r0 *gc.Flow, reg int) int {
func prevl(r0 *gc.Flow, reg int) bool {
var p *obj.Prog
var r *gc.Flow
var info gc.ProgInfo
@ -527,14 +527,14 @@ func prevl(r0 *gc.Flow, reg int) int {
proginfo(&info, p)
if info.Flags&gc.RightWrite != 0 {
if info.Flags&gc.SizeL != 0 {
return 1
return true
}
return 0
return false
}
}
}
return 0
return false
}
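What makes this safe is an amd64 rule: a 32-bit register write zero-extends through bit 63, so after any SizeL write the MOVLQZX handled in the loop above is a no-op; with invented registers:
	MOVL	CX, AX     // hardware clears the upper 32 bits of AX
	MOVLQZX	AX, AX     // redundant; excised once prevl reports true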
/*
@ -551,7 +551,7 @@ func prevl(r0 *gc.Flow, reg int) int {
* hopefully, then the former or latter MOV
* will be eliminated by copy propagation.
*/
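A minimal instance of the rewrite, with invented operands:
	MOVQ	a, AX
	ADDQ	b, AX      // no intervening use of CX
	MOVQ	AX, CX
	=>
	MOVQ	a, CX
	ADDQ	b, CX
	MOVQ	CX, AX
after which copy propagation can typically delete one of the remaining MOVs.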
func subprop(r0 *gc.Flow) int {
func subprop(r0 *gc.Flow) bool {
var p *obj.Prog
var info gc.ProgInfo
var v1 *obj.Addr
@ -564,19 +564,19 @@ func subprop(r0 *gc.Flow) int {
}
p = r0.Prog
v1 = &p.From
if !(regtyp(v1) != 0) {
if !regtyp(v1) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v1))
}
return 0
return false
}
v2 = &p.To
if !(regtyp(v2) != 0) {
if !regtyp(v2) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v2))
}
return 0
return false
}
for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
@ -599,21 +599,21 @@ func subprop(r0 *gc.Flow) int {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tfound %v; return 0\n", p)
}
return 0
return false
}
if info.Reguse|info.Regset != 0 {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tfound %v; return 0\n", p)
}
return 0
return false
}
if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
goto gotit
}
if copyau(&p.From, v2) != 0 || copyau(&p.To, v2) != 0 {
if copyau(&p.From, v2) || copyau(&p.To, v2) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tcopyau %v failed\n", gc.Ctxt.Dconv(v2))
}
@ -631,7 +631,7 @@ func subprop(r0 *gc.Flow) int {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tran off end; return 0\n")
}
return 0
return false
gotit:
copysub(&p.To, v1, v2, 1)
@ -658,7 +658,7 @@ gotit:
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
return 1
return true
}
/*
@ -673,7 +673,7 @@ gotit:
* set v1 F=1
* set v2 return success
*/
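Starting from a register-to-register move, copyprop substitutes the source for later uses of the destination until the destination is redefined, leaving the move dead; sketch with invented registers:
	MOVQ	AX, BX
	ADDQ	BX, CX
	=>
	MOVQ	AX, BX     // no remaining uses of BX; excise removes it
	ADDQ	AX, CX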
func copyprop(g *gc.Graph, r0 *gc.Flow) int {
func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
var p *obj.Prog
var v1 *obj.Addr
var v2 *obj.Addr
@ -684,14 +684,14 @@ func copyprop(g *gc.Graph, r0 *gc.Flow) int {
p = r0.Prog
v1 = &p.From
v2 = &p.To
if copyas(v1, v2) != 0 {
return 1
if copyas(v1, v2) {
return true
}
gactive++
return copy1(v1, v2, r0.S1, 0)
}
func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
var t int
var p *obj.Prog
@ -699,7 +699,7 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
if gc.Debug['P'] != 0 {
fmt.Printf("act set; return 1\n")
}
return 1
return true
}
r.Active = int32(gactive)
@ -711,7 +711,7 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
if gc.Debug['P'] != 0 {
fmt.Printf("%v", p)
}
if !(f != 0) && gc.Uniqp(r) == nil {
if f == 0 && gc.Uniqp(r) == nil {
f = 1
if gc.Debug['P'] != 0 {
fmt.Printf("; merge; f=%d", f)
@ -724,33 +724,33 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
if gc.Debug['P'] != 0 {
fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
}
return 0
return false
case 3: /* set */
if gc.Debug['P'] != 0 {
fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
}
return 1
return true
case 1, /* used, substitute */
4: /* use and set */
if f != 0 {
if !(gc.Debug['P'] != 0) {
return 0
if gc.Debug['P'] == 0 {
return false
}
if t == 4 {
fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
} else {
fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
}
return 0
return false
}
if copyu(p, v2, v1) != 0 {
if gc.Debug['P'] != 0 {
fmt.Printf("; sub fail; return 0\n")
}
return 0
return false
}
if gc.Debug['P'] != 0 {
@ -760,13 +760,13 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
if gc.Debug['P'] != 0 {
fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
}
return 1
return true
}
}
if !(f != 0) {
if f == 0 {
t = copyu(p, v1, nil)
if !(f != 0) && (t == 2 || t == 3 || t == 4) {
if f == 0 && (t == 2 || t == 3 || t == 4) {
f = 1
if gc.Debug['P'] != 0 {
fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
@ -778,13 +778,13 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
fmt.Printf("\n")
}
if r.S2 != nil {
if !(copy1(v1, v2, r.S2, f) != 0) {
return 0
if !copy1(v1, v2, r.S2, f) {
return false
}
}
}
return 1
return true
}
/*
@ -807,7 +807,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 0
}
if copyau(&p.To, v) != 0 {
if copyau(&p.To, v) {
return 1
}
return 0
@ -836,7 +836,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 0
}
if copyau(&p.To, v) != 0 {
if copyau(&p.To, v) {
return 4
}
return 3
@ -858,23 +858,23 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
}
if info.Flags&gc.LeftAddr != 0 {
if copyas(&p.From, v) != 0 {
if copyas(&p.From, v) {
return 2
}
}
if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
if copyas(&p.To, v) != 0 {
if copyas(&p.To, v) {
return 2
}
}
if info.Flags&gc.RightWrite != 0 {
if copyas(&p.To, v) != 0 {
if copyas(&p.To, v) {
if s != nil {
return copysub(&p.From, v, s, 1)
}
if copyau(&p.From, v) != 0 {
if copyau(&p.From, v) {
return 4
}
return 3
@ -889,10 +889,10 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return copysub(&p.To, v, s, 1)
}
if copyau(&p.From, v) != 0 {
if copyau(&p.From, v) {
return 1
}
if copyau(&p.To, v) != 0 {
if copyau(&p.To, v) {
return 1
}
}
@ -905,7 +905,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
* could be set/use depending on
* semantics
*/
func copyas(a *obj.Addr, v *obj.Addr) int {
func copyas(a *obj.Addr, v *obj.Addr) bool {
if x86.REG_AL <= a.Reg && a.Reg <= x86.REG_R15B {
gc.Fatal("use of byte register")
}
@ -914,62 +914,62 @@ func copyas(a *obj.Addr, v *obj.Addr) int {
}
if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
return 0
return false
}
if regtyp(v) != 0 {
return 1
if regtyp(v) {
return true
}
if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
if v.Offset == a.Offset {
return 1
return true
}
}
return 0
return false
}
func sameaddr(a *obj.Addr, v *obj.Addr) int {
func sameaddr(a *obj.Addr, v *obj.Addr) bool {
if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
return 0
return false
}
if regtyp(v) != 0 {
return 1
if regtyp(v) {
return true
}
if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
if v.Offset == a.Offset {
return 1
return true
}
}
return 0
return false
}
/*
* either direct or indirect
*/
func copyau(a *obj.Addr, v *obj.Addr) int {
if copyas(a, v) != 0 {
func copyau(a *obj.Addr, v *obj.Addr) bool {
if copyas(a, v) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tcopyau: copyas returned 1\n")
}
return 1
return true
}
if regtyp(v) != 0 {
if regtyp(v) {
if a.Type == obj.TYPE_MEM && a.Reg == v.Reg {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tcopyau: found indir use - return 1\n")
}
return 1
return true
}
if a.Index == v.Reg {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tcopyau: found index use - return 1\n")
}
return 1
return true
}
}
return 0
return false
}
/*
@ -979,7 +979,7 @@ func copyau(a *obj.Addr, v *obj.Addr) int {
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
var reg int
if copyas(a, v) != 0 {
if copyas(a, v) {
reg = int(s.Reg)
if reg >= x86.REG_AX && reg <= x86.REG_R15 || reg >= x86.REG_X0 && reg <= x86.REG_X0+15 {
if f != 0 {
@ -990,7 +990,7 @@ func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
return 0
}
if regtyp(v) != 0 {
if regtyp(v) {
reg = int(v.Reg)
if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
if (s.Reg == x86.REG_BP || s.Reg == x86.REG_R13) && a.Index != x86.REG_NONE {
@ -1068,10 +1068,10 @@ loop:
}
}
func smallindir(a *obj.Addr, reg *obj.Addr) int {
return bool2int(regtyp(reg) != 0 && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == x86.REG_NONE && 0 <= a.Offset && a.Offset < 4096)
func smallindir(a *obj.Addr, reg *obj.Addr) bool {
return regtyp(reg) && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == x86.REG_NONE && 0 <= a.Offset && a.Offset < 4096
}
func stackaddr(a *obj.Addr) int {
return bool2int(a.Type == obj.TYPE_REG && a.Reg == x86.REG_SP)
func stackaddr(a *obj.Addr) bool {
return a.Type == obj.TYPE_REG && a.Reg == x86.REG_SP
}

View file

@ -84,7 +84,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.OSLICESTR,
gc.OSLICE3,
gc.OSLICE3ARR:
if res.Op != gc.ONAME || !(res.Addable != 0) {
if res.Op != gc.ONAME || res.Addable == 0 {
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
cgen(&n1, res)
@ -94,7 +94,7 @@ func cgen(n *gc.Node, res *gc.Node) {
return
case gc.OEFACE:
if res.Op != gc.ONAME || !(res.Addable != 0) {
if res.Op != gc.ONAME || res.Addable == 0 {
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
cgen(&n1, res)
@ -117,7 +117,7 @@ func cgen(n *gc.Node, res *gc.Node) {
}
// structs etc get handled specially
if gc.Isfat(n.Type) != 0 {
if gc.Isfat(n.Type) {
if n.Type.Width < 0 {
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
@ -131,12 +131,12 @@ func cgen(n *gc.Node, res *gc.Node) {
switch n.Op {
case gc.OSPTR,
gc.OLEN:
if gc.Isslice(n.Left.Type) != 0 || gc.Istype(n.Left.Type, gc.TSTRING) != 0 {
if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
n.Addable = n.Left.Addable
}
case gc.OCAP:
if gc.Isslice(n.Left.Type) != 0 {
if gc.Isslice(n.Left.Type) {
n.Addable = n.Left.Addable
}
@ -151,7 +151,7 @@ func cgen(n *gc.Node, res *gc.Node) {
}
// if both are not addressable, use a temporary.
if !(n.Addable != 0) && !(res.Addable != 0) {
if n.Addable == 0 && res.Addable == 0 {
// could use regalloc here sometimes,
// but have to check for ullman >= UINF.
gc.Tempname(&n1, n.Type)
@ -163,7 +163,7 @@ func cgen(n *gc.Node, res *gc.Node) {
// if result is not addressable directly but n is,
// compute its address and then store via the address.
if !(res.Addable != 0) {
if res.Addable == 0 {
igen(res, &n1, nil)
cgen(n, &n1)
regfree(&n1)
@ -171,7 +171,7 @@ func cgen(n *gc.Node, res *gc.Node) {
}
// complex types
if gc.Complexop(n, res) != 0 {
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
return
}
@ -197,7 +197,7 @@ func cgen(n *gc.Node, res *gc.Node) {
}
// 64-bit ops are hard on 32-bit machine.
if gc.Is64(n.Type) != 0 || gc.Is64(res.Type) != 0 || n.Left != nil && gc.Is64(n.Left.Type) != 0 {
if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Left != nil && gc.Is64(n.Left.Type) {
switch n.Op {
// math goes to cgen64.
case gc.OMINUS,
@ -246,11 +246,11 @@ func cgen(n *gc.Node, res *gc.Node) {
p1 = gc.Gbranch(obj.AJMP, nil, 0)
p2 = gc.Pc
gmove(gc.Nodbool(1), res)
gmove(gc.Nodbool(true), res)
p3 = gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n, true, 0, p2)
gmove(gc.Nodbool(0), res)
gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
return
@ -288,7 +288,7 @@ func cgen(n *gc.Node, res *gc.Node) {
cgen_hmul(nl, nr, res)
case gc.OCONV:
if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) != 0 {
if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
cgen(nl, res)
break
}
@ -317,7 +317,7 @@ func cgen(n *gc.Node, res *gc.Node) {
// pointer is the first word of string or slice.
case gc.OSPTR:
if gc.Isconst(nl, gc.CTSTR) != 0 {
if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n1, gc.Types[gc.Tptr], res)
p1 = gins(i386.ALEAL, nil, &n1)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
@ -332,7 +332,7 @@ func cgen(n *gc.Node, res *gc.Node) {
regfree(&n1)
case gc.OLEN:
if gc.Istype(nl.Type, gc.TMAP) != 0 || gc.Istype(nl.Type, gc.TCHAN) != 0 {
if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
// map has len in the first 32-bit word.
// a zero pointer means zero length
gc.Tempname(&n1, gc.Types[gc.Tptr])
@ -358,7 +358,7 @@ func cgen(n *gc.Node, res *gc.Node) {
break
}
if gc.Istype(nl.Type, gc.TSTRING) != 0 || gc.Isslice(nl.Type) != 0 {
if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
// both slice and string have len one pointer into the struct.
igen(nl, &n1, res)
@ -372,7 +372,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
case gc.OCAP:
if gc.Istype(nl.Type, gc.TCHAN) != 0 {
if gc.Istype(nl.Type, gc.TCHAN) {
// chan has cap in the second 32-bit word.
// a zero pointer means zero length
gc.Tempname(&n1, gc.Types[gc.Tptr])
@ -399,7 +399,7 @@ func cgen(n *gc.Node, res *gc.Node) {
break
}
if gc.Isslice(nl.Type) != 0 {
if gc.Isslice(nl.Type) {
igen(nl, &n1, res)
n1.Type = gc.Types[gc.TUINT32]
n1.Xoffset += int64(gc.Array_cap)
@ -432,7 +432,7 @@ func cgen(n *gc.Node, res *gc.Node) {
case gc.OLSH,
gc.ORSH,
gc.OLROT:
cgen_shift(int(n.Op), int(n.Bounded), nl, nr, res)
cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
}
return
@ -445,7 +445,7 @@ sbop: // symmetric binary
}
abop: // asymmetric binary
if gc.Smallintconst(nr) != 0 {
if gc.Smallintconst(nr) {
mgen(nl, &n1, res)
regalloc(&n2, nl.Type, &n1)
gmove(&n1, &n2)
@ -496,7 +496,7 @@ func igenindex(n *gc.Node, res *gc.Node, bounded int) *obj.Prog {
var hi gc.Node
var zero gc.Node
if !(gc.Is64(n.Type) != 0) {
if !gc.Is64(n.Type) {
if n.Addable != 0 {
// nothing to do.
*res = *n
@ -542,7 +542,7 @@ func agen(n *gc.Node, res *gc.Node) {
var v uint64
var p1 *obj.Prog
var p2 *obj.Prog
var bounded int
var bounded bool
if gc.Debug['g'] != 0 {
gc.Dump("\nagen-res", res)
@ -557,7 +557,7 @@ func agen(n *gc.Node, res *gc.Node) {
n = n.Left
}
if gc.Isconst(n, gc.CTNIL) != 0 && n.Type.Width > int64(gc.Widthptr) {
if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
// The generated code is just going to panic, so it need not
@ -593,7 +593,6 @@ func agen(n *gc.Node, res *gc.Node) {
switch n.Op {
default:
gc.Fatal("agen %v", gc.Oconv(int(n.Op), 0))
fallthrough
case gc.OCALLMETH:
gc.Cgen_callmeth(n, 0)
@ -624,32 +623,32 @@ func agen(n *gc.Node, res *gc.Node) {
case gc.OINDEX:
p2 = nil // to be patched to panicindex.
w = uint32(n.Type.Width)
bounded = bool2int(gc.Debug['B'] != 0 || n.Bounded != 0)
bounded = gc.Debug['B'] != 0 || n.Bounded
if nr.Addable != 0 {
// Generate &nl first, and move nr into register.
if !(gc.Isconst(nl, gc.CTSTR) != 0) {
if !gc.Isconst(nl, gc.CTSTR) {
igen(nl, &n3, res)
}
if !(gc.Isconst(nr, gc.CTINT) != 0) {
p2 = igenindex(nr, &tmp, bounded)
if !gc.Isconst(nr, gc.CTINT) {
p2 = igenindex(nr, &tmp, bool2int(bounded))
regalloc(&n1, tmp.Type, nil)
gmove(&tmp, &n1)
}
} else if nl.Addable != 0 {
// Generate nr first, and move &nl into register.
if !(gc.Isconst(nr, gc.CTINT) != 0) {
p2 = igenindex(nr, &tmp, bounded)
if !gc.Isconst(nr, gc.CTINT) {
p2 = igenindex(nr, &tmp, bool2int(bounded))
regalloc(&n1, tmp.Type, nil)
gmove(&tmp, &n1)
}
if !(gc.Isconst(nl, gc.CTSTR) != 0) {
if !gc.Isconst(nl, gc.CTSTR) {
igen(nl, &n3, res)
}
} else {
p2 = igenindex(nr, &tmp, bounded)
p2 = igenindex(nr, &tmp, bool2int(bounded))
nr = &tmp
if !(gc.Isconst(nl, gc.CTSTR) != 0) {
if !gc.Isconst(nl, gc.CTSTR) {
igen(nl, &n3, res)
}
regalloc(&n1, tmp.Type, nil)
@ -657,7 +656,7 @@ func agen(n *gc.Node, res *gc.Node) {
}
// For fixed array we really want the pointer in n3.
if gc.Isfixedarray(nl.Type) != 0 {
if gc.Isfixedarray(nl.Type) {
regalloc(&n2, gc.Types[gc.Tptr], &n3)
agen(&n3, &n2)
regfree(&n3)
@ -670,13 +669,13 @@ func agen(n *gc.Node, res *gc.Node) {
// w is width
// constant index
if gc.Isconst(nr, gc.CTINT) != 0 {
if gc.Isconst(nl, gc.CTSTR) != 0 {
if gc.Isconst(nr, gc.CTINT) {
if gc.Isconst(nl, gc.CTSTR) {
gc.Fatal("constant string constant index") // front end should handle
}
v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
if gc.Debug['B'] == 0 && !n.Bounded {
nlen = n3
nlen.Type = gc.Types[gc.TUINT32]
nlen.Xoffset += int64(gc.Array_nel)
@ -716,13 +715,13 @@ func agen(n *gc.Node, res *gc.Node) {
gmove(&n1, &n2)
regfree(&n1)
if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
if gc.Debug['B'] == 0 && !n.Bounded {
// check bounds
t = gc.Types[gc.TUINT32]
if gc.Isconst(nl, gc.CTSTR) != 0 {
if gc.Isconst(nl, gc.CTSTR) {
gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval.S)))
} else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
nlen = n3
nlen.Type = t
nlen.Xoffset += int64(gc.Array_nel)
@ -739,7 +738,7 @@ func agen(n *gc.Node, res *gc.Node) {
gc.Patch(p1, gc.Pc)
}
if gc.Isconst(nl, gc.CTSTR) != 0 {
if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n3, gc.Types[gc.Tptr], res)
p1 = gins(i386.ALEAL, nil, &n3)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
@ -751,7 +750,7 @@ func agen(n *gc.Node, res *gc.Node) {
// Load base pointer in n3.
regalloc(&tmp, gc.Types[gc.Tptr], &n3)
if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
n3.Type = gc.Types[gc.Tptr]
n3.Xoffset += int64(gc.Array_array)
gmove(&n3, &tmp)
@ -789,7 +788,7 @@ func agen(n *gc.Node, res *gc.Node) {
}
// should only get here for heap vars or paramref
if !(n.Class&gc.PHEAP != 0) && n.Class != gc.PPARAMREF {
if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
gc.Dump("bad agen", n)
gc.Fatal("agen: bad ONAME class %#x", n.Class)
}
@ -813,7 +812,7 @@ func agen(n *gc.Node, res *gc.Node) {
case gc.ODOTPTR:
t = nl.Type
if !(gc.Isptr[t.Etype] != 0) {
if gc.Isptr[t.Etype] == 0 {
gc.Fatal("agen: not ptr %v", gc.Nconv(n, 0))
}
cgen(nl, res)
@ -919,10 +918,10 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
// Could do the same for slice except that we need
// to use the real index for the bounds checking.
case gc.OINDEX:
if gc.Isfixedarray(n.Left.Type) != 0 || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type) != 0) {
if gc.Isconst(n.Right, gc.CTINT) != 0 {
if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type)) {
if gc.Isconst(n.Right, gc.CTINT) {
// Compute &a.
if !(gc.Isptr[n.Left.Type.Etype] != 0) {
if gc.Isptr[n.Left.Type.Etype] == 0 {
igen(n.Left, a, res)
} else {
igen(n.Left, &n1, res)
@ -979,7 +978,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
}
if n == nil {
n = gc.Nodbool(1)
n = gc.Nodbool(true)
}
if n.Ninit != nil {
@ -1021,13 +1020,13 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
// need to ask if it is bool?
case gc.OLITERAL:
if !true_ == !(n.Val.U.Bval != 0) {
if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
}
return
case gc.ONAME:
if !(n.Addable != 0) {
if n.Addable == 0 {
goto def
}
gc.Nodconst(&n1, n.Type, 0)
@ -1101,7 +1100,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
nr = r
}
if gc.Isslice(nl.Type) != 0 {
if gc.Isslice(nl.Type) {
// front end should only leave cmp to literal nil
if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
gc.Yyerror("illegal slice comparison")
@ -1119,7 +1118,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
break
}
if gc.Isinter(nl.Type) != 0 {
if gc.Isinter(nl.Type) {
// front end should only leave cmp to literal nil
if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
gc.Yyerror("illegal interface comparison")
@ -1141,14 +1140,14 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
break
}
if gc.Is64(nr.Type) != 0 {
if !(nl.Addable != 0) || gc.Isconst(nl, gc.CTINT) != 0 {
if gc.Is64(nr.Type) {
if nl.Addable == 0 || gc.Isconst(nl, gc.CTINT) {
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
}
if !(nr.Addable != 0) {
if nr.Addable == 0 {
gc.Tempname(&n2, nr.Type)
cgen(nr, &n2)
nr = &n2
@ -1159,13 +1158,13 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
}
if nr.Ullman >= gc.UINF {
if !(nl.Addable != 0) {
if nl.Addable == 0 {
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
}
if !(nr.Addable != 0) {
if nr.Addable == 0 {
gc.Tempname(&tmp, nr.Type)
cgen(nr, &tmp)
nr = &tmp
@ -1177,19 +1176,19 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
goto cmp
}
if !(nl.Addable != 0) {
if nl.Addable == 0 {
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
}
if gc.Smallintconst(nr) != 0 {
if gc.Smallintconst(nr) {
gins(optoas(gc.OCMP, nr.Type), nl, nr)
gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
break
}
if !(nr.Addable != 0) {
if nr.Addable == 0 {
gc.Tempname(&tmp, nr.Type)
cgen(nr, &tmp)
nr = &tmp
@ -1252,14 +1251,14 @@ func stkof(n *gc.Node) int32 {
case gc.OINDEX:
t = n.Left.Type
if !(gc.Isfixedarray(t) != 0) {
if !gc.Isfixedarray(t) {
break
}
off = stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
if gc.Isconst(n.Right, gc.CTINT) != 0 {
if gc.Isconst(n.Right, gc.CTINT) {
return int32(int64(off) + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval))
}
return 1000
@ -1334,7 +1333,7 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
}
// Avoid taking the address for simple enough types.
if componentgen(n, res) != 0 {
if componentgen(n, res) {
return
}
@ -1360,10 +1359,10 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
gc.Tempname(&tsrc, gc.Types[gc.Tptr])
gc.Tempname(&tdst, gc.Types[gc.Tptr])
if !(n.Addable != 0) {
if n.Addable == 0 {
agen(n, &tsrc)
}
if !(res.Addable != 0) {
if res.Addable == 0 {
agen(res, &tdst)
}
if n.Addable != 0 {
@ -1459,19 +1458,19 @@ func sgen(n *gc.Node, res *gc.Node, w int64) {
}
}
func cadable(n *gc.Node) int {
if !(n.Addable != 0) {
func cadable(n *gc.Node) bool {
if n.Addable == 0 {
// don't know how it happens,
// but it does
return 0
return false
}
switch n.Op {
case gc.ONAME:
return 1
return true
}
return 0
return false
}
/*
@ -1482,7 +1481,7 @@ func cadable(n *gc.Node) int {
* nr is N when assigning a zero value.
* return 1 if can do, 0 if can't.
*/
func componentgen(nr *gc.Node, nl *gc.Node) int {
func componentgen(nr *gc.Node, nl *gc.Node) bool {
var nodl gc.Node
var nodr gc.Node
var tmp gc.Node
@ -1504,12 +1503,12 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
t = nl.Type
// Slices are ok.
if gc.Isslice(t) != 0 {
if gc.Isslice(t) {
break
}
// Small arrays are ok.
if t.Bound > 0 && t.Bound <= 3 && !(gc.Isfat(t.Type) != 0) {
if t.Bound > 0 && t.Bound <= 3 && !gc.Isfat(t.Type) {
break
}
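For such arrays componentgen copies element by element rather than taking addresses and doing a bulk move. The effect at the source level, as a minimal sketch:

func copy3(dst, src *[3]int32) {
	// three scalar moves; no pointer to the array is ever materialized
	dst[0], dst[1], dst[2] = src[0], src[1], src[2]
}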
@ -1521,7 +1520,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
fldcount = 0
for t = nl.Type.Type; t != nil; t = t.Down {
if gc.Isfat(t.Type) != 0 {
if gc.Isfat(t.Type) {
goto no
}
if t.Etype != gc.TFIELD {
@ -1540,8 +1539,8 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
}
nodl = *nl
if !(cadable(nl) != 0) {
if nr != nil && !(cadable(nr) != 0) {
if !cadable(nl) {
if nr != nil && !cadable(nr) {
goto no
}
igen(nl, &nodl, nil)
@ -1550,7 +1549,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
if nr != nil {
nodr = *nr
if !(cadable(nr) != 0) {
if !cadable(nr) {
igen(nr, &nodr, nil)
freer = 1
}
@ -1578,7 +1577,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
gc.Gvardef(nl)
}
t = nl.Type
if !(gc.Isslice(t) != 0) {
if !gc.Isslice(t) {
nodl.Type = t.Type
nodr.Type = nodl.Type
for fldcount = 0; fldcount < t.Bound; fldcount++ {
@ -1718,7 +1717,7 @@ no:
if freel != 0 {
regfree(&nodl)
}
return 0
return false
yes:
if freer != 0 {
@ -1727,5 +1726,5 @@ yes:
if freel != 0 {
regfree(&nodl)
}
return 1
return true
}


@ -44,7 +44,6 @@ func cgen64(n *gc.Node, res *gc.Node) {
switch n.Op {
default:
gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
fallthrough
case gc.OMINUS:
cgen(n.Left, res)
@ -79,13 +78,13 @@ func cgen64(n *gc.Node, res *gc.Node) {
l = n.Left
r = n.Right
if !(l.Addable != 0) {
if l.Addable == 0 {
gc.Tempname(&t1, l.Type)
cgen(l, &t1)
l = &t1
}
if r != nil && !(r.Addable != 0) {
if r != nil && r.Addable == 0 {
gc.Tempname(&t2, r.Type)
cgen(r, &t2)
r = &t2
@ -98,7 +97,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
// Setup for binary operation.
split64(l, &lo1, &hi1)
if gc.Is64(r.Type) != 0 {
if gc.Is64(r.Type) {
split64(r, &lo2, &hi2)
}
@ -196,7 +195,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
if r.Op == gc.OLITERAL {
v = uint64(gc.Mpgetfix(r.Val.U.Xval))
if v >= 64 {
if gc.Is64(r.Type) != 0 {
if gc.Is64(r.Type) {
splitclean()
}
splitclean()
@ -208,7 +207,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
}
if v >= 32 {
if gc.Is64(r.Type) != 0 {
if gc.Is64(r.Type) {
splitclean()
}
split64(res, &lo2, &hi2)
@ -243,7 +242,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
// if high bits are set, zero value.
p1 = nil
if gc.Is64(r.Type) != 0 {
if gc.Is64(r.Type) {
gins(i386.ACMPL, &hi2, ncon(0))
p1 = gc.Gbranch(i386.AJNE, nil, +1)
gins(i386.AMOVL, &lo2, &cx)
@ -285,7 +284,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
if r.Op == gc.OLITERAL {
v = uint64(gc.Mpgetfix(r.Val.U.Xval))
if v >= 64 {
if gc.Is64(r.Type) != 0 {
if gc.Is64(r.Type) {
splitclean()
}
splitclean()
@ -305,7 +304,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
}
if v >= 32 {
if gc.Is64(r.Type) != 0 {
if gc.Is64(r.Type) {
splitclean()
}
split64(res, &lo2, &hi2)
@ -344,7 +343,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
// if high bits are set, zero value.
p1 = nil
if gc.Is64(r.Type) != 0 {
if gc.Is64(r.Type) {
gins(i386.ACMPL, &hi2, ncon(0))
p1 = gc.Gbranch(i386.AJNE, nil, +1)
gins(i386.AMOVL, &lo2, &cx)
@ -496,7 +495,7 @@ func cgen64(n *gc.Node, res *gc.Node) {
gins(optoas(int(n.Op), lo1.Type), &hi2, &dx)
}
if gc.Is64(r.Type) != 0 {
if gc.Is64(r.Type) {
splitclean()
}
splitclean()
@ -542,7 +541,6 @@ func cmp64(nl *gc.Node, nr *gc.Node, op int, likely int, to *obj.Prog) {
switch op {
default:
gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
fallthrough
// cmp hi
// jne L
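The comment above sketches cmp64's ladder: compare the high words first, branch if they already decide the answer, and only then compare the low words. Portably, for a signed 64-bit value split into word pairs (a sketch, not the backend's representation):

// less64 reports a < b for 64-bit values held as (hi, lo) word pairs.
func less64(ahi int32, alo uint32, bhi int32, blo uint32) bool {
	if ahi != bhi {
		return ahi < bhi // signed compare decides on the high words
	}
	return alo < blo // otherwise an unsigned compare on the low words
}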


@ -36,7 +36,7 @@ func defframe(ptxt *obj.Prog) {
ax = 0
for l = gc.Curfn.Dcl; l != nil; l = l.Next {
n = l.N
if !(n.Needzero != 0) {
if n.Needzero == 0 {
continue
}
if n.Class != gc.PAUTO {
@ -129,7 +129,7 @@ func clearfat(nl *gc.Node) {
w = uint32(nl.Type.Width)
// Avoid taking the address for simple enough types.
if componentgen(nil, nl) != 0 {
if componentgen(nil, nl) {
return
}
@ -151,7 +151,7 @@ func clearfat(nl *gc.Node) {
for {
tmp14 := q
q--
if !(tmp14 > 0) {
if tmp14 <= 0 {
break
}
n1.Type = z.Type
@ -163,7 +163,7 @@ func clearfat(nl *gc.Node) {
for {
tmp15 := c
c--
if !(tmp15 > 0) {
if tmp15 <= 0 {
break
}
n1.Type = z.Type
@ -252,7 +252,7 @@ func ginscall(f *gc.Node, proc int) {
p = gins(obj.ACALL, nil, f)
gc.Afunclit(&p.To, f)
if proc == -1 || gc.Noreturn(p) != 0 {
if proc == -1 || gc.Noreturn(p) {
gins(obj.AUNDEF, nil, nil)
}
break
@ -327,7 +327,7 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
i = i.Left // interface
if !(i.Addable != 0) {
if i.Addable == 0 {
gc.Tempname(&tmpi, i.Type)
cgen(i, &tmpi)
i = &tmpi
@ -563,9 +563,9 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
check = 0
if gc.Issigned[t.Etype] != 0 {
check = 1
if gc.Isconst(nl, gc.CTINT) != 0 && gc.Mpgetfix(nl.Val.U.Xval) != -1<<uint64(t.Width*8-1) {
if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -1<<uint64(t.Width*8-1) {
check = 0
} else if gc.Isconst(nr, gc.CTINT) != 0 && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
check = 0
}
}
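check guards the one case where the hardware and the language disagree: Go defines the most negative value divided by -1 to wrap to itself, while the machine divide instruction faults on that operand pair, so the generated code branches around the divide unless a constant operand rules the case out. The semantics being protected, as a runnable sketch:

package main

import "fmt"

func main() {
	x := int32(-1 << 31) // most negative int32
	fmt.Println(x / -1)  // -2147483648: Go wraps where a raw IDIV would trap
	fmt.Println(x % -1)  // 0
}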
@ -596,7 +596,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
cgen(nr, &t2)
}
if !(gc.Samereg(ax, res) != 0) && !(gc.Samereg(dx, res) != 0) {
if !gc.Samereg(ax, res) && !gc.Samereg(dx, res) {
regalloc(&n1, t, res)
} else {
regalloc(&n1, t, nil)
@ -639,7 +639,7 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node, ax *gc.Node, dx *gc.N
gc.Patch(p1, gc.Pc)
}
if !(gc.Issigned[t.Etype] != 0) {
if gc.Issigned[t.Etype] == 0 {
gc.Nodconst(&nz, t, 0)
gmove(&nz, dx)
} else {
@ -668,7 +668,7 @@ func savex(dr int, x *gc.Node, oldx *gc.Node, res *gc.Node, t *gc.Type) {
// and not the destination
*oldx = gc.Node{}
if r > 0 && !(gc.Samereg(x, res) != 0) {
if r > 0 && !gc.Samereg(x, res) {
gc.Tempname(oldx, gc.Types[gc.TINT32])
gmove(x, oldx)
}
@ -697,7 +697,7 @@ func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var olddx gc.Node
var t *gc.Type
if gc.Is64(nl.Type) != 0 {
if gc.Is64(nl.Type) {
gc.Fatal("cgen_div %v", gc.Tconv(nl.Type, 0))
}
@ -718,7 +718,7 @@ func cgen_div(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
* res = nl << nr
* res = nl >> nr
*/
func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var n1 gc.Node
var n2 gc.Node
var nt gc.Node
@ -761,7 +761,7 @@ func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
oldcx = gc.Node{}
gc.Nodreg(&cx, gc.Types[gc.TUINT32], i386.REG_CX)
if reg[i386.REG_CX] > 1 && !(gc.Samereg(&cx, res) != 0) {
if reg[i386.REG_CX] > 1 && !gc.Samereg(&cx, res) {
gc.Tempname(&oldcx, gc.Types[gc.TUINT32])
gmove(&cx, &oldcx)
}
@ -774,7 +774,7 @@ func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
}
if gc.Samereg(&cx, res) != 0 {
if gc.Samereg(&cx, res) {
regalloc(&n2, nl.Type, nil)
} else {
regalloc(&n2, nl.Type, res)
@ -788,7 +788,7 @@ func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
}
// test and fix up large shifts
if bounded != 0 {
if bounded {
if nr.Type.Width > 4 {
// delayed reg alloc
gc.Nodreg(&n1, gc.Types[gc.TUINT32], i386.REG_CX)
@ -943,11 +943,11 @@ func cgen_float(n *gc.Node, res *gc.Node) {
gc.OGE:
p1 = gc.Gbranch(obj.AJMP, nil, 0)
p2 = gc.Pc
gmove(gc.Nodbool(1), res)
gmove(gc.Nodbool(true), res)
p3 = gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n, true, 0, p2)
gmove(gc.Nodbool(0), res)
gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
return
@ -956,7 +956,7 @@ func cgen_float(n *gc.Node, res *gc.Node) {
return
case gc.OCONV:
if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) != 0 {
if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
cgen(nl, res)
return
}
@ -1114,7 +1114,7 @@ func bgen_float(n *gc.Node, true_ int, likely int, to *obj.Prog) {
nl = n.Left
nr = n.Right
a = int(n.Op)
if !(true_ != 0) {
if true_ == 0 {
// brcom is not valid on floats when NaN is involved.
p1 = gc.Gbranch(obj.AJMP, nil, 0)
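brcom would replace a comparison with its complement (OLT with OGE, and so on), which is unsound for floats: with a NaN operand, a comparison and its complement are both false. So bgen_float keeps the original condition and inverts the branch structure instead. The IEEE behavior in question:

package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	fmt.Println(nan < 1, nan >= 1) // false false: both fail on NaN
	fmt.Println(!(nan < 1))        // true, so !(a < b) is not (a >= b)
}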
@ -1183,13 +1183,13 @@ x87:
goto ret
sse:
if !(nl.Addable != 0) {
if nl.Addable == 0 {
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
}
if !(nr.Addable != 0) {
if nr.Addable == 0 {
gc.Tempname(&tmp, nr.Type)
cgen(nr, &tmp)
nr = &tmp
@ -1286,7 +1286,7 @@ func expandchecks(firstp *obj.Prog) {
p2.From.Type = obj.TYPE_REG
p2.From.Reg = i386.REG_AX
if regtyp(&p.From) != 0 {
if regtyp(&p.From) {
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = p.From.Reg
} else {


@ -601,7 +601,7 @@ func gclean() {
}
}
func anyregalloc() int {
func anyregalloc() bool {
var i int
var j int
@ -614,16 +614,16 @@ func anyregalloc() int {
goto ok
}
}
return 1
return true
ok:
}
for i = i386.REG_X0; i <= i386.REG_X7; i++ {
if reg[i] != 0 {
return 1
return true
}
}
return 0
return false
}
/*
@ -644,7 +644,6 @@ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
case gc.TINT64,
gc.TUINT64:
gc.Fatal("regalloc64")
fallthrough
case gc.TINT8,
gc.TUINT8,
@ -677,7 +676,7 @@ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
case gc.TFLOAT32,
gc.TFLOAT64:
if !(gc.Use_sse != 0) {
if gc.Use_sse == 0 {
i = i386.REG_F0
goto out
}
@ -798,7 +797,7 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
var n1 gc.Node
var i int64
if !(gc.Is64(n.Type) != 0) {
if !gc.Is64(n.Type) {
gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
}
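split64 produces the two 32-bit halves that the rest of the 386 code addresses separately. The decomposition, as a sketch (little-endian: the low half sits at byte offset 0, the high half at offset 4):

func halves(v uint64) (lo, hi uint32) {
	return uint32(v), uint32(v >> 32)
}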
@ -811,7 +810,7 @@ func split64(n *gc.Node, lo *gc.Node, hi *gc.Node) {
default:
switch n.Op {
default:
if !(dotaddable(n, &n1) != 0) {
if !dotaddable(n, &n1) {
igen(n, &n1, nil)
sclean[nsclean-1] = n1
}
@ -934,7 +933,7 @@ func gmove(f *gc.Node, t *gc.Node) {
// cannot have two integer memory operands;
// except 64-bit, which always copies via registers anyway.
if gc.Isint[ft] != 0 && gc.Isint[tt] != 0 && !(gc.Is64(f.Type) != 0) && !(gc.Is64(t.Type) != 0) && gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
if gc.Isint[ft] != 0 && gc.Isint[tt] != 0 && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
@ -1200,7 +1199,7 @@ func floatmove(f *gc.Node, t *gc.Node) {
cvt = t.Type
// cannot have two floating point memory operands.
if gc.Isfloat[ft] != 0 && gc.Isfloat[tt] != 0 && gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
if gc.Isfloat[ft] != 0 && gc.Isfloat[tt] != 0 && gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
@ -1211,7 +1210,7 @@ func floatmove(f *gc.Node, t *gc.Node) {
ft = gc.Simsimtype(con.Type)
// some constants can't move directly to memory.
if gc.Ismem(t) != 0 {
if gc.Ismem(t) {
// float constants come from memory.
if gc.Isfloat[tt] != 0 {
goto hard
@ -1269,7 +1268,7 @@ func floatmove(f *gc.Node, t *gc.Node) {
case gc.TFLOAT32<<16 | gc.TUINT64,
gc.TFLOAT64<<16 | gc.TUINT64:
if !(gc.Ismem(f) != 0) {
if !gc.Ismem(f) {
cvt = f.Type
goto hardmem
}
@ -1500,7 +1499,6 @@ func floatmove_387(f *gc.Node, t *gc.Node) {
switch tt {
default:
gc.Fatal("gmove %v", gc.Nconv(t, 0))
fallthrough
case gc.TINT8:
gins(i386.ACMPL, &t1, ncon(-0x80&(1<<32-1)))
@ -1595,7 +1593,7 @@ func floatmove_387(f *gc.Node, t *gc.Node) {
*/
case gc.TFLOAT32<<16 | gc.TFLOAT32,
gc.TFLOAT64<<16 | gc.TFLOAT64:
if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
@ -1609,7 +1607,7 @@ func floatmove_387(f *gc.Node, t *gc.Node) {
if ft == gc.TFLOAT64 {
a = i386.AFMOVD
}
if gc.Ismem(t) != 0 {
if gc.Ismem(t) {
if f.Op != gc.OREGISTER || f.Val.U.Reg != i386.REG_F0 {
gc.Fatal("gmove %v", gc.Nconv(f, 0))
}
@ -1620,7 +1618,7 @@ func floatmove_387(f *gc.Node, t *gc.Node) {
}
case gc.TFLOAT32<<16 | gc.TFLOAT64:
if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
@ -1638,7 +1636,7 @@ func floatmove_387(f *gc.Node, t *gc.Node) {
return
case gc.TFLOAT64<<16 | gc.TFLOAT32:
if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
@ -1810,9 +1808,9 @@ rdst:
return
}
func samaddr(f *gc.Node, t *gc.Node) int {
func samaddr(f *gc.Node, t *gc.Node) bool {
if f.Op != t.Op {
return 0
return false
}
switch f.Op {
@ -1820,10 +1818,10 @@ func samaddr(f *gc.Node, t *gc.Node) int {
if f.Val.U.Reg != t.Val.U.Reg {
break
}
return 1
return true
}
return 0
return false
}
/*
@ -1850,12 +1848,12 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
case i386.AMOVB,
i386.AMOVW,
i386.AMOVL:
if f != nil && t != nil && samaddr(f, t) != 0 {
if f != nil && t != nil && samaddr(f, t) {
return nil
}
case i386.ALEAL:
if f != nil && gc.Isconst(f, gc.CTNIL) != 0 {
if f != nil && gc.Isconst(f, gc.CTNIL) {
gc.Fatal("gins LEAL nil %v", gc.Tconv(f.Type, 0))
}
}
@ -1904,13 +1902,13 @@ func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
return p
}
func dotaddable(n *gc.Node, n1 *gc.Node) int {
func dotaddable(n *gc.Node, n1 *gc.Node) bool {
var o int
var oary [10]int64
var nn *gc.Node
if n.Op != gc.ODOT {
return 0
return false
}
o = gc.Dotoffset(n, oary[:], &nn)
@ -1918,16 +1916,16 @@ func dotaddable(n *gc.Node, n1 *gc.Node) int {
*n1 = *nn
n1.Type = n.Type
n1.Xoffset += oary[0]
return 1
return true
}
return 0
return false
}
func sudoclean() {
}
func sudoaddable(as int, n *gc.Node, a *obj.Addr) int {
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
*a = obj.Addr{}
return 0
return false
}


@ -45,21 +45,21 @@ const (
var gactive uint32
// do we need the carry bit
func needc(p *obj.Prog) int {
func needc(p *obj.Prog) bool {
var info gc.ProgInfo
for p != nil {
proginfo(&info, p)
if info.Flags&gc.UseCarry != 0 {
return 1
return true
}
if info.Flags&(gc.SetCarry|gc.KillCarry) != 0 {
return 0
return false
}
p = p.Link
}
return 0
return false
}
func rnops(r *gc.Flow) *gc.Flow {
@ -108,7 +108,7 @@ func peep(firstp *obj.Prog) {
p = r.Prog
switch p.As {
case i386.ALEAL:
if regtyp(&p.To) != 0 {
if regtyp(&p.To) {
if p.From.Sym != nil {
if p.From.Index == i386.REG_NONE {
conprop(r)
@ -121,7 +121,7 @@ func peep(firstp *obj.Prog) {
i386.AMOVL,
i386.AMOVSS,
i386.AMOVSD:
if regtyp(&p.To) != 0 {
if regtyp(&p.To) {
if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST {
conprop(r)
}
@ -141,12 +141,12 @@ loop1:
case i386.AMOVL,
i386.AMOVSS,
i386.AMOVSD:
if regtyp(&p.To) != 0 {
if regtyp(&p.From) != 0 {
if copyprop(g, r) != 0 {
if regtyp(&p.To) {
if regtyp(&p.From) {
if copyprop(g, r) {
excise(r)
t++
} else if subprop(r) != 0 && copyprop(g, r) != 0 {
} else if subprop(r) && copyprop(g, r) {
excise(r)
t++
}
@ -157,7 +157,7 @@ loop1:
i386.AMOVWLZX,
i386.AMOVBLSX,
i386.AMOVWLSX:
if regtyp(&p.To) != 0 {
if regtyp(&p.To) {
r1 = rnops(gc.Uniqs(r))
if r1 != nil {
p1 = r1.Prog
@ -170,7 +170,7 @@ loop1:
case i386.AADDL,
i386.AADDW:
if p.From.Type != obj.TYPE_CONST || needc(p.Link) != 0 {
if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
break
}
if p.From.Offset == -1 {
@ -179,7 +179,7 @@ loop1:
} else {
p.As = i386.ADECW
}
p.From = obj.Zprog.From
p.From = obj.Addr{}
break
}
@ -189,13 +189,13 @@ loop1:
} else {
p.As = i386.AINCW
}
p.From = obj.Zprog.From
p.From = obj.Addr{}
break
}
case i386.ASUBL,
i386.ASUBW:
if p.From.Type != obj.TYPE_CONST || needc(p.Link) != 0 {
if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
break
}
if p.From.Offset == -1 {
@ -204,7 +204,7 @@ loop1:
} else {
p.As = i386.AINCW
}
p.From = obj.Zprog.From
p.From = obj.Addr{}
break
}
@ -214,7 +214,7 @@ loop1:
} else {
p.As = i386.ADECW
}
p.From = obj.Zprog.From
p.From = obj.Addr{}
break
}
}
@ -232,8 +232,8 @@ loop1:
for r = g.Start; r != nil; r = r.Link {
p = r.Prog
if p.As == i386.AMOVSD {
if regtyp(&p.From) != 0 {
if regtyp(&p.To) != 0 {
if regtyp(&p.From) {
if regtyp(&p.To) {
p.As = i386.AMOVAPD
}
}
@ -256,8 +256,8 @@ func excise(r *gc.Flow) {
gc.Ostats.Ndelmov++
}
func regtyp(a *obj.Addr) int {
return bool2int(a.Type == obj.TYPE_REG && (i386.REG_AX <= a.Reg && a.Reg <= i386.REG_DI || i386.REG_X0 <= a.Reg && a.Reg <= i386.REG_X7))
func regtyp(a *obj.Addr) bool {
return a.Type == obj.TYPE_REG && (i386.REG_AX <= a.Reg && a.Reg <= i386.REG_DI || i386.REG_X0 <= a.Reg && a.Reg <= i386.REG_X7)
}
// movb elimination.
@ -273,7 +273,7 @@ func elimshortmov(g *gc.Graph) {
for r = g.Start; r != nil; r = r.Link {
p = r.Prog
if regtyp(&p.To) != 0 {
if regtyp(&p.To) {
switch p.As {
case i386.AINCB,
i386.AINCW:
@ -292,7 +292,7 @@ func elimshortmov(g *gc.Graph) {
p.As = i386.ANOTL
}
if regtyp(&p.From) != 0 || p.From.Type == obj.TYPE_CONST {
if regtyp(&p.From) || p.From.Type == obj.TYPE_CONST {
// move or arithmetic into partial register.
// from another register or constant can be movl.
// we don't switch to 32-bit arithmetic if it can
@ -304,13 +304,13 @@ func elimshortmov(g *gc.Graph) {
case i386.AADDB,
i386.AADDW:
if !(needc(p.Link) != 0) {
if !needc(p.Link) {
p.As = i386.AADDL
}
case i386.ASUBB,
i386.ASUBW:
if !(needc(p.Link) != 0) {
if !needc(p.Link) {
p.As = i386.ASUBL
}
@ -366,7 +366,7 @@ func elimshortmov(g *gc.Graph) {
* hopefully, then the former or latter MOV
* will be eliminated by copy propagation.
*/
func subprop(r0 *gc.Flow) int {
func subprop(r0 *gc.Flow) bool {
var p *obj.Prog
var v1 *obj.Addr
var v2 *obj.Addr
@ -376,12 +376,12 @@ func subprop(r0 *gc.Flow) int {
p = r0.Prog
v1 = &p.From
if !(regtyp(v1) != 0) {
return 0
if !regtyp(v1) {
return false
}
v2 = &p.To
if !(regtyp(v2) != 0) {
return 0
if !regtyp(v2) {
return false
}
for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
@ -396,18 +396,18 @@ func subprop(r0 *gc.Flow) int {
}
proginfo(&info, p)
if info.Flags&gc.Call != 0 {
return 0
return false
}
if info.Reguse|info.Regset != 0 {
return 0
return false
}
if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
goto gotit
}
if copyau(&p.From, v2) != 0 || copyau(&p.To, v2) != 0 {
if copyau(&p.From, v2) || copyau(&p.To, v2) {
break
}
if copysub(&p.From, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
@ -415,7 +415,7 @@ func subprop(r0 *gc.Flow) int {
}
}
return 0
return false
gotit:
copysub(&p.To, v1, v2, 1)
@ -442,7 +442,7 @@ gotit:
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
return 1
return true
}
/*
@ -457,7 +457,7 @@ gotit:
* set v1 F=1
* set v2 return success
*/
func copyprop(g *gc.Graph, r0 *gc.Flow) int {
func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
var p *obj.Prog
var v1 *obj.Addr
var v2 *obj.Addr
@ -465,14 +465,14 @@ func copyprop(g *gc.Graph, r0 *gc.Flow) int {
p = r0.Prog
v1 = &p.From
v2 = &p.To
if copyas(v1, v2) != 0 {
return 1
if copyas(v1, v2) {
return true
}
gactive++
return copy1(v1, v2, r0.S1, 0)
}
func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
var t int
var p *obj.Prog
@ -480,7 +480,7 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
if gc.Debug['P'] != 0 {
fmt.Printf("act set; return 1\n")
}
return 1
return true
}
r.Active = int32(gactive)
@ -492,7 +492,7 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
if gc.Debug['P'] != 0 {
fmt.Printf("%v", p)
}
if !(f != 0) && gc.Uniqp(r) == nil {
if f == 0 && gc.Uniqp(r) == nil {
f = 1
if gc.Debug['P'] != 0 {
fmt.Printf("; merge; f=%d", f)
@ -505,33 +505,33 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
if gc.Debug['P'] != 0 {
fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
}
return 0
return false
case 3: /* set */
if gc.Debug['P'] != 0 {
fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
}
return 1
return true
case 1, /* used, substitute */
4: /* use and set */
if f != 0 {
if !(gc.Debug['P'] != 0) {
return 0
if gc.Debug['P'] == 0 {
return false
}
if t == 4 {
fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
} else {
fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
}
return 0
return false
}
if copyu(p, v2, v1) != 0 {
if gc.Debug['P'] != 0 {
fmt.Printf("; sub fail; return 0\n")
}
return 0
return false
}
if gc.Debug['P'] != 0 {
@ -541,13 +541,13 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
if gc.Debug['P'] != 0 {
fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
}
return 1
return true
}
}
if !(f != 0) {
if f == 0 {
t = copyu(p, v1, nil)
if !(f != 0) && (t == 2 || t == 3 || t == 4) {
if f == 0 && (t == 2 || t == 3 || t == 4) {
f = 1
if gc.Debug['P'] != 0 {
fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
@ -559,13 +559,13 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
fmt.Printf("\n")
}
if r.S2 != nil {
if !(copy1(v1, v2, r.S2, f) != 0) {
return 0
if !copy1(v1, v2, r.S2, f) {
return false
}
}
}
return 1
return true
}
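The walk above is the heart of copy propagation: after a move v2 = v1, uses of v2 are rewritten to v1 along every successor path until either register is redefined, at which point the original move is often dead and excise can drop it. A toy model of the same idea on a straight-line instruction list (illustrative types, not the compiler's Flow graph):

// Ins is a toy three-address instruction: Dst = Op(A, B).
type Ins struct{ Op, Dst, A, B string }

// propagate rewrites uses of the move's destination with its source,
// stopping once either register is written again.
func propagate(prog []Ins, move int) {
	v1, v2 := prog[move].A, prog[move].Dst
	for i := move + 1; i < len(prog); i++ {
		p := &prog[i]
		if p.A == v2 {
			p.A = v1
		}
		if p.B == v2 {
			p.B = v1
		}
		if p.Dst == v1 || p.Dst == v2 {
			return // the copy relationship ends here
		}
	}
}

copy1 runs the same substitution over the flow graph, with the extra f flag to stop rewriting once a merge point may have redefined v1 on another path.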
/*
@ -588,7 +588,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 0
}
if copyau(&p.To, v) != 0 {
if copyau(&p.To, v) {
return 1
}
return 0
@ -617,7 +617,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 0
}
if copyau(&p.To, v) != 0 {
if copyau(&p.To, v) {
return 4
}
return 3
@ -639,23 +639,23 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
}
if info.Flags&gc.LeftAddr != 0 {
if copyas(&p.From, v) != 0 {
if copyas(&p.From, v) {
return 2
}
}
if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
if copyas(&p.To, v) != 0 {
if copyas(&p.To, v) {
return 2
}
}
if info.Flags&gc.RightWrite != 0 {
if copyas(&p.To, v) != 0 {
if copyas(&p.To, v) {
if s != nil {
return copysub(&p.From, v, s, 1)
}
if copyau(&p.From, v) != 0 {
if copyau(&p.From, v) {
return 4
}
return 3
@ -670,10 +670,10 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return copysub(&p.To, v, s, 1)
}
if copyau(&p.From, v) != 0 {
if copyau(&p.From, v) {
return 1
}
if copyau(&p.To, v) != 0 {
if copyau(&p.To, v) {
return 1
}
}
@ -686,7 +686,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
* could be set/use depending on
* semantics
*/
func copyas(a *obj.Addr, v *obj.Addr) int {
func copyas(a *obj.Addr, v *obj.Addr) bool {
if i386.REG_AL <= a.Reg && a.Reg <= i386.REG_BL {
gc.Fatal("use of byte register")
}
@ -695,51 +695,51 @@ func copyas(a *obj.Addr, v *obj.Addr) int {
}
if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
return 0
return false
}
if regtyp(v) != 0 {
return 1
if regtyp(v) {
return true
}
if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
if v.Offset == a.Offset {
return 1
return true
}
}
return 0
return false
}
func sameaddr(a *obj.Addr, v *obj.Addr) int {
func sameaddr(a *obj.Addr, v *obj.Addr) bool {
if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
return 0
return false
}
if regtyp(v) != 0 {
return 1
if regtyp(v) {
return true
}
if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
if v.Offset == a.Offset {
return 1
return true
}
}
return 0
return false
}
/*
* either direct or indirect
*/
func copyau(a *obj.Addr, v *obj.Addr) int {
if copyas(a, v) != 0 {
return 1
func copyau(a *obj.Addr, v *obj.Addr) bool {
if copyas(a, v) {
return true
}
if regtyp(v) != 0 {
if regtyp(v) {
if a.Type == obj.TYPE_MEM && a.Reg == v.Reg {
return 1
return true
}
if a.Index == v.Reg {
return 1
return true
}
}
return 0
return false
}
/*
@ -749,7 +749,7 @@ func copyau(a *obj.Addr, v *obj.Addr) int {
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
var reg int
if copyas(a, v) != 0 {
if copyas(a, v) {
reg = int(s.Reg)
if reg >= i386.REG_AX && reg <= i386.REG_DI || reg >= i386.REG_X0 && reg <= i386.REG_X7 {
if f != 0 {
@ -760,7 +760,7 @@ func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
return 0
}
if regtyp(v) != 0 {
if regtyp(v) {
reg = int(v.Reg)
if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
if (s.Reg == i386.REG_BP) && a.Index != obj.TYPE_NONE {
@ -838,10 +838,10 @@ loop:
}
}
func smallindir(a *obj.Addr, reg *obj.Addr) int {
return bool2int(regtyp(reg) != 0 && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == i386.REG_NONE && 0 <= a.Offset && a.Offset < 4096)
func smallindir(a *obj.Addr, reg *obj.Addr) bool {
return regtyp(reg) && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == i386.REG_NONE && 0 <= a.Offset && a.Offset < 4096
}
func stackaddr(a *obj.Addr) int {
return bool2int(a.Type == obj.TYPE_REG && a.Reg == i386.REG_SP)
func stackaddr(a *obj.Addr) bool {
return a.Type == obj.TYPE_REG && a.Reg == i386.REG_SP
}


@ -56,7 +56,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.OSLICESTR,
gc.OSLICE3,
gc.OSLICE3ARR:
if res.Op != gc.ONAME || !(res.Addable != 0) {
if res.Op != gc.ONAME || res.Addable == 0 {
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
cgen(&n1, res)
@ -66,7 +66,7 @@ func cgen(n *gc.Node, res *gc.Node) {
goto ret
case gc.OEFACE:
if res.Op != gc.ONAME || !(res.Addable != 0) {
if res.Op != gc.ONAME || res.Addable == 0 {
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
cgen(&n1, res)
@ -88,7 +88,7 @@ func cgen(n *gc.Node, res *gc.Node) {
}
}
if gc.Isfat(n.Type) != 0 {
if gc.Isfat(n.Type) {
if n.Type.Width < 0 {
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
@ -96,7 +96,7 @@ func cgen(n *gc.Node, res *gc.Node) {
goto ret
}
if !(res.Addable != 0) {
if res.Addable == 0 {
if n.Ullman > res.Ullman {
regalloc(&n1, n.Type, res)
cgen(n, &n1)
@ -115,7 +115,7 @@ func cgen(n *gc.Node, res *gc.Node) {
goto gen
}
if gc.Complexop(n, res) != 0 {
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
goto ret
}
@ -123,7 +123,7 @@ func cgen(n *gc.Node, res *gc.Node) {
f = 1 // gen thru register
switch n.Op {
case gc.OLITERAL:
if gc.Smallintconst(n) != 0 {
if gc.Smallintconst(n) {
f = 0
}
@ -131,9 +131,9 @@ func cgen(n *gc.Node, res *gc.Node) {
f = 0
}
if !(gc.Iscomplex[n.Type.Etype] != 0) {
if gc.Iscomplex[n.Type.Etype] == 0 {
a = optoas(gc.OAS, res.Type)
if sudoaddable(a, res, &addr) != 0 {
if sudoaddable(a, res, &addr) {
if f != 0 {
regalloc(&n2, res.Type, nil)
cgen(n, &n2)
@ -164,12 +164,12 @@ func cgen(n *gc.Node, res *gc.Node) {
switch n.Op {
case gc.OSPTR,
gc.OLEN:
if gc.Isslice(n.Left.Type) != 0 || gc.Istype(n.Left.Type, gc.TSTRING) != 0 {
if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
n.Addable = n.Left.Addable
}
case gc.OCAP:
if gc.Isslice(n.Left.Type) != 0 {
if gc.Isslice(n.Left.Type) {
n.Addable = n.Left.Addable
}
@ -177,7 +177,7 @@ func cgen(n *gc.Node, res *gc.Node) {
n.Addable = n.Left.Addable
}
if gc.Complexop(n, res) != 0 {
if gc.Complexop(n, res) {
gc.Complexgen(n, res)
goto ret
}
@ -210,9 +210,9 @@ func cgen(n *gc.Node, res *gc.Node) {
}
}
if !(gc.Iscomplex[n.Type.Etype] != 0) {
if gc.Iscomplex[n.Type.Etype] == 0 {
a = optoas(gc.OAS, n.Type)
if sudoaddable(a, n, &addr) != 0 {
if sudoaddable(a, n, &addr) {
if res.Op == gc.OREGISTER {
p1 = gins(a, nil, res)
p1.From = addr
@ -251,11 +251,11 @@ func cgen(n *gc.Node, res *gc.Node) {
p1 = gc.Gbranch(ppc64.ABR, nil, 0)
p2 = gc.Pc
gmove(gc.Nodbool(1), res)
gmove(gc.Nodbool(true), res)
p3 = gc.Gbranch(ppc64.ABR, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n, true, 0, p2)
gmove(gc.Nodbool(0), res)
gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
goto ret
@ -358,7 +358,7 @@ func cgen(n *gc.Node, res *gc.Node) {
// pointer is the first word of string or slice.
case gc.OSPTR:
if gc.Isconst(nl, gc.CTSTR) != 0 {
if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n1, gc.Types[gc.Tptr], res)
p1 = gins(ppc64.AMOVD, nil, &n1)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
@ -373,7 +373,7 @@ func cgen(n *gc.Node, res *gc.Node) {
regfree(&n1)
case gc.OLEN:
if gc.Istype(nl.Type, gc.TMAP) != 0 || gc.Istype(nl.Type, gc.TCHAN) != 0 {
if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
// map and chan have len in the first int-sized word.
// a zero pointer means zero length
regalloc(&n1, gc.Types[gc.Tptr], res)
@ -396,7 +396,7 @@ func cgen(n *gc.Node, res *gc.Node) {
break
}
if gc.Istype(nl.Type, gc.TSTRING) != 0 || gc.Isslice(nl.Type) != 0 {
if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
// both slice and string have len one pointer into the struct.
// a zero pointer means zero length
igen(nl, &n1, res)
@ -411,7 +411,7 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
case gc.OCAP:
if gc.Istype(nl.Type, gc.TCHAN) != 0 {
if gc.Istype(nl.Type, gc.TCHAN) {
// chan has cap in the second int-sized word.
// a zero pointer means zero length
regalloc(&n1, gc.Types[gc.Tptr], res)
@ -435,7 +435,7 @@ func cgen(n *gc.Node, res *gc.Node) {
break
}
if gc.Isslice(nl.Type) != 0 {
if gc.Isslice(nl.Type) {
igen(nl, &n1, res)
n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
n1.Xoffset += int64(gc.Array_cap)
@ -447,11 +447,11 @@ func cgen(n *gc.Node, res *gc.Node) {
gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
case gc.OADDR:
if n.Bounded != 0 { // let race detector avoid nil checks
if n.Bounded { // let race detector avoid nil checks
gc.Disable_checknil++
}
agen(nl, res)
if n.Bounded != 0 {
if n.Bounded {
gc.Disable_checknil--
}
@ -480,7 +480,7 @@ func cgen(n *gc.Node, res *gc.Node) {
cgen_div(int(n.Op), &n1, nr, res)
regfree(&n1)
} else {
if !(gc.Smallintconst(nr) != 0) {
if !gc.Smallintconst(nr) {
regalloc(&n2, nr.Type, res)
cgen(nr, &n2)
} else {
@ -496,7 +496,7 @@ func cgen(n *gc.Node, res *gc.Node) {
case gc.OLSH,
gc.ORSH,
gc.OLROT:
cgen_shift(int(n.Op), int(n.Bounded), nl, nr, res)
cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
}
goto ret
@ -518,7 +518,7 @@ func cgen(n *gc.Node, res *gc.Node) {
* register for the computation.
*/
sbop: // symmetric binary
if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) != 0 || (nr.Op == gc.OLITERAL && !(gc.Smallintconst(nr) != 0)))) {
if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) || (nr.Op == gc.OLITERAL && !gc.Smallintconst(nr)))) {
r = nl
nl = nr
nr = r
@ -612,7 +612,7 @@ func cgenr(n *gc.Node, a *gc.Node, res *gc.Node) {
gc.Dump("cgenr-n", n)
}
if gc.Isfat(n.Type) != 0 {
if gc.Isfat(n.Type) {
gc.Fatal("cgenr on fat node")
}
@ -688,33 +688,33 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
//bounded = debug['B'] || n->bounded;
if nr.Addable != 0 {
if !(gc.Isconst(nr, gc.CTINT) != 0) {
if !gc.Isconst(nr, gc.CTINT) {
gc.Tempname(&tmp, gc.Types[gc.TINT64])
}
if !(gc.Isconst(nl, gc.CTSTR) != 0) {
if !gc.Isconst(nl, gc.CTSTR) {
agenr(nl, &n3, res)
}
if !(gc.Isconst(nr, gc.CTINT) != 0) {
if !gc.Isconst(nr, gc.CTINT) {
cgen(nr, &tmp)
regalloc(&n1, tmp.Type, nil)
gmove(&tmp, &n1)
}
} else if nl.Addable != 0 {
if !(gc.Isconst(nr, gc.CTINT) != 0) {
if !gc.Isconst(nr, gc.CTINT) {
gc.Tempname(&tmp, gc.Types[gc.TINT64])
cgen(nr, &tmp)
regalloc(&n1, tmp.Type, nil)
gmove(&tmp, &n1)
}
if !(gc.Isconst(nl, gc.CTSTR) != 0) {
if !gc.Isconst(nl, gc.CTSTR) {
agenr(nl, &n3, res)
}
} else {
gc.Tempname(&tmp, gc.Types[gc.TINT64])
cgen(nr, &tmp)
nr = &tmp
if !(gc.Isconst(nl, gc.CTSTR) != 0) {
if !gc.Isconst(nl, gc.CTSTR) {
agenr(nl, &n3, res)
}
regalloc(&n1, tmp.Type, nil)
@ -726,13 +726,13 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
// w is width
// constant index
if gc.Isconst(nr, gc.CTINT) != 0 {
if gc.Isconst(nl, gc.CTSTR) != 0 {
if gc.Isconst(nr, gc.CTINT) {
if gc.Isconst(nl, gc.CTSTR) {
gc.Fatal("constant string constant index")
}
v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
if gc.Debug['B'] == 0 && !n.Bounded {
n1 = n3
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
@ -765,11 +765,11 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
gmove(&n1, &n2)
regfree(&n1)
if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
if gc.Debug['B'] == 0 && !n.Bounded {
// check bounds
if gc.Isconst(nl, gc.CTSTR) != 0 {
if gc.Isconst(nl, gc.CTSTR) {
gc.Nodconst(&n4, gc.Types[gc.TUINT64], int64(len(nl.Val.U.Sval.S)))
} else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
n1 = n3
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
@ -799,12 +799,12 @@ func agenr(n *gc.Node, a *gc.Node, res *gc.Node) {
gc.Patch(p1, gc.Pc)
}
if gc.Isconst(nl, gc.CTSTR) != 0 {
if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n3, gc.Types[gc.Tptr], res)
p1 = gins(ppc64.AMOVD, nil, &n3)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
p1.From.Type = obj.TYPE_ADDR
} else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
} else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
n1 = n3
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
@ -872,7 +872,7 @@ func agen(n *gc.Node, res *gc.Node) {
n = n.Left
}
if gc.Isconst(n, gc.CTNIL) != 0 && n.Type.Width > int64(gc.Widthptr) {
if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
// The generated code is just going to panic, so it need not
@ -950,7 +950,7 @@ func agen(n *gc.Node, res *gc.Node) {
}
// should only get here for heap vars or paramref
if !(n.Class&gc.PHEAP != 0) && n.Class != gc.PPARAMREF {
if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
gc.Dump("bad agen", n)
gc.Fatal("agen: bad ONAME class %#x", n.Class)
}
@ -1060,10 +1060,10 @@ func igen(n *gc.Node, a *gc.Node, res *gc.Node) {
// Could do the same for slice except that we need
// to use the real index for the bounds checking.
case gc.OINDEX:
if gc.Isfixedarray(n.Left.Type) != 0 || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type) != 0) {
if gc.Isconst(n.Right, gc.CTINT) != 0 {
if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type)) {
if gc.Isconst(n.Right, gc.CTINT) {
// Compute &a.
if !(gc.Isptr[n.Left.Type.Etype] != 0) {
if gc.Isptr[n.Left.Type.Etype] == 0 {
igen(n.Left, a, res)
} else {
igen(n.Left, &n1, res)
@ -1112,7 +1112,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
}
if n == nil {
n = gc.Nodbool(1)
n = gc.Nodbool(true)
}
if n.Ninit != nil {
@ -1158,7 +1158,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
// need to ask if it is bool?
case gc.OLITERAL:
if !true_ == !(n.Val.U.Bval != 0) {
if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(ppc64.ABR, nil, likely), to)
}
goto ret
@ -1241,7 +1241,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
nr = r
}
if gc.Isslice(nl.Type) != 0 {
if gc.Isslice(nl.Type) {
// front end should only leave cmp to literal nil
if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
gc.Yyerror("illegal slice comparison")
@ -1262,7 +1262,7 @@ func bgen(n *gc.Node, true_ bool, likely int, to *obj.Prog) {
break
}
if gc.Isinter(nl.Type) != 0 {
if gc.Isinter(nl.Type) {
// front end should only leave cmp to literal nil
if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
gc.Yyerror("illegal interface comparison")
@ -1376,14 +1376,14 @@ func stkof(n *gc.Node) int64 {
case gc.OINDEX:
t = n.Left.Type
if !(gc.Isfixedarray(t) != 0) {
if !gc.Isfixedarray(t) {
break
}
off = stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
if gc.Isconst(n.Right, gc.CTINT) != 0 {
if gc.Isconst(n.Right, gc.CTINT) {
return off + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval)
}
return 1000
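The ±1000 values are sentinels, not offsets. As inferred from how the callers test them (the names below are illustrative; the original uses the bare literals):

const (
	stkNone    = -1000 // the expression does not live on the stack
	stkUnknown = 1000  // on the stack, but at an offset unknown at compile time
)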
@ -1473,7 +1473,6 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
switch align {
default:
gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))
fallthrough
case 1:
op = ppc64.AMOVBU
@ -1598,7 +1597,7 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
for {
tmp14 := c
c--
if !(tmp14 > 0) {
if tmp14 <= 0 {
break
}
@ -1617,19 +1616,19 @@ func sgen(n *gc.Node, ns *gc.Node, w int64) {
regfree(&tmp)
}
func cadable(n *gc.Node) int {
if !(n.Addable != 0) {
func cadable(n *gc.Node) bool {
if n.Addable == 0 {
// don't know how it happens,
// but it does
return 0
return false
}
switch n.Op {
case gc.ONAME:
return 1
return true
}
return 0
return false
}
/*
@ -1640,7 +1639,7 @@ func cadable(n *gc.Node) int {
* nr is N when assigning a zero value.
* return 1 if can do, 0 if can't.
*/
func componentgen(nr *gc.Node, nl *gc.Node) int {
func componentgen(nr *gc.Node, nl *gc.Node) bool {
var nodl gc.Node
var nodr gc.Node
var tmp gc.Node
@ -1662,12 +1661,12 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
t = nl.Type
// Slices are ok.
if gc.Isslice(t) != 0 {
if gc.Isslice(t) {
break
}
// Small arrays are ok.
if t.Bound > 0 && t.Bound <= 3 && !(gc.Isfat(t.Type) != 0) {
if t.Bound > 0 && t.Bound <= 3 && !gc.Isfat(t.Type) {
break
}
@ -1679,7 +1678,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
fldcount = 0
for t = nl.Type.Type; t != nil; t = t.Down {
if gc.Isfat(t.Type) != 0 {
if gc.Isfat(t.Type) {
goto no
}
if t.Etype != gc.TFIELD {
@ -1698,8 +1697,8 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
}
nodl = *nl
if !(cadable(nl) != 0) {
if nr != nil && !(cadable(nr) != 0) {
if !cadable(nl) {
if nr != nil && !cadable(nr) {
goto no
}
igen(nl, &nodl, nil)
@ -1708,7 +1707,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
if nr != nil {
nodr = *nr
if !(cadable(nr) != 0) {
if !cadable(nr) {
igen(nr, &nodr, nil)
freer = 1
}
@ -1736,7 +1735,7 @@ func componentgen(nr *gc.Node, nl *gc.Node) int {
gc.Gvardef(nl)
}
t = nl.Type
if !(gc.Isslice(t) != 0) {
if !gc.Isslice(t) {
nodl.Type = t.Type
nodr.Type = nodl.Type
for fldcount = 0; fldcount < t.Bound; fldcount++ {
@ -1876,7 +1875,7 @@ no:
if freel != 0 {
regfree(&nodl)
}
return 0
return false
yes:
if freer != 0 {
@ -1885,5 +1884,5 @@ yes:
if freel != 0 {
regfree(&nodl)
}
return 1
return true
}


@ -37,7 +37,7 @@ func defframe(ptxt *obj.Prog) {
// iterate through declarations - they are sorted in decreasing xoffset order.
for l = gc.Curfn.Dcl; l != nil; l = l.Next {
n = l.N
if !(n.Needzero != 0) {
if n.Needzero == 0 {
continue
}
if n.Class != gc.PAUTO {
@ -187,7 +187,7 @@ func ginscall(f *gc.Node, proc int) {
p = gins(ppc64.ABL, nil, f)
gc.Afunclit(&p.To, f)
if proc == -1 || gc.Noreturn(p) != 0 {
if proc == -1 || gc.Noreturn(p) {
gins(obj.AUNDEF, nil, nil)
}
break
@ -226,7 +226,7 @@ func ginscall(f *gc.Node, proc int) {
if proc == 1 {
ginscall(gc.Newproc, 0)
} else {
if !(gc.Hasdefer != 0) {
if gc.Hasdefer == 0 {
gc.Fatal("hasdefer=0 but has defer")
}
ginscall(gc.Deferproc, 0)
@ -270,7 +270,7 @@ func cgen_callinter(n *gc.Node, res *gc.Node, proc int) {
i = i.Left // interface
if !(i.Addable != 0) {
if i.Addable == 0 {
gc.Tempname(&tmpi, i.Type)
cgen(i, &tmpi)
i = &tmpi
@ -503,9 +503,9 @@ func dodiv(op int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
check = 0
if gc.Issigned[t.Etype] != 0 {
check = 1
if gc.Isconst(nl, gc.CTINT) != 0 && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
check = 0
} else if gc.Isconst(nr, gc.CTINT) != 0 && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
} else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
check = 0
}
}
@ -723,7 +723,7 @@ longmod:
// use 2-operand 16-bit multiply
// because there is no 2-operand 8-bit multiply
//a = AIMULW;
if !(gc.Smallintconst(nr) != 0) {
if !gc.Smallintconst(nr) {
regalloc(&n3, nl.Type, nil)
cgen(nr, &n3)
gins(a, &n3, &n2)
@ -799,7 +799,7 @@ func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
* res = nl << nr
* res = nl >> nr
*/
func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var n1 gc.Node
var n2 gc.Node
var n3 gc.Node
@ -869,7 +869,7 @@ func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
regfree(&n3)
// test and fix up large shifts
if !(bounded != 0) {
if !bounded {
gc.Nodconst(&n3, tcount, nl.Type.Width*8)
gins(optoas(gc.OCMP, tcount), &n1, &n3)
p1 = gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
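The fixup exists because Go defines shifts by any count while machine shift instructions consult only a truncated count field, so when the count is not provably bounded the generated code compares it against the operand width and substitutes the defined result (zero, or the sign fill for arithmetic right shifts). The semantic gap, as a runnable sketch:

package main

import "fmt"

func main() {
	var x uint32 = 1
	fmt.Println(x << 40)        // 0: the result Go defines for an over-wide shift
	fmt.Println(x << (40 & 31)) // 256: what a machine that masks the count to 5 bits computes
}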


@ -93,7 +93,7 @@ func gclean() {
}
}
func anyregalloc() int {
func anyregalloc() bool {
var i int
var j int
@ -106,11 +106,11 @@ func anyregalloc() int {
goto ok
}
}
return 1
return true
ok:
}
return 0
return false
}
/*
@ -176,7 +176,6 @@ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
fmt.Printf("R%d %p\n", i, regpc[i-ppc64.REG_R0])
}
gc.Fatal("out of fixed registers")
fallthrough
case gc.TFLOAT32,
gc.TFLOAT64:
@ -199,7 +198,6 @@ func regalloc(n *gc.Node, t *gc.Type, o *gc.Node) {
fmt.Printf("F%d %p\n", i, regpc[i-ppc64.REG_R0])
}
gc.Fatal("out of floating registers")
fallthrough
case gc.TCOMPLEX64,
gc.TCOMPLEX128:
@ -277,7 +275,6 @@ func ginscon2(as int, n2 *gc.Node, c int64) {
switch as {
default:
gc.Fatal("ginscon2")
fallthrough
case ppc64.ACMP:
if -ppc64.BIG <= c && c <= ppc64.BIG {
@ -356,7 +353,7 @@ func gmove(f *gc.Node, t *gc.Node) {
}
// cannot have two memory operands
if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
@ -391,7 +388,7 @@ func gmove(f *gc.Node, t *gc.Node) {
ft = tt // so big switch will choose a simple mov
// constants can't move directly to memory.
if gc.Ismem(t) != 0 {
if gc.Ismem(t) {
goto hard
}
}
@ -422,7 +419,6 @@ func gmove(f *gc.Node, t *gc.Node) {
switch uint32(ft)<<16 | uint32(tt) {
default:
gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
fallthrough
/*
* integer copy and truncate
@ -1140,10 +1136,10 @@ const (
OAddable = 1 << 1
)
func xgen(n *gc.Node, a *gc.Node, o int) int {
func xgen(n *gc.Node, a *gc.Node, o int) bool {
// TODO(minux)
return -1
return -1 != 0 /*TypeKind(100016)*/
}
func sudoclean() {
@ -1161,9 +1157,9 @@ func sudoclean() {
* after successful sudoaddable,
* to release the register used for a.
*/
func sudoaddable(as int, n *gc.Node, a *obj.Addr) int {
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
// TODO(minux)
*a = obj.Addr{}
return 0
return false
}


@ -69,14 +69,14 @@ loop1:
// breaking moves that do care. This might let us
// simplify or remove the next peep loop, too.
if p.As == ppc64.AMOVD || p.As == ppc64.AFMOVD {
if regtyp(&p.To) != 0 {
if regtyp(&p.To) {
// Try to eliminate reg->reg moves
if regtyp(&p.From) != 0 {
if regtyp(&p.From) {
if p.From.Type == p.To.Type {
if copyprop(r) != 0 {
if copyprop(r) {
excise(r)
t++
} else if subprop(r) != 0 && copyprop(r) != 0 {
} else if subprop(r) && copyprop(r) {
excise(r)
t++
}
@ -89,10 +89,10 @@ loop1:
if p.To.Type == obj.TYPE_REG {
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REGZERO
if copyprop(r) != 0 {
if copyprop(r) {
excise(r)
t++
} else if subprop(r) != 0 && copyprop(r) != 0 {
} else if subprop(r) && copyprop(r) {
excise(r)
t++
}
@ -156,7 +156,7 @@ loop1:
switch p.As {
case ppc64.ACMP,
ppc64.ACMPW: /* always safe? */
if !(regzer(&p.To) != 0) {
if regzer(&p.To) == 0 {
continue
}
r1 = r.S1
@ -186,7 +186,7 @@ loop1:
r1 = r
for {
r1 = gc.Uniqp(r1)
if !(r1 != nil && r1.Prog.As == obj.ANOP) {
if r1 == nil || r1.Prog.As != obj.ANOP {
break
}
}
@ -379,9 +379,9 @@ func regzer(a *obj.Addr) int {
return 0
}
func regtyp(a *obj.Addr) int {
func regtyp(a *obj.Addr) bool {
// TODO(rsc): Floating point register exclusions?
return bool2int(a.Type == obj.TYPE_REG && ppc64.REG_R0 <= a.Reg && a.Reg <= ppc64.REG_F31 && a.Reg != ppc64.REGZERO)
return a.Type == obj.TYPE_REG && ppc64.REG_R0 <= a.Reg && a.Reg <= ppc64.REG_F31 && a.Reg != ppc64.REGZERO
}
/*
@ -401,7 +401,7 @@ func regtyp(a *obj.Addr) int {
* r0 (the argument, not the register) is the MOV at the end of the
* above sequences. This returns 1 if it modified any instructions.
*/
func subprop(r0 *gc.Flow) int {
func subprop(r0 *gc.Flow) bool {
var p *obj.Prog
var v1 *obj.Addr
var v2 *obj.Addr
@ -411,12 +411,12 @@ func subprop(r0 *gc.Flow) int {
p = r0.Prog
v1 = &p.From
if !(regtyp(v1) != 0) {
return 0
if !regtyp(v1) {
return false
}
v2 = &p.To
if !(regtyp(v2) != 0) {
return 0
if !regtyp(v2) {
return false
}
for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Uniqs(r) == nil {
@ -428,7 +428,7 @@ func subprop(r0 *gc.Flow) int {
}
proginfo(&info, p)
if info.Flags&gc.Call != 0 {
return 0
return false
}
if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
@ -439,7 +439,7 @@ func subprop(r0 *gc.Flow) int {
}
}
if copyau(&p.From, v2) != 0 || copyau1(p, v2) != 0 || copyau(&p.To, v2) != 0 {
if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
break
}
if copysub(&p.From, v1, v2, 0) != 0 || copysub1(p, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
@ -447,7 +447,7 @@ func subprop(r0 *gc.Flow) int {
}
}
return 0
return false
gotit:
copysub(&p.To, v1, v2, 1)
@ -475,7 +475,7 @@ gotit:
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
return 1
return true
}
/*
@ -490,7 +490,7 @@ gotit:
* set v1 F=1
* set v2 return success (caller can remove v1->v2 move)
*/
func copyprop(r0 *gc.Flow) int {
func copyprop(r0 *gc.Flow) bool {
var p *obj.Prog
var v1 *obj.Addr
var v2 *obj.Addr
@ -498,11 +498,11 @@ func copyprop(r0 *gc.Flow) int {
p = r0.Prog
v1 = &p.From
v2 = &p.To
if copyas(v1, v2) != 0 {
if copyas(v1, v2) {
if gc.Debug['P'] != 0 {
fmt.Printf("eliminating self-move\n", r0.Prog)
}
return 1
return true
}
gactive++
@ -514,7 +514,7 @@ func copyprop(r0 *gc.Flow) int {
// copy1 replaces uses of v2 with v1 starting at r and returns 1 if
// all uses were rewritten.
func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
var t int
var p *obj.Prog
@ -522,7 +522,7 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
if gc.Debug['P'] != 0 {
fmt.Printf("act set; return 1\n")
}
return 1
return true
}
r.Active = int32(gactive)
@ -534,7 +534,7 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
if gc.Debug['P'] != 0 {
fmt.Printf("%v", p)
}
if !(f != 0) && gc.Uniqp(r) == nil {
if f == 0 && gc.Uniqp(r) == nil {
// Multiple predecessors; conservatively
// assume v1 was set on other path
f = 1
@ -550,33 +550,33 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
if gc.Debug['P'] != 0 {
fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
}
return 0
return false
case 3: /* set */
if gc.Debug['P'] != 0 {
fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
}
return 1
return true
case 1, /* used, substitute */
4: /* use and set */
if f != 0 {
if !(gc.Debug['P'] != 0) {
return 0
if gc.Debug['P'] == 0 {
return false
}
if t == 4 {
fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
} else {
fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
}
return 0
return false
}
if copyu(p, v2, v1) != 0 {
if gc.Debug['P'] != 0 {
fmt.Printf("; sub fail; return 0\n")
}
return 0
return false
}
if gc.Debug['P'] != 0 {
@ -586,13 +586,13 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
if gc.Debug['P'] != 0 {
fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
}
return 1
return true
}
}
if !(f != 0) {
if f == 0 {
t = copyu(p, v1, nil)
if !(f != 0) && (t == 2 || t == 3 || t == 4) {
if f == 0 && (t == 2 || t == 3 || t == 4) {
f = 1
if gc.Debug['P'] != 0 {
fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
@ -604,13 +604,13 @@ func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
fmt.Printf("\n")
}
if r.S2 != nil {
if !(copy1(v1, v2, r.S2, f) != 0) {
return 0
if !copy1(v1, v2, r.S2, f) {
return false
}
}
}
return 1
return true
}
// If s==nil, copyu returns the set/use of v in p; otherwise, it
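The small integers copyu reports form a protocol that copy1 interprets; read off the switch bodies, the codes are (names here are illustrative, the sources use the bare numbers):

const (
	copyNone   = 0 // v does not occur in p
	copyUse    = 1 // v is read and may be substituted
	copyRAR    = 2 // v is read-altered-rewritten; substitution must stop
	copySet    = 3 // v is written; the tracked value dies here
	copyUseSet = 4 // v is both read and written by p
)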
@ -671,7 +671,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
}
// Update only indirect uses of v in p->to
if !(copyas(&p.To, v) != 0) {
if !copyas(&p.To, v) {
if copysub(&p.To, v, s, 1) != 0 {
return 1
}
@ -679,21 +679,21 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 0
}
if copyas(&p.To, v) != 0 {
if copyas(&p.To, v) {
// Fix up implicit from
if p.From.Type == obj.TYPE_NONE {
p.From = p.To
}
if copyau(&p.From, v) != 0 {
if copyau(&p.From, v) {
return 4
}
return 3
}
if copyau(&p.From, v) != 0 {
if copyau(&p.From, v) {
return 1
}
if copyau(&p.To, v) != 0 {
if copyau(&p.To, v) {
// p->to only indirectly uses v
return 1
}
@ -707,7 +707,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
ppc64.AMOVWZU,
ppc64.AMOVDU:
if p.From.Type == obj.TYPE_MEM {
if copyas(&p.From, v) != 0 {
if copyas(&p.From, v) {
// No s!=nil check; need to fail
// anyway in that case
return 2
@ -720,11 +720,11 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 0
}
if copyas(&p.To, v) != 0 {
if copyas(&p.To, v) {
return 3
}
} else if p.To.Type == obj.TYPE_MEM {
if copyas(&p.To, v) != 0 {
if copyas(&p.To, v) {
return 2
}
if s != nil {
@ -734,7 +734,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 0
}
if copyau(&p.From, v) != 0 {
if copyau(&p.From, v) {
return 1
}
} else {
@ -745,7 +745,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
case ppc64.ARLWMI, /* read p->from, read p->reg, rar p->to */
ppc64.ARLWMICC:
if copyas(&p.To, v) != 0 {
if copyas(&p.To, v) {
return 2
}
fallthrough
@ -806,7 +806,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
}
// Update only indirect uses of v in p->to
if !(copyas(&p.To, v) != 0) {
if !copyas(&p.To, v) {
if copysub(&p.To, v, s, 1) != 0 {
return 1
}
@ -814,7 +814,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 0
}
if copyas(&p.To, v) != 0 {
if copyas(&p.To, v) {
if p.Reg == 0 {
// Fix up implicit reg (e.g., ADD
// R3,R4 -> ADD R3,R4,R4) so we can
@ -822,22 +822,22 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
p.Reg = p.To.Reg
}
if copyau(&p.From, v) != 0 {
if copyau(&p.From, v) {
return 4
}
if copyau1(p, v) != 0 {
if copyau1(p, v) {
return 4
}
return 3
}
if copyau(&p.From, v) != 0 {
if copyau(&p.From, v) {
return 1
}
if copyau1(p, v) != 0 {
if copyau1(p, v) {
return 1
}
if copyau(&p.To, v) != 0 {
if copyau(&p.To, v) {
return 1
}
return 0
@ -866,10 +866,10 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return copysub(&p.To, v, s, 1)
}
if copyau(&p.From, v) != 0 {
if copyau(&p.From, v) {
return 1
}
if copyau(&p.To, v) != 0 {
if copyau(&p.To, v) {
return 1
}
return 0
@ -885,7 +885,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 0
}
if copyau(&p.To, v) != 0 {
if copyau(&p.To, v) {
return 1
}
return 0
@ -928,7 +928,7 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
return 0
}
if copyau(&p.To, v) != 0 {
if copyau(&p.To, v) {
return 4
}
return 3
@ -982,15 +982,15 @@ func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
// If a is the from operand, this means this operation reads the
// register in v. If a is the to operand, this means this operation
// writes the register in v.
func copyas(a *obj.Addr, v *obj.Addr) int {
if regtyp(v) != 0 {
func copyas(a *obj.Addr, v *obj.Addr) bool {
if regtyp(v) {
if a.Type == v.Type {
if a.Reg == v.Reg {
return 1
return true
}
}
}
return 0
return false
}
// copyau returns 1 if a either directly or indirectly addresses the
@ -1000,36 +1000,36 @@ func copyas(a *obj.Addr, v *obj.Addr) int {
// register in v. If a is the to operand, this means the operation
// either reads or writes the register in v (if !copyas(a, v), then
// the operation reads the register in v).
func copyau(a *obj.Addr, v *obj.Addr) int {
if copyas(a, v) != 0 {
return 1
func copyau(a *obj.Addr, v *obj.Addr) bool {
if copyas(a, v) {
return true
}
if v.Type == obj.TYPE_REG {
if a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0) {
if v.Reg == a.Reg {
return 1
return true
}
}
}
return 0
return false
}
// copyau1 returns 1 if p->reg references the same register as v and v
// is a direct reference.
func copyau1(p *obj.Prog, v *obj.Addr) int {
if regtyp(v) != 0 && v.Reg != 0 {
func copyau1(p *obj.Prog, v *obj.Addr) bool {
if regtyp(v) && v.Reg != 0 {
if p.Reg == v.Reg {
return 1
return true
}
}
return 0
return false
}
// copysub replaces v with s in a if f!=0 or indicates it if could if f==0.
// Returns 1 on failure to substitute (it always succeeds on ppc64).
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
if f != 0 {
if copyau(a, v) != 0 {
if copyau(a, v) {
a.Reg = s.Reg
}
}
@ -1040,32 +1040,32 @@ func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
// Returns 1 on failure to substitute (it always succeeds on ppc64).
func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
if f != 0 {
if copyau1(p1, v) != 0 {
if copyau1(p1, v) {
p1.Reg = s.Reg
}
}
return 0
}
func sameaddr(a *obj.Addr, v *obj.Addr) int {
func sameaddr(a *obj.Addr, v *obj.Addr) bool {
if a.Type != v.Type {
return 0
return false
}
if regtyp(v) != 0 && a.Reg == v.Reg {
return 1
if regtyp(v) && a.Reg == v.Reg {
return true
}
if v.Type == obj.NAME_AUTO || v.Type == obj.NAME_PARAM {
if v.Offset == a.Offset {
return 1
return true
}
}
return 0
return false
}
func smallindir(a *obj.Addr, reg *obj.Addr) int {
return bool2int(reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096)
func smallindir(a *obj.Addr, reg *obj.Addr) bool {
return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
}
func stackaddr(a *obj.Addr) int {
return bool2int(a.Type == obj.TYPE_REG && a.Reg == ppc64.REGSP)
func stackaddr(a *obj.Addr) bool {
return a.Type == obj.TYPE_REG && a.Reg == ppc64.REGSP
}