[dev.typeparams] all: merge dev.regabi (37f138d) into dev.typeparams

Conflicts:

* src/cmd/compile/fmtmap_test.go
* src/cmd/compile/internal/gc/go.go
* src/cmd/compile/internal/gc/main.go
* src/cmd/compile/internal/noder/noder.go

Merge List:

* 2020-12-23 37f138df6b [dev.regabi] cmd/compile: split out package test [generated]
* 2020-12-23 3d8a3cb06b [dev.regabi] cmd/compile: split out package pkginit [generated]
* 2020-12-23 3f04d964ab [dev.regabi] cmd/compile: split up walkexpr1, walkstmt [generated]
* 2020-12-23 e4895ab4c0 [dev.regabi] cmd/compile: split out package walk [generated]
* 2020-12-23 01fd2d05c8 [dev.regabi] cmd/compile: split out package dwarfgen [generated]
* 2020-12-23 6c34d2f420 [dev.regabi] cmd/compile: split out package ssagen [generated]
* 2020-12-23 de65151e50 [dev.regabi] cmd/compile: split out package reflectdata [generated]
* 2020-12-23 4dfb5d91a8 [dev.regabi] cmd/compile: split out package staticdata [generated]
* 2020-12-23 fbc82f03b1 [dev.regabi] cmd/compile: split out package noder [generated]
* 2020-12-23 de454eef5f [dev.regabi] cmd/compile: split out package escape [generated]
* 2020-12-23 071ab0a14c [dev.regabi] cmd/compile: split out package liveness [generated]
* 2020-12-23 0ced54062e [dev.regabi] cmd/compile: split out package objw [generated]
* 2020-12-23 575fd6ff0a [dev.regabi] cmd/compile: split out package inline [generated]
* 2020-12-23 0256ba99a8 [dev.regabi] cmd/compile: split up typecheck1 [generated]
* 2020-12-23 b9693d7627 [dev.regabi] cmd/compile: split out package typecheck [generated]
* 2020-12-23 dac0de3748 [dev.regabi] cmd/compile: move type size calculations into package types [generated]
* 2020-12-23 527a1895d6 [dev.regabi] cmd/compile: move helpers into package ir [generated]
* 2020-12-23 65c4c6dfb2 [dev.regabi] cmd/compile: group known symbols, packages, names [generated]
* 2020-12-23 9ee309255a [dev.regabi] cmd/compile: move helpers into package types [generated]
* 2020-12-23 ead4957892 [dev.regabi] cmd/compile: move helpers into package base [generated]
* 2020-12-23 440308ffd7 [dev.regabi] cmd/compile: simplify Nodes usage [generated]
* 2020-12-23 f9d373720e [dev.regabi] cmd/compile: remove Left, Right etc methods [generated]
* 2020-12-23 14d667341f [dev.regabi] cmd/compile: remove Node.Left etc [generated]
* 2020-12-23 6f27d29be0 [dev.regabi] cmd/compile: remove ir.Nod [generated]
* 2020-12-23 69cf39089f [dev.regabi] cmd/compile: do not die in early base.FlushErrors

Change-Id: Ic4686e77c6ee38b3cd7d37fc7f3e93aaa9017b7a
Author: Russ Cox
Date: 2020-12-23 01:43:22 -05:00
Commit: d99dd17827
207 changed files with 23461 additions and 22887 deletions

@@ -57,7 +57,6 @@ var knownFormats = map[string]string{
"[]cmd/compile/internal/types2.Type %s": "",
"cmd/compile/internal/arm.shift %d": "",
"cmd/compile/internal/gc.RegIndex %d": "",
"cmd/compile/internal/gc.initKind %d": "",
"cmd/compile/internal/ir.Class %d": "",
"cmd/compile/internal/ir.Node %+v": "",
"cmd/compile/internal/ir.Node %L": "",
@@ -93,6 +92,7 @@ var knownFormats = map[string]string{
"cmd/compile/internal/types2.Object %s": "",
"cmd/compile/internal/types2.Type %s": "",
"cmd/compile/internal/types2.color %s": "",
"cmd/compile/internal/walk.initKind %d": "",
"go/constant.Value %#v": "",
"go/constant.Value %s": "",
"map[*cmd/compile/internal/types2.TypeParam]cmd/compile/internal/types2.Type %s": "",

@@ -5,13 +5,13 @@
package amd64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/x86"
)
var leaptr = x86.ALEAQ
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &x86.Linkamd64
arch.REGSP = x86.REGSP
arch.MAXWIDTH = 1 << 50
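
For context, each backend's Init now fills in an ssagen.ArchInfo instead of a gc.Arch. A sketch of the struct's shape, reconstructed only from the fields this commit touches (an assumption; the real declaration in package ssagen may have more fields):

// Reconstructed from usage in this diff; illustrative, not the real declaration.
type ArchInfo struct {
	LinkArch     *obj.LinkArch
	REGSP        int16
	MAXWIDTH     int64
	ZeroRange    func(*objw.Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
	Ginsnop      func(*objw.Progs) *obj.Prog
	Ginsnopdefer func(*objw.Progs) *obj.Prog
	SSAMarkMoves func(*State, *ssa.Block)
	SSAGenValue  func(*State, *ssa.Value)
	SSAGenBlock  func(*State, *ssa.Block, *ssa.Block)
}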

@@ -6,7 +6,9 @@ package amd64
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
"cmd/internal/objabi"
@@ -52,7 +54,7 @@ func dzDI(b int64) int64 {
return -dzClearStep * (dzBlockLen - tailSteps)
}
func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
const (
ax = 1 << iota
x0
@@ -62,67 +64,67 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Pr
return p
}
if cnt%int64(gc.Widthreg) != 0 {
if cnt%int64(types.RegSize) != 0 {
// should only happen with nacl
if cnt%int64(gc.Widthptr) != 0 {
if cnt%int64(types.PtrSize) != 0 {
base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
}
if *state&ax == 0 {
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*state |= ax
}
p = pp.Appendpp(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
off += int64(gc.Widthptr)
cnt -= int64(gc.Widthptr)
p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
off += int64(types.PtrSize)
cnt -= int64(types.PtrSize)
}
if cnt == 8 {
if *state&ax == 0 {
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*state |= ax
}
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
} else if !isPlan9 && cnt <= int64(8*gc.Widthreg) {
p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off)
} else if !isPlan9 && cnt <= int64(8*types.RegSize) {
if *state&x0 == 0 {
p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
*state |= x0
}
for i := int64(0); i < cnt/16; i++ {
p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
}
if cnt%16 != 0 {
p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
}
} else if !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
} else if !isPlan9 && (cnt <= int64(128*types.RegSize)) {
if *state&x0 == 0 {
p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
p = pp.Append(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
*state |= x0
}
p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
p.To.Sym = gc.Duffzero
p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
p.To.Sym = ir.Syms.Duffzero
if cnt%16 != 0 {
p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
}
} else {
if *state&ax == 0 {
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
*state |= ax
}
p = pp.Appendpp(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(gc.Widthreg), obj.TYPE_REG, x86.REG_CX, 0)
p = pp.Appendpp(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
p = pp.Appendpp(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
p = pp.Appendpp(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
p = pp.Append(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
}
return p
}
func ginsnop(pp *gc.Progs) *obj.Prog {
func ginsnop(pp *objw.Progs) *obj.Prog {
// This is a hardware nop (1-byte 0x90) instruction,
// even though we describe it as an explicit XCHGL here.
// Particularly, this does not zero the high 32 bits
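
Throughout this commit, gc.Progs.Appendpp becomes objw.Progs.Append with the argument list unchanged. The signature below is inferred from the call sites in this hunk (an assumption, not the verified declaration):

// Assumed signature, inferred from call sites such as the example below:
//	func (pp *Progs) Append(p *obj.Prog, as obj.As,
//		ftype obj.AddrType, freg int16, foffset int64,
//		ttype obj.AddrType, treg int16, toffset int64) *obj.Prog

// Example taken from the hunk above: append MOVQ $0, AX after p.
p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)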

@@ -9,16 +9,17 @@ import (
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
)
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
flive := b.FlagsLiveAtEnd
for _, c := range b.ControlValues() {
flive = c.Type.IsFlags() || flive
@@ -111,7 +112,7 @@ func moveByType(t *types.Type) obj.As {
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
@@ -165,7 +166,7 @@ func duff(size int64) (int64, int64) {
return off, adj
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpAMD64VFMADD231SD:
p := s.Prog(v.Op.Asm())
@@ -631,12 +632,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = o
}
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL, ssa.OpAMD64LEAW:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
@@ -672,7 +673,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[1].Reg()
case ssa.OpAMD64CMPQconstload, ssa.OpAMD64CMPLconstload, ssa.OpAMD64CMPWconstload, ssa.OpAMD64CMPBconstload:
@@ -680,20 +681,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux2(&p.From, v, sc.Off())
ssagen.AddAux2(&p.From, v, sc.Off())
p.To.Type = obj.TYPE_CONST
p.To.Offset = sc.Val()
case ssa.OpAMD64CMPQloadidx8, ssa.OpAMD64CMPQloadidx1, ssa.OpAMD64CMPLloadidx4, ssa.OpAMD64CMPLloadidx1, ssa.OpAMD64CMPWloadidx2, ssa.OpAMD64CMPWloadidx1, ssa.OpAMD64CMPBloadidx1:
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[2].Reg()
case ssa.OpAMD64CMPQconstloadidx8, ssa.OpAMD64CMPQconstloadidx1, ssa.OpAMD64CMPLconstloadidx4, ssa.OpAMD64CMPLconstloadidx1, ssa.OpAMD64CMPWconstloadidx2, ssa.OpAMD64CMPWconstloadidx1, ssa.OpAMD64CMPBconstloadidx1:
sc := v.AuxValAndOff()
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
gc.AddAux2(&p.From, v, sc.Off())
ssagen.AddAux2(&p.From, v, sc.Off())
p.To.Type = obj.TYPE_CONST
p.To.Offset = sc.Val()
case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
@@ -733,14 +734,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1,
ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2:
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore,
@@ -752,7 +753,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1,
ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2,
ssa.OpAMD64ADDLmodifyidx1, ssa.OpAMD64ADDLmodifyidx4, ssa.OpAMD64ADDLmodifyidx8, ssa.OpAMD64ADDQmodifyidx1, ssa.OpAMD64ADDQmodifyidx8,
@@ -764,7 +765,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
memIdx(&p.To, v)
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify:
sc := v.AuxValAndOff()
off := sc.Off()
@@ -787,7 +788,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(asm)
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, off)
ssagen.AddAux2(&p.To, v, off)
break
}
fallthrough
@@ -802,7 +803,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = val
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, off)
ssagen.AddAux2(&p.To, v, off)
case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
p := s.Prog(v.Op.Asm())
@@ -811,7 +812,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, sc.Off())
ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1,
ssa.OpAMD64ADDLconstmodifyidx1, ssa.OpAMD64ADDLconstmodifyidx4, ssa.OpAMD64ADDLconstmodifyidx8, ssa.OpAMD64ADDQconstmodifyidx1, ssa.OpAMD64ADDQconstmodifyidx8,
ssa.OpAMD64ANDLconstmodifyidx1, ssa.OpAMD64ANDLconstmodifyidx4, ssa.OpAMD64ANDLconstmodifyidx8, ssa.OpAMD64ANDQconstmodifyidx1, ssa.OpAMD64ANDQconstmodifyidx8,
@@ -836,7 +837,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_NONE
}
memIdx(&p.To, v)
gc.AddAux2(&p.To, v, sc.Off())
ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
@@ -866,7 +867,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if v.Reg() != v.Args[0].Reg() {
@@ -892,7 +893,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = r
p.From.Index = i
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if v.Reg() != v.Args[0].Reg() {
@@ -912,7 +913,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
p = s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Duffzero
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = off
case ssa.OpAMD64MOVOconst:
if v.AuxInt != 0 {
@@ -923,7 +924,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpAMD64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_ADDR
p.To.Sym = gc.Duffcopy
p.To.Sym = ir.Syms.Duffcopy
if v.AuxInt%16 != 0 {
v.Fatalf("bad DUFFCOPY AuxInt %v", v.AuxInt)
}
@@ -950,7 +951,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -962,16 +963,16 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpAMD64LoweredHasCPUFeature:
p := s.Prog(x86.AMOVBQZX)
p.From.Type = obj.TYPE_MEM
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64LoweredGetClosurePtr:
// Closure pointer is DX.
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpAMD64LoweredGetG:
r := v.Reg()
// See the comments in cmd/internal/obj/x86/obj6.go
@@ -1013,7 +1014,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpAMD64LoweredGetCallerSP:
// caller's SP is the address of the first arg
mov := x86.AMOVQ
if gc.Widthptr == 4 {
if types.PtrSize == 4 {
mov = x86.AMOVL
}
p := s.Prog(mov)
@@ -1028,14 +1029,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
// arg0 is in DI. Set sym to match where regalloc put arg1.
p.To.Sym = gc.GCWriteBarrierReg[v.Args[1].Reg()]
p.To.Sym = ssagen.GCWriteBarrierReg[v.Args[1].Reg()]
case ssa.OpAMD64LoweredPanicBoundsA, ssa.OpAMD64LoweredPanicBoundsB, ssa.OpAMD64LoweredPanicBoundsC:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
s.UseArgs(int64(2 * gc.Widthptr)) // space used in callee args area by assembly stubs
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(int64(2 * types.PtrSize)) // space used in callee args area by assembly stubs
case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
@@ -1116,7 +1117,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpAMD64SETNEF:
p := s.Prog(v.Op.Asm())
@@ -1172,7 +1173,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpAMD64XCHGB, ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ:
@@ -1185,7 +1186,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = r
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[1].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock:
r := v.Reg0()
if r != v.Args[0].Reg() {
@@ -1197,7 +1198,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = r
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[1].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpAMD64CMPXCHGLlock, ssa.OpAMD64CMPXCHGQlock:
if v.Args[1].Reg() != x86.REG_AX {
v.Fatalf("input[1] not in AX %s", v.LongString())
@@ -1208,7 +1209,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
p = s.Prog(x86.ASETEQ)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
@@ -1219,20 +1220,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpClobber:
p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0xdeaddead
p.To.Type = obj.TYPE_MEM
p.To.Reg = x86.REG_SP
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
p = s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0xdeaddead
p.To.Type = obj.TYPE_MEM
p.To.Reg = x86.REG_SP
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
p.To.Offset += 4
default:
v.Fatalf("genValue not implemented: %s", v.LongString())
@@ -1258,22 +1259,22 @@ var blockJump = [...]struct {
ssa.BlockAMD64NAN: {x86.AJPS, x86.AJPC},
}
var eqfJumps = [2][2]gc.IndexJump{
var eqfJumps = [2][2]ssagen.IndexJump{
{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
}
var nefJumps = [2][2]gc.IndexJump{
var nefJumps = [2][2]ssagen.IndexJump{
{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
}
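
These jump tables are consumed further down in ssaGenBlock for the EQF/NEF block kinds; in package gc that helper was SSAGenState.CombJump, and it presumably moved to ssagen.State along with IndexJump (assumed, since the call sites fall outside the hunks shown here):

// Hypothetical call sites, outside this hunk; names assumed to carry over.
case ssa.BlockAMD64EQF:
	s.CombJump(b, next, &eqfJumps)
case ssa.BlockAMD64NEF:
	s.CombJump(b, next, &nefJumps)
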
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
// defer returns in rax:
@@ -1286,11 +1287,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.To.Reg = x86.REG_AX
p = s.Prog(x86.AJNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:

@@ -5,13 +5,13 @@
package arm
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/arm"
"cmd/internal/objabi"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &arm.Linkarm
arch.REGSP = arm.REGSP
arch.MAXWIDTH = (1 << 32) - 1
@@ -20,7 +20,7 @@ func Init(arch *gc.Arch) {
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}

@@ -5,49 +5,51 @@
package arm
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm"
)
func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if *r0 == 0 {
p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
p = pp.Append(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
*r0 = 1
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
}
} else if cnt <= int64(128*gc.Widthptr) {
p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
} else if cnt <= int64(128*types.PtrSize) {
p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
p.Reg = arm.REGSP
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
} else {
p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
p.Reg = arm.REGSP
p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
p.Reg = arm.REG_R1
p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
p1 := p
p.Scond |= arm.C_PBIT
p = pp.Appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
p = pp.Append(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
p.Reg = arm.REG_R2
p = pp.Appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
gc.Patch(p, p1)
p = pp.Append(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
p.To.SetTarget(p1)
}
return p
}
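
The gc.Patch(p, p1) helper used above is replaced everywhere in this commit by the obj.Prog method p.To.SetTarget(p1); both point the branch instruction p at p1. For comparison, the retired helper was roughly the following (reconstructed for illustration, an assumption rather than the verbatim source):

// Roughly what the old gc.Patch did; reconstructed, not the real source.
func Patch(p, to *obj.Prog) {
	if p.To.Type != obj.TYPE_BRANCH {
		base.Fatalf("patch: not a branch")
	}
	p.To.SetTarget(to) // record to as the branch target of p
}
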
func ginsnop(pp *gc.Progs) *obj.Prog {
func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(arm.AAND)
p.From.Type = obj.TYPE_REG
p.From.Reg = arm.REG_R0

@@ -10,10 +10,10 @@ import (
"math/bits"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm"
@@ -93,7 +93,7 @@ func makeshift(reg int16, typ int64, s int64) shift {
}
// genshift generates a Prog for r = r0 op (r1 shifted by n)
func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(makeshift(r1, typ, n))
@@ -111,7 +111,7 @@ func makeregshift(r1 int16, typ int64, r2 int16) shift {
}
// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
func genregshift(s *ssagen.State, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(makeregshift(r1, typ, r2))
@@ -145,7 +145,7 @@ func getBFC(v uint32) (uint32, uint32) {
return 0xffffffff, 0
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpARMMOVWreg:
if v.Type.IsMemory() {
@@ -183,7 +183,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
@@ -194,7 +194,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpARMADD,
ssa.OpARMADC,
ssa.OpARMSUB,
@@ -545,10 +545,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case *ir.Name:
wantreg = "SP"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case nil:
// No sym, just MOVW $off(SP), R
wantreg = "SP"
@@ -568,7 +568,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARMMOVBstore,
@@ -581,7 +581,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx:
// this is just shift 0 bits
fallthrough
@@ -702,7 +702,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Udiv
p.To.Sym = ir.Syms.Udiv
case ssa.OpARMLoweredWB:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
@@ -712,32 +712,32 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(8) // space used in callee args area by assembly stubs
case ssa.OpARMLoweredPanicExtendA, ssa.OpARMLoweredPanicExtendB, ssa.OpARMLoweredPanicExtendC:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
s.UseArgs(12) // space used in callee args area by assembly stubs
case ssa.OpARMDUFFZERO:
p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpARMDUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffcopy
p.To.Sym = ir.Syms.Duffcopy
p.To.Offset = v.AuxInt
case ssa.OpARMLoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(arm.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = arm.REGTMP
if logopt.Enabled() {
@@ -779,7 +779,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p2.Reg = arm.REG_R1
p3 := s.Prog(arm.ABLE)
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
p3.To.SetTarget(p)
case ssa.OpARMLoweredMove:
// MOVW.P 4(R1), Rtmp
// MOVW.P Rtmp, 4(R2)
@@ -820,7 +820,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.Reg = arm.REG_R1
p4 := s.Prog(arm.ABLE)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
p4.To.SetTarget(p)
case ssa.OpARMEqual,
ssa.OpARMNotEqual,
ssa.OpARMLessThan,
@@ -846,7 +846,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpARMLoweredGetClosurePtr:
// Closure pointer is R7 (arm.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpARMLoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm.AMOVW)
@@ -901,24 +901,24 @@ var blockJump = map[ssa.BlockKind]struct {
}
// To model a 'LEnoov' ('<=' without overflow checking) branching
var leJumps = [2][2]gc.IndexJump{
var leJumps = [2][2]ssagen.IndexJump{
{{Jump: arm.ABEQ, Index: 0}, {Jump: arm.ABPL, Index: 1}}, // next == b.Succs[0]
{{Jump: arm.ABMI, Index: 0}, {Jump: arm.ABEQ, Index: 0}}, // next == b.Succs[1]
}
// To model a 'GTnoov' ('>' without overflow checking) branching
var gtJumps = [2][2]gc.IndexJump{
var gtJumps = [2][2]ssagen.IndexJump{
{{Jump: arm.ABMI, Index: 1}, {Jump: arm.ABEQ, Index: 1}}, // next == b.Succs[0]
{{Jump: arm.ABEQ, Index: 1}, {Jump: arm.ABPL, Index: 0}}, // next == b.Succs[1]
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
@@ -931,11 +931,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.Reg = arm.REG_R0
p = s.Prog(arm.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:

@@ -5,12 +5,12 @@
package arm64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/arm64"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &arm64.Linkarm64
arch.REGSP = arm64.REGSP
arch.MAXWIDTH = 1 << 50
@@ -20,7 +20,7 @@ func Init(arch *gc.Arch) {
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}

@@ -5,7 +5,9 @@
package arm64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
"cmd/internal/objabi"
@@ -22,52 +24,52 @@ func padframe(frame int64) int64 {
return frame
}
func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
}
} else if cnt <= int64(128*gc.Widthptr) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
if cnt%(2*int64(gc.Widthptr)) != 0 {
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
off += int64(gc.Widthptr)
cnt -= int64(gc.Widthptr)
} else if cnt <= int64(128*types.PtrSize) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
if cnt%(2*int64(types.PtrSize)) != 0 {
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
off += int64(types.PtrSize)
cnt -= int64(types.PtrSize)
}
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
p = pp.Appendpp(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
p = pp.Append(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
p.Reg = arm64.REG_R20
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Offset = 4 * (64 - cnt/(2*int64(gc.Widthptr)))
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 4 * (64 - cnt/(2*int64(types.PtrSize)))
} else {
// Not using REGTMP, so this is async preemptible (async preemption clobbers REGTMP).
// We are at the function entry, where no register is live, so it is okay to clobber
// other registers
const rtmp = arm64.REG_R20
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
p.Reg = arm64.REGRT1
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
p = pp.Appendpp(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
p.Reg = arm64.REGRT1
p = pp.Appendpp(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(gc.Widthptr))
p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize))
p.Scond = arm64.C_XPRE
p1 := p
p = pp.Appendpp(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
p = pp.Append(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
p.Reg = arm64.REGRT2
p = pp.Appendpp(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
gc.Patch(p, p1)
p = pp.Append(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
p.To.SetTarget(p1)
}
return p
}
func ginsnop(pp *gc.Progs) *obj.Prog {
func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(arm64.AHINT)
p.From.Type = obj.TYPE_CONST
return p

@@ -8,10 +8,10 @@ import (
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
@@ -83,7 +83,7 @@ func makeshift(reg int16, typ int64, s int64) int64 {
}
// genshift generates a Prog for r = r0 op (r1 shifted by n)
func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = makeshift(r1, typ, n)
@@ -112,7 +112,7 @@ func genIndexedOperand(v *ssa.Value) obj.Addr {
return mop
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpARM64MOVDreg:
if v.Type.IsMemory() {
@@ -150,7 +150,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
@@ -161,7 +161,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpARM64ADD,
ssa.OpARM64SUB,
ssa.OpARM64AND,
@@ -395,10 +395,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case *ir.Name:
wantreg = "SP"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case nil:
// No sym, just MOVD $off(SP), R
wantreg = "SP"
@@ -419,7 +419,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARM64MOVBloadidx,
@@ -446,7 +446,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpARM64MOVBstore,
@@ -463,7 +463,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpARM64MOVBstoreidx,
ssa.OpARM64MOVHstoreidx,
ssa.OpARM64MOVWstoreidx,
@@ -484,7 +484,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = int64(v.Args[2].Reg())
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpARM64MOVBstorezero,
ssa.OpARM64MOVHstorezero,
ssa.OpARM64MOVWstorezero,
@@ -494,7 +494,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = arm64.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpARM64MOVBstorezeroidx,
ssa.OpARM64MOVHstorezeroidx,
ssa.OpARM64MOVWstorezeroidx,
@@ -513,7 +513,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = int64(arm64.REGZERO)
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpARM64BFI,
ssa.OpARM64BFXIL:
r := v.Reg()
@@ -582,7 +582,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p2.From.Type = obj.TYPE_REG
p2.From.Reg = arm64.REGTMP
p2.To.Type = obj.TYPE_BRANCH
gc.Patch(p2, p)
p2.To.SetTarget(p)
case ssa.OpARM64LoweredAtomicExchange64Variant,
ssa.OpARM64LoweredAtomicExchange32Variant:
swap := arm64.ASWPALD
@@ -636,7 +636,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
p3.To.SetTarget(p)
case ssa.OpARM64LoweredAtomicAdd64Variant,
ssa.OpARM64LoweredAtomicAdd32Variant:
// LDADDAL Rarg1, (Rarg0), Rout
@@ -700,13 +700,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p4.From.Type = obj.TYPE_REG
p4.From.Reg = arm64.REGTMP
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
p4.To.SetTarget(p)
p5 := s.Prog(arm64.ACSET)
p5.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
p5.From.Reg = arm64.COND_EQ
p5.To.Type = obj.TYPE_REG
p5.To.Reg = out
gc.Patch(p2, p5)
p2.To.SetTarget(p5)
case ssa.OpARM64LoweredAtomicCas64Variant,
ssa.OpARM64LoweredAtomicCas32Variant:
// Rarg0: ptr
@@ -794,7 +794,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG
p3.From.Reg = arm64.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
p3.To.SetTarget(p)
case ssa.OpARM64LoweredAtomicAnd8Variant,
ssa.OpARM64LoweredAtomicAnd32Variant:
atomic_clear := arm64.ALDCLRALW
@@ -961,7 +961,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpARM64LoweredZero:
// STP.P (ZR,ZR), 16(R16)
@@ -982,12 +982,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p2.Reg = arm64.REG_R16
p3 := s.Prog(arm64.ABLE)
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
p3.To.SetTarget(p)
case ssa.OpARM64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffcopy
p.To.Sym = ir.Syms.Duffcopy
p.To.Offset = v.AuxInt
case ssa.OpARM64LoweredMove:
// MOVD.P 8(R16), Rtmp
@@ -1015,7 +1015,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.Reg = arm64.REG_R16
p4 := s.Prog(arm64.ABLE)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
p4.To.SetTarget(p)
case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
s.Call(v)
case ssa.OpARM64LoweredWB:
@@ -1027,14 +1027,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpARM64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(arm64.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
if logopt.Enabled() {
@@ -1065,7 +1065,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpARM64LoweredGetClosurePtr:
// Closure pointer is R26 (arm64.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpARM64LoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm64.AMOVD)
@@ -1134,24 +1134,24 @@ var blockJump = map[ssa.BlockKind]struct {
}
// To model a 'LEnoov' ('<=' without overflow checking) branching
var leJumps = [2][2]gc.IndexJump{
var leJumps = [2][2]ssagen.IndexJump{
{{Jump: arm64.ABEQ, Index: 0}, {Jump: arm64.ABPL, Index: 1}}, // next == b.Succs[0]
{{Jump: arm64.ABMI, Index: 0}, {Jump: arm64.ABEQ, Index: 0}}, // next == b.Succs[1]
}
// To model a 'GTnoov' ('>' without overflow checking) branching
var gtJumps = [2][2]gc.IndexJump{
var gtJumps = [2][2]ssagen.IndexJump{
{{Jump: arm64.ABMI, Index: 1}, {Jump: arm64.ABEQ, Index: 1}}, // next == b.Succs[0]
{{Jump: arm64.ABEQ, Index: 1}, {Jump: arm64.ABPL, Index: 0}}, // next == b.Succs[1]
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
@@ -1164,11 +1164,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.Reg = arm64.REG_R0
p = s.Prog(arm64.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:

@@ -26,3 +26,54 @@ func Exit(code int) {
}
os.Exit(code)
}
// To enable tracing support (-t flag), set EnableTrace to true.
const EnableTrace = false
func Compiling(pkgs []string) bool {
if Ctxt.Pkgpath != "" {
for _, p := range pkgs {
if Ctxt.Pkgpath == p {
return true
}
}
}
return false
}
// The racewalk pass is currently handled in three parts.
//
// First, for flag_race, it inserts calls to racefuncenter and
// racefuncexit at the start and end (respectively) of each
// function. This is handled below.
//
// Second, during buildssa, it inserts appropriate instrumentation
// calls immediately before each memory load or store. This is handled
// by the (*state).instrument method in ssa.go, so here we just set
// the Func.InstrumentBody flag as needed. For background on why this
// is done during SSA construction rather than a separate SSA pass,
// see issue #19054.
//
// Third we remove calls to racefuncenter and racefuncexit, for leaf
// functions without instrumented operations. This is done as part of
// ssa opt pass via special rule.
// TODO(dvyukov): do not instrument initialization as writes:
// a := make([]int, 10)
// Do not instrument the following packages at all,
// at best instrumentation would cause infinite recursion.
var NoInstrumentPkgs = []string{
"runtime/internal/atomic",
"runtime/internal/sys",
"runtime/internal/math",
"runtime",
"runtime/race",
"runtime/msan",
"internal/cpu",
}
// Don't insert racefuncenterfp/racefuncexit into the following packages.
// Memory accesses in the packages are either uninteresting or will cause false positives.
var NoRacePkgs = []string{"sync", "sync/atomic"}
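
A minimal sketch of how these lists are meant to be consulted, using only names introduced in this hunk; the actual call sites live elsewhere in the compiler and are not part of this diff:

// Hypothetical use (assumed; real call sites are elsewhere):
if base.Flag.Race && base.Compiling(base.NoInstrumentPkgs) {
	// do not instrument this package at all
}
if base.Flag.Race && base.Compiling(base.NoRacePkgs) {
	// skip racefuncenterfp/racefuncexit, but keep other instrumentation
}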

@@ -131,6 +131,9 @@ type CmdFlags struct {
ImportMap map[string]string // set by -importmap OR -importcfg
PackageFile map[string]string // set by -importcfg; nil means not in use
SpectreIndex bool // set by -spectre=index or -spectre=all
// Whether we are adding any sort of code instrumentation, such as
// when the race detector is enabled.
Instrumenting bool
}
}
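
A guess at how the new field is derived, by analogy with the instrumenting flag previously kept in package gc (assumed; the assignment itself is not shown in this diff):

// Assumed initialization elsewhere in the compiler (hypothetical):
base.Flag.Instrumenting = base.Flag.Race || base.Flag.MSan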

@@ -73,7 +73,9 @@ func (x byPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
// FlushErrors sorts errors seen so far by line number, prints them to stdout,
// and empties the errors array.
func FlushErrors() {
if Ctxt != nil && Ctxt.Bso != nil {
Ctxt.Bso.Flush()
}
if len(errorMsgs) == 0 {
return
}
@@ -258,3 +260,5 @@ func ExitIfErrors() {
ErrorExit()
}
}
var AutogeneratedPos src.XPos

@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package base
import (
"fmt"
@@ -11,7 +11,7 @@ import (
"time"
)
var timings Timings
var Timer Timings
// Timings collects the execution times of labeled phases
// which are added through a sequence of Start/Stop calls.

@@ -0,0 +1,190 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bitvec
import (
"math/bits"
"cmd/compile/internal/base"
)
const (
wordBits = 32
wordMask = wordBits - 1
wordShift = 5
)
// A BitVec is a bit vector.
type BitVec struct {
N int32 // number of bits in vector
B []uint32 // words holding bits
}
func New(n int32) BitVec {
nword := (n + wordBits - 1) / wordBits
return BitVec{n, make([]uint32, nword)}
}
type Bulk struct {
words []uint32
nbit int32
nword int32
}
func NewBulk(nbit int32, count int32) Bulk {
nword := (nbit + wordBits - 1) / wordBits
size := int64(nword) * int64(count)
if int64(int32(size*4)) != size*4 {
base.Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
}
return Bulk{
words: make([]uint32, size),
nbit: nbit,
nword: nword,
}
}
func (b *Bulk) Next() BitVec {
out := BitVec{b.nbit, b.words[:b.nword]}
b.words = b.words[b.nword:]
return out
}
func (bv1 BitVec) Eq(bv2 BitVec) bool {
if bv1.N != bv2.N {
base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.N, bv2.N)
}
for i, x := range bv1.B {
if x != bv2.B[i] {
return false
}
}
return true
}
func (dst BitVec) Copy(src BitVec) {
copy(dst.B, src.B)
}
func (bv BitVec) Get(i int32) bool {
if i < 0 || i >= bv.N {
base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.N)
}
mask := uint32(1 << uint(i%wordBits))
return bv.B[i>>wordShift]&mask != 0
}
func (bv BitVec) Set(i int32) {
if i < 0 || i >= bv.N {
base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.N)
}
mask := uint32(1 << uint(i%wordBits))
bv.B[i/wordBits] |= mask
}
func (bv BitVec) Unset(i int32) {
if i < 0 || i >= bv.N {
base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.N)
}
mask := uint32(1 << uint(i%wordBits))
bv.B[i/wordBits] &^= mask
}
// Next returns the smallest index >= i for which bv.Get(i) is true.
// If there is no such index, Next returns -1.
func (bv BitVec) Next(i int32) int32 {
if i >= bv.N {
return -1
}
// Jump i ahead to next word with bits.
if bv.B[i>>wordShift]>>uint(i&wordMask) == 0 {
i &^= wordMask
i += wordBits
for i < bv.N && bv.B[i>>wordShift] == 0 {
i += wordBits
}
}
if i >= bv.N {
return -1
}
// Find 1 bit.
w := bv.B[i>>wordShift] >> uint(i&wordMask)
i += int32(bits.TrailingZeros32(w))
return i
}
func (bv BitVec) IsEmpty() bool {
for _, x := range bv.B {
if x != 0 {
return false
}
}
return true
}
func (bv BitVec) Not() {
for i, x := range bv.B {
bv.B[i] = ^x
}
}
// union
func (dst BitVec) Or(src1, src2 BitVec) {
if len(src1.B) == 0 {
return
}
_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
for i, x := range src1.B {
dst.B[i] = x | src2.B[i]
}
}
// intersection
func (dst BitVec) And(src1, src2 BitVec) {
if len(src1.B) == 0 {
return
}
_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
for i, x := range src1.B {
dst.B[i] = x & src2.B[i]
}
}
// difference
func (dst BitVec) AndNot(src1, src2 BitVec) {
if len(src1.B) == 0 {
return
}
_, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
for i, x := range src1.B {
dst.B[i] = x &^ src2.B[i]
}
}
func (bv BitVec) String() string {
s := make([]byte, 2+bv.N)
copy(s, "#*")
for i := int32(0); i < bv.N; i++ {
ch := byte('0')
if bv.Get(i) {
ch = '1'
}
s[2+i] = ch
}
return string(s)
}
func (bv BitVec) Clear() {
for i := range bv.B {
bv.B[i] = 0
}
}
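
Since bitvec is a new package in this commit, here is a short usage sketch of the API defined above (illustrative only; it exercises exactly the exported functions in this file):

// Illustrative example of the exported bitvec API.
bv := bitvec.New(64)
bv.Set(3)
bv.Set(40)
for i := bv.Next(0); i >= 0; i = bv.Next(i + 1) {
	fmt.Println(i) // prints 3, then 40
}

// Bulk carves many equal-length vectors out of one allocation:
bulk := bitvec.NewBulk(128, 10)
live := bulk.Next() // first of ten 128-bit vectors
live.Set(0)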

@@ -2,391 +2,27 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package dwarfgen
import (
"bytes"
"flag"
"fmt"
"sort"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
"cmd/internal/sys"
"internal/race"
"math/rand"
"sort"
"sync"
"time"
)
// "Portable" code generation.
var (
compilequeue []*ir.Func // functions waiting to be compiled
)
func emitptrargsmap(fn *ir.Func) {
if ir.FuncName(fn) == "_" || fn.Sym().Linkname != "" {
return
}
lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap")
nptr := int(fn.Type().ArgWidth() / int64(Widthptr))
bv := bvalloc(int32(nptr) * 2)
nbitmap := 1
if fn.Type().NumResults() > 0 {
nbitmap = 2
}
off := duint32(lsym, 0, uint32(nbitmap))
off = duint32(lsym, off, uint32(bv.n))
if ir.IsMethod(fn) {
onebitwalktype1(fn.Type().Recvs(), 0, bv)
}
if fn.Type().NumParams() > 0 {
onebitwalktype1(fn.Type().Params(), 0, bv)
}
off = dbvec(lsym, off, bv)
if fn.Type().NumResults() > 0 {
onebitwalktype1(fn.Type().Results(), 0, bv)
off = dbvec(lsym, off, bv)
}
ggloblsym(lsym, int32(off), obj.RODATA|obj.LOCAL)
}
// cmpstackvarlt reports whether the stack variable a sorts before b.
//
// Sort the list of stack variables. Autos after anything else,
// within autos, unused after used, within used, things with
// pointers first, zeroed things first, and then decreasing size.
// Because autos are laid out in decreasing addresses
// on the stack, pointers first, zeroed things first and decreasing size
// really means, in memory, things with pointers needing zeroing at
// the top of the stack and increasing in size.
// Non-autos sort on offset.
func cmpstackvarlt(a, b *ir.Name) bool {
if (a.Class() == ir.PAUTO) != (b.Class() == ir.PAUTO) {
return b.Class() == ir.PAUTO
}
if a.Class() != ir.PAUTO {
return a.FrameOffset() < b.FrameOffset()
}
if a.Used() != b.Used() {
return a.Used()
}
ap := a.Type().HasPointers()
bp := b.Type().HasPointers()
if ap != bp {
return ap
}
ap = a.Needzero()
bp = b.Needzero()
if ap != bp {
return ap
}
if a.Type().Width != b.Type().Width {
return a.Type().Width > b.Type().Width
}
return a.Sym().Name < b.Sym().Name
}
// byStackvar implements sort.Interface for []*Node using cmpstackvarlt.
type byStackVar []*ir.Name
func (s byStackVar) Len() int { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s *ssafn) AllocFrame(f *ssa.Func) {
s.stksize = 0
s.stkptrsize = 0
fn := s.curfn
// Mark the PAUTO's unused.
for _, ln := range fn.Dcl {
if ln.Class() == ir.PAUTO {
ln.SetUsed(false)
}
}
for _, l := range f.RegAlloc {
if ls, ok := l.(ssa.LocalSlot); ok {
ls.N.Name().SetUsed(true)
}
}
scratchUsed := false
for _, b := range f.Blocks {
for _, v := range b.Values {
if n, ok := v.Aux.(*ir.Name); ok {
switch n.Class() {
case ir.PPARAM, ir.PPARAMOUT:
// Don't modify nodfp; it is a global.
if n != nodfp {
n.Name().SetUsed(true)
}
case ir.PAUTO:
n.Name().SetUsed(true)
}
}
if !scratchUsed {
scratchUsed = v.Op.UsesScratch()
}
}
}
if f.Config.NeedsFpScratch && scratchUsed {
s.scratchFpMem = tempAt(src.NoXPos, s.curfn, types.Types[types.TUINT64])
}
sort.Sort(byStackVar(fn.Dcl))
// Reassign stack offsets of the locals that are used.
lastHasPtr := false
for i, n := range fn.Dcl {
if n.Op() != ir.ONAME || n.Class() != ir.PAUTO {
continue
}
if !n.Used() {
fn.Dcl = fn.Dcl[:i]
break
}
dowidth(n.Type())
w := n.Type().Width
if w >= MaxWidth || w < 0 {
base.Fatalf("bad width")
}
if w == 0 && lastHasPtr {
// Pad between a pointer-containing object and a zero-sized object.
// This prevents a pointer to the zero-sized object from being interpreted
// as a pointer to the pointer-containing object (and causing it
// to be scanned when it shouldn't be). See issue 24993.
w = 1
}
s.stksize += w
s.stksize = Rnd(s.stksize, int64(n.Type().Align))
if n.Type().HasPointers() {
s.stkptrsize = s.stksize
lastHasPtr = true
} else {
lastHasPtr = false
}
if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
s.stksize = Rnd(s.stksize, int64(Widthptr))
}
n.SetFrameOffset(-s.stksize)
}
s.stksize = Rnd(s.stksize, int64(Widthreg))
s.stkptrsize = Rnd(s.stkptrsize, int64(Widthreg))
}
func funccompile(fn *ir.Func) {
if Curfn != nil {
base.Fatalf("funccompile %v inside %v", fn.Sym(), Curfn.Sym())
}
if fn.Type() == nil {
if base.Errors() == 0 {
base.Fatalf("funccompile missing type")
}
return
}
// assign parameter offsets
dowidth(fn.Type())
if fn.Body().Len() == 0 {
// Initialize ABI wrappers if necessary.
initLSym(fn, false)
emitptrargsmap(fn)
return
}
dclcontext = ir.PAUTO
Curfn = fn
compile(fn)
Curfn = nil
dclcontext = ir.PEXTERN
}
func compile(fn *ir.Func) {
// Set up the function's LSym early to avoid data races with the assemblers.
// Do this before walk, as walk needs the LSym to set attributes/relocations
// (e.g. in markTypeUsedInInterface).
initLSym(fn, true)
errorsBefore := base.Errors()
walk(fn)
if base.Errors() > errorsBefore {
return
}
// From this point, there should be no uses of Curfn. Enforce that.
Curfn = nil
if ir.FuncName(fn) == "_" {
// We don't need to generate code for this function, just report errors in its body.
// At this point we've generated any errors needed.
// (Beyond here we generate only non-spec errors, like "stack frame too large".)
// See issue 29870.
return
}
// Make sure type syms are declared for all types that might
// be types of stack objects. We need to do this here
// because symbols must be allocated before the parallel
// phase of the compiler.
for _, n := range fn.Dcl {
switch n.Class() {
case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO:
if livenessShouldTrack(n) && n.Addrtaken() {
dtypesym(n.Type())
// Also make sure we allocate a linker symbol
// for the stack object data, for the same reason.
if fn.LSym.Func().StackObjects == nil {
fn.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.LSym.Name + ".stkobj")
}
}
}
}
if compilenow(fn) {
compileSSA(fn, 0)
} else {
compilequeue = append(compilequeue, fn)
}
}
// compilenow reports whether to compile immediately.
// If functions are not compiled immediately,
// they are enqueued in compilequeue,
// which is drained by compileFunctions.
func compilenow(fn *ir.Func) bool {
// Issue 38068: if this function is a method AND an inline
// candidate AND was not inlined (yet), put it onto the compile
// queue instead of compiling it immediately. This is in case we
// wind up inlining it into a method wrapper that is generated by
// compiling a function later on in the Target.Decls list.
if ir.IsMethod(fn) && isInlinableButNotInlined(fn) {
return false
}
return base.Flag.LowerC == 1 && base.Debug.CompileLater == 0
}
// isInlinableButNotInlined returns true if 'fn' was marked as an
// inline candidate but then never inlined (presumably because we
// found no call sites).
func isInlinableButNotInlined(fn *ir.Func) bool {
if fn.Inl == nil {
return false
}
if fn.Sym() == nil {
return true
}
return !fn.Sym().Linksym().WasInlined()
}
const maxStackSize = 1 << 30
// compileSSA builds an SSA backend function,
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
func compileSSA(fn *ir.Func, worker int) {
f := buildssa(fn, worker)
// Note: check arg size to fix issue 25507.
if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize {
largeStackFramesMu.Lock()
largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type().ArgWidth(), pos: fn.Pos()})
largeStackFramesMu.Unlock()
return
}
pp := newProgs(fn, worker)
defer pp.Free()
genssa(f, pp)
// Check frame size again.
// The check above included only the space needed for local variables.
// After genssa, the space needed includes local variables and the callee arg region.
// We must do this check prior to calling pp.Flush.
// If there are any oversized stack frames,
// the assembler may emit inscrutable complaints about invalid instructions.
if pp.Text.To.Offset >= maxStackSize {
largeStackFramesMu.Lock()
locals := f.Frontend().(*ssafn).stksize
largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type().ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()})
largeStackFramesMu.Unlock()
return
}
pp.Flush() // assemble, fill in boilerplate, etc.
// fieldtrack must be called after pp.Flush. See issue 20014.
fieldtrack(pp.Text.From.Sym, fn.FieldTrack)
}
func init() {
if race.Enabled {
rand.Seed(time.Now().UnixNano())
}
}
// compileFunctions compiles all functions in compilequeue.
// It fans out nBackendWorkers to do the work
// and waits for them to complete.
func compileFunctions() {
if len(compilequeue) != 0 {
sizeCalculationDisabled = true // not safe to calculate sizes concurrently
if race.Enabled {
// Randomize compilation order to try to shake out races.
tmp := make([]*ir.Func, len(compilequeue))
perm := rand.Perm(len(compilequeue))
for i, v := range perm {
tmp[v] = compilequeue[i]
}
copy(compilequeue, tmp)
} else {
// Compile the longest functions first,
// since they're most likely to be the slowest.
// This helps avoid stragglers.
sort.Slice(compilequeue, func(i, j int) bool {
return compilequeue[i].Body().Len() > compilequeue[j].Body().Len()
})
}
var wg sync.WaitGroup
base.Ctxt.InParallel = true
c := make(chan *ir.Func, base.Flag.LowerC)
for i := 0; i < base.Flag.LowerC; i++ {
wg.Add(1)
go func(worker int) {
for fn := range c {
compileSSA(fn, worker)
}
wg.Done()
}(i)
}
for _, fn := range compilequeue {
c <- fn
}
close(c)
compilequeue = nil
wg.Wait()
base.Ctxt.InParallel = false
sizeCalculationDisabled = false
}
}
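// For illustration only (simplified, hypothetical helper): the bounded
// fan-out idiom used above, in isolation. n workers drain a channel of jobs;
// close signals that no more work is coming, and wg.Wait blocks until every
// worker has exited.
func fanOut(jobs []func(), n int) {
	var wg sync.WaitGroup
	c := make(chan func(), n)
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for job := range c {
				job()
			}
		}()
	}
	for _, job := range jobs {
		c <- job
	}
	close(c)
	wg.Wait()
}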
func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
fn := curfn.(*ir.Func)
if fn.Nname != nil {
@ -440,7 +76,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S
if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL
continue
}
switch n.Class() {
switch n.Class_ {
case ir.PAUTO:
if !n.Used() {
// Text == nil -> generating abstract function
@ -454,7 +90,7 @@ func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.S
continue
}
apdecls = append(apdecls, n)
fnsym.Func().RecordAutoType(ngotype(n).Linksym())
fnsym.Func().RecordAutoType(reflectdata.TypeSym(n.Type()).Linksym())
}
}
@ -511,100 +147,6 @@ func declPos(decl *ir.Name) src.XPos {
return decl.Pos()
}
// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) {
var vars []*dwarf.Var
var decls []*ir.Name
selected := make(map[*ir.Name]bool)
for _, n := range apDecls {
if ir.IsAutoTmp(n) {
continue
}
decls = append(decls, n)
vars = append(vars, createSimpleVar(fnsym, n))
selected[n] = true
}
return decls, vars, selected
}
func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
var abbrev int
var offs int64
switch n.Class() {
case ir.PAUTO:
offs = n.FrameOffset()
abbrev = dwarf.DW_ABRV_AUTO
if base.Ctxt.FixedFrameSize() == 0 {
offs -= int64(Widthptr)
}
if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
// There is a word space for FP on ARM64 even if the frame pointer is disabled
offs -= int64(Widthptr)
}
case ir.PPARAM, ir.PPARAMOUT:
abbrev = dwarf.DW_ABRV_PARAM
offs = n.FrameOffset() + base.Ctxt.FixedFrameSize()
default:
base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class(), n)
}
typename := dwarf.InfoPrefix + typesymname(n.Type())
delete(fnsym.Func().Autot, ngotype(n).Linksym())
inlIndex := 0
if base.Flag.GenDwarfInl > 1 {
if n.Name().InlFormal() || n.Name().InlLocal() {
inlIndex = posInlIndex(n.Pos()) + 1
if n.Name().InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM
}
}
}
declpos := base.Ctxt.InnermostPos(declPos(n))
return &dwarf.Var{
Name: n.Sym().Name,
IsReturnValue: n.Class() == ir.PPARAMOUT,
IsInlFormal: n.Name().InlFormal(),
Abbrev: abbrev,
StackOffset: int32(offs),
Type: base.Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
}
}
// createComplexVars creates recomposed DWARF vars with location lists,
// suitable for describing optimized code.
func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) {
debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
// Produce a DWARF variable entry for each user variable.
var decls []*ir.Name
var vars []*dwarf.Var
ssaVars := make(map[*ir.Name]bool)
for varID, dvar := range debugInfo.Vars {
n := dvar
ssaVars[n] = true
for _, slot := range debugInfo.VarSlots[varID] {
ssaVars[debugInfo.Slots[slot].N] = true
}
if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
decls = append(decls, n)
vars = append(vars, dvar)
}
}
return decls, vars, ssaVars
}
// createDwarfVars processes fn, returning a list of DWARF variables and the
// Nodes they represent.
func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var) {
@ -643,7 +185,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
if c == '.' || n.Type().IsUntyped() {
continue
}
if n.Class() == ir.PPARAM && !canSSAType(n.Type()) {
if n.Class_ == ir.PPARAM && !ssagen.TypeOK(n.Type()) {
// SSA-able args get location lists, and may move in and
// out of registers, so those are handled elsewhere.
// Autos and named output params seem to get handled
@ -655,13 +197,13 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
decls = append(decls, n)
continue
}
typename := dwarf.InfoPrefix + typesymname(n.Type())
typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
decls = append(decls, n)
abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
isReturnValue := (n.Class() == ir.PPARAMOUT)
if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
isReturnValue := (n.Class_ == ir.PPARAMOUT)
if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
} else if n.Class() == ir.PAUTOHEAP {
} else if n.Class_ == ir.PAUTOHEAP {
// If dcl in question has been promoted to heap, do a bit
// of extra work to recover original class (auto or param);
// see issue 30908. This ensures that we get the proper
@ -670,9 +212,9 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
// and not stack).
// TODO(thanm): generate a better location expression
stackcopy := n.Name().Stackcopy
if stackcopy != nil && (stackcopy.Class() == ir.PPARAM || stackcopy.Class() == ir.PPARAMOUT) {
if stackcopy != nil && (stackcopy.Class_ == ir.PPARAM || stackcopy.Class_ == ir.PPARAMOUT) {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
isReturnValue = (stackcopy.Class() == ir.PPARAMOUT)
isReturnValue = (stackcopy.Class_ == ir.PPARAMOUT)
}
}
inlIndex := 0
@ -698,7 +240,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
ChildIndex: -1,
})
// Record the Go type of n to ensure that it gets emitted by the linker.
fnsym.Func().RecordAutoType(ngotype(n).Linksym())
fnsym.Func().RecordAutoType(reflectdata.TypeSym(n.Type()).Linksym())
}
return decls, vars
@ -725,26 +267,98 @@ func preInliningDcls(fnsym *obj.LSym) []*ir.Name {
return rdcl
}
// stackOffset returns the stack location of a LocalSlot relative to the
// stack pointer, suitable for use in a DWARF location entry. This has nothing
// to do with its offset in the user variable.
func stackOffset(slot ssa.LocalSlot) int32 {
n := slot.N
var off int64
switch n.Class() {
// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) {
var vars []*dwarf.Var
var decls []*ir.Name
selected := make(map[*ir.Name]bool)
for _, n := range apDecls {
if ir.IsAutoTmp(n) {
continue
}
decls = append(decls, n)
vars = append(vars, createSimpleVar(fnsym, n))
selected[n] = true
}
return decls, vars, selected
}
func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
var abbrev int
var offs int64
switch n.Class_ {
case ir.PAUTO:
off = n.FrameOffset()
offs = n.FrameOffset()
abbrev = dwarf.DW_ABRV_AUTO
if base.Ctxt.FixedFrameSize() == 0 {
off -= int64(Widthptr)
offs -= int64(types.PtrSize)
}
if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
// There is a word space for FP on ARM64 even if the frame pointer is disabled
off -= int64(Widthptr)
offs -= int64(types.PtrSize)
}
case ir.PPARAM, ir.PPARAMOUT:
off = n.FrameOffset() + base.Ctxt.FixedFrameSize()
abbrev = dwarf.DW_ABRV_PARAM
offs = n.FrameOffset() + base.Ctxt.FixedFrameSize()
default:
base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class_, n)
}
return int32(off + slot.Off)
typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
delete(fnsym.Func().Autot, reflectdata.TypeSym(n.Type()).Linksym())
inlIndex := 0
if base.Flag.GenDwarfInl > 1 {
if n.Name().InlFormal() || n.Name().InlLocal() {
inlIndex = posInlIndex(n.Pos()) + 1
if n.Name().InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM
}
}
}
declpos := base.Ctxt.InnermostPos(declPos(n))
return &dwarf.Var{
Name: n.Sym().Name,
IsReturnValue: n.Class_ == ir.PPARAMOUT,
IsInlFormal: n.Name().InlFormal(),
Abbrev: abbrev,
StackOffset: int32(offs),
Type: base.Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
}
}
// createComplexVars creates recomposed DWARF vars with location lists,
// suitable for describing optimized code.
func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) {
debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
// Produce a DWARF variable entry for each user variable.
var decls []*ir.Name
var vars []*dwarf.Var
ssaVars := make(map[*ir.Name]bool)
for varID, dvar := range debugInfo.Vars {
n := dvar
ssaVars[n] = true
for _, slot := range debugInfo.VarSlots[varID] {
ssaVars[debugInfo.Slots[slot].N] = true
}
if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
decls = append(decls, n)
vars = append(vars, dvar)
}
}
return decls, vars, ssaVars
}
// createComplexVar builds a single DWARF variable entry and location list.
@ -753,7 +367,7 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var
n := debug.Vars[varID]
var abbrev int
switch n.Class() {
switch n.Class_ {
case ir.PAUTO:
abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
case ir.PPARAM, ir.PPARAMOUT:
@ -762,7 +376,7 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var
return nil
}
gotype := ngotype(n).Linksym()
gotype := reflectdata.TypeSym(n.Type()).Linksym()
delete(fnsym.Func().Autot, gotype)
typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
inlIndex := 0
@ -777,7 +391,7 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var
declpos := base.Ctxt.InnermostPos(n.Pos())
dvar := &dwarf.Var{
Name: n.Sym().Name,
IsReturnValue: n.Class() == ir.PPARAMOUT,
IsReturnValue: n.Class_ == ir.PPARAMOUT,
IsInlFormal: n.Name().InlFormal(),
Abbrev: abbrev,
Type: base.Ctxt.Lookup(typename),
@ -785,7 +399,7 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var
// variables just give it the first one. It's not used otherwise.
// This won't work well if the first slot hasn't been assigned a stack
// location, but it's not obvious how to do better.
StackOffset: stackOffset(debug.Slots[debug.VarSlots[varID][0]]),
StackOffset: ssagen.StackOffset(debug.Slots[debug.VarSlots[varID][0]]),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
@ -801,30 +415,69 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var
return dvar
}
// fieldtrack adds R_USEFIELD relocations to fnsym to record any
// struct fields that it used.
func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
if fnsym == nil {
return
}
if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
// RecordFlags records the specified command-line flags to be placed
// in the DWARF info.
func RecordFlags(flags ...string) {
if base.Ctxt.Pkgpath == "" {
// We can't record the flags if we don't know what the
// package name is.
return
}
trackSyms := make([]*types.Sym, 0, len(tracked))
for sym := range tracked {
trackSyms = append(trackSyms, sym)
type BoolFlag interface {
IsBoolFlag() bool
}
sort.Sort(symByName(trackSyms))
for _, sym := range trackSyms {
r := obj.Addrel(fnsym)
r.Sym = sym.Linksym()
r.Type = objabi.R_USEFIELD
type CountFlag interface {
IsCountFlag() bool
}
var cmd bytes.Buffer
for _, name := range flags {
f := flag.Lookup(name)
if f == nil {
continue
}
getter := f.Value.(flag.Getter)
if getter.String() == f.DefValue {
// Flag has default value, so omit it.
continue
}
if bf, ok := f.Value.(BoolFlag); ok && bf.IsBoolFlag() {
val, ok := getter.Get().(bool)
if ok && val {
fmt.Fprintf(&cmd, " -%s", f.Name)
continue
}
}
if cf, ok := f.Value.(CountFlag); ok && cf.IsCountFlag() {
val, ok := getter.Get().(int)
if ok && val == 1 {
fmt.Fprintf(&cmd, " -%s", f.Name)
continue
}
}
fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get())
}
type symByName []*types.Sym
if cmd.Len() == 0 {
return
}
s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + base.Ctxt.Pkgpath)
s.Type = objabi.SDWARFCUINFO
// Sometimes (for example when building tests) we can link
// together two package main archives. So allow dups.
s.Set(obj.AttrDuplicateOK, true)
base.Ctxt.Data = append(base.Ctxt.Data, s)
s.P = cmd.Bytes()[1:]
}
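// For illustration only (standalone sketch; the flag names are made up): the
// same omit-defaults serialization that RecordFlags performs, using only the
// standard flag package. All standard flag.Value implementations also
// satisfy flag.Getter, so the type assertion below succeeds for them.
func exampleRecordedFlags() string {
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	fs.Int("n", 0, "count")
	fs.Bool("v", false, "verbose")
	_ = fs.Parse([]string{"-n=3", "-v"})
	var cmd bytes.Buffer
	fs.VisitAll(func(f *flag.Flag) {
		g, ok := f.Value.(flag.Getter)
		if !ok || g.String() == f.DefValue {
			return // default value, so omit it
		}
		if b, ok := g.Get().(bool); ok && b {
			fmt.Fprintf(&cmd, " -%s", f.Name)
			return
		}
		fmt.Fprintf(&cmd, " -%s=%v", f.Name, g.Get())
	})
	return cmd.String() // " -n=3 -v"
}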
func (a symByName) Len() int { return len(a) }
func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func (a symByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// RecordPackageName records the name of the package being
// compiled, so that the linker can save it in the compile unit's DIE.
func RecordPackageName() {
s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + base.Ctxt.Pkgpath)
s.Type = objabi.SDWARFCUINFO
// Sometimes (for example when building tests) we can link
// together two package main archives. So allow dups.
s.Set(obj.AttrDuplicateOK, true)
base.Ctxt.Data = append(base.Ctxt.Data, s)
s.P = []byte(types.LocalPkg.Name)
}


@ -2,16 +2,17 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package dwarfgen
import (
"fmt"
"strings"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/src"
"fmt"
"strings"
)
// To identify variables by original source position.
@ -206,7 +207,7 @@ func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
// late in the compilation when it is determined that we need an
// abstract function DIE for an inlined routine imported from a
// previously compiled package.
func genAbstractFunc(fn *obj.LSym) {
func AbstractFunc(fn *obj.LSym) {
ifn := base.Ctxt.DwFixups.GetPrecursorFunc(fn)
if ifn == nil {
base.Ctxt.Diag("failed to locate precursor fn for %v", fn)


@ -2,15 +2,16 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package dwarfgen
import (
"sort"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/src"
"sort"
)
// See golang.org/issue/20390.
@ -30,13 +31,13 @@ func findScope(marks []ir.Mark, pos src.XPos) ir.ScopeID {
func assembleScopes(fnsym *obj.LSym, fn *ir.Func, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope {
// Initialize the DWARF scope tree based on lexical scopes.
dwarfScopes := make([]dwarf.Scope, 1+len(fn.Func().Parents))
for i, parent := range fn.Func().Parents {
dwarfScopes := make([]dwarf.Scope, 1+len(fn.Parents))
for i, parent := range fn.Parents {
dwarfScopes[i+1].Parent = int32(parent)
}
scopeVariables(dwarfVars, varScopes, dwarfScopes)
scopePCs(fnsym, fn.Func().Marks, dwarfScopes)
scopePCs(fnsym, fn.Marks, dwarfScopes)
return compactScopes(dwarfScopes)
}


@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc_test
package dwarfgen
import (
"cmd/internal/objfile"
"debug/dwarf"
"fmt"
"internal/testenv"
@ -18,6 +17,8 @@ import (
"strconv"
"strings"
"testing"
"cmd/internal/objfile"
)
type testline struct {


@ -91,7 +91,7 @@ func ABIAnalyze(t *types.Type, config ABIConfig) ABIParamResultInfo {
result.inparams = append(result.inparams,
s.assignParamOrReturn(f.Type))
}
s.stackOffset = Rnd(s.stackOffset, int64(Widthreg))
s.stackOffset = types.Rnd(s.stackOffset, int64(types.RegSize))
// Record number of spill slots needed.
result.intSpillSlots = s.rUsed.intRegs
@ -160,7 +160,7 @@ type assignState struct {
// specified type.
func (state *assignState) stackSlot(t *types.Type) int64 {
if t.Align > 0 {
state.stackOffset = Rnd(state.stackOffset, int64(t.Align))
state.stackOffset = types.Rnd(state.stackOffset, int64(t.Align))
}
rv := state.stackOffset
state.stackOffset += t.Width
@ -226,7 +226,7 @@ func (state *assignState) floatUsed() int {
// can register allocate, FALSE otherwise (and updates state
// accordingly).
func (state *assignState) regassignIntegral(t *types.Type) bool {
regsNeeded := int(Rnd(t.Width, int64(Widthptr)) / int64(Widthptr))
regsNeeded := int(types.Rnd(t.Width, int64(types.PtrSize)) / int64(types.PtrSize))
// Floating point and complex.
if t.IsFloat() || t.IsComplex() {


@ -7,6 +7,9 @@ package gc
import (
"bufio"
"cmd/compile/internal/base"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
@ -26,23 +29,23 @@ var configAMD64 = ABIConfig{
}
func TestMain(m *testing.M) {
thearch.LinkArch = &x86.Linkamd64
thearch.REGSP = x86.REGSP
thearch.MAXWIDTH = 1 << 50
MaxWidth = thearch.MAXWIDTH
base.Ctxt = obj.Linknew(thearch.LinkArch)
ssagen.Arch.LinkArch = &x86.Linkamd64
ssagen.Arch.REGSP = x86.REGSP
ssagen.Arch.MAXWIDTH = 1 << 50
types.MaxWidth = ssagen.Arch.MAXWIDTH
base.Ctxt = obj.Linknew(ssagen.Arch.LinkArch)
base.Ctxt.DiagFunc = base.Errorf
base.Ctxt.DiagFlush = base.FlushErrors
base.Ctxt.Bso = bufio.NewWriter(os.Stdout)
Widthptr = thearch.LinkArch.PtrSize
Widthreg = thearch.LinkArch.RegSize
types.PtrSize = ssagen.Arch.LinkArch.PtrSize
types.RegSize = ssagen.Arch.LinkArch.RegSize
types.TypeLinkSym = func(t *types.Type) *obj.LSym {
return typenamesym(t).Linksym()
return reflectdata.TypeSym(t).Linksym()
}
types.TypeLinkSym = func(t *types.Type) *obj.LSym {
return typenamesym(t).Linksym()
return reflectdata.TypeSym(t).Linksym()
}
TypecheckInit()
typecheck.Init()
os.Exit(m.Run())
}


@ -9,6 +9,7 @@ package gc
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
@ -19,8 +20,8 @@ import (
func mkParamResultField(t *types.Type, s *types.Sym, which ir.Class) *types.Field {
field := types.NewField(src.NoXPos, s, t)
n := NewName(s)
n.SetClass(which)
n := typecheck.NewName(s)
n.Class_ = which
field.Nname = n
n.SetType(t)
return field
@ -42,7 +43,7 @@ func mkstruct(fieldtypes []*types.Type) *types.Type {
}
func mkFuncType(rcvr *types.Type, ins []*types.Type, outs []*types.Type) *types.Type {
q := lookup("?")
q := typecheck.Lookup("?")
inf := []*types.Field{}
for _, it := range ins {
inf = append(inf, mkParamResultField(it, q, ir.PPARAM))
@ -78,7 +79,7 @@ func verifyParamResultOffset(t *testing.T, f *types.Field, r ABIParamAssignment,
n := ir.AsNode(f.Nname).(*ir.Name)
if n.FrameOffset() != int64(r.Offset) {
t.Errorf("%s %d: got offset %d wanted %d t=%v",
which, idx, r.Offset, n.Offset(), f.Type)
which, idx, r.Offset, n.Offset_, f.Type)
return 1
}
return 0
@ -106,7 +107,7 @@ func difftokens(atoks []string, etoks []string) string {
func abitest(t *testing.T, ft *types.Type, exp expectedDump) {
dowidth(ft)
types.CalcSize(ft)
// Analyze with full set of registers.
regRes := ABIAnalyze(ft, configAMD64)


@ -1,185 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
)
type exporter struct {
marked map[*types.Type]bool // types already seen by markType
}
// markObject visits a reachable object.
func (p *exporter) markObject(n ir.Node) {
if n.Op() == ir.ONAME {
n := n.(*ir.Name)
if n.Class() == ir.PFUNC {
inlFlood(n, exportsym)
}
}
p.markType(n.Type())
}
// markType recursively visits types reachable from t to identify
// functions whose inline bodies may be needed.
func (p *exporter) markType(t *types.Type) {
if p.marked[t] {
return
}
p.marked[t] = true
// If this is a named type, mark all of its associated
// methods. Skip interface types because t.Methods contains
// only their unexpanded method set (i.e., exclusive of
// interface embeddings), and the switch statement below
// handles their full method set.
if t.Sym() != nil && t.Kind() != types.TINTER {
for _, m := range t.Methods().Slice() {
if types.IsExported(m.Sym.Name) {
p.markObject(ir.AsNode(m.Nname))
}
}
}
// Recursively mark any types that can be produced given a
// value of type t: dereferencing a pointer; indexing or
// iterating over an array, slice, or map; receiving from a
// channel; accessing a struct field or interface method; or
// calling a function.
//
// Notably, we don't mark function parameter types, because
// the user already needs some way to construct values of
// those types.
switch t.Kind() {
case types.TPTR, types.TARRAY, types.TSLICE:
p.markType(t.Elem())
case types.TCHAN:
if t.ChanDir().CanRecv() {
p.markType(t.Elem())
}
case types.TMAP:
p.markType(t.Key())
p.markType(t.Elem())
case types.TSTRUCT:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
p.markType(f.Type)
}
}
case types.TFUNC:
for _, f := range t.Results().FieldSlice() {
p.markType(f.Type)
}
case types.TINTER:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) {
p.markType(f.Type)
}
}
}
}
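// For illustration only (hypothetical types, not part of the compiler): with
// the declarations below, markType(Outer) reaches Inner through the exported
// fields P and M (pointer dereference, then map key and element), and marks
// Result because function result types are marked; Hidden is deliberately
// skipped because it appears only as a parameter type.
//
//	type Hidden struct{}
//	type Result struct{}
//	type Inner struct{ X int }
//	type Outer struct {
//		P *Inner              // deref: Inner marked
//		M map[string]Inner    // key and element marked
//		F func(Hidden) Result // results marked; parameter types are not
//	}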
// ----------------------------------------------------------------------------
// Export format
// Tags. Must be < 0.
const (
// Objects
packageTag = -(iota + 1)
constTag
typeTag
varTag
funcTag
endTag
// Types
namedTag
arrayTag
sliceTag
dddTag
structTag
pointerTag
signatureTag
interfaceTag
mapTag
chanTag
// Values
falseTag
trueTag
int64Tag
floatTag
fractionTag // not used by gc
complexTag
stringTag
nilTag
unknownTag // not used by gc (only appears in packages with errors)
// Type aliases
aliasTag
)
var predecl []*types.Type // initialized lazily
func predeclared() []*types.Type {
if predecl == nil {
// initialize lazily to be sure that all
// elements have been initialized before first use
predecl = []*types.Type{
// basic types
types.Types[types.TBOOL],
types.Types[types.TINT],
types.Types[types.TINT8],
types.Types[types.TINT16],
types.Types[types.TINT32],
types.Types[types.TINT64],
types.Types[types.TUINT],
types.Types[types.TUINT8],
types.Types[types.TUINT16],
types.Types[types.TUINT32],
types.Types[types.TUINT64],
types.Types[types.TUINTPTR],
types.Types[types.TFLOAT32],
types.Types[types.TFLOAT64],
types.Types[types.TCOMPLEX64],
types.Types[types.TCOMPLEX128],
types.Types[types.TSTRING],
// basic type aliases
types.ByteType,
types.RuneType,
// error
types.ErrorType,
// untyped types
types.UntypedBool,
types.UntypedInt,
types.UntypedRune,
types.UntypedFloat,
types.UntypedComplex,
types.UntypedString,
types.Types[types.TNIL],
// package unsafe
types.Types[types.TUNSAFEPTR],
// invalid type (package contains errors)
types.Types[types.Txxx],
// any type, for builtin export data
types.Types[types.TANY],
}
}
return predecl
}


@ -1,343 +0,0 @@
// Code generated by mkbuiltin.go. DO NOT EDIT.
package gc
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
)
var runtimeDecls = [...]struct {
name string
tag int
typ int
}{
{"newobject", funcTag, 4},
{"mallocgc", funcTag, 8},
{"panicdivide", funcTag, 9},
{"panicshift", funcTag, 9},
{"panicmakeslicelen", funcTag, 9},
{"panicmakeslicecap", funcTag, 9},
{"throwinit", funcTag, 9},
{"panicwrap", funcTag, 9},
{"gopanic", funcTag, 11},
{"gorecover", funcTag, 14},
{"goschedguarded", funcTag, 9},
{"goPanicIndex", funcTag, 16},
{"goPanicIndexU", funcTag, 18},
{"goPanicSliceAlen", funcTag, 16},
{"goPanicSliceAlenU", funcTag, 18},
{"goPanicSliceAcap", funcTag, 16},
{"goPanicSliceAcapU", funcTag, 18},
{"goPanicSliceB", funcTag, 16},
{"goPanicSliceBU", funcTag, 18},
{"goPanicSlice3Alen", funcTag, 16},
{"goPanicSlice3AlenU", funcTag, 18},
{"goPanicSlice3Acap", funcTag, 16},
{"goPanicSlice3AcapU", funcTag, 18},
{"goPanicSlice3B", funcTag, 16},
{"goPanicSlice3BU", funcTag, 18},
{"goPanicSlice3C", funcTag, 16},
{"goPanicSlice3CU", funcTag, 18},
{"printbool", funcTag, 19},
{"printfloat", funcTag, 21},
{"printint", funcTag, 23},
{"printhex", funcTag, 25},
{"printuint", funcTag, 25},
{"printcomplex", funcTag, 27},
{"printstring", funcTag, 29},
{"printpointer", funcTag, 30},
{"printuintptr", funcTag, 31},
{"printiface", funcTag, 30},
{"printeface", funcTag, 30},
{"printslice", funcTag, 30},
{"printnl", funcTag, 9},
{"printsp", funcTag, 9},
{"printlock", funcTag, 9},
{"printunlock", funcTag, 9},
{"concatstring2", funcTag, 34},
{"concatstring3", funcTag, 35},
{"concatstring4", funcTag, 36},
{"concatstring5", funcTag, 37},
{"concatstrings", funcTag, 39},
{"cmpstring", funcTag, 40},
{"intstring", funcTag, 43},
{"slicebytetostring", funcTag, 44},
{"slicebytetostringtmp", funcTag, 45},
{"slicerunetostring", funcTag, 48},
{"stringtoslicebyte", funcTag, 50},
{"stringtoslicerune", funcTag, 53},
{"slicecopy", funcTag, 54},
{"decoderune", funcTag, 55},
{"countrunes", funcTag, 56},
{"convI2I", funcTag, 57},
{"convT16", funcTag, 58},
{"convT32", funcTag, 58},
{"convT64", funcTag, 58},
{"convTstring", funcTag, 58},
{"convTslice", funcTag, 58},
{"convT2E", funcTag, 59},
{"convT2Enoptr", funcTag, 59},
{"convT2I", funcTag, 59},
{"convT2Inoptr", funcTag, 59},
{"assertE2I", funcTag, 57},
{"assertE2I2", funcTag, 60},
{"assertI2I", funcTag, 57},
{"assertI2I2", funcTag, 60},
{"panicdottypeE", funcTag, 61},
{"panicdottypeI", funcTag, 61},
{"panicnildottype", funcTag, 62},
{"ifaceeq", funcTag, 64},
{"efaceeq", funcTag, 64},
{"fastrand", funcTag, 66},
{"makemap64", funcTag, 68},
{"makemap", funcTag, 69},
{"makemap_small", funcTag, 70},
{"mapaccess1", funcTag, 71},
{"mapaccess1_fast32", funcTag, 72},
{"mapaccess1_fast64", funcTag, 72},
{"mapaccess1_faststr", funcTag, 72},
{"mapaccess1_fat", funcTag, 73},
{"mapaccess2", funcTag, 74},
{"mapaccess2_fast32", funcTag, 75},
{"mapaccess2_fast64", funcTag, 75},
{"mapaccess2_faststr", funcTag, 75},
{"mapaccess2_fat", funcTag, 76},
{"mapassign", funcTag, 71},
{"mapassign_fast32", funcTag, 72},
{"mapassign_fast32ptr", funcTag, 72},
{"mapassign_fast64", funcTag, 72},
{"mapassign_fast64ptr", funcTag, 72},
{"mapassign_faststr", funcTag, 72},
{"mapiterinit", funcTag, 77},
{"mapdelete", funcTag, 77},
{"mapdelete_fast32", funcTag, 78},
{"mapdelete_fast64", funcTag, 78},
{"mapdelete_faststr", funcTag, 78},
{"mapiternext", funcTag, 79},
{"mapclear", funcTag, 80},
{"makechan64", funcTag, 82},
{"makechan", funcTag, 83},
{"chanrecv1", funcTag, 85},
{"chanrecv2", funcTag, 86},
{"chansend1", funcTag, 88},
{"closechan", funcTag, 30},
{"writeBarrier", varTag, 90},
{"typedmemmove", funcTag, 91},
{"typedmemclr", funcTag, 92},
{"typedslicecopy", funcTag, 93},
{"selectnbsend", funcTag, 94},
{"selectnbrecv", funcTag, 95},
{"selectnbrecv2", funcTag, 97},
{"selectsetpc", funcTag, 98},
{"selectgo", funcTag, 99},
{"block", funcTag, 9},
{"makeslice", funcTag, 100},
{"makeslice64", funcTag, 101},
{"makeslicecopy", funcTag, 102},
{"growslice", funcTag, 104},
{"memmove", funcTag, 105},
{"memclrNoHeapPointers", funcTag, 106},
{"memclrHasPointers", funcTag, 106},
{"memequal", funcTag, 107},
{"memequal0", funcTag, 108},
{"memequal8", funcTag, 108},
{"memequal16", funcTag, 108},
{"memequal32", funcTag, 108},
{"memequal64", funcTag, 108},
{"memequal128", funcTag, 108},
{"f32equal", funcTag, 109},
{"f64equal", funcTag, 109},
{"c64equal", funcTag, 109},
{"c128equal", funcTag, 109},
{"strequal", funcTag, 109},
{"interequal", funcTag, 109},
{"nilinterequal", funcTag, 109},
{"memhash", funcTag, 110},
{"memhash0", funcTag, 111},
{"memhash8", funcTag, 111},
{"memhash16", funcTag, 111},
{"memhash32", funcTag, 111},
{"memhash64", funcTag, 111},
{"memhash128", funcTag, 111},
{"f32hash", funcTag, 111},
{"f64hash", funcTag, 111},
{"c64hash", funcTag, 111},
{"c128hash", funcTag, 111},
{"strhash", funcTag, 111},
{"interhash", funcTag, 111},
{"nilinterhash", funcTag, 111},
{"int64div", funcTag, 112},
{"uint64div", funcTag, 113},
{"int64mod", funcTag, 112},
{"uint64mod", funcTag, 113},
{"float64toint64", funcTag, 114},
{"float64touint64", funcTag, 115},
{"float64touint32", funcTag, 116},
{"int64tofloat64", funcTag, 117},
{"uint64tofloat64", funcTag, 118},
{"uint32tofloat64", funcTag, 119},
{"complex128div", funcTag, 120},
{"racefuncenter", funcTag, 31},
{"racefuncenterfp", funcTag, 9},
{"racefuncexit", funcTag, 9},
{"raceread", funcTag, 31},
{"racewrite", funcTag, 31},
{"racereadrange", funcTag, 121},
{"racewriterange", funcTag, 121},
{"msanread", funcTag, 121},
{"msanwrite", funcTag, 121},
{"msanmove", funcTag, 122},
{"checkptrAlignment", funcTag, 123},
{"checkptrArithmetic", funcTag, 125},
{"libfuzzerTraceCmp1", funcTag, 127},
{"libfuzzerTraceCmp2", funcTag, 129},
{"libfuzzerTraceCmp4", funcTag, 130},
{"libfuzzerTraceCmp8", funcTag, 131},
{"libfuzzerTraceConstCmp1", funcTag, 127},
{"libfuzzerTraceConstCmp2", funcTag, 129},
{"libfuzzerTraceConstCmp4", funcTag, 130},
{"libfuzzerTraceConstCmp8", funcTag, 131},
{"x86HasPOPCNT", varTag, 6},
{"x86HasSSE41", varTag, 6},
{"x86HasFMA", varTag, 6},
{"armHasVFPv4", varTag, 6},
{"arm64HasATOMICS", varTag, 6},
}
func runtimeTypes() []*types.Type {
var typs [132]*types.Type
typs[0] = types.ByteType
typs[1] = types.NewPtr(typs[0])
typs[2] = types.Types[types.TANY]
typs[3] = types.NewPtr(typs[2])
typs[4] = functype(nil, []*ir.Field{anonfield(typs[1])}, []*ir.Field{anonfield(typs[3])})
typs[5] = types.Types[types.TUINTPTR]
typs[6] = types.Types[types.TBOOL]
typs[7] = types.Types[types.TUNSAFEPTR]
typs[8] = functype(nil, []*ir.Field{anonfield(typs[5]), anonfield(typs[1]), anonfield(typs[6])}, []*ir.Field{anonfield(typs[7])})
typs[9] = functype(nil, nil, nil)
typs[10] = types.Types[types.TINTER]
typs[11] = functype(nil, []*ir.Field{anonfield(typs[10])}, nil)
typs[12] = types.Types[types.TINT32]
typs[13] = types.NewPtr(typs[12])
typs[14] = functype(nil, []*ir.Field{anonfield(typs[13])}, []*ir.Field{anonfield(typs[10])})
typs[15] = types.Types[types.TINT]
typs[16] = functype(nil, []*ir.Field{anonfield(typs[15]), anonfield(typs[15])}, nil)
typs[17] = types.Types[types.TUINT]
typs[18] = functype(nil, []*ir.Field{anonfield(typs[17]), anonfield(typs[15])}, nil)
typs[19] = functype(nil, []*ir.Field{anonfield(typs[6])}, nil)
typs[20] = types.Types[types.TFLOAT64]
typs[21] = functype(nil, []*ir.Field{anonfield(typs[20])}, nil)
typs[22] = types.Types[types.TINT64]
typs[23] = functype(nil, []*ir.Field{anonfield(typs[22])}, nil)
typs[24] = types.Types[types.TUINT64]
typs[25] = functype(nil, []*ir.Field{anonfield(typs[24])}, nil)
typs[26] = types.Types[types.TCOMPLEX128]
typs[27] = functype(nil, []*ir.Field{anonfield(typs[26])}, nil)
typs[28] = types.Types[types.TSTRING]
typs[29] = functype(nil, []*ir.Field{anonfield(typs[28])}, nil)
typs[30] = functype(nil, []*ir.Field{anonfield(typs[2])}, nil)
typs[31] = functype(nil, []*ir.Field{anonfield(typs[5])}, nil)
typs[32] = types.NewArray(typs[0], 32)
typs[33] = types.NewPtr(typs[32])
typs[34] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])})
typs[35] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])})
typs[36] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])})
typs[37] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[28])})
typs[38] = types.NewSlice(typs[28])
typs[39] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[38])}, []*ir.Field{anonfield(typs[28])})
typs[40] = functype(nil, []*ir.Field{anonfield(typs[28]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[15])})
typs[41] = types.NewArray(typs[0], 4)
typs[42] = types.NewPtr(typs[41])
typs[43] = functype(nil, []*ir.Field{anonfield(typs[42]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[28])})
typs[44] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[28])})
typs[45] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[28])})
typs[46] = types.RuneType
typs[47] = types.NewSlice(typs[46])
typs[48] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[47])}, []*ir.Field{anonfield(typs[28])})
typs[49] = types.NewSlice(typs[0])
typs[50] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[49])})
typs[51] = types.NewArray(typs[46], 32)
typs[52] = types.NewPtr(typs[51])
typs[53] = functype(nil, []*ir.Field{anonfield(typs[52]), anonfield(typs[28])}, []*ir.Field{anonfield(typs[47])})
typs[54] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[15])})
typs[55] = functype(nil, []*ir.Field{anonfield(typs[28]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[46]), anonfield(typs[15])})
typs[56] = functype(nil, []*ir.Field{anonfield(typs[28])}, []*ir.Field{anonfield(typs[15])})
typs[57] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[2])})
typs[58] = functype(nil, []*ir.Field{anonfield(typs[2])}, []*ir.Field{anonfield(typs[7])})
typs[59] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[2])})
typs[60] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[2]), anonfield(typs[6])})
typs[61] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[1])}, nil)
typs[62] = functype(nil, []*ir.Field{anonfield(typs[1])}, nil)
typs[63] = types.NewPtr(typs[5])
typs[64] = functype(nil, []*ir.Field{anonfield(typs[63]), anonfield(typs[7]), anonfield(typs[7])}, []*ir.Field{anonfield(typs[6])})
typs[65] = types.Types[types.TUINT32]
typs[66] = functype(nil, nil, []*ir.Field{anonfield(typs[65])})
typs[67] = types.NewMap(typs[2], typs[2])
typs[68] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[67])})
typs[69] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[67])})
typs[70] = functype(nil, nil, []*ir.Field{anonfield(typs[67])})
typs[71] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[3])})
typs[72] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[3])})
typs[73] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Field{anonfield(typs[3])})
typs[74] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[3]), anonfield(typs[6])})
typs[75] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, []*ir.Field{anonfield(typs[3]), anonfield(typs[6])})
typs[76] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3]), anonfield(typs[1])}, []*ir.Field{anonfield(typs[3]), anonfield(typs[6])})
typs[77] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[3])}, nil)
typs[78] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67]), anonfield(typs[2])}, nil)
typs[79] = functype(nil, []*ir.Field{anonfield(typs[3])}, nil)
typs[80] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[67])}, nil)
typs[81] = types.NewChan(typs[2], types.Cboth)
typs[82] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[81])})
typs[83] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[81])})
typs[84] = types.NewChan(typs[2], types.Crecv)
typs[85] = functype(nil, []*ir.Field{anonfield(typs[84]), anonfield(typs[3])}, nil)
typs[86] = functype(nil, []*ir.Field{anonfield(typs[84]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[6])})
typs[87] = types.NewChan(typs[2], types.Csend)
typs[88] = functype(nil, []*ir.Field{anonfield(typs[87]), anonfield(typs[3])}, nil)
typs[89] = types.NewArray(typs[0], 3)
typs[90] = tostruct([]*ir.Field{namedfield("enabled", typs[6]), namedfield("pad", typs[89]), namedfield("needed", typs[6]), namedfield("cgo", typs[6]), namedfield("alignme", typs[24])})
typs[91] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[3])}, nil)
typs[92] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3])}, nil)
typs[93] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[3]), anonfield(typs[15]), anonfield(typs[3]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[15])})
typs[94] = functype(nil, []*ir.Field{anonfield(typs[87]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[6])})
typs[95] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[84])}, []*ir.Field{anonfield(typs[6])})
typs[96] = types.NewPtr(typs[6])
typs[97] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[96]), anonfield(typs[84])}, []*ir.Field{anonfield(typs[6])})
typs[98] = functype(nil, []*ir.Field{anonfield(typs[63])}, nil)
typs[99] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[1]), anonfield(typs[63]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[6])}, []*ir.Field{anonfield(typs[15]), anonfield(typs[6])})
typs[100] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[7])})
typs[101] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[22]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[7])})
typs[102] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15]), anonfield(typs[15]), anonfield(typs[7])}, []*ir.Field{anonfield(typs[7])})
typs[103] = types.NewSlice(typs[2])
typs[104] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[103]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[103])})
typs[105] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, nil)
typs[106] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[5])}, nil)
typs[107] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[6])})
typs[108] = functype(nil, []*ir.Field{anonfield(typs[3]), anonfield(typs[3])}, []*ir.Field{anonfield(typs[6])})
typs[109] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[7])}, []*ir.Field{anonfield(typs[6])})
typs[110] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[5]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[5])})
typs[111] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[5])}, []*ir.Field{anonfield(typs[5])})
typs[112] = functype(nil, []*ir.Field{anonfield(typs[22]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[22])})
typs[113] = functype(nil, []*ir.Field{anonfield(typs[24]), anonfield(typs[24])}, []*ir.Field{anonfield(typs[24])})
typs[114] = functype(nil, []*ir.Field{anonfield(typs[20])}, []*ir.Field{anonfield(typs[22])})
typs[115] = functype(nil, []*ir.Field{anonfield(typs[20])}, []*ir.Field{anonfield(typs[24])})
typs[116] = functype(nil, []*ir.Field{anonfield(typs[20])}, []*ir.Field{anonfield(typs[65])})
typs[117] = functype(nil, []*ir.Field{anonfield(typs[22])}, []*ir.Field{anonfield(typs[20])})
typs[118] = functype(nil, []*ir.Field{anonfield(typs[24])}, []*ir.Field{anonfield(typs[20])})
typs[119] = functype(nil, []*ir.Field{anonfield(typs[65])}, []*ir.Field{anonfield(typs[20])})
typs[120] = functype(nil, []*ir.Field{anonfield(typs[26]), anonfield(typs[26])}, []*ir.Field{anonfield(typs[26])})
typs[121] = functype(nil, []*ir.Field{anonfield(typs[5]), anonfield(typs[5])}, nil)
typs[122] = functype(nil, []*ir.Field{anonfield(typs[5]), anonfield(typs[5]), anonfield(typs[5])}, nil)
typs[123] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[1]), anonfield(typs[5])}, nil)
typs[124] = types.NewSlice(typs[7])
typs[125] = functype(nil, []*ir.Field{anonfield(typs[7]), anonfield(typs[124])}, nil)
typs[126] = types.Types[types.TUINT8]
typs[127] = functype(nil, []*ir.Field{anonfield(typs[126]), anonfield(typs[126])}, nil)
typs[128] = types.Types[types.TUINT16]
typs[129] = functype(nil, []*ir.Field{anonfield(typs[128]), anonfield(typs[128])}, nil)
typs[130] = functype(nil, []*ir.Field{anonfield(typs[65]), anonfield(typs[65])}, nil)
typs[131] = functype(nil, []*ir.Field{anonfield(typs[24]), anonfield(typs[24])}, nil)
return typs[:]
}


@ -1,280 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"math/bits"
"cmd/compile/internal/base"
)
const (
wordBits = 32
wordMask = wordBits - 1
wordShift = 5
)
// A bvec is a bit vector.
type bvec struct {
n int32 // number of bits in vector
b []uint32 // words holding bits
}
func bvalloc(n int32) bvec {
nword := (n + wordBits - 1) / wordBits
return bvec{n, make([]uint32, nword)}
}
type bulkBvec struct {
words []uint32
nbit int32
nword int32
}
func bvbulkalloc(nbit int32, count int32) bulkBvec {
nword := (nbit + wordBits - 1) / wordBits
size := int64(nword) * int64(count)
if int64(int32(size*4)) != size*4 {
base.Fatalf("bvbulkalloc too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
}
return bulkBvec{
words: make([]uint32, size),
nbit: nbit,
nword: nword,
}
}
func (b *bulkBvec) next() bvec {
out := bvec{b.nbit, b.words[:b.nword]}
b.words = b.words[b.nword:]
return out
}
func (bv1 bvec) Eq(bv2 bvec) bool {
if bv1.n != bv2.n {
base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
}
for i, x := range bv1.b {
if x != bv2.b[i] {
return false
}
}
return true
}
func (dst bvec) Copy(src bvec) {
copy(dst.b, src.b)
}
func (bv bvec) Get(i int32) bool {
if i < 0 || i >= bv.n {
base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
return bv.b[i>>wordShift]&mask != 0
}
func (bv bvec) Set(i int32) {
if i < 0 || i >= bv.n {
base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
bv.b[i/wordBits] |= mask
}
func (bv bvec) Unset(i int32) {
if i < 0 || i >= bv.n {
base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.n)
}
mask := uint32(1 << uint(i%wordBits))
bv.b[i/wordBits] &^= mask
}
// Next returns the smallest index >= i for which bv.Get(i) reports true.
// If there is no such index, Next returns -1.
func (bv bvec) Next(i int32) int32 {
if i >= bv.n {
return -1
}
// Jump i ahead to next word with bits.
if bv.b[i>>wordShift]>>uint(i&wordMask) == 0 {
i &^= wordMask
i += wordBits
for i < bv.n && bv.b[i>>wordShift] == 0 {
i += wordBits
}
}
if i >= bv.n {
return -1
}
// Find 1 bit.
w := bv.b[i>>wordShift] >> uint(i&wordMask)
i += int32(bits.TrailingZeros32(w))
return i
}
func (bv bvec) IsEmpty() bool {
for _, x := range bv.b {
if x != 0 {
return false
}
}
return true
}
func (bv bvec) Not() {
for i, x := range bv.b {
bv.b[i] = ^x
}
}
// union
func (dst bvec) Or(src1, src2 bvec) {
if len(src1.b) == 0 {
return
}
_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
for i, x := range src1.b {
dst.b[i] = x | src2.b[i]
}
}
// intersection
func (dst bvec) And(src1, src2 bvec) {
if len(src1.b) == 0 {
return
}
_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
for i, x := range src1.b {
dst.b[i] = x & src2.b[i]
}
}
// difference
func (dst bvec) AndNot(src1, src2 bvec) {
if len(src1.b) == 0 {
return
}
_, _ = dst.b[len(src1.b)-1], src2.b[len(src1.b)-1] // hoist bounds checks out of the loop
for i, x := range src1.b {
dst.b[i] = x &^ src2.b[i]
}
}
func (bv bvec) String() string {
s := make([]byte, 2+bv.n)
copy(s, "#*")
for i := int32(0); i < bv.n; i++ {
ch := byte('0')
if bv.Get(i) {
ch = '1'
}
s[2+i] = ch
}
return string(s)
}
func (bv bvec) Clear() {
for i := range bv.b {
bv.b[i] = 0
}
}
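// For illustration only (hypothetical usage): allocating, populating, and
// iterating a bit vector. Next supports the usual sparse-iteration idiom,
// and bvbulkalloc amortizes one backing slice across many vectors.
func exampleBvec() {
	bv := bvalloc(100)
	bv.Set(3)
	bv.Set(64)
	for i := bv.Next(0); i >= 0; i = bv.Next(i + 1) {
		_ = i // visits 3, then 64
	}
	bulk := bvbulkalloc(100, 10)
	first := bulk.next()
	first.Set(0)
}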
// FNV-1 hash function constants.
const (
H0 = 2166136261
Hp = 16777619
)
func hashbitmap(h uint32, bv bvec) uint32 {
n := int((bv.n + 31) / 32)
for i := 0; i < n; i++ {
w := bv.b[i]
h = (h * Hp) ^ (w & 0xff)
h = (h * Hp) ^ ((w >> 8) & 0xff)
h = (h * Hp) ^ ((w >> 16) & 0xff)
h = (h * Hp) ^ ((w >> 24) & 0xff)
}
return h
}
// bvecSet is a set of bvecs, in initial insertion order.
type bvecSet struct {
index []int // hash -> uniq index. -1 indicates empty slot.
uniq []bvec // unique bvecs, in insertion order
}
func (m *bvecSet) grow() {
// Allocate new index.
n := len(m.index) * 2
if n == 0 {
n = 32
}
newIndex := make([]int, n)
for i := range newIndex {
newIndex[i] = -1
}
// Rehash into newIndex.
for i, bv := range m.uniq {
h := hashbitmap(H0, bv) % uint32(len(newIndex))
for {
j := newIndex[h]
if j < 0 {
newIndex[h] = i
break
}
h++
if h == uint32(len(newIndex)) {
h = 0
}
}
}
m.index = newIndex
}
// add adds bv to the set and returns its index in the slice returned by extractUniqe.
// The caller must not modify bv after this.
func (m *bvecSet) add(bv bvec) int {
if len(m.uniq)*4 >= len(m.index) {
m.grow()
}
index := m.index
h := hashbitmap(H0, bv) % uint32(len(index))
for {
j := index[h]
if j < 0 {
// New bvec.
index[h] = len(m.uniq)
m.uniq = append(m.uniq, bv)
return len(m.uniq) - 1
}
jlive := m.uniq[j]
if bv.Eq(jlive) {
// Existing bvec.
return j
}
h++
if h == uint32(len(index)) {
h = 0
}
}
}
// extractUniqe returns the slice of unique bit vectors in m, as
// indexed by the result of bvecSet.add.
func (m *bvecSet) extractUniqe() []bvec {
return m.uniq
}
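// For illustration only (hypothetical usage): the set deduplicates equal bit
// vectors, so adding the same bit pattern twice yields the same index, and
// extractUniqe returns the distinct vectors in first-insertion order.
func exampleBvecSet() {
	a := bvalloc(8)
	a.Set(1)
	b := bvalloc(8)
	b.Set(1)
	var m bvecSet
	i := m.add(a)
	j := m.add(b)        // equal contents: same index as a
	_ = i == j           // true
	_ = m.extractUniqe() // exactly one vector
}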


@ -1,570 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
)
func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
xtype := p.typeExpr(expr.Type)
ntype := p.typeExpr(expr.Type)
fn := ir.NewFunc(p.pos(expr))
fn.SetIsHiddenClosure(Curfn != nil)
fn.Nname = newFuncNameAt(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure
fn.Nname.Ntype = xtype
fn.Nname.Defn = fn
clo := ir.NewClosureExpr(p.pos(expr), fn)
fn.ClosureType = ntype
fn.OClosure = clo
p.funcBody(fn, expr.Body)
// closure-specific variables are hanging off the
// ordinary ones in the symbol table; see oldname.
// unhook them.
// make the list of pointers for the closure call.
for _, v := range fn.ClosureVars {
// Unlink from v1; see comment in syntax.go type Param for these fields.
v1 := v.Defn
v1.Name().Innermost = v.Outer
// If the closure usage of v is not dense,
// we need to make it dense; now that we're out
// of the function in which v appeared,
// look up v.Sym in the enclosing function
// and keep it around for use in the compiled code.
//
// That is, suppose we just finished parsing the innermost
// closure f4 in this code:
//
// func f() {
// v := 1
// func() { // f2
// use(v)
// func() { // f3
// func() { // f4
// use(v)
// }()
// }()
// }()
// }
//
// At this point v.Outer is f2's v; there is no f3's v.
// To construct the closure f4 from within f3,
// we need to use f3's v and in this case we need to create f3's v.
// We are now in the context of f3, so calling oldname(v.Sym)
// obtains f3's v, creating it if necessary (as it is in the example).
//
// capturevars will decide whether to use v directly or &v.
v.Outer = oldname(v.Sym()).(*ir.Name)
}
return clo
}
// typecheckclosure typechecks an OCLOSURE node. It also creates the named
// function associated with the closure.
// TODO: This creation of the named function should probably really be done in a
// separate pass from type-checking.
func typecheckclosure(clo *ir.ClosureExpr, top int) {
fn := clo.Func()
// Set current associated iota value, so iota can be used inside
// function in ConstSpec, see issue #22344
if x := getIotaValue(); x >= 0 {
fn.SetIota(x)
}
fn.ClosureType = typecheck(fn.ClosureType, ctxType)
clo.SetType(fn.ClosureType.Type())
fn.SetClosureCalled(top&ctxCallee != 0)
// Do not typecheck fn twice, otherwise we will end up pushing
// fn to Target.Decls multiple times, causing initLSym to be called twice.
// See #30709
if fn.Typecheck() == 1 {
return
}
for _, ln := range fn.ClosureVars {
n := ln.Defn
if !n.Name().Captured() {
n.Name().SetCaptured(true)
if n.Name().Decldepth == 0 {
base.Fatalf("typecheckclosure: var %v does not have decldepth assigned", n)
}
// Ignore assignments to the variable in straightline code
// preceding the first capturing by a closure.
if n.Name().Decldepth == decldepth {
n.Name().SetAssigned(false)
}
}
}
fn.Nname.SetSym(closurename(Curfn))
setNodeNameFunc(fn.Nname)
typecheckFunc(fn)
// Type check the body now, but only if we're inside a function.
// At top level (in a variable initialization: curfn==nil) we're not
// ready to type check code yet; we'll check it later, because the
// underlying closure function we create is added to Target.Decls.
if Curfn != nil && clo.Type() != nil {
oldfn := Curfn
Curfn = fn
olddd := decldepth
decldepth = 1
typecheckslice(fn.Body().Slice(), ctxStmt)
decldepth = olddd
Curfn = oldfn
}
Target.Decls = append(Target.Decls, fn)
}
// globClosgen is like Func.Closgen, but for the global scope.
var globClosgen int32
// closurename generates a new unique name for a closure within
// outerfunc.
func closurename(outerfunc *ir.Func) *types.Sym {
outer := "glob."
prefix := "func"
gen := &globClosgen
if outerfunc != nil {
if outerfunc.OClosure != nil {
prefix = ""
}
outer = ir.FuncName(outerfunc)
// There may be multiple functions named "_". In those
// cases, we can't use their individual Closgens as it
// would lead to name clashes.
if !ir.IsBlank(outerfunc.Nname) {
gen = &outerfunc.Closgen
}
}
*gen++
return lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
}
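// For illustration only (standalone program, not part of the compiler): the
// generated names are observable at run time. With current gc toolchains
// this typically prints "main.main.func1": the outer function's name plus
// the "funcN" suffix produced above.
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//		"runtime"
//	)
//
//	func main() {
//		f := func() {}
//		fmt.Println(runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name())
//	}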
// capturevarscomplete is set to true when the capturevars phase is done.
var capturevarscomplete bool
// capturevars is called in a separate phase after all typechecking is done.
// It decides whether each variable captured by a closure should be captured
// by value or by reference.
// We use value capturing for values <= 128 bytes that are never reassigned
// after capturing (effectively constant).
func capturevars(fn *ir.Func) {
lno := base.Pos
base.Pos = fn.Pos()
cvars := fn.ClosureVars
out := cvars[:0]
for _, v := range cvars {
if v.Type() == nil {
// If v.Type is nil, it means v looked like it
// was going to be used in the closure, but
// isn't. This happens in struct literals like
// s{f: x} where we can't distinguish whether
// f is a field identifier or expression until
// resolving s.
continue
}
out = append(out, v)
// type check the & of closed-over variables outside the closure,
// so that the outer frame also grabs them and knows they escape.
dowidth(v.Type())
var outer ir.Node
outer = v.Outer
outermost := v.Defn.(*ir.Name)
// out parameters will be assigned to implicitly upon return.
if outermost.Class() != ir.PPARAMOUT && !outermost.Name().Addrtaken() && !outermost.Name().Assigned() && v.Type().Width <= 128 {
v.SetByval(true)
} else {
outermost.Name().SetAddrtaken(true)
outer = nodAddr(outer)
}
if base.Flag.LowerM > 1 {
var name *types.Sym
if v.Curfn != nil && v.Curfn.Nname != nil {
name = v.Curfn.Sym()
}
how := "ref"
if v.Byval() {
how = "value"
}
base.WarnfAt(v.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym(), outermost.Name().Addrtaken(), outermost.Name().Assigned(), int32(v.Type().Width))
}
outer = typecheck(outer, ctxExpr)
fn.ClosureEnter.Append(outer)
}
fn.ClosureVars = out
base.Pos = lno
}
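// For illustration only (hypothetical snippet): under the rule above, x is a
// candidate for capture by value (small and never reassigned after capture),
// while y must be captured by reference because the closure assigns to it.
// Building with -gcflags=-m=2 surfaces the decision through the "capturing
// by value" / "capturing by ref" diagnostics emitted by WarnfAt above.
//
//	x, y := 1, 1
//	read := func() int { return x } // x: by value
//	bump := func() { y++ }          // y: by ref
//	_, _ = read, bump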
// transformclosure is called in a separate phase after escape analysis.
// It transforms closure bodies to properly reference captured variables.
func transformclosure(fn *ir.Func) {
lno := base.Pos
base.Pos = fn.Pos()
if fn.ClosureCalled() {
// If the closure is directly called, we transform it to a plain function call
// with variables passed as args. This avoids allocation of a closure object.
// Here we do only a part of the transformation. Walk of OCALLFUNC(OCLOSURE)
// will complete the transformation later.
// For illustration, the following closure:
// func(a int) {
// println(byval)
// byref++
// }(42)
// becomes:
// func(byval int, &byref *int, a int) {
// println(byval)
// (*&byref)++
// }(byval, &byref, 42)
// f is ONAME of the actual function.
f := fn.Nname
// We are going to insert captured variables before input args.
var params []*types.Field
var decls []*ir.Name
for _, v := range fn.ClosureVars {
if !v.Byval() {
// If v of type T is captured by reference,
// we introduce function param &v *T
// and v remains PAUTOHEAP with &v heapaddr
// (accesses will implicitly deref &v).
addr := NewName(lookup("&" + v.Sym().Name))
addr.SetType(types.NewPtr(v.Type()))
v.Heapaddr = addr
v = addr
}
v.SetClass(ir.PPARAM)
decls = append(decls, v)
fld := types.NewField(src.NoXPos, v.Sym(), v.Type())
fld.Nname = v
params = append(params, fld)
}
if len(params) > 0 {
// Prepend params and decls.
f.Type().Params().SetFields(append(params, f.Type().Params().FieldSlice()...))
fn.Dcl = append(decls, fn.Dcl...)
}
dowidth(f.Type())
fn.SetType(f.Type()) // update type of ODCLFUNC
} else {
// The closure is not called, so it is going to stay a closure.
var body []ir.Node
offset := int64(Widthptr)
for _, v := range fn.ClosureVars {
// cv refers to the field inside of closure OSTRUCTLIT.
typ := v.Type()
if !v.Byval() {
typ = types.NewPtr(typ)
}
offset = Rnd(offset, int64(typ.Align))
cr := ir.NewClosureRead(typ, offset)
offset += typ.Width
if v.Byval() && v.Type().Width <= int64(2*Widthptr) {
// If it is a small variable captured by value, downgrade it to PAUTO.
v.SetClass(ir.PAUTO)
fn.Dcl = append(fn.Dcl, v)
body = append(body, ir.Nod(ir.OAS, v, cr))
} else {
// Declare variable holding addresses taken from closure
// and initialize in entry prologue.
addr := NewName(lookup("&" + v.Sym().Name))
addr.SetType(types.NewPtr(v.Type()))
addr.SetClass(ir.PAUTO)
addr.SetUsed(true)
addr.Curfn = fn
fn.Dcl = append(fn.Dcl, addr)
v.Heapaddr = addr
var src ir.Node = cr
if v.Byval() {
src = nodAddr(cr)
}
body = append(body, ir.Nod(ir.OAS, addr, src))
}
}
if len(body) > 0 {
typecheckslice(body, ctxStmt)
fn.Enter.Set(body)
fn.SetNeedctxt(true)
}
}
base.Pos = lno
}
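// A hedged user-level sketch of the two cases handled above (names are
// hypothetical):
//
//	// Directly called: ClosureCalled() is true, so the call is rewritten
//	// to a plain function call with x passed as an extra argument, and no
//	// closure object is allocated.
//	func direct() int {
//		x := 1
//		return func(a int) int { return a + x }(41)
//	}
//
//	// Not directly called: the closure survives, and x is loaded from
//	// the closure struct (via ClosureRead) in the entry prologue.
//	func escaping() func() int {
//		x := 1
//		return func() int { x++; return x }
//	}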
// hasemptycvars reports whether closure clo has an
// empty list of captured vars.
func hasemptycvars(clo *ir.ClosureExpr) bool {
return len(clo.Func().ClosureVars) == 0
}
// closuredebugruntimecheck applies boilerplate checks for debug flags
// and for compiling the runtime.
func closuredebugruntimecheck(clo *ir.ClosureExpr) {
if base.Debug.Closure > 0 {
if clo.Esc() == EscHeap {
base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func().ClosureVars)
} else {
base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func().ClosureVars)
}
}
if base.Flag.CompilingRuntime && clo.Esc() == EscHeap {
base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime")
}
}
// closureType returns the struct type used to hold all the information
// needed in the closure for clo (clo must be an OCLOSURE node).
// The address of a variable of the returned type can be cast to a func.
func closureType(clo *ir.ClosureExpr) *types.Type {
// Create closure in the form of a composite literal.
// Supposing the closure captures an int i and a string s
// and has one float64 argument and no results,
// the generated code looks like:
//
// clos = &struct{.F uintptr; i *int; s *string}{func.1, &i, &s}
//
// The use of the struct provides type information to the garbage
// collector so that it can walk the closure. We could use (in this case)
// [3]unsafe.Pointer instead, but that would leave the gc in the dark.
// The information appears in the binary in the form of type descriptors;
// the struct is unnamed so that closures in multiple packages with the
// same struct type can share the descriptor.
fields := []*ir.Field{
namedfield(".F", types.Types[types.TUINTPTR]),
}
for _, v := range clo.Func().ClosureVars {
typ := v.Type()
if !v.Byval() {
typ = types.NewPtr(typ)
}
fields = append(fields, symfield(v.Sym(), typ))
}
typ := tostruct(fields)
typ.SetNoalg(true)
return typ
}
func walkclosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
fn := clo.Func()
// If no closure vars, don't bother wrapping.
if hasemptycvars(clo) {
if base.Debug.Closure > 0 {
base.WarnfAt(clo.Pos(), "closure converted to global")
}
return fn.Nname
}
closuredebugruntimecheck(clo)
typ := closureType(clo)
clos := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(typ))
clos.SetEsc(clo.Esc())
clos.PtrList().Set(append([]ir.Node{ir.Nod(ir.OCFUNC, fn.Nname, nil)}, fn.ClosureEnter.Slice()...))
addr := nodAddr(clos)
addr.SetEsc(clo.Esc())
// Force type conversion from *struct to the func type.
cfn := convnop(addr, clo.Type())
// non-escaping temp to use, if any.
if x := clo.Prealloc; x != nil {
if !types.Identical(typ, x.Type()) {
panic("closure type does not match order's assigned type")
}
addr.SetRight(x)
clo.Prealloc = nil
}
return walkexpr(cfn, init)
}
func typecheckpartialcall(n ir.Node, sym *types.Sym) *ir.CallPartExpr {
switch n.Op() {
case ir.ODOTINTER, ir.ODOTMETH:
break
default:
base.Fatalf("invalid typecheckpartialcall")
}
dot := n.(*ir.SelectorExpr)
// Create top-level function.
fn := makepartialcall(dot, dot.Type(), sym)
fn.SetWrapper(true)
return ir.NewCallPartExpr(dot.Pos(), dot.Left(), dot.Selection, fn)
}
// makepartialcall returns a DCLFUNC node representing the wrapper function (*-fm) needed
// for partial calls.
func makepartialcall(dot *ir.SelectorExpr, t0 *types.Type, meth *types.Sym) *ir.Func {
rcvrtype := dot.Left().Type()
sym := methodSymSuffix(rcvrtype, meth, "-fm")
if sym.Uniq() {
return sym.Def.(*ir.Func)
}
sym.SetUniq(true)
savecurfn := Curfn
saveLineNo := base.Pos
Curfn = nil
// Set line number equal to the line number where the method is declared.
var m *types.Field
if lookdot0(meth, rcvrtype, &m, false) == 1 && m.Pos.IsKnown() {
base.Pos = m.Pos
}
// Note: !m.Pos.IsKnown() happens for method expressions where
// the method is implicitly declared. The Error method of the
// built-in error type is one such method. We leave the line
// number at the use of the method expression in this
// case. See issue 29389.
tfn := ir.NewFuncType(base.Pos, nil,
structargs(t0.Params(), true),
structargs(t0.Results(), false))
fn := dclfunc(sym, tfn)
fn.SetDupok(true)
fn.SetNeedctxt(true)
// Declare and initialize variable holding receiver.
cr := ir.NewClosureRead(rcvrtype, Rnd(int64(Widthptr), int64(rcvrtype.Align)))
ptr := NewName(lookup(".this"))
declare(ptr, ir.PAUTO)
ptr.SetUsed(true)
var body []ir.Node
if rcvrtype.IsPtr() || rcvrtype.IsInterface() {
ptr.SetType(rcvrtype)
body = append(body, ir.Nod(ir.OAS, ptr, cr))
} else {
ptr.SetType(types.NewPtr(rcvrtype))
body = append(body, ir.Nod(ir.OAS, ptr, nodAddr(cr)))
}
call := ir.Nod(ir.OCALL, nodSym(ir.OXDOT, ptr, meth), nil)
call.PtrList().Set(paramNnames(tfn.Type()))
call.SetIsDDD(tfn.Type().IsVariadic())
if t0.NumResults() != 0 {
ret := ir.Nod(ir.ORETURN, nil, nil)
ret.PtrList().Set1(call)
body = append(body, ret)
} else {
body = append(body, call)
}
fn.PtrBody().Set(body)
funcbody()
typecheckFunc(fn)
// Need to typecheck the body of the just-generated wrapper.
// typecheckslice() requires that Curfn is set when processing an ORETURN.
Curfn = fn
typecheckslice(fn.Body().Slice(), ctxStmt)
sym.Def = fn
Target.Decls = append(Target.Decls, fn)
Curfn = savecurfn
base.Pos = saveLineNo
return fn
}
// partialCallType returns the struct type used to hold all the information
// needed in the closure for n (n must be an OCALLPART node).
// The address of a variable of the returned type can be cast to a func.
func partialCallType(n *ir.CallPartExpr) *types.Type {
t := tostruct([]*ir.Field{
namedfield("F", types.Types[types.TUINTPTR]),
namedfield("R", n.Left().Type()),
})
t.SetNoalg(true)
return t
}
func walkpartialcall(n *ir.CallPartExpr, init *ir.Nodes) ir.Node {
// Create closure in the form of a composite literal.
// For x.M with receiver (x) type T, the generated code looks like:
//
// clos = &struct{F uintptr; R T}{T.M·f, x}
//
// Like walkclosure above.
if n.Left().Type().IsInterface() {
// Trigger panic for method on nil interface now.
// Otherwise it happens in the wrapper and is confusing.
n.SetLeft(cheapexpr(n.Left(), init))
n.SetLeft(walkexpr(n.Left(), nil))
tab := typecheck(ir.Nod(ir.OITAB, n.Left(), nil), ctxExpr)
c := ir.Nod(ir.OCHECKNIL, tab, nil)
c.SetTypecheck(1)
init.Append(c)
}
typ := partialCallType(n)
clos := ir.Nod(ir.OCOMPLIT, nil, ir.TypeNode(typ))
clos.SetEsc(n.Esc())
clos.PtrList().Set2(ir.Nod(ir.OCFUNC, n.Func().Nname, nil), n.Left())
addr := nodAddr(clos)
addr.SetEsc(n.Esc())
// Force type conversion from *struct to the func type.
cfn := convnop(addr, n.Type())
// non-escaping temp to use, if any.
if x := n.Prealloc; x != nil {
if !types.Identical(typ, x.Type()) {
panic("partial call type does not match order's assigned type")
}
addr.SetRight(x)
n.Prealloc = nil
}
return walkexpr(cfn, init)
}
// callpartMethod returns the *types.Field representing the method
// referenced by method value n.
func callpartMethod(n ir.Node) *types.Field {
return n.(*ir.CallPartExpr).Method
}
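// For reference, the language-level behavior of the method values lowered
// above (a sketch with hypothetical names):
//
//	type T struct{ n int }
//
//	func (t T) M() int { return t.n }
//
//	func use() (int, int) {
//		t := T{n: 7}
//		f := t.M          // builds &struct{F uintptr; R T}{wrapper, t}
//		t.n = 8           // does not affect f: the receiver was copied at bind time
//		return f(), t.M() // 7, 8
//	}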

View file

@ -0,0 +1,178 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"internal/race"
"math/rand"
"sort"
"sync"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/liveness"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/compile/internal/walk"
)
// "Portable" code generation.
var (
compilequeue []*ir.Func // functions waiting to be compiled
)
func funccompile(fn *ir.Func) {
if ir.CurFunc != nil {
base.Fatalf("funccompile %v inside %v", fn.Sym(), ir.CurFunc.Sym())
}
if fn.Type() == nil {
if base.Errors() == 0 {
base.Fatalf("funccompile missing type")
}
return
}
// assign parameter offsets
types.CalcSize(fn.Type())
if len(fn.Body) == 0 {
// Initialize ABI wrappers if necessary.
ssagen.InitLSym(fn, false)
liveness.WriteFuncMap(fn)
return
}
typecheck.DeclContext = ir.PAUTO
ir.CurFunc = fn
compile(fn)
ir.CurFunc = nil
typecheck.DeclContext = ir.PEXTERN
}
func compile(fn *ir.Func) {
// Set up the function's LSym early to avoid data races with the assemblers.
// Do this before walk, as walk needs the LSym to set attributes/relocations
// (e.g. in markTypeUsedInInterface).
ssagen.InitLSym(fn, true)
errorsBefore := base.Errors()
walk.Walk(fn)
if base.Errors() > errorsBefore {
return
}
// From this point, there should be no uses of Curfn. Enforce that.
ir.CurFunc = nil
if ir.FuncName(fn) == "_" {
// We don't need to generate code for this function, just report errors in its body.
// At this point we've generated any errors needed.
// (Beyond here we generate only non-spec errors, like "stack frame too large".)
// See issue 29870.
return
}
// Make sure type syms are declared for all types that might
// be types of stack objects. We need to do this here
// because symbols must be allocated before the parallel
// phase of the compiler.
for _, n := range fn.Dcl {
switch n.Class_ {
case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO:
if liveness.ShouldTrack(n) && n.Addrtaken() {
reflectdata.WriteType(n.Type())
// Also make sure we allocate a linker symbol
// for the stack object data, for the same reason.
if fn.LSym.Func().StackObjects == nil {
fn.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.LSym.Name + ".stkobj")
}
}
}
}
if compilenow(fn) {
ssagen.Compile(fn, 0)
} else {
compilequeue = append(compilequeue, fn)
}
}
// compilenow reports whether to compile immediately.
// If functions are not compiled immediately,
// they are enqueued in compilequeue,
// which is drained by compileFunctions.
func compilenow(fn *ir.Func) bool {
// Issue 38068: if this function is a method AND an inline
// candidate AND was not inlined (yet), put it onto the compile
// queue instead of compiling it immediately. This is in case we
// wind up inlining it into a method wrapper that is generated by
// compiling a function later on in the Target.Decls list.
if ir.IsMethod(fn) && isInlinableButNotInlined(fn) {
return false
}
return base.Flag.LowerC == 1 && base.Debug.CompileLater == 0
}
// compileFunctions compiles all functions in compilequeue.
// It fans out base.Flag.LowerC workers to do the work
// and waits for them to complete.
func compileFunctions() {
if len(compilequeue) != 0 {
types.CalcSizeDisabled = true // not safe to calculate sizes concurrently
if race.Enabled {
// Randomize compilation order to try to shake out races.
tmp := make([]*ir.Func, len(compilequeue))
perm := rand.Perm(len(compilequeue))
for i, v := range perm {
tmp[v] = compilequeue[i]
}
copy(compilequeue, tmp)
} else {
// Compile the longest functions first,
// since they're most likely to be the slowest.
// This helps avoid stragglers.
sort.Slice(compilequeue, func(i, j int) bool {
return len(compilequeue[i].Body) > len(compilequeue[j].Body)
})
}
var wg sync.WaitGroup
base.Ctxt.InParallel = true
c := make(chan *ir.Func, base.Flag.LowerC)
for i := 0; i < base.Flag.LowerC; i++ {
wg.Add(1)
go func(worker int) {
for fn := range c {
ssagen.Compile(fn, worker)
}
wg.Done()
}(i)
}
for _, fn := range compilequeue {
c <- fn
}
close(c)
compilequeue = nil
wg.Wait()
base.Ctxt.InParallel = false
types.CalcSizeDisabled = false
}
}
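// The drain above is a standard channel fan-out joined by a WaitGroup.
// A minimal standalone sketch of the same pattern (hypothetical, for
// illustration only):
//
//	func fanOut(items []*ir.Func, nWorkers int, work func(fn *ir.Func, worker int)) {
//		var wg sync.WaitGroup
//		c := make(chan *ir.Func, nWorkers)
//		for i := 0; i < nWorkers; i++ {
//			wg.Add(1)
//			go func(worker int) {
//				defer wg.Done()
//				for fn := range c {
//					work(fn, worker)
//				}
//			}(i)
//		}
//		for _, fn := range items {
//			c <- fn
//		}
//		close(c)
//		wg.Wait()
//	}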
// isInlinableButNotInlined returns true if 'fn' was marked as an
// inline candidate but then never inlined (presumably because we
// found no call sites).
func isInlinableButNotInlined(fn *ir.Func) bool {
if fn.Inl == nil {
return false
}
if fn.Sym() == nil {
return true
}
return !fn.Sym().Linksym().WasInlined()
}

File diff suppressed because it is too large

View file

@ -6,10 +6,11 @@ package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/inline"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/bio"
"cmd/internal/src"
"fmt"
"go/constant"
)
@ -21,54 +22,16 @@ func exportf(bout *bio.Writer, format string, args ...interface{}) {
}
}
// exportsym marks n for export (or reexport).
func exportsym(n *ir.Name) {
if n.Sym().OnExportList() {
return
}
n.Sym().SetOnExportList(true)
if base.Flag.E != 0 {
fmt.Printf("export symbol %v\n", n.Sym())
}
Target.Exports = append(Target.Exports, n)
}
func initname(s string) bool {
return s == "init"
}
func autoexport(n *ir.Name, ctxt ir.Class) {
if n.Sym().Pkg != types.LocalPkg {
return
}
if (ctxt != ir.PEXTERN && ctxt != ir.PFUNC) || dclcontext != ir.PEXTERN {
return
}
if n.Type() != nil && n.Type().IsKind(types.TFUNC) && ir.IsMethod(n) {
return
}
if types.IsExported(n.Sym().Name) || initname(n.Sym().Name) {
exportsym(n)
}
if base.Flag.AsmHdr != "" && !n.Sym().Asm() {
n.Sym().SetAsm(true)
Target.Asms = append(Target.Asms, n)
}
}
func dumpexport(bout *bio.Writer) {
p := &exporter{marked: make(map[*types.Type]bool)}
for _, n := range Target.Exports {
for _, n := range typecheck.Target.Exports {
p.markObject(n)
}
// The linker also looks for the $$ marker - use char after $$ to distinguish format.
exportf(bout, "\n$$B\n") // indicate binary export format
off := bout.Offset()
iexport(bout.Writer)
typecheck.WriteExports(bout.Writer)
size := bout.Offset() - off
exportf(bout, "\n$$\n")
@ -77,78 +40,13 @@ func dumpexport(bout *bio.Writer) {
}
}
func importsym(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class) *ir.Name {
if n := s.PkgDef(); n != nil {
base.Fatalf("importsym of symbol that already exists: %v", n)
}
n := ir.NewDeclNameAt(pos, op, s)
n.SetClass(ctxt) // TODO(mdempsky): Move this into NewDeclNameAt too?
s.SetPkgDef(n)
s.Importdef = ipkg
return n
}
// importtype returns the named type declared by symbol s.
// If no such type has been declared yet, a forward declaration is returned.
// ipkg is the package being imported
func importtype(ipkg *types.Pkg, pos src.XPos, s *types.Sym) *ir.Name {
n := importsym(ipkg, pos, s, ir.OTYPE, ir.PEXTERN)
n.SetType(types.NewNamed(n))
return n
}
// importobj declares symbol s as an imported object representable by op.
// ipkg is the package being imported
func importobj(ipkg *types.Pkg, pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Name {
n := importsym(ipkg, pos, s, op, ctxt)
n.SetType(t)
if ctxt == ir.PFUNC {
n.Sym().SetFunc(true)
}
return n
}
// importconst declares symbol s as an imported constant with type t and value val.
// ipkg is the package being imported
func importconst(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) *ir.Name {
n := importobj(ipkg, pos, s, ir.OLITERAL, ir.PEXTERN, t)
n.SetVal(val)
return n
}
// importfunc declares symbol s as an imported function with type t.
// ipkg is the package being imported
func importfunc(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
n := importobj(ipkg, pos, s, ir.ONAME, ir.PFUNC, t)
fn := ir.NewFunc(pos)
fn.SetType(t)
n.SetFunc(fn)
fn.Nname = n
return n
}
// importvar declares symbol s as an imported variable with type t.
// ipkg is the package being imported
func importvar(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
return importobj(ipkg, pos, s, ir.ONAME, ir.PEXTERN, t)
}
// importalias declares symbol s as an imported type alias with type t.
// ipkg is the package being imported
func importalias(ipkg *types.Pkg, pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
return importobj(ipkg, pos, s, ir.OTYPE, ir.PEXTERN, t)
}
func dumpasmhdr() {
b, err := bio.Create(base.Flag.AsmHdr)
if err != nil {
base.Fatalf("%v", err)
}
fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", types.LocalPkg.Name)
for _, n := range Target.Asms {
for _, n := range typecheck.Target.Asms {
if n.Sym().IsBlank() {
continue
}
@ -176,3 +74,83 @@ func dumpasmhdr() {
b.Close()
}
type exporter struct {
marked map[*types.Type]bool // types already seen by markType
}
// markObject visits a reachable object.
func (p *exporter) markObject(n ir.Node) {
if n.Op() == ir.ONAME {
n := n.(*ir.Name)
if n.Class_ == ir.PFUNC {
inline.Inline_Flood(n, typecheck.Export)
}
}
p.markType(n.Type())
}
// markType recursively visits types reachable from t to identify
// functions whose inline bodies may be needed.
func (p *exporter) markType(t *types.Type) {
if p.marked[t] {
return
}
p.marked[t] = true
// If this is a named type, mark all of its associated
// methods. Skip interface types because t.Methods contains
// only their unexpanded method set (i.e., exclusive of
// interface embeddings), and the switch statement below
// handles their full method set.
if t.Sym() != nil && t.Kind() != types.TINTER {
for _, m := range t.Methods().Slice() {
if types.IsExported(m.Sym.Name) {
p.markObject(ir.AsNode(m.Nname))
}
}
}
// Recursively mark any types that can be produced given a
// value of type t: dereferencing a pointer; indexing or
// iterating over an array, slice, or map; receiving from a
// channel; accessing a struct field or interface method; or
// calling a function.
//
// Notably, we don't mark function parameter types, because
// the user already needs some way to construct values of
// those types.
switch t.Kind() {
case types.TPTR, types.TARRAY, types.TSLICE:
p.markType(t.Elem())
case types.TCHAN:
if t.ChanDir().CanRecv() {
p.markType(t.Elem())
}
case types.TMAP:
p.markType(t.Key())
p.markType(t.Elem())
case types.TSTRUCT:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) || f.Embedded != 0 {
p.markType(f.Type)
}
}
case types.TFUNC:
for _, f := range t.Results().FieldSlice() {
p.markType(f.Type)
}
case types.TINTER:
for _, f := range t.FieldSlice() {
if types.IsExported(f.Sym.Name) {
p.markType(f.Type)
}
}
}
}

View file

@ -1,96 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"strconv"
)
// sysfunc looks up the given Go function name in package runtime. This function
// must follow the internal calling convention.
func sysfunc(name string) *obj.LSym {
s := Runtimepkg.Lookup(name)
s.SetFunc(true)
return s.Linksym()
}
// sysvar looks up a variable (or assembly function) name in package
// runtime. If this is a function, it may have a special calling
// convention.
func sysvar(name string) *obj.LSym {
return Runtimepkg.Lookup(name).Linksym()
}
// isParamStackCopy reports whether this is the on-stack copy of a
// function parameter that moved to the heap.
func isParamStackCopy(n ir.Node) bool {
if n.Op() != ir.ONAME {
return false
}
name := n.(*ir.Name)
return (name.Class() == ir.PPARAM || name.Class() == ir.PPARAMOUT) && name.Heapaddr != nil
}
// isParamHeapCopy reports whether this is the on-heap copy of
// a function parameter that moved to the heap.
func isParamHeapCopy(n ir.Node) bool {
if n.Op() != ir.ONAME {
return false
}
name := n.(*ir.Name)
return name.Class() == ir.PAUTOHEAP && name.Name().Stackcopy != nil
}
// autotmpname returns the name for an autotmp variable numbered n.
func autotmpname(n int) string {
// Give each tmp a different name so that they can be registerized.
// Add a preceding . to avoid clashing with legal names.
const prefix = ".autotmp_"
// Start with a buffer big enough to hold a large n.
b := []byte(prefix + " ")[:len(prefix)]
b = strconv.AppendInt(b, int64(n), 10)
return types.InternString(b)
}
// make a new Node off the books
func tempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name {
if curfn == nil {
base.Fatalf("no curfn for tempAt")
}
if curfn.Op() == ir.OCLOSURE {
ir.Dump("tempAt", curfn)
base.Fatalf("adding tempAt to wrong closure function")
}
if t == nil {
base.Fatalf("tempAt called with nil type")
}
s := &types.Sym{
Name: autotmpname(len(curfn.Dcl)),
Pkg: types.LocalPkg,
}
n := ir.NewNameAt(pos, s)
s.Def = n
n.SetType(t)
n.SetClass(ir.PAUTO)
n.SetEsc(EscNever)
n.Curfn = curfn
n.SetUsed(true)
n.SetAutoTemp(true)
curfn.Dcl = append(curfn.Dcl, n)
dowidth(t)
return n
}
func temp(t *types.Type) *ir.Name {
return tempAt(base.Pos, Curfn, t)
}

View file

@ -1,243 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"sync"
)
var (
// maximum size of a variable which we will allocate on the stack.
// This limit is for explicit variable declarations like "var x T" or "x := ...".
// Note: the flag smallframes can update this value.
maxStackVarSize = int64(10 * 1024 * 1024)
// maximum size of implicit variables that we will allocate on the stack.
// p := new(T) allocating T on the stack
// p := &T{} allocating T on the stack
// s := make([]T, n) allocating [n]T on the stack
// s := []byte("...") allocating [n]byte on the stack
// Note: the flag smallframes can update this value.
maxImplicitStackVarSize = int64(64 * 1024)
// smallArrayBytes is the maximum size of an array which is considered small.
// Small arrays will be initialized directly with a sequence of constant stores.
// Large arrays will be initialized by copying from a static temp.
// 256 bytes was chosen to minimize generated code + statictmp size.
smallArrayBytes = int64(256)
)
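// For illustration (hedged; the final decision also depends on escape
// analysis), the limits above mean roughly:
//
//	func sketch() {
//		var big [20 << 20]byte    // > maxStackVarSize: moved to the heap
//		p := new([128 << 10]byte) // > maxImplicitStackVarSize: heap-allocated
//		q := new([32 << 10]byte)  // under the implicit limit: may stay on the stack
//		_, _, _ = big, p, q
//	}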
// isRuntimePkg reports whether p is package runtime.
func isRuntimePkg(p *types.Pkg) bool {
if base.Flag.CompilingRuntime && p == types.LocalPkg {
return true
}
return p.Path == "runtime"
}
// isReflectPkg reports whether p is package reflect.
func isReflectPkg(p *types.Pkg) bool {
if p == types.LocalPkg {
return base.Ctxt.Pkgpath == "reflect"
}
return p.Path == "reflect"
}
// Slices in the runtime are represented by three components:
//
// type slice struct {
// ptr unsafe.Pointer
// len int
// cap int
// }
//
// Strings in the runtime are represented by two components:
//
// type string struct {
// ptr unsafe.Pointer
// len int
// }
//
// These variables are the offsets of fields and sizes of these structs.
var (
slicePtrOffset int64
sliceLenOffset int64
sliceCapOffset int64
sizeofSlice int64
sizeofString int64
)
var pragcgobuf [][]string
var decldepth int32
var nolocalimports bool
var inimport bool // set during import
var itabpkg *types.Pkg // fake pkg for itab entries
var itablinkpkg *types.Pkg // fake package for runtime itab entries
var Runtimepkg *types.Pkg // fake package runtime
var racepkg *types.Pkg // package runtime/race
var msanpkg *types.Pkg // package runtime/msan
var unsafepkg *types.Pkg // package unsafe
var trackpkg *types.Pkg // fake package for field tracking
var mappkg *types.Pkg // fake package for map zero value
var gopkg *types.Pkg // pseudo-package for method symbols on anonymous receiver types
var zerosize int64
var simtype [types.NTYPE]types.Kind
var (
isInt [types.NTYPE]bool
isFloat [types.NTYPE]bool
isComplex [types.NTYPE]bool
issimple [types.NTYPE]bool
)
var (
okforeq [types.NTYPE]bool
okforadd [types.NTYPE]bool
okforand [types.NTYPE]bool
okfornone [types.NTYPE]bool
okforbool [types.NTYPE]bool
okforcap [types.NTYPE]bool
okforlen [types.NTYPE]bool
okforarith [types.NTYPE]bool
)
var okforcmp [types.NTYPE]bool
var (
okfor [ir.OEND][]bool
iscmp [ir.OEND]bool
)
var (
funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym)
funcsyms []*types.Sym
)
var dclcontext ir.Class // PEXTERN/PAUTO
var Curfn *ir.Func
var Widthptr int
var Widthreg int
var typecheckok bool
// Whether we are adding any sort of code instrumentation, such as
// when the race detector is enabled.
var instrumenting bool
var nodfp *ir.Name
var autogeneratedPos src.XPos
// interface to back end
type Arch struct {
LinkArch *obj.LinkArch
REGSP int
MAXWIDTH int64
SoftFloat bool
PadFrame func(int64) int64
// ZeroRange zeroes a range of memory on stack. It is only inserted
// at function entry, and it is ok to clobber registers.
ZeroRange func(*Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
Ginsnop func(*Progs) *obj.Prog
Ginsnopdefer func(*Progs) *obj.Prog // special ginsnop for deferreturn
// SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
SSAMarkMoves func(*SSAGenState, *ssa.Block)
// SSAGenValue emits Prog(s) for the Value.
SSAGenValue func(*SSAGenState, *ssa.Value)
// SSAGenBlock emits end-of-block Progs. SSAGenValue should be called
// for all values in the block before SSAGenBlock.
SSAGenBlock func(s *SSAGenState, b, next *ssa.Block)
}
var thearch Arch
var (
staticuint64s *ir.Name
zerobase *ir.Name
assertE2I,
assertE2I2,
assertI2I,
assertI2I2,
deferproc,
deferprocStack,
Deferreturn,
Duffcopy,
Duffzero,
gcWriteBarrier,
goschedguarded,
growslice,
msanread,
msanwrite,
msanmove,
newobject,
newproc,
panicdivide,
panicshift,
panicdottypeE,
panicdottypeI,
panicnildottype,
panicoverflow,
raceread,
racereadrange,
racewrite,
racewriterange,
x86HasPOPCNT,
x86HasSSE41,
x86HasFMA,
armHasVFPv4,
arm64HasATOMICS,
typedmemclr,
typedmemmove,
Udiv,
writeBarrier,
zerobaseSym *obj.LSym
BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
// Wasm
WasmMove,
WasmZero,
WasmDiv,
WasmTruncS,
WasmTruncU,
SigPanic *obj.LSym
)
// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
var GCWriteBarrierReg map[int16]*obj.LSym

View file

@ -1,465 +0,0 @@
// Derived from Inferno utils/6c/txt.c
// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
"fmt"
"os"
)
var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839
// Progs accumulates Progs for a function and converts them into machine code.
type Progs struct {
Text *obj.Prog // ATEXT Prog for this function
next *obj.Prog // next Prog
pc int64 // virtual PC; count of Progs
pos src.XPos // position to use for new Progs
curfn *ir.Func // fn these Progs are for
progcache []obj.Prog // local progcache
cacheidx int // first free element of progcache
nextLive LivenessIndex // liveness index for the next Prog
prevLive LivenessIndex // last emitted liveness index
}
// newProgs returns a new Progs for fn.
// worker indicates which of the backend workers will use the Progs.
func newProgs(fn *ir.Func, worker int) *Progs {
pp := new(Progs)
if base.Ctxt.CanReuseProgs() {
sz := len(sharedProgArray) / base.Flag.LowerC
pp.progcache = sharedProgArray[sz*worker : sz*(worker+1)]
}
pp.curfn = fn
// prime the pump
pp.next = pp.NewProg()
pp.clearp(pp.next)
pp.pos = fn.Pos()
pp.settext(fn)
// PCDATA tables implicitly start with index -1.
pp.prevLive = LivenessIndex{-1, false}
pp.nextLive = pp.prevLive
return pp
}
func (pp *Progs) NewProg() *obj.Prog {
var p *obj.Prog
if pp.cacheidx < len(pp.progcache) {
p = &pp.progcache[pp.cacheidx]
pp.cacheidx++
} else {
p = new(obj.Prog)
}
p.Ctxt = base.Ctxt
return p
}
// Flush converts from pp to machine code.
func (pp *Progs) Flush() {
plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.curfn}
obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath)
}
// Free clears pp and any associated resources.
func (pp *Progs) Free() {
if base.Ctxt.CanReuseProgs() {
// Clear progs to enable GC and avoid abuse.
s := pp.progcache[:pp.cacheidx]
for i := range s {
s[i] = obj.Prog{}
}
}
// Clear pp to avoid abuse.
*pp = Progs{}
}
// Prog adds a Prog with instruction As to pp.
func (pp *Progs) Prog(as obj.As) *obj.Prog {
if pp.nextLive.StackMapValid() && pp.nextLive.stackMapIndex != pp.prevLive.stackMapIndex {
// Emit stack map index change.
idx := pp.nextLive.stackMapIndex
pp.prevLive.stackMapIndex = idx
p := pp.Prog(obj.APCDATA)
Addrconst(&p.From, objabi.PCDATA_StackMapIndex)
Addrconst(&p.To, int64(idx))
}
if pp.nextLive.isUnsafePoint != pp.prevLive.isUnsafePoint {
// Emit unsafe-point marker.
pp.prevLive.isUnsafePoint = pp.nextLive.isUnsafePoint
p := pp.Prog(obj.APCDATA)
Addrconst(&p.From, objabi.PCDATA_UnsafePoint)
if pp.nextLive.isUnsafePoint {
Addrconst(&p.To, objabi.PCDATA_UnsafePointUnsafe)
} else {
Addrconst(&p.To, objabi.PCDATA_UnsafePointSafe)
}
}
p := pp.next
pp.next = pp.NewProg()
pp.clearp(pp.next)
p.Link = pp.next
if !pp.pos.IsKnown() && base.Flag.K != 0 {
base.Warn("prog: unknown position (line 0)")
}
p.As = as
p.Pos = pp.pos
if pp.pos.IsStmt() == src.PosIsStmt {
// Clear IsStmt for later Progs at this pos provided that as can be marked as a stmt
if ssa.LosesStmtMark(as) {
return p
}
pp.pos = pp.pos.WithNotStmt()
}
return p
}
func (pp *Progs) clearp(p *obj.Prog) {
obj.Nopout(p)
p.As = obj.AEND
p.Pc = pp.pc
pp.pc++
}
func (pp *Progs) Appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
q := pp.NewProg()
pp.clearp(q)
q.As = as
q.Pos = p.Pos
q.From.Type = ftype
q.From.Reg = freg
q.From.Offset = foffset
q.To.Type = ttype
q.To.Reg = treg
q.To.Offset = toffset
q.Link = p.Link
p.Link = q
return q
}
func (pp *Progs) settext(fn *ir.Func) {
if pp.Text != nil {
base.Fatalf("Progs.settext called twice")
}
ptxt := pp.Prog(obj.ATEXT)
pp.Text = ptxt
fn.LSym.Func().Text = ptxt
ptxt.From.Type = obj.TYPE_MEM
ptxt.From.Name = obj.NAME_EXTERN
ptxt.From.Sym = fn.LSym
}
// makeABIWrapper creates a new function that wraps a cross-ABI call
// to "f". The wrapper is marked as an ABIWRAPPER.
func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
// Q: is this needed?
savepos := base.Pos
savedclcontext := dclcontext
savedcurfn := Curfn
base.Pos = autogeneratedPos
dclcontext = ir.PEXTERN
// At the moment we don't support wrapping a method; we'd need machinery
// below to handle the receiver. Panic if we see this scenario.
ft := f.Nname.Ntype.Type()
if ft.NumRecvs() != 0 {
panic("makeABIWrapper support for wrapping methods not implemented")
}
// Manufacture a new func type to use for the wrapper.
var noReceiver *ir.Field
tfn := ir.NewFuncType(base.Pos,
noReceiver,
structargs(ft.Params(), true),
structargs(ft.Results(), false))
// Reuse f's types.Sym to create a new ODCLFUNC/function.
fn := dclfunc(f.Nname.Sym(), tfn)
fn.SetDupok(true)
fn.SetWrapper(true) // ignore frame for panic+recover matching
// Select LSYM now.
asym := base.Ctxt.LookupABI(f.LSym.Name, wrapperABI)
asym.Type = objabi.STEXT
if fn.LSym != nil {
panic("unexpected")
}
fn.LSym = asym
// ABI0-to-ABIInternal wrappers will be mainly loading params from
// stack into registers (and/or storing stack locations back to
// registers after the wrapped call); in most cases they won't
// need to allocate stack space, so it should be OK to mark them
// as NOSPLIT in these cases. In addition, my assumption is that
// functions written in assembly are NOSPLIT in most (but not all)
// cases. In the case of an ABIInternal target that has too many
// parameters to fit into registers, the wrapper would need to
// allocate stack space, but this seems like an unlikely scenario.
// Hence: mark these wrappers NOSPLIT.
//
// ABIInternal-to-ABI0 wrappers on the other hand will be taking
// things in registers and pushing them onto the stack prior to
// the ABI0 call, meaning that they will always need to allocate
// stack space. If the compiler marks them as NOSPLIT this seems
// as though it could lead to situations where the linker's
// nosplit-overflow analysis would trigger a link failure. On the
// other hand, if they are not tagged NOSPLIT then this could cause
// problems when building the runtime (since there may be calls to
// asm routines in cases where it's not safe to grow the stack). In
// most cases the wrapper would be (in effect) inlined, but there may
// be indirect calls from the runtime that could run into trouble
// here.
// FIXME: at the moment all.bash does not pass when I leave out
// NOSPLIT for these wrappers, so all are currently tagged with NOSPLIT.
setupTextLSym(fn, obj.NOSPLIT|obj.ABIWRAPPER)
// Generate call. Use tail call if no params and no returns,
// but a regular call otherwise.
//
// Note: ideally we would be using a tail call in cases where
// there are params but no returns for ABI0->ABIInternal wrappers,
// provided that all params fit into registers (e.g. we don't have
// to allocate any stack space). Doing this will require some
// extra work in typecheck/walk/ssa, might want to add a new node
// OTAILCALL or something to this effect.
var tail ir.Node
if tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0 {
tail = nodSym(ir.ORETJMP, nil, f.Nname.Sym())
} else {
call := ir.Nod(ir.OCALL, f.Nname, nil)
call.PtrList().Set(paramNnames(tfn.Type()))
call.SetIsDDD(tfn.Type().IsVariadic())
tail = call
if tfn.Type().NumResults() > 0 {
n := ir.Nod(ir.ORETURN, nil, nil)
n.PtrList().Set1(call)
tail = n
}
}
fn.PtrBody().Append(tail)
funcbody()
if base.Debug.DclStack != 0 {
testdclstack()
}
typecheckFunc(fn)
Curfn = fn
typecheckslice(fn.Body().Slice(), ctxStmt)
escapeFuncs([]*ir.Func{fn}, false)
Target.Decls = append(Target.Decls, fn)
// Restore previous context.
base.Pos = savepos
dclcontext = savedclcontext
Curfn = savedcurfn
}
// initLSym defines f's obj.LSym and initializes it based on the
// properties of f. This includes setting the symbol flags and ABI and
// creating and initializing related DWARF symbols.
//
// initLSym must be called exactly once per function and must be
// called for both functions with bodies and functions without bodies.
// For body-less functions, we only create the LSym; for functions
// with bodies we call a helper to set up and populate the LSym.
func initLSym(f *ir.Func, hasBody bool) {
// FIXME: for new-style ABI wrappers, we set up the lsym at the
// point the wrapper is created.
if f.LSym != nil && base.Flag.ABIWrap {
return
}
selectLSym(f, hasBody)
if hasBody {
setupTextLSym(f, 0)
}
}
// selectLSym sets up the LSym for a given function, and
// makes calls to helpers to create ABI wrappers if needed.
func selectLSym(f *ir.Func, hasBody bool) {
if f.LSym != nil {
base.Fatalf("Func.initLSym called twice")
}
if nam := f.Nname; !ir.IsBlank(nam) {
var wrapperABI obj.ABI
needABIWrapper := false
defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()]
if hasDefABI && defABI == obj.ABI0 {
// Symbol is defined as ABI0. Create an
// Internal -> ABI0 wrapper.
f.LSym = nam.Sym().LinksymABI0()
needABIWrapper, wrapperABI = true, obj.ABIInternal
} else {
f.LSym = nam.Sym().Linksym()
// No ABI override. Check that the symbol is
// using the expected ABI.
want := obj.ABIInternal
if f.LSym.ABI() != want {
base.Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.LSym.Name, f.LSym.ABI(), want)
}
}
if f.Pragma&ir.Systemstack != 0 {
f.LSym.Set(obj.AttrCFunc, true)
}
isLinknameExported := nam.Sym().Linkname != "" && (hasBody || hasDefABI)
if abi, ok := symabiRefs[f.LSym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
// Either 1) this symbol is definitely
// referenced as ABI0 from this package; or 2)
// this symbol is defined in this package but
// given a linkname, indicating that it may be
// referenced from another package. Create an
// ABI0 -> Internal wrapper so it can be
// called as ABI0. In case 2, it's important
// that we know it's defined in this package
// since other packages may "pull" symbols
// using linkname and we don't want to create
// duplicate ABI wrappers.
if f.LSym.ABI() != obj.ABI0 {
needABIWrapper, wrapperABI = true, obj.ABI0
}
}
if needABIWrapper {
if !useABIWrapGen(f) {
// Fallback: use alias instead. FIXME.
// These LSyms have the same name as the
// native function, so we create them directly
// rather than looking them up. The uniqueness
// of f.lsym ensures uniqueness of asym.
asym := &obj.LSym{
Name: f.LSym.Name,
Type: objabi.SABIALIAS,
R: []obj.Reloc{{Sym: f.LSym}}, // 0 size, so "informational"
}
asym.SetABI(wrapperABI)
asym.Set(obj.AttrDuplicateOK, true)
base.Ctxt.ABIAliases = append(base.Ctxt.ABIAliases, asym)
} else {
if base.Debug.ABIWrap != 0 {
fmt.Fprintf(os.Stderr, "=-= %v to %v wrapper for %s.%s\n",
wrapperABI, 1-wrapperABI, types.LocalPkg.Path, f.LSym.Name)
}
makeABIWrapper(f, wrapperABI)
}
}
}
}
// setupTextLSym initializes the LSym for a with-body text symbol.
func setupTextLSym(f *ir.Func, flag int) {
if f.Dupok() {
flag |= obj.DUPOK
}
if f.Wrapper() {
flag |= obj.WRAPPER
}
if f.Needctxt() {
flag |= obj.NEEDCTXT
}
if f.Pragma&ir.Nosplit != 0 {
flag |= obj.NOSPLIT
}
if f.ReflectMethod() {
flag |= obj.REFLECTMETHOD
}
// Clumsy but important.
// See test/recover.go for test cases and src/reflect/value.go
// for the actual functions being considered.
if base.Ctxt.Pkgpath == "reflect" {
switch f.Sym().Name {
case "callReflect", "callMethod":
flag |= obj.WRAPPER
}
}
base.Ctxt.InitTextSym(f.LSym, flag)
}
func ggloblnod(nam ir.Node) {
s := nam.Sym().Linksym()
s.Gotype = ngotype(nam).Linksym()
flags := 0
if nam.Name().Readonly() {
flags = obj.RODATA
}
if nam.Type() != nil && !nam.Type().HasPointers() {
flags |= obj.NOPTR
}
base.Ctxt.Globl(s, nam.Type().Width, flags)
if nam.Name().LibfuzzerExtraCounter() {
s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
}
if nam.Sym().Linkname != "" {
// Make sure linkname'd symbol is non-package. When a symbol is
// both imported and linkname'd, s.Pkg may not be set to "_" in
// types.Sym.Linksym because LSym already exists. Set it here.
s.Pkg = "_"
}
}
func ggloblsym(s *obj.LSym, width int32, flags int16) {
if flags&obj.LOCAL != 0 {
s.Set(obj.AttrLocal, true)
flags &^= obj.LOCAL
}
base.Ctxt.Globl(s, int64(width), int(flags))
}
func Addrconst(a *obj.Addr, v int64) {
a.SetConst(v)
}
func Patch(p *obj.Prog, to *obj.Prog) {
p.To.SetTarget(to)
}

File diff suppressed because it is too large

View file

@ -7,29 +7,19 @@ package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/staticdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/archive"
"cmd/internal/bio"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
"crypto/sha256"
"encoding/json"
"fmt"
"go/constant"
"io"
"io/ioutil"
"os"
"sort"
"strconv"
)
// architecture-independent object file output
const ArhdrSize = 60
func formathdr(arhdr []byte, name string, size int64) {
copy(arhdr[:], fmt.Sprintf("%-16s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size))
}
// These modes say which kind of object file to generate.
// The default use of the toolchain is to set both bits,
// generating a combined compiler+linker object, one that
@ -91,7 +81,7 @@ func printObjHeader(bout *bio.Writer) {
}
func startArchiveEntry(bout *bio.Writer) int64 {
var arhdr [ArhdrSize]byte
var arhdr [archive.HeaderSize]byte
bout.Write(arhdr[:])
return bout.Offset()
}
@ -102,10 +92,10 @@ func finishArchiveEntry(bout *bio.Writer, start int64, name string) {
if size&1 != 0 {
bout.WriteByte(0)
}
bout.MustSeek(start-ArhdrSize, 0)
bout.MustSeek(start-archive.HeaderSize, 0)
var arhdr [ArhdrSize]byte
formathdr(arhdr[:], name, size)
var arhdr [archive.HeaderSize]byte
archive.FormatHeader(arhdr[:], name, size)
bout.Write(arhdr[:])
bout.Flush()
bout.MustSeek(start+size+(size&1), 0)
@ -117,19 +107,19 @@ func dumpCompilerObj(bout *bio.Writer) {
}
func dumpdata() {
numExterns := len(Target.Externs)
numDecls := len(Target.Decls)
numExterns := len(typecheck.Target.Externs)
numDecls := len(typecheck.Target.Decls)
dumpglobls(Target.Externs)
dumpfuncsyms()
addptabs()
numExports := len(Target.Exports)
addsignats(Target.Externs)
dumpsignats()
dumptabs()
numPTabs, numITabs := CountTabs()
dumpimportstrings()
dumpbasictypes()
dumpglobls(typecheck.Target.Externs)
staticdata.WriteFuncSyms()
reflectdata.CollectPTabs()
numExports := len(typecheck.Target.Exports)
addsignats(typecheck.Target.Externs)
reflectdata.WriteRuntimeTypes()
reflectdata.WriteTabs()
numPTabs, numITabs := reflectdata.CountTabs()
reflectdata.WriteImportStrings()
reflectdata.WriteBasicTypes()
dumpembeds()
// Calls to dumpsignats can generate functions,
@ -140,34 +130,34 @@ func dumpdata() {
// In the typical case, we loop 0 or 1 times.
// It was not until issue 24761 that we found any code that required a loop at all.
for {
for i := numDecls; i < len(Target.Decls); i++ {
n := Target.Decls[i]
for i := numDecls; i < len(typecheck.Target.Decls); i++ {
n := typecheck.Target.Decls[i]
if n.Op() == ir.ODCLFUNC {
funccompile(n.(*ir.Func))
}
}
numDecls = len(Target.Decls)
numDecls = len(typecheck.Target.Decls)
compileFunctions()
dumpsignats()
if numDecls == len(Target.Decls) {
reflectdata.WriteRuntimeTypes()
if numDecls == len(typecheck.Target.Decls) {
break
}
}
// Dump extra globals.
dumpglobls(Target.Externs[numExterns:])
dumpglobls(typecheck.Target.Externs[numExterns:])
if zerosize > 0 {
zero := mappkg.Lookup("zero")
ggloblsym(zero.Linksym(), int32(zerosize), obj.DUPOK|obj.RODATA)
if reflectdata.ZeroSize > 0 {
zero := ir.Pkgs.Map.Lookup("zero")
objw.Global(zero.Linksym(), int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA)
}
addGCLocals()
if numExports != len(Target.Exports) {
if numExports != len(typecheck.Target.Exports) {
base.Fatalf("Target.Exports changed after compile functions loop")
}
newNumPTabs, newNumITabs := CountTabs()
newNumPTabs, newNumITabs := reflectdata.CountTabs()
if newNumPTabs != numPTabs {
base.Fatalf("ptabs changed after compile functions loop")
}
@ -179,11 +169,11 @@ func dumpdata() {
func dumpLinkerObj(bout *bio.Writer) {
printObjHeader(bout)
if len(Target.CgoPragmas) != 0 {
if len(typecheck.Target.CgoPragmas) != 0 {
// write empty export section; must be before cgo section
fmt.Fprintf(bout, "\n$$\n\n$$\n\n")
fmt.Fprintf(bout, "\n$$ // cgo\n")
if err := json.NewEncoder(bout).Encode(Target.CgoPragmas); err != nil {
if err := json.NewEncoder(bout).Encode(typecheck.Target.CgoPragmas); err != nil {
base.Fatalf("serializing pragcgobuf: %v", err)
}
fmt.Fprintf(bout, "\n$$\n\n")
@ -194,47 +184,17 @@ func dumpLinkerObj(bout *bio.Writer) {
obj.WriteObjFile(base.Ctxt, bout)
}
func addptabs() {
if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" {
return
}
for _, exportn := range Target.Exports {
s := exportn.Sym()
nn := ir.AsNode(s.Def)
if nn == nil {
continue
}
if nn.Op() != ir.ONAME {
continue
}
n := nn.(*ir.Name)
if !types.IsExported(s.Name) {
continue
}
if s.Pkg.Name != "main" {
continue
}
if n.Type().Kind() == types.TFUNC && n.Class() == ir.PFUNC {
// function
ptabs = append(ptabs, ptabEntry{s: s, t: s.Def.Type()})
} else {
// variable
ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(s.Def.Type())})
}
}
}
func dumpGlobal(n *ir.Name) {
if n.Type() == nil {
base.Fatalf("external %v nil type\n", n)
}
if n.Class() == ir.PFUNC {
if n.Class_ == ir.PFUNC {
return
}
if n.Sym().Pkg != types.LocalPkg {
return
}
dowidth(n.Type())
types.CalcSize(n.Type())
ggloblnod(n)
}
@ -255,11 +215,11 @@ func dumpGlobalConst(n ir.Node) {
if t.IsUntyped() {
// Export untyped integers as int (if they fit).
t = types.Types[types.TINT]
if doesoverflow(v, t) {
if ir.ConstOverflow(v, t) {
return
}
}
base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, typesymname(t), ir.IntVal(t, v))
base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, types.TypeSymName(t), ir.IntVal(t, v))
}
func dumpglobls(externs []ir.Node) {
@ -274,17 +234,6 @@ func dumpglobls(externs []ir.Node) {
}
}
func dumpfuncsyms() {
sort.Slice(funcsyms, func(i, j int) bool {
return funcsyms[i].LinksymName() < funcsyms[j].LinksymName()
})
for _, s := range funcsyms {
sf := s.Pkg.Lookup(funcsymname(s)).Linksym()
dsymptr(sf, 0, s.Linksym(), 0)
ggloblsym(sf, int32(Widthptr), obj.DUPOK|obj.RODATA)
}
}
// addGCLocals adds gcargs, gclocals, gcregs, and stack object symbols to Ctxt.Data.
//
// This is done during the sequential phase after compilation, since
@ -297,328 +246,53 @@ func addGCLocals() {
}
for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals} {
if gcsym != nil && !gcsym.OnList() {
ggloblsym(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
objw.Global(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
}
}
if x := fn.StackObjects; x != nil {
attr := int16(obj.RODATA)
ggloblsym(x, int32(len(x.P)), attr)
objw.Global(x, int32(len(x.P)), attr)
x.Set(obj.AttrStatic, true)
}
if x := fn.OpenCodedDeferInfo; x != nil {
ggloblsym(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
}
}
}
func duintxx(s *obj.LSym, off int, v uint64, wid int) int {
if off&(wid-1) != 0 {
base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
func ggloblnod(nam ir.Node) {
s := nam.Sym().Linksym()
s.Gotype = reflectdata.TypeSym(nam.Type()).Linksym()
flags := 0
if nam.Name().Readonly() {
flags = obj.RODATA
}
if nam.Type() != nil && !nam.Type().HasPointers() {
flags |= obj.NOPTR
}
base.Ctxt.Globl(s, nam.Type().Width, flags)
if nam.Name().LibfuzzerExtraCounter() {
s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
}
if nam.Sym().Linkname != "" {
// Make sure linkname'd symbol is non-package. When a symbol is
// both imported and linkname'd, s.Pkg may not be set to "_" in
// types.Sym.Linksym because LSym already exists. Set it here.
s.Pkg = "_"
}
s.WriteInt(base.Ctxt, int64(off), wid, int64(v))
return off + wid
}
func duint8(s *obj.LSym, off int, v uint8) int {
return duintxx(s, off, uint64(v), 1)
func dumpembeds() {
for _, v := range typecheck.Target.Embeds {
staticdata.WriteEmbed(v)
}
}
func duint16(s *obj.LSym, off int, v uint16) int {
return duintxx(s, off, uint64(v), 2)
}
func duint32(s *obj.LSym, off int, v uint32) int {
return duintxx(s, off, uint64(v), 4)
}
func duintptr(s *obj.LSym, off int, v uint64) int {
return duintxx(s, off, v, Widthptr)
}
func dbvec(s *obj.LSym, off int, bv bvec) int {
// Runtime reads the bitmaps as byte arrays. Oblige.
for j := 0; int32(j) < bv.n; j += 8 {
word := bv.b[j/32]
off = duint8(s, off, uint8(word>>(uint(j)%32)))
}
return off
}
const (
stringSymPrefix = "go.string."
stringSymPattern = ".gostring.%d.%x"
)
// stringsym returns a symbol containing the string s.
// The symbol contains the string data, not a string header.
func stringsym(pos src.XPos, s string) (data *obj.LSym) {
var symname string
if len(s) > 100 {
// Huge strings are hashed to avoid long names in object files.
// Indulge in some paranoia by writing the length of s, too,
// as protection against length extension attacks.
// Same pattern is known to fileStringSym below.
h := sha256.New()
io.WriteString(h, s)
symname = fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil))
} else {
// Small strings get named directly by their contents.
symname = strconv.Quote(s)
}
symdata := base.Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
off := dstringdata(symdata, 0, s, pos, "string")
ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
symdata.Set(obj.AttrContentAddressable, true)
}
return symdata
}
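// A hedged sketch of the naming scheme implemented above (symbolNameFor
// is hypothetical):
//
//	func symbolNameFor(s string) string {
//		if len(s) > 100 {
//			h := sha256.New()
//			io.WriteString(h, s)
//			// the length is folded into the name, as above
//			return stringSymPrefix + fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil))
//		}
//		return stringSymPrefix + strconv.Quote(s)
//	}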
// fileStringSym returns a symbol for the contents and the size of file.
// If readonly is true, the symbol shares storage with any literal string
// or other file with the same content and is placed in a read-only section.
// If readonly is false, the symbol is a read-write copy separate from any other,
// for use as the backing store of a []byte.
// The content hash of file is copied into hash. (If hash is nil, nothing is copied.)
// The returned symbol contains the data itself, not a string header.
func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) {
f, err := os.Open(file)
if err != nil {
return nil, 0, err
}
defer f.Close()
info, err := f.Stat()
if err != nil {
return nil, 0, err
}
if !info.Mode().IsRegular() {
return nil, 0, fmt.Errorf("not a regular file")
}
size := info.Size()
if size <= 1*1024 {
data, err := ioutil.ReadAll(f)
if err != nil {
return nil, 0, err
}
if int64(len(data)) != size {
return nil, 0, fmt.Errorf("file changed between reads")
}
var sym *obj.LSym
if readonly {
sym = stringsym(pos, string(data))
} else {
sym = slicedata(pos, string(data)).Sym().Linksym()
}
if len(hash) > 0 {
sum := sha256.Sum256(data)
copy(hash, sum[:])
}
return sym, size, nil
}
if size > 2e9 {
// ggloblsym takes an int32,
// and probably the rest of the toolchain
// can't handle such big symbols either.
// See golang.org/issue/9862.
return nil, 0, fmt.Errorf("file too large")
}
// File is too big to read and keep in memory.
// Compute hash if needed for read-only content hashing or if the caller wants it.
var sum []byte
if readonly || len(hash) > 0 {
h := sha256.New()
n, err := io.Copy(h, f)
if err != nil {
return nil, 0, err
}
if n != size {
return nil, 0, fmt.Errorf("file changed between reads")
}
sum = h.Sum(nil)
copy(hash, sum)
}
var symdata *obj.LSym
if readonly {
symname := fmt.Sprintf(stringSymPattern, size, sum)
symdata = base.Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
info := symdata.NewFileInfo()
info.Name = file
info.Size = size
ggloblsym(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
// Note: AttrContentAddressable cannot be set here,
// because the content-addressable-handling code
// does not know about file symbols.
}
} else {
// Emit a zero-length data symbol
// and then fix up length and content to use file.
symdata = slicedata(pos, "").Sym().Linksym()
symdata.Size = size
symdata.Type = objabi.SNOPTRDATA
info := symdata.NewFileInfo()
info.Name = file
info.Size = size
}
return symdata, size, nil
}
var slicedataGen int
func slicedata(pos src.XPos, s string) *ir.Name {
slicedataGen++
symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
sym := types.LocalPkg.Lookup(symname)
symnode := NewName(sym)
sym.Def = symnode
lsym := sym.Linksym()
off := dstringdata(lsym, 0, s, pos, "slice")
ggloblsym(lsym, int32(off), obj.NOPTR|obj.LOCAL)
return symnode
}
func slicebytes(nam *ir.Name, off int64, s string) {
if nam.Op() != ir.ONAME {
base.Fatalf("slicebytes %v", nam)
}
slicesym(nam, off, slicedata(nam.Pos(), s), int64(len(s)))
}
func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
// Objects that are too large will cause the data section to overflow right away,
// causing a cryptic error message by the linker. Check for oversize objects here
// and provide a useful error message instead.
if int64(len(t)) > 2e9 {
base.ErrorfAt(pos, "%v with length %v is too big", what, len(t))
return 0
}
s.WriteString(base.Ctxt, int64(off), len(t), t)
return off + len(t)
}
func dsymptr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
off = int(Rnd(int64(off), int64(Widthptr)))
s.WriteAddr(base.Ctxt, int64(off), Widthptr, x, int64(xoff))
off += Widthptr
return off
}
func dsymptrOff(s *obj.LSym, off int, x *obj.LSym) int {
s.WriteOff(base.Ctxt, int64(off), x, 0)
off += 4
return off
}
func dsymptrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
s.WriteWeakOff(base.Ctxt, int64(off), x, 0)
off += 4
return off
}
// slicesym writes a static slice symbol {&arr, lencap, lencap} to n+noff.
// slicesym does not modify n.
func slicesym(n *ir.Name, noff int64, arr *ir.Name, lencap int64) {
s := n.Sym().Linksym()
if arr.Op() != ir.ONAME {
base.Fatalf("slicesym non-name arr %v", arr)
}
s.WriteAddr(base.Ctxt, noff, Widthptr, arr.Sym().Linksym(), 0)
s.WriteInt(base.Ctxt, noff+sliceLenOffset, Widthptr, lencap)
s.WriteInt(base.Ctxt, noff+sliceCapOffset, Widthptr, lencap)
}
// addrsym writes the static address of a to n. a must be an ONAME.
// Neither n nor a is modified.
func addrsym(n *ir.Name, noff int64, a *ir.Name, aoff int64) {
if n.Op() != ir.ONAME {
base.Fatalf("addrsym n op %v", n.Op())
}
if n.Sym() == nil {
base.Fatalf("addrsym nil n sym")
}
if a.Op() != ir.ONAME {
base.Fatalf("addrsym a op %v", a.Op())
}
s := n.Sym().Linksym()
s.WriteAddr(base.Ctxt, noff, Widthptr, a.Sym().Linksym(), aoff)
}
// pfuncsym writes the static address of f to n. f must be a global function.
// Neither n nor f is modified.
func pfuncsym(n *ir.Name, noff int64, f *ir.Name) {
if n.Op() != ir.ONAME {
base.Fatalf("pfuncsym n op %v", n.Op())
}
if n.Sym() == nil {
base.Fatalf("pfuncsym nil n sym")
}
if f.Class() != ir.PFUNC {
base.Fatalf("pfuncsym class not PFUNC %d", f.Class())
}
s := n.Sym().Linksym()
s.WriteAddr(base.Ctxt, noff, Widthptr, funcsym(f.Sym()).Linksym(), 0)
}
// litsym writes the static literal c to n.
// Neither n nor c is modified.
func litsym(n *ir.Name, noff int64, c ir.Node, wid int) {
if n.Op() != ir.ONAME {
base.Fatalf("litsym n op %v", n.Op())
}
if n.Sym() == nil {
base.Fatalf("litsym nil n sym")
}
if c.Op() == ir.ONIL {
return
}
if c.Op() != ir.OLITERAL {
base.Fatalf("litsym c op %v", c.Op())
}
s := n.Sym().Linksym()
switch u := c.Val(); u.Kind() {
case constant.Bool:
i := int64(obj.Bool2int(constant.BoolVal(u)))
s.WriteInt(base.Ctxt, noff, wid, i)
case constant.Int:
s.WriteInt(base.Ctxt, noff, wid, ir.IntVal(c.Type(), u))
case constant.Float:
f, _ := constant.Float64Val(u)
switch c.Type().Kind() {
case types.TFLOAT32:
s.WriteFloat32(base.Ctxt, noff, float32(f))
case types.TFLOAT64:
s.WriteFloat64(base.Ctxt, noff, f)
}
case constant.Complex:
re, _ := constant.Float64Val(constant.Real(u))
im, _ := constant.Float64Val(constant.Imag(u))
switch c.Type().Kind() {
case types.TCOMPLEX64:
s.WriteFloat32(base.Ctxt, noff, float32(re))
s.WriteFloat32(base.Ctxt, noff+4, float32(im))
case types.TCOMPLEX128:
s.WriteFloat64(base.Ctxt, noff, re)
s.WriteFloat64(base.Ctxt, noff+8, im)
}
case constant.String:
i := constant.StringVal(u)
symdata := stringsym(n.Pos(), i)
s.WriteAddr(base.Ctxt, noff, Widthptr, symdata, 0)
s.WriteInt(base.Ctxt, noff+int64(Widthptr), Widthptr, int64(len(i)))
default:
base.Fatalf("litsym unhandled OLITERAL %v", c)
func addsignats(dcls []ir.Node) {
// copy types from dcl list to signatset
for _, n := range dcls {
if n.Op() == ir.OTYPE {
reflectdata.NeedRuntimeType(n.Type())
}
}
}

View file

@ -1,95 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
"cmd/internal/sys"
)
// The racewalk pass is currently handled in three parts.
//
// First, for flag_race, it inserts calls to racefuncenter and
// racefuncexit at the start and end (respectively) of each
// function. This is handled below.
//
// Second, during buildssa, it inserts appropriate instrumentation
// calls immediately before each memory load or store. This is handled
// by the (*state).instrument method in ssa.go, so here we just set
// the Func.InstrumentBody flag as needed. For background on why this
// is done during SSA construction rather than a separate SSA pass,
// see issue #19054.
//
// Third we remove calls to racefuncenter and racefuncexit, for leaf
// functions without instrumented operations. This is done as part of
// ssa opt pass via special rule.
// TODO(dvyukov): do not instrument initialization as writes:
// a := make([]int, 10)
// Do not instrument the following packages at all,
// at best instrumentation would cause infinite recursion.
var omit_pkgs = []string{
"runtime/internal/atomic",
"runtime/internal/sys",
"runtime/internal/math",
"runtime",
"runtime/race",
"runtime/msan",
"internal/cpu",
}
// Don't insert racefuncenterfp/racefuncexit into the following packages.
// Memory accesses in the packages are either uninteresting or will cause false positives.
var norace_inst_pkgs = []string{"sync", "sync/atomic"}
func ispkgin(pkgs []string) bool {
if base.Ctxt.Pkgpath != "" {
for _, p := range pkgs {
if base.Ctxt.Pkgpath == p {
return true
}
}
}
return false
}
func instrument(fn *ir.Func) {
if fn.Pragma&ir.Norace != 0 || (fn.Sym().Linksym() != nil && fn.Sym().Linksym().ABIWrapper()) {
return
}
if !base.Flag.Race || !ispkgin(norace_inst_pkgs) {
fn.SetInstrumentBody(true)
}
if base.Flag.Race {
lno := base.Pos
base.Pos = src.NoXPos
if thearch.LinkArch.Arch.Family != sys.AMD64 {
fn.Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
fn.Exit.Append(mkcall("racefuncexit", nil, nil))
} else {
// nodpc is the PC of the caller as extracted by
// getcallerpc. We use -widthptr(FP) for x86.
// This only works for amd64. This will not
// work on arm or others that might support
// race in the future.
nodpc := nodfp.CloneName()
nodpc.SetType(types.Types[types.TUINTPTR])
nodpc.SetFrameOffset(int64(-Widthptr))
fn.Dcl = append(fn.Dcl, nodpc)
fn.Enter.Prepend(mkcall("racefuncenter", nil, nil, nodpc))
fn.Exit.Append(mkcall("racefuncexit", nil, nil))
}
base.Pos = lno
}
}
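
A rough user-level analogue of what instrument arranges, with stand-in functions for the runtime hooks (hypothetical names and signatures; the real hooks live in the runtime and additionally receive a caller PC on amd64):

package main

// Stand-ins for the runtime race hooks, for illustration only.
func racefuncenter() {}
func racefuncexit()  {}

// With -race, each function body is bracketed by enter/exit calls, and
// each load and store is instrumented later, during SSA construction.
func store(p *int, v int) {
	racefuncenter()
	*p = v // a race-write check guards this store in instrumented code
	racefuncexit()
}

func main() {
	x := 0
	store(&x, 1)
	_ = x
}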


@ -1,617 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/sys"
"unicode/utf8"
)
// range
func typecheckrange(n *ir.RangeStmt) {
// Typechecking order is important here:
// 0. first typecheck range expression (slice/map/chan),
// it is evaluated only once and so logically it is not part of the loop.
// 1. typecheck produced values,
// this part can declare new vars and so it must be typechecked before body,
// because body can contain a closure that captures the vars.
// 2. decldepth++ to denote loop body.
// 3. typecheck body.
// 4. decldepth--.
typecheckrangeExpr(n)
// second half of dance, the first half being typecheckrangeExpr
n.SetTypecheck(1)
ls := n.List().Slice()
for i1, n1 := range ls {
if n1.Typecheck() == 0 {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
}
}
decldepth++
typecheckslice(n.Body().Slice(), ctxStmt)
decldepth--
}
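
The ordering constraint described in the comment above is easiest to see with a closure in the loop body: the iteration variable must already be typechecked when the closure capturing it is. A small example of the shape involved:

package main

import "fmt"

func main() {
	var fs []func() int
	for i := range [3]int{} {
		i := i // per-iteration copy (required before Go 1.22)
		fs = append(fs, func() int { return i }) // captures i
	}
	for _, f := range fs {
		fmt.Println(f()) // 0 1 2
	}
}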
func typecheckrangeExpr(n *ir.RangeStmt) {
n.SetRight(typecheck(n.Right(), ctxExpr))
t := n.Right().Type()
if t == nil {
return
}
// delicate little dance. see typecheckas2
ls := n.List().Slice()
for i1, n1 := range ls {
if !ir.DeclaredBy(n1, n) {
ls[i1] = typecheck(ls[i1], ctxExpr|ctxAssign)
}
}
if t.IsPtr() && t.Elem().IsArray() {
t = t.Elem()
}
n.SetType(t)
var t1, t2 *types.Type
toomany := false
switch t.Kind() {
default:
base.ErrorfAt(n.Pos(), "cannot range over %L", n.Right())
return
case types.TARRAY, types.TSLICE:
t1 = types.Types[types.TINT]
t2 = t.Elem()
case types.TMAP:
t1 = t.Key()
t2 = t.Elem()
case types.TCHAN:
if !t.ChanDir().CanRecv() {
base.ErrorfAt(n.Pos(), "invalid operation: range %v (receive from send-only type %v)", n.Right(), n.Right().Type())
return
}
t1 = t.Elem()
t2 = nil
if n.List().Len() == 2 {
toomany = true
}
case types.TSTRING:
t1 = types.Types[types.TINT]
t2 = types.RuneType
}
if n.List().Len() > 2 || toomany {
base.ErrorfAt(n.Pos(), "too many variables in range")
}
var v1, v2 ir.Node
if n.List().Len() != 0 {
v1 = n.List().First()
}
if n.List().Len() > 1 {
v2 = n.List().Second()
}
// this is not only an optimization but also a requirement in the spec.
// "if the second iteration variable is the blank identifier, the range
// clause is equivalent to the same clause with only the first variable
// present."
if ir.IsBlank(v2) {
if v1 != nil {
n.PtrList().Set1(v1)
}
v2 = nil
}
if v1 != nil {
if ir.DeclaredBy(v1, n) {
v1.SetType(t1)
} else if v1.Type() != nil {
if op, why := assignop(t1, v1.Type()); op == ir.OXXX {
base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t1, v1, why)
}
}
checkassign(n, v1)
}
if v2 != nil {
if ir.DeclaredBy(v2, n) {
v2.SetType(t2)
} else if v2.Type() != nil {
if op, why := assignop(t2, v2.Type()); op == ir.OXXX {
base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t2, v2, why)
}
}
checkassign(n, v2)
}
}
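
The blank-identifier rule quoted above is observable from ordinary Go: both loops below are equivalent, and no element value is ever loaded when the second variable is blank.

package main

import "fmt"

func main() {
	a := []int{10, 20, 30}
	for i, _ := range a { // treated as "for i := range a"
		fmt.Println(i)
	}
	for i := range a {
		fmt.Println(i)
	}
}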
func cheapComputableIndex(width int64) bool {
switch thearch.LinkArch.Family {
// MIPS does not have R+R addressing
// Arm64 may lack ability to generate this code in our assembler,
// but the architecture supports it.
case sys.PPC64, sys.S390X:
return width == 1
case sys.AMD64, sys.I386, sys.ARM64, sys.ARM:
switch width {
case 1, 2, 4, 8:
return true
}
}
return false
}
// walkrange transforms various forms of ORANGE into
// simpler forms. The result must be assigned back to n.
// Node n may also be modified in place, and may also be
// the returned node.
func walkrange(nrange *ir.RangeStmt) ir.Node {
if isMapClear(nrange) {
m := nrange.Right()
lno := setlineno(m)
n := mapClear(m)
base.Pos = lno
return n
}
nfor := ir.NodAt(nrange.Pos(), ir.OFOR, nil, nil)
nfor.SetInit(nrange.Init())
nfor.SetSym(nrange.Sym())
// variable name conventions:
// ohv1, hv1, hv2: hidden (old) val 1, 2
// ha, hit: hidden aggregate, iterator
// hn, hp: hidden len, pointer
// hb: hidden bool
// a, v1, v2: not hidden aggregate, val 1, 2
t := nrange.Type()
a := nrange.Right()
lno := setlineno(a)
var v1, v2 ir.Node
l := nrange.List().Len()
if l > 0 {
v1 = nrange.List().First()
}
if l > 1 {
v2 = nrange.List().Second()
}
if ir.IsBlank(v2) {
v2 = nil
}
if ir.IsBlank(v1) && v2 == nil {
v1 = nil
}
if v1 == nil && v2 != nil {
base.Fatalf("walkrange: v2 != nil while v1 == nil")
}
var ifGuard *ir.IfStmt
var body []ir.Node
var init []ir.Node
switch t.Kind() {
default:
base.Fatalf("walkrange")
case types.TARRAY, types.TSLICE:
if nn := arrayClear(nrange, v1, v2, a); nn != nil {
base.Pos = lno
return nn
}
// order.stmt arranged for a copy of the array/slice variable if needed.
ha := a
hv1 := temp(types.Types[types.TINT])
hn := temp(types.Types[types.TINT])
init = append(init, ir.Nod(ir.OAS, hv1, nil))
init = append(init, ir.Nod(ir.OAS, hn, ir.Nod(ir.OLEN, ha, nil)))
nfor.SetLeft(ir.Nod(ir.OLT, hv1, hn))
nfor.SetRight(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1))))
// for range ha { body }
if v1 == nil {
break
}
// for v1 := range ha { body }
if v2 == nil {
body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)}
break
}
// for v1, v2 := range ha { body }
if cheapComputableIndex(nrange.Type().Elem().Width) {
// v1, v2 = hv1, ha[hv1]
tmp := ir.Nod(ir.OINDEX, ha, hv1)
tmp.SetBounded(true)
// Use OAS2 to correctly handle assignments
// of the form "v1, a[v1] := range".
a := ir.Nod(ir.OAS2, nil, nil)
a.PtrList().Set2(v1, v2)
a.PtrRlist().Set2(hv1, tmp)
body = []ir.Node{a}
break
}
// TODO(austin): OFORUNTIL is a strange beast, but is
// necessary for expressing the control flow we need
// while also making "break" and "continue" work. It
// would be nice to just lower ORANGE during SSA, but
// racewalk needs to see many of the operations
// involved in ORANGE's implementation. If racewalk
// moves into SSA, consider moving ORANGE into SSA and
// eliminating OFORUNTIL.
// TODO(austin): OFORUNTIL inhibits bounds-check
// elimination on the index variable (see #20711).
// Enhance the prove pass to understand this.
ifGuard = ir.NewIfStmt(base.Pos, nil, nil, nil)
ifGuard.SetLeft(ir.Nod(ir.OLT, hv1, hn))
nfor.SetOp(ir.OFORUNTIL)
hp := temp(types.NewPtr(nrange.Type().Elem()))
tmp := ir.Nod(ir.OINDEX, ha, nodintconst(0))
tmp.SetBounded(true)
init = append(init, ir.Nod(ir.OAS, hp, nodAddr(tmp)))
// Use OAS2 to correctly handle assignments
// of the form "v1, a[v1] := range".
a := ir.Nod(ir.OAS2, nil, nil)
a.PtrList().Set2(v1, v2)
a.PtrRlist().Set2(hv1, ir.Nod(ir.ODEREF, hp, nil))
body = append(body, a)
// Advance pointer as part of the late increment.
//
// This runs *after* the condition check, so we know
// advancing the pointer is safe and won't go past the
// end of the allocation.
as := ir.Nod(ir.OAS, hp, addptr(hp, t.Elem().Width))
nfor.PtrList().Set1(typecheck(as, ctxStmt))
case types.TMAP:
// order.stmt allocated the iterator for us.
// we only use a once, so no copy needed.
ha := a
hit := nrange.Prealloc
th := hit.Type()
keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter
elemsym := th.Field(1).Sym // ditto
fn := syslook("mapiterinit")
fn = substArgTypes(fn, t.Key(), t.Elem(), th)
init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nodAddr(hit)))
nfor.SetLeft(ir.Nod(ir.ONE, nodSym(ir.ODOT, hit, keysym), nodnil()))
fn = syslook("mapiternext")
fn = substArgTypes(fn, th)
nfor.SetRight(mkcall1(fn, nil, nil, nodAddr(hit)))
key := ir.Nod(ir.ODEREF, nodSym(ir.ODOT, hit, keysym), nil)
if v1 == nil {
body = nil
} else if v2 == nil {
body = []ir.Node{ir.Nod(ir.OAS, v1, key)}
} else {
elem := ir.Nod(ir.ODEREF, nodSym(ir.ODOT, hit, elemsym), nil)
a := ir.Nod(ir.OAS2, nil, nil)
a.PtrList().Set2(v1, v2)
a.PtrRlist().Set2(key, elem)
body = []ir.Node{a}
}
case types.TCHAN:
// order.stmt arranged for a copy of the channel variable.
ha := a
hv1 := temp(t.Elem())
hv1.SetTypecheck(1)
if t.Elem().HasPointers() {
init = append(init, ir.Nod(ir.OAS, hv1, nil))
}
hb := temp(types.Types[types.TBOOL])
nfor.SetLeft(ir.Nod(ir.ONE, hb, nodbool(false)))
a := ir.Nod(ir.OAS2RECV, nil, nil)
a.SetTypecheck(1)
a.PtrList().Set2(hv1, hb)
a.PtrRlist().Set1(ir.Nod(ir.ORECV, ha, nil))
nfor.Left().PtrInit().Set1(a)
if v1 == nil {
body = nil
} else {
body = []ir.Node{ir.Nod(ir.OAS, v1, hv1)}
}
// Zero hv1. This prevents hv1 from being the sole, inaccessible
// reference to an otherwise GC-able value during the next channel receive.
// See issue 15281.
body = append(body, ir.Nod(ir.OAS, hv1, nil))
case types.TSTRING:
// Transform string range statements like "for v1, v2 = range a" into
//
// ha := a
// for hv1 := 0; hv1 < len(ha); {
// hv1t := hv1
// hv2 := rune(ha[hv1])
// if hv2 < utf8.RuneSelf {
// hv1++
// } else {
// hv2, hv1 = decoderune(ha, hv1)
// }
// v1, v2 = hv1t, hv2
// // original body
// }
// order.stmt arranged for a copy of the string variable.
ha := a
hv1 := temp(types.Types[types.TINT])
hv1t := temp(types.Types[types.TINT])
hv2 := temp(types.RuneType)
// hv1 := 0
init = append(init, ir.Nod(ir.OAS, hv1, nil))
// hv1 < len(ha)
nfor.SetLeft(ir.Nod(ir.OLT, hv1, ir.Nod(ir.OLEN, ha, nil)))
if v1 != nil {
// hv1t = hv1
body = append(body, ir.Nod(ir.OAS, hv1t, hv1))
}
// hv2 := rune(ha[hv1])
nind := ir.Nod(ir.OINDEX, ha, hv1)
nind.SetBounded(true)
body = append(body, ir.Nod(ir.OAS, hv2, conv(nind, types.RuneType)))
// if hv2 < utf8.RuneSelf
nif := ir.Nod(ir.OIF, nil, nil)
nif.SetLeft(ir.Nod(ir.OLT, hv2, nodintconst(utf8.RuneSelf)))
// hv1++
nif.PtrBody().Set1(ir.Nod(ir.OAS, hv1, ir.Nod(ir.OADD, hv1, nodintconst(1))))
// } else {
eif := ir.Nod(ir.OAS2, nil, nil)
nif.PtrRlist().Set1(eif)
// hv2, hv1 = decoderune(ha, hv1)
eif.PtrList().Set2(hv2, hv1)
fn := syslook("decoderune")
eif.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, ha, hv1))
body = append(body, nif)
if v1 != nil {
if v2 != nil {
// v1, v2 = hv1t, hv2
a := ir.Nod(ir.OAS2, nil, nil)
a.PtrList().Set2(v1, v2)
a.PtrRlist().Set2(hv1t, hv2)
body = append(body, a)
} else {
// v1 = hv1t
body = append(body, ir.Nod(ir.OAS, v1, hv1t))
}
}
}
typecheckslice(init, ctxStmt)
if ifGuard != nil {
ifGuard.PtrInit().Append(init...)
ifGuard = typecheck(ifGuard, ctxStmt).(*ir.IfStmt)
} else {
nfor.PtrInit().Append(init...)
}
typecheckslice(nfor.Left().Init().Slice(), ctxStmt)
nfor.SetLeft(typecheck(nfor.Left(), ctxExpr))
nfor.SetLeft(defaultlit(nfor.Left(), nil))
nfor.SetRight(typecheck(nfor.Right(), ctxStmt))
typecheckslice(body, ctxStmt)
nfor.PtrBody().Append(body...)
nfor.PtrBody().Append(nrange.Body().Slice()...)
var n ir.Node = nfor
if ifGuard != nil {
ifGuard.PtrBody().Set1(n)
n = ifGuard
}
n = walkstmt(n)
base.Pos = lno
return n
}
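
The TSTRING rewrite above is why ranging over a string yields rune start offsets rather than consecutive indices:

package main

import "fmt"

func main() {
	for i, r := range "aé☺" {
		fmt.Printf("%d %q\n", i, r) // 0 'a', 1 'é', 3 '☺': i advances by UTF-8 width
	}
}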
// isMapClear checks if n is of the form:
//
// for k := range m {
// delete(m, k)
// }
//
// where == for keys of map m is reflexive.
func isMapClear(n *ir.RangeStmt) bool {
if base.Flag.N != 0 || instrumenting {
return false
}
if n.Op() != ir.ORANGE || n.Type().Kind() != types.TMAP || n.List().Len() != 1 {
return false
}
k := n.List().First()
if k == nil || ir.IsBlank(k) {
return false
}
// Require k to be a new variable name.
if !ir.DeclaredBy(k, n) {
return false
}
if n.Body().Len() != 1 {
return false
}
stmt := n.Body().First() // only stmt in body
if stmt == nil || stmt.Op() != ir.ODELETE {
return false
}
m := n.Right()
if delete := stmt.(*ir.CallExpr); !samesafeexpr(delete.List().First(), m) || !samesafeexpr(delete.List().Second(), k) {
return false
}
// Keys where equality is not reflexive cannot be deleted from maps.
if !isreflexive(m.Type().Key()) {
return false
}
return true
}
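
The source pattern isMapClear recognizes is ordinary Go; when matched, the loop below compiles to a single runtime map-clear call instead of per-key deletes:

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2}
	for k := range m { // k declared by the range; body is exactly delete(m, k)
		delete(m, k)
	}
	fmt.Println(len(m)) // 0
}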
// mapClear constructs a call to runtime.mapclear for the map m.
func mapClear(m ir.Node) ir.Node {
t := m.Type()
// instantiate mapclear(typ *type, hmap map[any]any)
fn := syslook("mapclear")
fn = substArgTypes(fn, t.Key(), t.Elem())
n := mkcall1(fn, nil, nil, typename(t), m)
return walkstmt(typecheck(n, ctxStmt))
}
// Lower n into runtime·memclr if possible, for
// fast zeroing of slices and arrays (issue 5373).
// Look for instances of
//
// for i := range a {
// a[i] = zero
// }
//
// in which the evaluation of a is side-effect-free.
//
// Parameters are as in walkrange: "for v1, v2 = range a".
func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
if base.Flag.N != 0 || instrumenting {
return nil
}
if v1 == nil || v2 != nil {
return nil
}
if loop.Body().Len() != 1 || loop.Body().First() == nil {
return nil
}
stmt1 := loop.Body().First() // only stmt in body
if stmt1.Op() != ir.OAS {
return nil
}
stmt := stmt1.(*ir.AssignStmt)
if stmt.Left().Op() != ir.OINDEX {
return nil
}
lhs := stmt.Left().(*ir.IndexExpr)
if !samesafeexpr(lhs.Left(), a) || !samesafeexpr(lhs.Right(), v1) {
return nil
}
elemsize := loop.Type().Elem().Width
if elemsize <= 0 || !isZero(stmt.Right()) {
return nil
}
// Convert to
// if len(a) != 0 {
// hp = &a[0]
// hn = len(a)*sizeof(elem(a))
// memclr{NoHeap,Has}Pointers(hp, hn)
// i = len(a) - 1
// }
n := ir.Nod(ir.OIF, nil, nil)
n.PtrBody().Set(nil)
n.SetLeft(ir.Nod(ir.ONE, ir.Nod(ir.OLEN, a, nil), nodintconst(0)))
// hp = &a[0]
hp := temp(types.Types[types.TUNSAFEPTR])
ix := ir.Nod(ir.OINDEX, a, nodintconst(0))
ix.SetBounded(true)
addr := convnop(nodAddr(ix), types.Types[types.TUNSAFEPTR])
n.PtrBody().Append(ir.Nod(ir.OAS, hp, addr))
// hn = len(a) * sizeof(elem(a))
hn := temp(types.Types[types.TUINTPTR])
mul := conv(ir.Nod(ir.OMUL, ir.Nod(ir.OLEN, a, nil), nodintconst(elemsize)), types.Types[types.TUINTPTR])
n.PtrBody().Append(ir.Nod(ir.OAS, hn, mul))
var fn ir.Node
if a.Type().Elem().HasPointers() {
// memclrHasPointers(hp, hn)
Curfn.SetWBPos(stmt.Pos())
fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
} else {
// memclrNoHeapPointers(hp, hn)
fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn)
}
n.PtrBody().Append(fn)
// i = len(a) - 1
v1 = ir.Nod(ir.OAS, v1, ir.Nod(ir.OSUB, ir.Nod(ir.OLEN, a, nil), nodintconst(1)))
n.PtrBody().Append(v1)
n.SetLeft(typecheck(n.Left(), ctxExpr))
n.SetLeft(defaultlit(n.Left(), nil))
typecheckslice(n.Body().Slice(), ctxStmt)
return walkstmt(n)
}
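
The source pattern arrayClear recognizes, which becomes a length test plus one memclr call:

package main

import "fmt"

func main() {
	a := make([]int, 1024)
	a[0] = 7
	for i := range a { // full-range store of the zero value: lowered to memclr
		a[i] = 0
	}
	fmt.Println(a[0]) // 0
}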
// addptr returns (*T)(uintptr(p) + n).
func addptr(p ir.Node, n int64) ir.Node {
t := p.Type()
p = ir.Nod(ir.OCONVNOP, p, nil)
p.SetType(types.Types[types.TUINTPTR])
p = ir.Nod(ir.OADD, p, nodintconst(n))
p = ir.Nod(ir.OCONVNOP, p, nil)
p.SetType(t)
return p
}
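
addptr's (*T)(uintptr(p) + n) has a direct user-level analogue in package unsafe; a sketch (only valid while p+n stays inside the same allocation, which walkrange guarantees by advancing the pointer after the loop condition has been checked):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	a := [4]int64{10, 20, 30, 40}
	p := &a[0]
	// (*int64)(uintptr(p) + 8): step one element forward.
	q := (*int64)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(a[0])))
	fmt.Println(*q) // 20
}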


@ -1,368 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
)
// select
func typecheckselect(sel *ir.SelectStmt) {
var def ir.Node
lno := setlineno(sel)
typecheckslice(sel.Init().Slice(), ctxStmt)
for _, ncase := range sel.List().Slice() {
ncase := ncase.(*ir.CaseStmt)
if ncase.List().Len() == 0 {
// default
if def != nil {
base.ErrorfAt(ncase.Pos(), "multiple defaults in select (first at %v)", ir.Line(def))
} else {
def = ncase
}
} else if ncase.List().Len() > 1 {
base.ErrorfAt(ncase.Pos(), "select cases cannot be lists")
} else {
ncase.List().SetFirst(typecheck(ncase.List().First(), ctxStmt))
n := ncase.List().First()
ncase.SetLeft(n)
ncase.PtrList().Set(nil)
oselrecv2 := func(dst, recv ir.Node, colas bool) {
n := ir.NodAt(n.Pos(), ir.OSELRECV2, nil, nil)
n.PtrList().Set2(dst, ir.BlankNode)
n.PtrRlist().Set1(recv)
n.SetColas(colas)
n.SetTypecheck(1)
ncase.SetLeft(n)
}
switch n.Op() {
default:
pos := n.Pos()
if n.Op() == ir.ONAME {
// We don't have the right position for ONAME nodes (see #15459 and
// others). Using ncase.Pos for now as it will provide the correct
// line number (assuming the expression follows the "case" keyword
// on the same line). This matches the approach before 1.10.
pos = ncase.Pos()
}
base.ErrorfAt(pos, "select case must be receive, send or assign recv")
case ir.OAS:
// convert x = <-c into x, _ = <-c
// remove implicit conversions; the eventual assignment
// will reintroduce them.
if r := n.Right(); r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE {
if r.Implicit() {
n.SetRight(r.Left())
}
}
if n.Right().Op() != ir.ORECV {
base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
break
}
oselrecv2(n.Left(), n.Right(), n.Colas())
case ir.OAS2RECV:
if n.Rlist().First().Op() != ir.ORECV {
base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
break
}
n.SetOp(ir.OSELRECV2)
case ir.ORECV:
// convert <-c into _, _ = <-c
oselrecv2(ir.BlankNode, n, false)
case ir.OSEND:
break
}
}
typecheckslice(ncase.Body().Slice(), ctxStmt)
}
base.Pos = lno
}
func walkselect(sel *ir.SelectStmt) {
lno := setlineno(sel)
if sel.Body().Len() != 0 {
base.Fatalf("double walkselect")
}
init := sel.Init().Slice()
sel.PtrInit().Set(nil)
init = append(init, walkselectcases(sel.List())...)
sel.SetList(ir.Nodes{})
sel.PtrBody().Set(init)
walkstmtlist(sel.Body().Slice())
base.Pos = lno
}
func walkselectcases(cases ir.Nodes) []ir.Node {
ncas := cases.Len()
sellineno := base.Pos
// optimization: zero-case select
if ncas == 0 {
return []ir.Node{mkcall("block", nil, nil)}
}
// optimization: one-case select: single op.
if ncas == 1 {
cas := cases.First().(*ir.CaseStmt)
setlineno(cas)
l := cas.Init().Slice()
if cas.Left() != nil { // not default:
n := cas.Left()
l = append(l, n.Init().Slice()...)
n.PtrInit().Set(nil)
switch n.Op() {
default:
base.Fatalf("select %v", n.Op())
case ir.OSEND:
// already ok
case ir.OSELRECV2:
r := n.(*ir.AssignListStmt)
if ir.IsBlank(r.List().First()) && ir.IsBlank(r.List().Second()) {
n = r.Rlist().First()
break
}
r.SetOp(ir.OAS2RECV)
}
l = append(l, n)
}
l = append(l, cas.Body().Slice()...)
l = append(l, ir.Nod(ir.OBREAK, nil, nil))
return l
}
// convert case value arguments to addresses.
// this rewrite is used by both the general code and the next optimization.
var dflt *ir.CaseStmt
for _, cas := range cases.Slice() {
cas := cas.(*ir.CaseStmt)
setlineno(cas)
n := cas.Left()
if n == nil {
dflt = cas
continue
}
switch n.Op() {
case ir.OSEND:
n.SetRight(nodAddr(n.Right()))
n.SetRight(typecheck(n.Right(), ctxExpr))
case ir.OSELRECV2:
if !ir.IsBlank(n.List().First()) {
n.List().SetIndex(0, nodAddr(n.List().First()))
n.List().SetIndex(0, typecheck(n.List().First(), ctxExpr))
}
}
}
// optimization: two-case select but one is default: single non-blocking op.
if ncas == 2 && dflt != nil {
cas := cases.First().(*ir.CaseStmt)
if cas == dflt {
cas = cases.Second().(*ir.CaseStmt)
}
n := cas.Left()
setlineno(n)
r := ir.Nod(ir.OIF, nil, nil)
r.PtrInit().Set(cas.Init().Slice())
var call ir.Node
switch n.Op() {
default:
base.Fatalf("select %v", n.Op())
case ir.OSEND:
// if selectnbsend(c, v) { body } else { default body }
ch := n.Left()
call = mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Right())
case ir.OSELRECV2:
recv := n.Rlist().First().(*ir.UnaryExpr)
ch := recv.Left()
elem := n.List().First()
if ir.IsBlank(elem) {
elem = nodnil()
}
if ir.IsBlank(n.List().Second()) {
// if selectnbrecv(&v, c) { body } else { default body }
call = mkcall1(chanfn("selectnbrecv", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, ch)
} else {
// TODO(cuonglm): make this use selectnbrecv()
// if selectnbrecv2(&v, &received, c) { body } else { default body }
receivedp := typecheck(nodAddr(n.List().Second()), ctxExpr)
call = mkcall1(chanfn("selectnbrecv2", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), elem, receivedp, ch)
}
}
r.SetLeft(typecheck(call, ctxExpr))
r.PtrBody().Set(cas.Body().Slice())
r.PtrRlist().Set(append(dflt.Init().Slice(), dflt.Body().Slice()...))
return []ir.Node{r, ir.Nod(ir.OBREAK, nil, nil)}
}
if dflt != nil {
ncas--
}
casorder := make([]*ir.CaseStmt, ncas)
nsends, nrecvs := 0, 0
var init []ir.Node
// generate sel-struct
base.Pos = sellineno
selv := temp(types.NewArray(scasetype(), int64(ncas)))
init = append(init, typecheck(ir.Nod(ir.OAS, selv, nil), ctxStmt))
// No initialization for order; runtime.selectgo is responsible for that.
order := temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
var pc0, pcs ir.Node
if base.Flag.Race {
pcs = temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
pc0 = typecheck(nodAddr(ir.Nod(ir.OINDEX, pcs, nodintconst(0))), ctxExpr)
} else {
pc0 = nodnil()
}
// register cases
for _, cas := range cases.Slice() {
cas := cas.(*ir.CaseStmt)
setlineno(cas)
init = append(init, cas.Init().Slice()...)
cas.PtrInit().Set(nil)
n := cas.Left()
if n == nil { // default:
continue
}
var i int
var c, elem ir.Node
switch n.Op() {
default:
base.Fatalf("select %v", n.Op())
case ir.OSEND:
i = nsends
nsends++
c = n.Left()
elem = n.Right()
case ir.OSELRECV2:
nrecvs++
i = ncas - nrecvs
recv := n.Rlist().First().(*ir.UnaryExpr)
c = recv.Left()
elem = n.List().First()
}
casorder[i] = cas
setField := func(f string, val ir.Node) {
r := ir.Nod(ir.OAS, nodSym(ir.ODOT, ir.Nod(ir.OINDEX, selv, nodintconst(int64(i))), lookup(f)), val)
init = append(init, typecheck(r, ctxStmt))
}
c = convnop(c, types.Types[types.TUNSAFEPTR])
setField("c", c)
if !ir.IsBlank(elem) {
elem = convnop(elem, types.Types[types.TUNSAFEPTR])
setField("elem", elem)
}
// TODO(mdempsky): There should be a cleaner way to
// handle this.
if base.Flag.Race {
r := mkcall("selectsetpc", nil, nil, nodAddr(ir.Nod(ir.OINDEX, pcs, nodintconst(int64(i)))))
init = append(init, r)
}
}
if nsends+nrecvs != ncas {
base.Fatalf("walkselectcases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
}
// run the select
base.Pos = sellineno
chosen := temp(types.Types[types.TINT])
recvOK := temp(types.Types[types.TBOOL])
r := ir.Nod(ir.OAS2, nil, nil)
r.PtrList().Set2(chosen, recvOK)
fn := syslook("selectgo")
r.PtrRlist().Set1(mkcall1(fn, fn.Type().Results(), nil, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, nodintconst(int64(nsends)), nodintconst(int64(nrecvs)), nodbool(dflt == nil)))
init = append(init, typecheck(r, ctxStmt))
// selv and order are no longer alive after selectgo.
init = append(init, ir.Nod(ir.OVARKILL, selv, nil))
init = append(init, ir.Nod(ir.OVARKILL, order, nil))
if base.Flag.Race {
init = append(init, ir.Nod(ir.OVARKILL, pcs, nil))
}
// dispatch cases
dispatch := func(cond ir.Node, cas *ir.CaseStmt) {
cond = typecheck(cond, ctxExpr)
cond = defaultlit(cond, nil)
r := ir.Nod(ir.OIF, cond, nil)
if n := cas.Left(); n != nil && n.Op() == ir.OSELRECV2 {
if !ir.IsBlank(n.List().Second()) {
x := ir.Nod(ir.OAS, n.List().Second(), recvOK)
r.PtrBody().Append(typecheck(x, ctxStmt))
}
}
r.PtrBody().AppendNodes(cas.PtrBody())
r.PtrBody().Append(ir.Nod(ir.OBREAK, nil, nil))
init = append(init, r)
}
if dflt != nil {
setlineno(dflt)
dispatch(ir.Nod(ir.OLT, chosen, nodintconst(0)), dflt)
}
for i, cas := range casorder {
setlineno(cas)
dispatch(ir.Nod(ir.OEQ, chosen, nodintconst(int64(i))), cas)
}
return init
}
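
The two-case-plus-default optimization above corresponds to the common non-blocking send/receive idiom; the general path instead fills selv and makes one selectgo call. A sketch of the source form, with the lowering noted in a comment:

package main

func trySend(c chan int, v int) bool {
	select {
	case c <- v: // lowered to: if selectnbsend(c, &v) { ... } else { ... }
		return true
	default:
		return false
	}
}

func main() {
	c := make(chan int, 1)
	println(trySend(c, 1)) // true
	println(trySend(c, 2)) // false: buffer full, default taken
}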
// bytePtrToIndex returns a Node representing "(*byte)(&n[i])".
func bytePtrToIndex(n ir.Node, i int64) ir.Node {
s := nodAddr(ir.Nod(ir.OINDEX, n, nodintconst(i)))
t := types.NewPtr(types.Types[types.TUINT8])
return convnop(s, t)
}
var scase *types.Type
// Keep in sync with src/runtime/select.go.
func scasetype() *types.Type {
if scase == nil {
scase = tostruct([]*ir.Field{
namedfield("c", types.Types[types.TUNSAFEPTR]),
namedfield("elem", types.Types[types.TUNSAFEPTR]),
})
scase.SetNoalg(true)
}
return scase
}
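
For context, the runtime-side definition this must stay in sync with had roughly this shape at the time (reproduced for illustration, not verbatim):

// src/runtime/select.go
//
//	type scase struct {
//		c    *hchan         // chan
//		elem unsafe.Pointer // data element
//	}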

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,762 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
"go/constant"
"go/token"
"sort"
)
// typecheckswitch typechecks a switch statement.
func typecheckswitch(n *ir.SwitchStmt) {
typecheckslice(n.Init().Slice(), ctxStmt)
if n.Left() != nil && n.Left().Op() == ir.OTYPESW {
typecheckTypeSwitch(n)
} else {
typecheckExprSwitch(n)
}
}
func typecheckTypeSwitch(n *ir.SwitchStmt) {
guard := n.Left().(*ir.TypeSwitchGuard)
guard.SetRight(typecheck(guard.Right(), ctxExpr))
t := guard.Right().Type()
if t != nil && !t.IsInterface() {
base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", guard.Right())
t = nil
}
// We don't actually declare the type switch's guarded
// declaration itself. So if there are no cases, we won't
// notice that it went unused.
if v := guard.Left(); v != nil && !ir.IsBlank(v) && n.List().Len() == 0 {
base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym())
}
var defCase, nilCase ir.Node
var ts typeSet
for _, ncase := range n.List().Slice() {
ncase := ncase.(*ir.CaseStmt)
ls := ncase.List().Slice()
if len(ls) == 0 { // default:
if defCase != nil {
base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
} else {
defCase = ncase
}
}
for i := range ls {
ls[i] = typecheck(ls[i], ctxExpr|ctxType)
n1 := ls[i]
if t == nil || n1.Type() == nil {
continue
}
var missing, have *types.Field
var ptr int
if ir.IsNil(n1) { // case nil:
if nilCase != nil {
base.ErrorfAt(ncase.Pos(), "multiple nil cases in type switch (first at %v)", ir.Line(nilCase))
} else {
nilCase = ncase
}
continue
}
if n1.Op() != ir.OTYPE {
base.ErrorfAt(ncase.Pos(), "%L is not a type", n1)
continue
}
if !n1.Type().IsInterface() && !implements(n1.Type(), t, &missing, &have, &ptr) && !missing.Broke() {
if have != nil && !have.Broke() {
base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
" (wrong type for %v method)\n\thave %v%S\n\twant %v%S", guard.Right(), n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
} else if ptr != 0 {
base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
" (%v method has pointer receiver)", guard.Right(), n1.Type(), missing.Sym)
} else {
base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
" (missing %v method)", guard.Right(), n1.Type(), missing.Sym)
}
continue
}
ts.add(ncase.Pos(), n1.Type())
}
if ncase.Rlist().Len() != 0 {
// Assign the clause variable's type.
vt := t
if len(ls) == 1 {
if ls[0].Op() == ir.OTYPE {
vt = ls[0].Type()
} else if !ir.IsNil(ls[0]) {
// Invalid single-type case;
// mark variable as broken.
vt = nil
}
}
nvar := ncase.Rlist().First()
nvar.SetType(vt)
if vt != nil {
nvar = typecheck(nvar, ctxExpr|ctxAssign)
} else {
// Clause variable is broken; prevent typechecking.
nvar.SetTypecheck(1)
nvar.SetWalkdef(1)
}
ncase.Rlist().SetFirst(nvar)
}
typecheckslice(ncase.Body().Slice(), ctxStmt)
}
}
type typeSet struct {
m map[string][]typeSetEntry
}
type typeSetEntry struct {
pos src.XPos
typ *types.Type
}
func (s *typeSet) add(pos src.XPos, typ *types.Type) {
if s.m == nil {
s.m = make(map[string][]typeSetEntry)
}
// LongString does not uniquely identify types, so we need to
// disambiguate collisions with types.Identical.
// TODO(mdempsky): Add a method that *is* unique.
ls := typ.LongString()
prevs := s.m[ls]
for _, prev := range prevs {
if types.Identical(typ, prev.typ) {
base.ErrorfAt(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev.pos))
return
}
}
s.m[ls] = append(prevs, typeSetEntry{pos, typ})
}
func typecheckExprSwitch(n *ir.SwitchStmt) {
t := types.Types[types.TBOOL]
if n.Left() != nil {
n.SetLeft(typecheck(n.Left(), ctxExpr))
n.SetLeft(defaultlit(n.Left(), nil))
t = n.Left().Type()
}
var nilonly string
if t != nil {
switch {
case t.IsMap():
nilonly = "map"
case t.Kind() == types.TFUNC:
nilonly = "func"
case t.IsSlice():
nilonly = "slice"
case !IsComparable(t):
if t.IsStruct() {
base.ErrorfAt(n.Pos(), "cannot switch on %L (struct containing %v cannot be compared)", n.Left(), IncomparableField(t).Type)
} else {
base.ErrorfAt(n.Pos(), "cannot switch on %L", n.Left())
}
t = nil
}
}
var defCase ir.Node
var cs constSet
for _, ncase := range n.List().Slice() {
ncase := ncase.(*ir.CaseStmt)
ls := ncase.List().Slice()
if len(ls) == 0 { // default:
if defCase != nil {
base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
} else {
defCase = ncase
}
}
for i := range ls {
setlineno(ncase)
ls[i] = typecheck(ls[i], ctxExpr)
ls[i] = defaultlit(ls[i], t)
n1 := ls[i]
if t == nil || n1.Type() == nil {
continue
}
if nilonly != "" && !ir.IsNil(n1) {
base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Left())
} else if t.IsInterface() && !n1.Type().IsInterface() && !IsComparable(n1.Type()) {
base.ErrorfAt(ncase.Pos(), "invalid case %L in switch (incomparable type)", n1)
} else {
op1, _ := assignop(n1.Type(), t)
op2, _ := assignop(t, n1.Type())
if op1 == ir.OXXX && op2 == ir.OXXX {
if n.Left() != nil {
base.ErrorfAt(ncase.Pos(), "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Left(), n1.Type(), t)
} else {
base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type())
}
}
}
// Don't check for duplicate bools. Although the spec allows it,
// (1) the compiler hasn't checked it in the past, so compatibility mandates it, and
// (2) it would disallow useful things like
// case GOARCH == "arm" && GOARM == "5":
// case GOARCH == "arm":
// which would both evaluate to false for non-ARM compiles.
if !n1.Type().IsBoolean() {
cs.add(ncase.Pos(), n1, "case", "switch")
}
}
typecheckslice(ncase.Body().Slice(), ctxStmt)
}
}
// walkswitch walks a switch statement.
func walkswitch(sw *ir.SwitchStmt) {
// Guard against double walk, see #25776.
if sw.List().Len() == 0 && sw.Body().Len() > 0 {
return // Was fatal, but eliminating every possible source of double-walking is hard
}
if sw.Left() != nil && sw.Left().Op() == ir.OTYPESW {
walkTypeSwitch(sw)
} else {
walkExprSwitch(sw)
}
}
// walkExprSwitch generates an AST implementing sw. sw is an
// expression switch.
func walkExprSwitch(sw *ir.SwitchStmt) {
lno := setlineno(sw)
cond := sw.Left()
sw.SetLeft(nil)
// convert switch {...} to switch true {...}
if cond == nil {
cond = nodbool(true)
cond = typecheck(cond, ctxExpr)
cond = defaultlit(cond, nil)
}
// Given "switch string(byteslice)",
// with all cases being side-effect free,
// use a zero-cost alias of the byte slice.
// Do this before calling walkexpr on cond,
// because walkexpr will lower the string
// conversion into a runtime call.
// See issue 24937 for more discussion.
if cond.Op() == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
cond.SetOp(ir.OBYTES2STRTMP)
}
cond = walkexpr(cond, sw.PtrInit())
if cond.Op() != ir.OLITERAL && cond.Op() != ir.ONIL {
cond = copyexpr(cond, cond.Type(), sw.PtrBody())
}
base.Pos = lno
s := exprSwitch{
exprname: cond,
}
var defaultGoto ir.Node
var body ir.Nodes
for _, ncase := range sw.List().Slice() {
ncase := ncase.(*ir.CaseStmt)
label := autolabel(".s")
jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label))
// Process case dispatch.
if ncase.List().Len() == 0 {
if defaultGoto != nil {
base.Fatalf("duplicate default case not detected during typechecking")
}
defaultGoto = jmp
}
for _, n1 := range ncase.List().Slice() {
s.Add(ncase.Pos(), n1, jmp)
}
// Process body.
body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label)))
body.Append(ncase.Body().Slice()...)
if fall, pos := endsInFallthrough(ncase.Body().Slice()); !fall {
br := ir.Nod(ir.OBREAK, nil, nil)
br.SetPos(pos)
body.Append(br)
}
}
sw.PtrList().Set(nil)
if defaultGoto == nil {
br := ir.Nod(ir.OBREAK, nil, nil)
br.SetPos(br.Pos().WithNotStmt())
defaultGoto = br
}
s.Emit(sw.PtrBody())
sw.PtrBody().Append(defaultGoto)
sw.PtrBody().AppendNodes(&body)
walkstmtlist(sw.Body().Slice())
}
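
The OBYTES2STRTMP rewrite means a []byte-to-string conversion in the switched expression needs no allocation as long as every case is a constant:

package main

func classify(b []byte) int {
	switch string(b) { // no copy of b: all cases are constants (issue 24937)
	case "get":
		return 1
	case "put":
		return 2
	}
	return 0
}

func main() {
	println(classify([]byte("get"))) // 1
}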
// An exprSwitch walks an expression switch.
type exprSwitch struct {
exprname ir.Node // value being switched on
done ir.Nodes
clauses []exprClause
}
type exprClause struct {
pos src.XPos
lo, hi ir.Node
jmp ir.Node
}
func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) {
c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp}
if okforcmp[s.exprname.Type().Kind()] && expr.Op() == ir.OLITERAL {
s.clauses = append(s.clauses, c)
return
}
s.flush()
s.clauses = append(s.clauses, c)
s.flush()
}
func (s *exprSwitch) Emit(out *ir.Nodes) {
s.flush()
out.AppendNodes(&s.done)
}
func (s *exprSwitch) flush() {
cc := s.clauses
s.clauses = nil
if len(cc) == 0 {
return
}
// Caution: If len(cc) == 1, then cc[0] might not be an OLITERAL.
// The code below is structured to implicitly handle this case
// (e.g., sort.Slice doesn't need to invoke the less function
// when there's only a single slice element).
if s.exprname.Type().IsString() && len(cc) >= 2 {
// Sort strings by length and then by value. It is
// much cheaper to compare lengths than values, and
// all we need here is consistency. We respect this
// sorting below.
sort.Slice(cc, func(i, j int) bool {
si := ir.StringVal(cc[i].lo)
sj := ir.StringVal(cc[j].lo)
if len(si) != len(sj) {
return len(si) < len(sj)
}
return si < sj
})
// runLen returns the string length associated with a
// particular run of exprClauses.
runLen := func(run []exprClause) int64 { return int64(len(ir.StringVal(run[0].lo))) }
// Collapse runs of consecutive strings with the same length.
var runs [][]exprClause
start := 0
for i := 1; i < len(cc); i++ {
if runLen(cc[start:]) != runLen(cc[i:]) {
runs = append(runs, cc[start:i])
start = i
}
}
runs = append(runs, cc[start:])
// Perform two-level binary search.
binarySearch(len(runs), &s.done,
func(i int) ir.Node {
return ir.Nod(ir.OLE, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(runs[i-1])))
},
func(i int, nif *ir.IfStmt) {
run := runs[i]
nif.SetLeft(ir.Nod(ir.OEQ, ir.Nod(ir.OLEN, s.exprname, nil), nodintconst(runLen(run))))
s.search(run, nif.PtrBody())
},
)
return
}
sort.Slice(cc, func(i, j int) bool {
return constant.Compare(cc[i].lo.Val(), token.LSS, cc[j].lo.Val())
})
// Merge consecutive integer cases.
if s.exprname.Type().IsInteger() {
merged := cc[:1]
for _, c := range cc[1:] {
last := &merged[len(merged)-1]
if last.jmp == c.jmp && ir.Int64Val(last.hi)+1 == ir.Int64Val(c.lo) {
last.hi = c.lo
} else {
merged = append(merged, c)
}
}
cc = merged
}
s.search(cc, &s.done)
}
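
The integer merge turns a run of consecutive values sharing one target into a single range clause; for a hypothetical input like the following, cases 1 through 4 are tested as 1 <= x && x <= 4:

package main

func kind(x int) string {
	switch x {
	case 1, 2, 3, 4: // consecutive values, one body: merged into a range test
		return "small"
	case 10:
		return "ten"
	}
	return "other"
}

func main() {
	println(kind(3), kind(10), kind(7)) // small ten other
}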
func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) {
binarySearch(len(cc), out,
func(i int) ir.Node {
return ir.Nod(ir.OLE, s.exprname, cc[i-1].hi)
},
func(i int, nif *ir.IfStmt) {
c := &cc[i]
nif.SetLeft(c.test(s.exprname))
nif.PtrBody().Set1(c.jmp)
},
)
}
func (c *exprClause) test(exprname ir.Node) ir.Node {
// Integer range.
if c.hi != c.lo {
low := ir.NodAt(c.pos, ir.OGE, exprname, c.lo)
high := ir.NodAt(c.pos, ir.OLE, exprname, c.hi)
return ir.NodAt(c.pos, ir.OANDAND, low, high)
}
// Optimize "switch true { ...}" and "switch false { ... }".
if ir.IsConst(exprname, constant.Bool) && !c.lo.Type().IsInterface() {
if ir.BoolVal(exprname) {
return c.lo
} else {
return ir.NodAt(c.pos, ir.ONOT, c.lo, nil)
}
}
return ir.NodAt(c.pos, ir.OEQ, exprname, c.lo)
}
func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool {
// In theory, we could be more aggressive, allowing any
// side-effect-free expressions in cases, but it's a bit
// tricky because some of that information is unavailable due
// to the introduction of temporaries during order.
// Restricting to constants is simple and probably powerful
// enough.
for _, ncase := range sw.List().Slice() {
ncase := ncase.(*ir.CaseStmt)
for _, v := range ncase.List().Slice() {
if v.Op() != ir.OLITERAL {
return false
}
}
}
return true
}
// endsInFallthrough reports whether stmts ends with a "fallthrough" statement.
func endsInFallthrough(stmts []ir.Node) (bool, src.XPos) {
// Search backwards for the index of the fallthrough
// statement. Do not assume it'll be in the last
// position, since in some cases (e.g. when the statement
// list contains autotmp_ variables), one or more OVARKILL
// nodes will be at the end of the list.
i := len(stmts) - 1
for i >= 0 && stmts[i].Op() == ir.OVARKILL {
i--
}
if i < 0 {
return false, src.NoXPos
}
return stmts[i].Op() == ir.OFALL, stmts[i].Pos()
}
// walkTypeSwitch generates an AST that implements sw, where sw is a
// type switch.
func walkTypeSwitch(sw *ir.SwitchStmt) {
var s typeSwitch
s.facename = sw.Left().(*ir.TypeSwitchGuard).Right()
sw.SetLeft(nil)
s.facename = walkexpr(s.facename, sw.PtrInit())
s.facename = copyexpr(s.facename, s.facename.Type(), sw.PtrBody())
s.okname = temp(types.Types[types.TBOOL])
// Get interface descriptor word.
// For empty interfaces this will be the type.
// For non-empty interfaces this will be the itab.
itab := ir.Nod(ir.OITAB, s.facename, nil)
// For empty interfaces, do:
// if e._type == nil {
// do nil case if it exists, otherwise default
// }
// h := e._type.hash
// Use a similar strategy for non-empty interfaces.
ifNil := ir.Nod(ir.OIF, nil, nil)
ifNil.SetLeft(ir.Nod(ir.OEQ, itab, nodnil()))
base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check.
ifNil.SetLeft(typecheck(ifNil.Left(), ctxExpr))
ifNil.SetLeft(defaultlit(ifNil.Left(), nil))
// ifNil.Nbody assigned at end.
sw.PtrBody().Append(ifNil)
// Load hash from type or itab.
dotHash := nodSym(ir.ODOTPTR, itab, nil)
dotHash.SetType(types.Types[types.TUINT32])
dotHash.SetTypecheck(1)
if s.facename.Type().IsEmptyInterface() {
dotHash.SetOffset(int64(2 * Widthptr)) // offset of hash in runtime._type
} else {
dotHash.SetOffset(int64(2 * Widthptr)) // offset of hash in runtime.itab
}
dotHash.SetBounded(true) // guaranteed not to fault
s.hashname = copyexpr(dotHash, dotHash.Type(), sw.PtrBody())
br := ir.Nod(ir.OBREAK, nil, nil)
var defaultGoto, nilGoto ir.Node
var body ir.Nodes
for _, ncase := range sw.List().Slice() {
ncase := ncase.(*ir.CaseStmt)
var caseVar ir.Node
if ncase.Rlist().Len() != 0 {
caseVar = ncase.Rlist().First()
}
// For single-type cases with an interface type,
// we initialize the case variable as part of the type assertion.
// In other cases, we initialize it in the body.
var singleType *types.Type
if ncase.List().Len() == 1 && ncase.List().First().Op() == ir.OTYPE {
singleType = ncase.List().First().Type()
}
caseVarInitialized := false
label := autolabel(".s")
jmp := npos(ncase.Pos(), nodSym(ir.OGOTO, nil, label))
if ncase.List().Len() == 0 { // default:
if defaultGoto != nil {
base.Fatalf("duplicate default case not detected during typechecking")
}
defaultGoto = jmp
}
for _, n1 := range ncase.List().Slice() {
if ir.IsNil(n1) { // case nil:
if nilGoto != nil {
base.Fatalf("duplicate nil case not detected during typechecking")
}
nilGoto = jmp
continue
}
if singleType != nil && singleType.IsInterface() {
s.Add(ncase.Pos(), n1.Type(), caseVar, jmp)
caseVarInitialized = true
} else {
s.Add(ncase.Pos(), n1.Type(), nil, jmp)
}
}
body.Append(npos(ncase.Pos(), nodSym(ir.OLABEL, nil, label)))
if caseVar != nil && !caseVarInitialized {
val := s.facename
if singleType != nil {
// We have a single concrete type. Extract the data.
if singleType.IsInterface() {
base.Fatalf("singleType interface should have been handled in Add")
}
val = ifaceData(ncase.Pos(), s.facename, singleType)
}
l := []ir.Node{
ir.NodAt(ncase.Pos(), ir.ODCL, caseVar, nil),
ir.NodAt(ncase.Pos(), ir.OAS, caseVar, val),
}
typecheckslice(l, ctxStmt)
body.Append(l...)
}
body.Append(ncase.Body().Slice()...)
body.Append(br)
}
sw.PtrList().Set(nil)
if defaultGoto == nil {
defaultGoto = br
}
if nilGoto == nil {
nilGoto = defaultGoto
}
ifNil.PtrBody().Set1(nilGoto)
s.Emit(sw.PtrBody())
sw.PtrBody().Append(defaultGoto)
sw.PtrBody().AppendNodes(&body)
walkstmtlist(sw.Body().Slice())
}
// A typeSwitch walks a type switch.
type typeSwitch struct {
// Temporary variables (i.e., ONAMEs) used by type switch dispatch logic:
facename ir.Node // value being type-switched on
hashname ir.Node // type hash of the value being type-switched on
okname ir.Node // boolean used for comma-ok type assertions
done ir.Nodes
clauses []typeClause
}
type typeClause struct {
hash uint32
body ir.Nodes
}
func (s *typeSwitch) Add(pos src.XPos, typ *types.Type, caseVar, jmp ir.Node) {
var body ir.Nodes
if caseVar != nil {
l := []ir.Node{
ir.NodAt(pos, ir.ODCL, caseVar, nil),
ir.NodAt(pos, ir.OAS, caseVar, nil),
}
typecheckslice(l, ctxStmt)
body.Append(l...)
} else {
caseVar = ir.BlankNode
}
// cv, ok = iface.(type)
as := ir.NodAt(pos, ir.OAS2, nil, nil)
as.PtrList().Set2(caseVar, s.okname) // cv, ok =
dot := ir.NodAt(pos, ir.ODOTTYPE, s.facename, nil)
dot.SetType(typ) // iface.(type)
as.PtrRlist().Set1(dot)
appendWalkStmt(&body, as)
// if ok { goto label }
nif := ir.NodAt(pos, ir.OIF, nil, nil)
nif.SetLeft(s.okname)
nif.PtrBody().Set1(jmp)
body.Append(nif)
if !typ.IsInterface() {
s.clauses = append(s.clauses, typeClause{
hash: typehash(typ),
body: body,
})
return
}
s.flush()
s.done.AppendNodes(&body)
}
func (s *typeSwitch) Emit(out *ir.Nodes) {
s.flush()
out.AppendNodes(&s.done)
}
func (s *typeSwitch) flush() {
cc := s.clauses
s.clauses = nil
if len(cc) == 0 {
return
}
sort.Slice(cc, func(i, j int) bool { return cc[i].hash < cc[j].hash })
// Combine adjacent cases with the same hash.
merged := cc[:1]
for _, c := range cc[1:] {
last := &merged[len(merged)-1]
if last.hash == c.hash {
last.body.AppendNodes(&c.body)
} else {
merged = append(merged, c)
}
}
cc = merged
binarySearch(len(cc), &s.done,
func(i int) ir.Node {
return ir.Nod(ir.OLE, s.hashname, nodintconst(int64(cc[i-1].hash)))
},
func(i int, nif *ir.IfStmt) {
// TODO(mdempsky): Omit hash equality check if
// there's only one type.
c := cc[i]
nif.SetLeft(ir.Nod(ir.OEQ, s.hashname, nodintconst(int64(c.hash))))
nif.PtrBody().AppendNodes(&c.body)
},
)
}
// binarySearch constructs a binary search tree for handling n cases,
// and appends it to out. It's used for efficiently implementing
// switch statements.
//
// less(i) should return a boolean expression. If it evaluates true,
// then cases before i will be tested; otherwise, cases i and later.
//
// leaf(i, nif) should set up nif (an OIF node) to test case i. In
// particular, it should set nif.Left and nif.Nbody.
func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif *ir.IfStmt)) {
const binarySearchMin = 4 // minimum number of cases for binary search
var do func(lo, hi int, out *ir.Nodes)
do = func(lo, hi int, out *ir.Nodes) {
n := hi - lo
if n < binarySearchMin {
for i := lo; i < hi; i++ {
nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
leaf(i, nif)
base.Pos = base.Pos.WithNotStmt()
nif.SetLeft(typecheck(nif.Left(), ctxExpr))
nif.SetLeft(defaultlit(nif.Left(), nil))
out.Append(nif)
out = nif.PtrRlist()
}
return
}
half := lo + n/2
nif := ir.Nod(ir.OIF, nil, nil)
nif.SetLeft(less(half))
base.Pos = base.Pos.WithNotStmt()
nif.SetLeft(typecheck(nif.Left(), ctxExpr))
nif.SetLeft(defaultlit(nif.Left(), nil))
do(lo, half, nif.PtrBody())
do(half, hi, nif.PtrRlist())
out.Append(nif)
}
do(0, n, out)
}
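
A simplified hand-written analogue of the tree binarySearch emits for eight sorted cases. The real emitter keeps halving until fewer than binarySearchMin (4) cases remain and chains the leaf tests through else branches, but the shape and cost are the same:

package main

func dispatch(x int) int {
	if x <= 4 {
		if x <= 2 {
			if x == 1 { return 1 }
			if x == 2 { return 2 }
		} else {
			if x == 3 { return 3 }
			if x == 4 { return 4 }
		}
	} else {
		if x <= 6 {
			if x == 5 { return 5 }
			if x == 6 { return 6 }
		} else {
			if x == 7 { return 7 }
			if x == 8 { return 8 }
		}
	}
	return 0 // default
}

func main() {
	println(dispatch(6)) // four comparisons instead of a six-test linear scan
}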

File diff suppressed because it is too large


@ -1,8 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements conversions between *types.Node and *Node.
// TODO(gri) try to eliminate these soon
package gc


@ -1,85 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
)
// evalunsafe evaluates a package unsafe operation and returns the result.
func evalunsafe(n ir.Node) int64 {
switch n.Op() {
case ir.OALIGNOF, ir.OSIZEOF:
n.SetLeft(typecheck(n.Left(), ctxExpr))
n.SetLeft(defaultlit(n.Left(), nil))
tr := n.Left().Type()
if tr == nil {
return 0
}
dowidth(tr)
if n.Op() == ir.OALIGNOF {
return int64(tr.Align)
}
return tr.Width
case ir.OOFFSETOF:
// must be a selector.
if n.Left().Op() != ir.OXDOT {
base.Errorf("invalid expression %v", n)
return 0
}
sel := n.Left().(*ir.SelectorExpr)
// Remember base of selector to find it back after dot insertion.
// Since r->left may be mutated by typechecking, check it explicitly
// first to track it correctly.
sel.SetLeft(typecheck(sel.Left(), ctxExpr))
sbase := sel.Left()
tsel := typecheck(sel, ctxExpr)
n.SetLeft(tsel)
if tsel.Type() == nil {
return 0
}
switch tsel.Op() {
case ir.ODOT, ir.ODOTPTR:
break
case ir.OCALLPART:
base.Errorf("invalid expression %v: argument is a method value", n)
return 0
default:
base.Errorf("invalid expression %v", n)
return 0
}
// Sum offsets for dots until we reach sbase.
var v int64
var next ir.Node
for r := tsel; r != sbase; r = next {
switch r.Op() {
case ir.ODOTPTR:
// For Offsetof(s.f), s may itself be a pointer,
// but accessing f must not otherwise involve
// indirection via embedded pointer types.
if r.Left() != sbase {
base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.Left())
return 0
}
fallthrough
case ir.ODOT:
v += r.Offset()
next = r.Left()
default:
ir.Dump("unsafenmagic", tsel)
base.Fatalf("impossible %v node after dot insertion", r.Op())
}
}
return v
}
base.Fatalf("unexpected op %v", n.Op())
return 0
}
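
The offset summation and the embedded-pointer restriction above are both visible from package unsafe; a small example (the type names are illustrative):

package main

import (
	"fmt"
	"unsafe"
)

type inner struct{ f int64 }

type byValue struct {
	pad int32
	inner // embedded by value: offsets along the dot chain are summed
}

type byPointer struct {
	pad int32
	*inner // embedded by pointer
}

func main() {
	var v byValue
	fmt.Println(unsafe.Offsetof(v.f)) // 8: inner is 8-aligned after the int32
	// var p byPointer
	// unsafe.Offsetof(p.f) // compile error: selector implies indirection
	//                      // of embedded *inner (the ODOTPTR check above)
}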

File diff suppressed because it is too large


@ -0,0 +1,26 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
var (
// maximum size of a variable which we will allocate on the stack.
// This limit is for explicit variable declarations like "var x T" or "x := ...".
// Note: the flag smallframes can update this value.
MaxStackVarSize = int64(10 * 1024 * 1024)
// maximum size of implicit variables that we will allocate on the stack.
// p := new(T) allocating T on the stack
// p := &T{} allocating T on the stack
// s := make([]T, n) allocating [n]T on the stack
// s := []byte("...") allocating [n]byte on the stack
// Note: the flag smallframes can update this value.
MaxImplicitStackVarSize = int64(64 * 1024)
// MaxSmallArraySize is the maximum size of an array which is considered small.
// Small arrays will be initialized directly with a sequence of constant stores.
// Large arrays will be initialized by copying from a static temp.
// 256 bytes was chosen to minimize generated code + statictmp size.
MaxSmallArraySize = int64(256)
)
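
The MaxSmallArraySize threshold shows up in generated code roughly as follows (a sketch; sizes assume 8-byte ints, and the exact lowering depends on the literal):

package main

func small() [4]int {
	return [4]int{1, 2, 3, 4} // 32 bytes: initialized with constant stores
}

func large() [64]int {
	return [64]int{63: 1} // 512 bytes: copied from a static temp
}

func main() {
	_ = small()
	_ = large()
}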


@ -0,0 +1,99 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
import (
"go/constant"
"math"
"math/big"
"cmd/compile/internal/base"
"cmd/compile/internal/types"
)
func NewBool(b bool) Node {
return NewLiteral(constant.MakeBool(b))
}
func NewInt(v int64) Node {
return NewLiteral(constant.MakeInt64(v))
}
func NewString(s string) Node {
return NewLiteral(constant.MakeString(s))
}
const (
// Maximum size in bits for big.Ints before signalling
// overflow and also mantissa precision for big.Floats.
ConstPrec = 512
)
func BigFloat(v constant.Value) *big.Float {
f := new(big.Float)
f.SetPrec(ConstPrec)
switch u := constant.Val(v).(type) {
case int64:
f.SetInt64(u)
case *big.Int:
f.SetInt(u)
case *big.Float:
f.Set(u)
case *big.Rat:
f.SetRat(u)
default:
base.Fatalf("unexpected: %v", u)
}
return f
}
// ConstOverflow reports whether constant value v is too large
// to represent with type t.
func ConstOverflow(v constant.Value, t *types.Type) bool {
switch {
case t.IsInteger():
bits := uint(8 * t.Size())
if t.IsUnsigned() {
x, ok := constant.Uint64Val(v)
return !ok || x>>bits != 0
}
x, ok := constant.Int64Val(v)
if x < 0 {
x = ^x
}
return !ok || x>>(bits-1) != 0
case t.IsFloat():
switch t.Size() {
case 4:
f, _ := constant.Float32Val(v)
return math.IsInf(float64(f), 0)
case 8:
f, _ := constant.Float64Val(v)
return math.IsInf(f, 0)
}
case t.IsComplex():
ft := types.FloatForComplex(t)
return ConstOverflow(constant.Real(v), ft) || ConstOverflow(constant.Imag(v), ft)
}
base.Fatalf("doesoverflow: %v, %v", v, t)
panic("unreachable")
}
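
The signed branch above folds the negative range into the positive one with a bit complement; the same check, extracted and worked through for int8 (bits = 8):

package main

import "fmt"

// overflowsSigned mirrors ConstOverflow's signed-integer branch for values
// that fit in an int64.
func overflowsSigned(x int64, bits uint) bool {
	if x < 0 {
		x = ^x // maps -128 to 127, -129 to 128
	}
	return x>>(bits-1) != 0
}

func main() {
	fmt.Println(overflowsSigned(127, 8))  // false
	fmt.Println(overflowsSigned(128, 8))  // true
	fmt.Println(overflowsSigned(-128, 8)) // false: ^(-128) == 127
	fmt.Println(overflowsSigned(-129, 8)) // true:  ^(-129) == 128
}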
// IsConstNode reports whether n is a Go language constant (as opposed to a
// compile-time constant).
//
// Expressions derived from nil, like string([]byte(nil)), while they
// may be known at compile time, are not Go language constants.
func IsConstNode(n Node) bool {
return n.Op() == OLITERAL
}
func IsSmallIntConst(n Node) bool {
if n.Op() == OLITERAL {
v, ok := constant.Int64Val(n.Val())
return ok && int64(int32(v)) == v
}
return false
}


@ -222,7 +222,7 @@ func (p *dumper) dump(x reflect.Value, depth int) {
omitted = true
continue // exclude zero-valued fields
}
if n, ok := x.Interface().(Nodes); ok && n.Len() == 0 {
if n, ok := x.Interface().(Nodes); ok && len(n) == 0 {
omitted = true
continue // exclude empty Nodes slices
}


@ -5,10 +5,13 @@
package ir
import (
"bytes"
"cmd/compile/internal/base"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
"go/constant"
"go/token"
)
func maybeDo(x Node, err error, do func(Node) error) error {
@ -89,7 +92,7 @@ func toNtype(x Node) Ntype {
// An AddStringExpr is a string concatenation Expr[0] + Exprs[1] + ... + Expr[len(Expr)-1].
type AddStringExpr struct {
miniExpr
List_ Nodes
List Nodes
Prealloc *Name
}
@ -97,14 +100,10 @@ func NewAddStringExpr(pos src.XPos, list []Node) *AddStringExpr {
n := &AddStringExpr{}
n.pos = pos
n.op = OADDSTR
n.List_.Set(list)
n.List.Set(list)
return n
}
func (n *AddStringExpr) List() Nodes { return n.List_ }
func (n *AddStringExpr) PtrList() *Nodes { return &n.List_ }
func (n *AddStringExpr) SetList(x Nodes) { n.List_ = x }
// An AddrExpr is an address-of expression &X.
// It may end up being a normal address-of or an allocation of a composite literal.
type AddrExpr struct {
@ -120,10 +119,6 @@ func NewAddrExpr(pos src.XPos, x Node) *AddrExpr {
return n
}
func (n *AddrExpr) Left() Node { return n.X }
func (n *AddrExpr) SetLeft(x Node) { n.X = x }
func (n *AddrExpr) Right() Node { return n.Alloc }
func (n *AddrExpr) SetRight(x Node) { n.Alloc = x }
func (n *AddrExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *AddrExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
@ -170,11 +165,6 @@ func NewBinaryExpr(pos src.XPos, op Op, x, y Node) *BinaryExpr {
return n
}
func (n *BinaryExpr) Left() Node { return n.X }
func (n *BinaryExpr) SetLeft(x Node) { n.X = x }
func (n *BinaryExpr) Right() Node { return n.Y }
func (n *BinaryExpr) SetRight(y Node) { n.Y = y }
func (n *BinaryExpr) SetOp(op Op) {
switch op {
default:
@ -205,10 +195,10 @@ type CallExpr struct {
X Node
Args Nodes
Rargs Nodes // TODO(rsc): Delete.
Body_ Nodes // TODO(rsc): Delete.
DDD bool
Body Nodes // TODO(rsc): Delete.
IsDDD bool
Use CallUse
NoInline_ bool
NoInline bool
}
func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr {
@ -224,21 +214,6 @@ func (*CallExpr) isStmt() {}
func (n *CallExpr) Orig() Node { return n.orig }
func (n *CallExpr) SetOrig(x Node) { n.orig = x }
func (n *CallExpr) Left() Node { return n.X }
func (n *CallExpr) SetLeft(x Node) { n.X = x }
func (n *CallExpr) List() Nodes { return n.Args }
func (n *CallExpr) PtrList() *Nodes { return &n.Args }
func (n *CallExpr) SetList(x Nodes) { n.Args = x }
func (n *CallExpr) Rlist() Nodes { return n.Rargs }
func (n *CallExpr) PtrRlist() *Nodes { return &n.Rargs }
func (n *CallExpr) SetRlist(x Nodes) { n.Rargs = x }
func (n *CallExpr) IsDDD() bool { return n.DDD }
func (n *CallExpr) SetIsDDD(x bool) { n.DDD = x }
func (n *CallExpr) NoInline() bool { return n.NoInline_ }
func (n *CallExpr) SetNoInline(x bool) { n.NoInline_ = x }
func (n *CallExpr) Body() Nodes { return n.Body_ }
func (n *CallExpr) PtrBody() *Nodes { return &n.Body_ }
func (n *CallExpr) SetBody(x Nodes) { n.Body_ = x }
func (n *CallExpr) SetOp(op Op) {
switch op {
@ -253,65 +228,57 @@ func (n *CallExpr) SetOp(op Op) {
// A CallPartExpr is a method expression X.Method (uncalled).
type CallPartExpr struct {
miniExpr
Func_ *Func
Func *Func
X Node
Method *types.Field
Prealloc *Name
}
func NewCallPartExpr(pos src.XPos, x Node, method *types.Field, fn *Func) *CallPartExpr {
n := &CallPartExpr{Func_: fn, X: x, Method: method}
n := &CallPartExpr{Func: fn, X: x, Method: method}
n.op = OCALLPART
n.pos = pos
n.typ = fn.Type()
n.Func_ = fn
n.Func = fn
return n
}
func (n *CallPartExpr) Func() *Func { return n.Func_ }
func (n *CallPartExpr) Left() Node { return n.X }
func (n *CallPartExpr) Sym() *types.Sym { return n.Method.Sym }
func (n *CallPartExpr) SetLeft(x Node) { n.X = x }
// A ClosureExpr is a function literal expression.
type ClosureExpr struct {
miniExpr
Func_ *Func
Func *Func
Prealloc *Name
}
func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr {
n := &ClosureExpr{Func_: fn}
n := &ClosureExpr{Func: fn}
n.op = OCLOSURE
n.pos = pos
return n
}
func (n *ClosureExpr) Func() *Func { return n.Func_ }
// A ClosureRead denotes reading a variable stored within a closure struct.
type ClosureReadExpr struct {
miniExpr
Offset_ int64
Offset int64
}
func NewClosureRead(typ *types.Type, offset int64) *ClosureReadExpr {
n := &ClosureReadExpr{Offset_: offset}
n := &ClosureReadExpr{Offset: offset}
n.typ = typ
n.op = OCLOSUREREAD
return n
}
func (n *ClosureReadExpr) Type() *types.Type { return n.typ }
func (n *ClosureReadExpr) Offset() int64 { return n.Offset_ }
// A CompLitExpr is a composite literal Type{Vals}.
// Before type-checking, the type is Ntype.
type CompLitExpr struct {
miniExpr
orig Node
Ntype Ntype
List_ Nodes // initialized values
List Nodes // initialized values
Prealloc *Name
Len int64 // backing array length for OSLICELIT
}
@ -320,18 +287,13 @@ func NewCompLitExpr(pos src.XPos, op Op, typ Ntype, list []Node) *CompLitExpr {
n := &CompLitExpr{Ntype: typ}
n.pos = pos
n.SetOp(op)
n.List_.Set(list)
n.List.Set(list)
n.orig = n
return n
}
func (n *CompLitExpr) Orig() Node { return n.orig }
func (n *CompLitExpr) SetOrig(x Node) { n.orig = x }
func (n *CompLitExpr) Right() Node { return n.Ntype }
func (n *CompLitExpr) SetRight(x Node) { n.Ntype = toNtype(x) }
func (n *CompLitExpr) List() Nodes { return n.List_ }
func (n *CompLitExpr) PtrList() *Nodes { return &n.List_ }
func (n *CompLitExpr) SetList(x Nodes) { n.List_ = x }
func (n *CompLitExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *CompLitExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
@ -380,8 +342,6 @@ func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr {
return n
}
func (n *ConvExpr) Left() Node { return n.X }
func (n *ConvExpr) SetLeft(x Node) { n.X = x }
func (n *ConvExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *ConvExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
@ -409,13 +369,6 @@ func NewIndexExpr(pos src.XPos, x, index Node) *IndexExpr {
return n
}
func (n *IndexExpr) Left() Node { return n.X }
func (n *IndexExpr) SetLeft(x Node) { n.X = x }
func (n *IndexExpr) Right() Node { return n.Index }
func (n *IndexExpr) SetRight(y Node) { n.Index = y }
func (n *IndexExpr) IndexMapLValue() bool { return n.Assigned }
func (n *IndexExpr) SetIndexMapLValue(x bool) { n.Assigned = x }
func (n *IndexExpr) SetOp(op Op) {
switch op {
default:
@ -439,38 +392,28 @@ func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr {
return n
}
func (n *KeyExpr) Left() Node { return n.Key }
func (n *KeyExpr) SetLeft(x Node) { n.Key = x }
func (n *KeyExpr) Right() Node { return n.Value }
func (n *KeyExpr) SetRight(y Node) { n.Value = y }
// A StructKeyExpr is a Field: Value composite literal key.
type StructKeyExpr struct {
miniExpr
Field *types.Sym
Value Node
Offset_ int64
Offset int64
}
func NewStructKeyExpr(pos src.XPos, field *types.Sym, value Node) *StructKeyExpr {
n := &StructKeyExpr{Field: field, Value: value}
n.pos = pos
n.op = OSTRUCTKEY
n.Offset_ = types.BADWIDTH
n.Offset = types.BADWIDTH
return n
}
func (n *StructKeyExpr) Sym() *types.Sym { return n.Field }
func (n *StructKeyExpr) SetSym(x *types.Sym) { n.Field = x }
func (n *StructKeyExpr) Left() Node { return n.Value }
func (n *StructKeyExpr) SetLeft(x Node) { n.Value = x }
func (n *StructKeyExpr) Offset() int64 { return n.Offset_ }
func (n *StructKeyExpr) SetOffset(x int64) { n.Offset_ = x }
// An InlinedCallExpr is an inlined function call.
type InlinedCallExpr struct {
miniExpr
Body_ Nodes
Body Nodes
ReturnVars Nodes
}
@ -478,18 +421,11 @@ func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr {
n := &InlinedCallExpr{}
n.pos = pos
n.op = OINLCALL
n.Body_.Set(body)
n.Body.Set(body)
n.ReturnVars.Set(retvars)
return n
}
func (n *InlinedCallExpr) Body() Nodes { return n.Body_ }
func (n *InlinedCallExpr) PtrBody() *Nodes { return &n.Body_ }
func (n *InlinedCallExpr) SetBody(x Nodes) { n.Body_ = x }
func (n *InlinedCallExpr) Rlist() Nodes { return n.ReturnVars }
func (n *InlinedCallExpr) PtrRlist() *Nodes { return &n.ReturnVars }
func (n *InlinedCallExpr) SetRlist(x Nodes) { n.ReturnVars = x }
// A LogicalExpr is an expression X Op Y where Op is && or ||.
// It is separate from BinaryExpr to make room for statements
// that must be executed before Y but after X.
@ -506,11 +442,6 @@ func NewLogicalExpr(pos src.XPos, op Op, x, y Node) *LogicalExpr {
return n
}
func (n *LogicalExpr) Left() Node { return n.X }
func (n *LogicalExpr) SetLeft(x Node) { n.X = x }
func (n *LogicalExpr) Right() Node { return n.Y }
func (n *LogicalExpr) SetRight(y Node) { n.Y = y }
func (n *LogicalExpr) SetOp(op Op) {
switch op {
default:
@ -536,11 +467,6 @@ func NewMakeExpr(pos src.XPos, op Op, len, cap Node) *MakeExpr {
return n
}
func (n *MakeExpr) Left() Node { return n.Len }
func (n *MakeExpr) SetLeft(x Node) { n.Len = x }
func (n *MakeExpr) Right() Node { return n.Cap }
func (n *MakeExpr) SetRight(x Node) { n.Cap = x }
func (n *MakeExpr) SetOp(op Op) {
switch op {
default:
@ -566,15 +492,7 @@ func NewMethodExpr(pos src.XPos, t *types.Type, method *types.Field) *MethodExpr
}
func (n *MethodExpr) FuncName() *Name { return n.FuncName_ }
func (n *MethodExpr) Left() Node { panic("MethodExpr.Left") }
func (n *MethodExpr) SetLeft(x Node) { panic("MethodExpr.SetLeft") }
func (n *MethodExpr) Right() Node { panic("MethodExpr.Right") }
func (n *MethodExpr) SetRight(x Node) { panic("MethodExpr.SetRight") }
func (n *MethodExpr) Sym() *types.Sym { panic("MethodExpr.Sym") }
func (n *MethodExpr) Offset() int64 { panic("MethodExpr.Offset") }
func (n *MethodExpr) SetOffset(x int64) { panic("MethodExpr.SetOffset") }
func (n *MethodExpr) Class() Class { panic("MethodExpr.Class") }
func (n *MethodExpr) SetClass(x Class) { panic("MethodExpr.SetClass") }
// A NilExpr represents the predefined untyped constant nil.
// (It may be copied and assigned a type, though.)
@ -607,8 +525,6 @@ func NewParenExpr(pos src.XPos, x Node) *ParenExpr {
return n
}
func (n *ParenExpr) Left() Node { return n.X }
func (n *ParenExpr) SetLeft(x Node) { n.X = x }
func (n *ParenExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *ParenExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
@ -625,20 +541,17 @@ func (n *ParenExpr) SetOTYPE(t *types.Type) {
// A ResultExpr represents a direct access to a result slot on the stack frame.
type ResultExpr struct {
miniExpr
Offset_ int64
Offset int64
}
func NewResultExpr(pos src.XPos, typ *types.Type, offset int64) *ResultExpr {
n := &ResultExpr{Offset_: offset}
n := &ResultExpr{Offset: offset}
n.pos = pos
n.op = ORESULT
n.typ = typ
return n
}
func (n *ResultExpr) Offset() int64 { return n.Offset_ }
func (n *ResultExpr) SetOffset(x int64) { n.Offset_ = x }
// A NameOffsetExpr refers to an offset within a variable.
// It is like a SelectorExpr but without the field name.
type NameOffsetExpr struct {
@ -659,14 +572,14 @@ type SelectorExpr struct {
miniExpr
X Node
Sel *types.Sym
Offset_ int64
Offset int64
Selection *types.Field
}
func NewSelectorExpr(pos src.XPos, op Op, x Node, sel *types.Sym) *SelectorExpr {
n := &SelectorExpr{X: x, Sel: sel}
n.pos = pos
n.Offset_ = types.BADWIDTH
n.Offset = types.BADWIDTH
n.SetOp(op)
return n
}
@ -680,12 +593,7 @@ func (n *SelectorExpr) SetOp(op Op) {
}
}
func (n *SelectorExpr) Left() Node { return n.X }
func (n *SelectorExpr) SetLeft(x Node) { n.X = x }
func (n *SelectorExpr) Sym() *types.Sym { return n.Sel }
func (n *SelectorExpr) SetSym(x *types.Sym) { n.Sel = x }
func (n *SelectorExpr) Offset() int64 { return n.Offset_ }
func (n *SelectorExpr) SetOffset(x int64) { n.Offset_ = x }
func (n *SelectorExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *SelectorExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
@ -697,7 +605,7 @@ func (*SelectorExpr) CanBeNtype() {}
type SliceExpr struct {
miniExpr
X Node
List_ Nodes // TODO(rsc): Use separate Nodes
List Nodes // TODO(rsc): Use separate Nodes
}
func NewSliceExpr(pos src.XPos, op Op, x Node) *SliceExpr {
@ -707,12 +615,6 @@ func NewSliceExpr(pos src.XPos, op Op, x Node) *SliceExpr {
return n
}
func (n *SliceExpr) Left() Node { return n.X }
func (n *SliceExpr) SetLeft(x Node) { n.X = x }
func (n *SliceExpr) List() Nodes { return n.List_ }
func (n *SliceExpr) PtrList() *Nodes { return &n.List_ }
func (n *SliceExpr) SetList(x Nodes) { n.List_ = x }
func (n *SliceExpr) SetOp(op Op) {
switch op {
default:
@ -725,16 +627,16 @@ func (n *SliceExpr) SetOp(op Op) {
// SliceBounds returns n's slice bounds: low, high, and max in expr[low:high:max].
// n must be a slice expression. max is nil if n is a simple slice expression.
func (n *SliceExpr) SliceBounds() (low, high, max Node) {
if n.List_.Len() == 0 {
if len(n.List) == 0 {
return nil, nil, nil
}
switch n.Op() {
case OSLICE, OSLICEARR, OSLICESTR:
s := n.List_.Slice()
s := n.List
return s[0], s[1], nil
case OSLICE3, OSLICE3ARR:
s := n.List_.Slice()
s := n.List
return s[0], s[1], s[2]
}
base.Fatalf("SliceBounds op %v: %v", n.Op(), n)
@ -749,24 +651,24 @@ func (n *SliceExpr) SetSliceBounds(low, high, max Node) {
if max != nil {
base.Fatalf("SetSliceBounds %v given three bounds", n.Op())
}
s := n.List_.Slice()
s := n.List
if s == nil {
if low == nil && high == nil {
return
}
n.List_.Set2(low, high)
n.List = []Node{low, high}
return
}
s[0] = low
s[1] = high
return
case OSLICE3, OSLICE3ARR:
s := n.List_.Slice()
s := n.List
if s == nil {
if low == nil && high == nil && max == nil {
return
}
n.List_.Set3(low, high, max)
n.List = []Node{low, high, max}
return
}
s[0] = low
@ -794,7 +696,7 @@ func (o Op) IsSlice3() bool {
type SliceHeaderExpr struct {
miniExpr
Ptr Node
LenCap_ Nodes // TODO(rsc): Split into two Node fields
LenCap Nodes // TODO(rsc): Split into two Node fields
}
func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *SliceHeaderExpr {
@ -802,16 +704,10 @@ func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *Slic
n.pos = pos
n.op = OSLICEHEADER
n.typ = typ
n.LenCap_.Set2(len, cap)
n.LenCap = []Node{len, cap}
return n
}
func (n *SliceHeaderExpr) Left() Node { return n.Ptr }
func (n *SliceHeaderExpr) SetLeft(x Node) { n.Ptr = x }
func (n *SliceHeaderExpr) List() Nodes { return n.LenCap_ }
func (n *SliceHeaderExpr) PtrList() *Nodes { return &n.LenCap_ }
func (n *SliceHeaderExpr) SetList(x Nodes) { n.LenCap_ = x }
// A StarExpr is a dereference expression *X.
// It may end up being a value or a type.
type StarExpr struct {
@ -826,8 +722,6 @@ func NewStarExpr(pos src.XPos, x Node) *StarExpr {
return n
}
func (n *StarExpr) Left() Node { return n.X }
func (n *StarExpr) SetLeft(x Node) { n.X = x }
func (n *StarExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
func (n *StarExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
@ -858,14 +752,6 @@ func NewTypeAssertExpr(pos src.XPos, x Node, typ Ntype) *TypeAssertExpr {
return n
}
func (n *TypeAssertExpr) Left() Node { return n.X }
func (n *TypeAssertExpr) SetLeft(x Node) { n.X = x }
func (n *TypeAssertExpr) Right() Node { return n.Ntype }
func (n *TypeAssertExpr) SetRight(x Node) { n.Ntype = x } // TODO: toNtype(x)
func (n *TypeAssertExpr) List() Nodes { return n.Itab }
func (n *TypeAssertExpr) PtrList() *Nodes { return &n.Itab }
func (n *TypeAssertExpr) SetList(x Nodes) { n.Itab = x }
func (n *TypeAssertExpr) SetOp(op Op) {
switch op {
default:
@ -889,9 +775,6 @@ func NewUnaryExpr(pos src.XPos, op Op, x Node) *UnaryExpr {
return n
}
func (n *UnaryExpr) Left() Node { return n.X }
func (n *UnaryExpr) SetLeft(x Node) { n.X = x }
func (n *UnaryExpr) SetOp(op Op) {
switch op {
default:
@ -903,3 +786,371 @@ func (n *UnaryExpr) SetOp(op Op) {
n.op = op
}
}
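// IsZero reports whether n is the zero value of its type: the
// predeclared nil, a zero constant, or an array or struct literal
// whose elements are all themselves zero.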
func IsZero(n Node) bool {
switch n.Op() {
case ONIL:
return true
case OLITERAL:
switch u := n.Val(); u.Kind() {
case constant.String:
return constant.StringVal(u) == ""
case constant.Bool:
return !constant.BoolVal(u)
default:
return constant.Sign(u) == 0
}
case OARRAYLIT:
n := n.(*CompLitExpr)
for _, n1 := range n.List {
if n1.Op() == OKEY {
n1 = n1.(*KeyExpr).Value
}
if !IsZero(n1) {
return false
}
}
return true
case OSTRUCTLIT:
n := n.(*CompLitExpr)
for _, n1 := range n.List {
n1 := n1.(*StructKeyExpr)
if !IsZero(n1.Value) {
return false
}
}
return true
}
return false
}
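// A sketch of values IsZero accepts, assuming ordinary constant and
// composite-literal IR:
//
//	nil                      // ONIL
//	"", false, 0, 0.0        // OLITERAL with a zero constant.Value
//	[2]int{}                 // OARRAYLIT with no non-zero elements
//	struct{ X int }{X: 0}    // OSTRUCTLIT whose fields are all zero
//
// [1]int{5} and "x" report false.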
// IsAssignable reports whether n is directly assignable (an lvalue
// that a store can write through); string indexing and PFUNC names
// report false.
func IsAssignable(n Node) bool {
switch n.Op() {
case OINDEX:
n := n.(*IndexExpr)
if n.X.Type() != nil && n.X.Type().IsArray() {
return IsAssignable(n.X)
}
if n.X.Type() != nil && n.X.Type().IsString() {
return false
}
fallthrough
case ODEREF, ODOTPTR, OCLOSUREREAD:
return true
case ODOT:
n := n.(*SelectorExpr)
return IsAssignable(n.X)
case ONAME:
n := n.(*Name)
if n.Class_ == PFUNC {
return false
}
return true
case ONAMEOFFSET:
return true
}
return false
}
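// A sketch of how IsAssignable classifies ordinary typechecked Go
// expressions, assuming the usual IR ops:
//
//	x        // ONAME, PAUTO        -> true
//	*p       // ODEREF              -> true
//	s.f      // ODOT                -> true (if s is)
//	a[i]     // OINDEX on array     -> true (if a is)
//	str[i]   // OINDEX on string    -> false
//	fn       // ONAME, PFUNC        -> false

// StaticValue returns the earliest expression that n is statically
// known to evaluate to: it skips OCONVNOP conversions and, via
// staticValue1 below, follows local variables that are initialized
// once and never reassigned.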
func StaticValue(n Node) Node {
for {
if n.Op() == OCONVNOP {
n = n.(*ConvExpr).X
continue
}
n1 := staticValue1(n)
if n1 == nil {
return n
}
n = n1
}
}
// staticValue1 implements a simple SSA-like optimization. If n is a local variable
// that is initialized and never reassigned, staticValue1 returns the initializer
// expression. Otherwise, it returns nil.
func staticValue1(nn Node) Node {
if nn.Op() != ONAME {
return nil
}
n := nn.(*Name)
if n.Class_ != PAUTO || n.Name().Addrtaken() {
return nil
}
defn := n.Name().Defn
if defn == nil {
return nil
}
var rhs Node
FindRHS:
switch defn.Op() {
case OAS:
defn := defn.(*AssignStmt)
rhs = defn.Y
case OAS2:
defn := defn.(*AssignListStmt)
for i, lhs := range defn.Lhs {
if lhs == n {
rhs = defn.Rhs[i]
break FindRHS
}
}
base.Fatalf("%v missing from LHS of %v", n, defn)
default:
return nil
}
if rhs == nil {
base.Fatalf("RHS is nil: %v", defn)
}
if reassigned(n) {
return nil
}
return rhs
}
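// A sketch of the walk, assuming a local defined once:
//
//	x := 42
//	y := x
//
// StaticValue(y) follows y's definition to x, then x's definition to
// the OLITERAL 42, and returns that literal. If x were assigned again
// anywhere in the function, reassigned(x) would report true and the
// walk would stop, returning x itself.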
// reassigned takes an ONAME node, walks the function in which it is defined, and reports
// whether the name has any assignments other than its declaration.
// NB: global variables are always considered to be re-assigned.
// TODO: handle initial declaration not including an assignment and followed by a single assignment?
func reassigned(name *Name) bool {
if name.Op() != ONAME {
base.Fatalf("reassigned %v", name)
}
// No way to reliably check for no-reassignment of globals; assume they can be reassigned.
if name.Curfn == nil {
return true
}
return Any(name.Curfn, func(n Node) bool {
switch n.Op() {
case OAS:
n := n.(*AssignStmt)
if n.X == name && n != name.Defn {
return true
}
case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV, OSELRECV2:
n := n.(*AssignListStmt)
for _, p := range n.Lhs {
if p == name && n != name.Defn {
return true
}
}
}
return false
})
}
// IsIntrinsicCall reports whether the compiler back end will treat the call as an intrinsic operation.
var IsIntrinsicCall = func(*CallExpr) bool { return false }
// SameSafeExpr checks whether it is safe to reuse one of l and r
// instead of computing both. SameSafeExpr assumes that l and r are
// used in the same statement or expression. In order for it to be
// safe to reuse l or r, they must:
// * be the same expression
// * not have side-effects (no function calls, no channel ops);
// however, panics are ok
// * not cause inappropriate aliasing; e.g. two string-to-[]byte
// conversions must result in two distinct slices
//
// The handling of OINDEXMAP is subtle. OINDEXMAP can occur both
// as an lvalue (map assignment) and an rvalue (map access). This is
// currently OK, since the only place SameSafeExpr gets used on an
// lvalue expression is for OSLICE and OAPPEND optimizations, and it
// is correct in those settings.
func SameSafeExpr(l Node, r Node) bool {
if l.Op() != r.Op() || !types.Identical(l.Type(), r.Type()) {
return false
}
switch l.Op() {
case ONAME, OCLOSUREREAD:
return l == r
case ODOT, ODOTPTR:
l := l.(*SelectorExpr)
r := r.(*SelectorExpr)
return l.Sel != nil && r.Sel != nil && l.Sel == r.Sel && SameSafeExpr(l.X, r.X)
case ODEREF:
l := l.(*StarExpr)
r := r.(*StarExpr)
return SameSafeExpr(l.X, r.X)
case ONOT, OBITNOT, OPLUS, ONEG:
l := l.(*UnaryExpr)
r := r.(*UnaryExpr)
return SameSafeExpr(l.X, r.X)
case OCONVNOP:
l := l.(*ConvExpr)
r := r.(*ConvExpr)
return SameSafeExpr(l.X, r.X)
case OCONV:
l := l.(*ConvExpr)
r := r.(*ConvExpr)
// Some conversions can't be reused, such as []byte(str).
// Allow only numeric-ish types. This is a bit conservative.
return types.IsSimple[l.Type().Kind()] && SameSafeExpr(l.X, r.X)
case OINDEX, OINDEXMAP:
l := l.(*IndexExpr)
r := r.(*IndexExpr)
return SameSafeExpr(l.X, r.X) && SameSafeExpr(l.Index, r.Index)
case OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
l := l.(*BinaryExpr)
r := r.(*BinaryExpr)
return SameSafeExpr(l.X, r.X) && SameSafeExpr(l.Y, r.Y)
case OLITERAL:
return constant.Compare(l.Val(), token.EQL, r.Val())
case ONIL:
return true
}
return false
}
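// A sketch of the intended use, assuming typechecked IR: in
//
//	s[len(s)-1] = 0
//
// the two occurrences of s are the same ONAME, so SameSafeExpr reports
// true and the compiler may compute s once. Two syntactically equal
// calls like f().x are not same-safe: OCALLFUNC is not handled above,
// since each call could yield a different value.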
// ShouldCheckPtr reports whether pointer checking should be enabled for
// function fn at a given level. See debugHelpFooter for defined
// levels.
func ShouldCheckPtr(fn *Func, level int) bool {
return base.Debug.Checkptr >= level && fn.Pragma&NoCheckPtr == 0
}
// IsReflectHeaderDataField reports whether l is an expression p.Data
// where p has type reflect.SliceHeader or reflect.StringHeader.
func IsReflectHeaderDataField(l Node) bool {
if l.Type() != types.Types[types.TUINTPTR] {
return false
}
var tsym *types.Sym
switch l.Op() {
case ODOT:
l := l.(*SelectorExpr)
tsym = l.X.Type().Sym()
case ODOTPTR:
l := l.(*SelectorExpr)
tsym = l.X.Type().Elem().Sym()
default:
return false
}
if tsym == nil || l.Sym().Name != "Data" || tsym.Pkg.Path != "reflect" {
return false
}
return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
}
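// A sketch, assuming the standard reflect package: for
//
//	var h reflect.SliceHeader
//	h.Data = uintptr(p)
//
// IsReflectHeaderDataField reports true for the left-hand side, so a
// caller (e.g. escape analysis) can choose to treat the store like a
// pointer write.

// ParamNames returns the ONAME nodes for the parameters of function
// type ft, in declaration order.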
func ParamNames(ft *types.Type) []Node {
args := make([]Node, ft.NumParams())
for i, f := range ft.Params().FieldSlice() {
args[i] = AsNode(f.Nname)
}
return args
}
// MethodSym returns the method symbol representing a method name
// associated with a specific receiver type.
//
// Method symbols can be used to distinguish the same method appearing
// in different method sets. For example, T.M and (*T).M have distinct
// method symbols.
//
// The returned symbol will be marked as a function.
func MethodSym(recv *types.Type, msym *types.Sym) *types.Sym {
sym := MethodSymSuffix(recv, msym, "")
sym.SetFunc(true)
return sym
}
// MethodSymSuffix is like MethodSym, but allows attaching a
// distinguisher suffix. To avoid collisions, the suffix must not
// start with a letter, number, or period.
func MethodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sym {
if msym.IsBlank() {
base.Fatalf("blank method name")
}
rsym := recv.Sym()
if recv.IsPtr() {
if rsym != nil {
base.Fatalf("declared pointer receiver type: %v", recv)
}
rsym = recv.Elem().Sym()
}
// Find the package the receiver type appeared in. For
// anonymous receiver types (i.e., anonymous structs with
// embedded fields), use the "go" pseudo-package instead.
rpkg := Pkgs.Go
if rsym != nil {
rpkg = rsym.Pkg
}
var b bytes.Buffer
if recv.IsPtr() {
// The parentheses aren't really necessary, but
// they're pretty traditional at this point.
fmt.Fprintf(&b, "(%-S)", recv)
} else {
fmt.Fprintf(&b, "%-S", recv)
}
// A particular receiver type may have multiple non-exported
// methods with the same name. To disambiguate them, include a
// package qualifier for names that came from a different
// package than the receiver type.
if !types.IsExported(msym.Name) && msym.Pkg != rpkg {
b.WriteString(".")
b.WriteString(msym.Pkg.Prefix)
}
b.WriteString(".")
b.WriteString(msym.Name)
b.WriteString(suffix)
return rpkg.LookupBytes(b.Bytes())
}
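// A sketch of the resulting names, assuming a type T in package p:
//
//	type T int
//	func (T) M()  {}
//	func (*T) N() {}
//
// MethodSym yields distinct symbols conventionally rendered "T.M" and
// "(*T).N", and a suffix such as "-fm" (used elsewhere for method
// value wrappers) would be appended after the method name.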
// MethodExprName returns the ONAME representing the method
// referenced by expression n, which must be a method selector,
// method expression, or method value.
func MethodExprName(n Node) *Name {
name, _ := MethodExprFunc(n).Nname.(*Name)
return name
}
// MethodExprFunc is like MethodExprName, but returns the types.Field instead.
func MethodExprFunc(n Node) *types.Field {
switch n.Op() {
case ODOTMETH:
return n.(*SelectorExpr).Selection
case OMETHEXPR:
return n.(*MethodExpr).Method
case OCALLPART:
n := n.(*CallPartExpr)
return n.Method
}
base.Fatalf("unexpected node: %v (%v)", n, n.Op())
panic("unreachable")
}

View file

@ -313,10 +313,10 @@ func stmtFmt(n Node, s fmt.State) {
// block starting with the init statements.
// if we can just say "for" n->ninit; ... then do so
simpleinit := n.Init().Len() == 1 && n.Init().First().Init().Len() == 0 && StmtWithInit(n.Op())
simpleinit := len(n.Init()) == 1 && len(n.Init()[0].Init()) == 0 && StmtWithInit(n.Op())
// otherwise, print the inits as separate statements
complexinit := n.Init().Len() != 0 && !simpleinit && exportFormat
complexinit := len(n.Init()) != 0 && !simpleinit && exportFormat
// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
extrablock := complexinit && StmtWithInit(n.Op())
@ -332,75 +332,75 @@ func stmtFmt(n Node, s fmt.State) {
switch n.Op() {
case ODCL:
n := n.(*Decl)
fmt.Fprintf(s, "var %v %v", n.Left().Sym(), n.Left().Type())
fmt.Fprintf(s, "var %v %v", n.X.Sym(), n.X.Type())
// Don't export "v = <N>" initializing statements, hope they're always
// preceded by the DCL which will be re-parsed and typechecked to reproduce
// the "v = <N>" again.
case OAS:
n := n.(*AssignStmt)
if n.Colas() && !complexinit {
fmt.Fprintf(s, "%v := %v", n.Left(), n.Right())
if n.Def && !complexinit {
fmt.Fprintf(s, "%v := %v", n.X, n.Y)
} else {
fmt.Fprintf(s, "%v = %v", n.Left(), n.Right())
fmt.Fprintf(s, "%v = %v", n.X, n.Y)
}
case OASOP:
n := n.(*AssignOpStmt)
if n.Implicit() {
if n.SubOp() == OADD {
fmt.Fprintf(s, "%v++", n.Left())
if n.IncDec {
if n.AsOp == OADD {
fmt.Fprintf(s, "%v++", n.X)
} else {
fmt.Fprintf(s, "%v--", n.Left())
fmt.Fprintf(s, "%v--", n.X)
}
break
}
fmt.Fprintf(s, "%v %v= %v", n.Left(), n.SubOp(), n.Right())
fmt.Fprintf(s, "%v %v= %v", n.X, n.AsOp, n.Y)
case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
n := n.(*AssignListStmt)
if n.Colas() && !complexinit {
fmt.Fprintf(s, "%.v := %.v", n.List(), n.Rlist())
if n.Def && !complexinit {
fmt.Fprintf(s, "%.v := %.v", n.Lhs, n.Rhs)
} else {
fmt.Fprintf(s, "%.v = %.v", n.List(), n.Rlist())
fmt.Fprintf(s, "%.v = %.v", n.Lhs, n.Rhs)
}
case OBLOCK:
n := n.(*BlockStmt)
if n.List().Len() != 0 {
fmt.Fprintf(s, "%v", n.List())
if len(n.List) != 0 {
fmt.Fprintf(s, "%v", n.List)
}
case ORETURN:
n := n.(*ReturnStmt)
fmt.Fprintf(s, "return %.v", n.List())
fmt.Fprintf(s, "return %.v", n.Results)
case ORETJMP:
n := n.(*BranchStmt)
fmt.Fprintf(s, "retjmp %v", n.Sym())
fmt.Fprintf(s, "retjmp %v", n.Label)
case OINLMARK:
n := n.(*InlineMarkStmt)
fmt.Fprintf(s, "inlmark %d", n.Offset())
fmt.Fprintf(s, "inlmark %d", n.Index)
case OGO:
n := n.(*GoDeferStmt)
fmt.Fprintf(s, "go %v", n.Left())
fmt.Fprintf(s, "go %v", n.Call)
case ODEFER:
n := n.(*GoDeferStmt)
fmt.Fprintf(s, "defer %v", n.Left())
fmt.Fprintf(s, "defer %v", n.Call)
case OIF:
n := n.(*IfStmt)
if simpleinit {
fmt.Fprintf(s, "if %v; %v { %v }", n.Init().First(), n.Left(), n.Body())
fmt.Fprintf(s, "if %v; %v { %v }", n.Init()[0], n.Cond, n.Body)
} else {
fmt.Fprintf(s, "if %v { %v }", n.Left(), n.Body())
fmt.Fprintf(s, "if %v { %v }", n.Cond, n.Body)
}
if n.Rlist().Len() != 0 {
fmt.Fprintf(s, " else { %v }", n.Rlist())
if len(n.Else) != 0 {
fmt.Fprintf(s, " else { %v }", n.Else)
}
case OFOR, OFORUNTIL:
@ -416,26 +416,26 @@ func stmtFmt(n Node, s fmt.State) {
fmt.Fprint(s, opname)
if simpleinit {
fmt.Fprintf(s, " %v;", n.Init().First())
} else if n.Right() != nil {
fmt.Fprintf(s, " %v;", n.Init()[0])
} else if n.Post != nil {
fmt.Fprint(s, " ;")
}
if n.Left() != nil {
fmt.Fprintf(s, " %v", n.Left())
if n.Cond != nil {
fmt.Fprintf(s, " %v", n.Cond)
}
if n.Right() != nil {
fmt.Fprintf(s, "; %v", n.Right())
if n.Post != nil {
fmt.Fprintf(s, "; %v", n.Post)
} else if simpleinit {
fmt.Fprint(s, ";")
}
if n.Op() == OFORUNTIL && n.List().Len() != 0 {
fmt.Fprintf(s, "; %v", n.List())
if n.Op() == OFORUNTIL && len(n.Late) != 0 {
fmt.Fprintf(s, "; %v", n.Late)
}
fmt.Fprintf(s, " { %v }", n.Body())
fmt.Fprintf(s, " { %v }", n.Body)
case ORANGE:
n := n.(*RangeStmt)
@ -444,12 +444,12 @@ func stmtFmt(n Node, s fmt.State) {
break
}
if n.List().Len() == 0 {
fmt.Fprintf(s, "for range %v { %v }", n.Right(), n.Body())
if len(n.Vars) == 0 {
fmt.Fprintf(s, "for range %v { %v }", n.X, n.Body)
break
}
fmt.Fprintf(s, "for %.v = range %v { %v }", n.List(), n.Right(), n.Body())
fmt.Fprintf(s, "for %.v = range %v { %v }", n.Vars, n.X, n.Body)
case OSELECT:
n := n.(*SelectStmt)
@ -457,7 +457,7 @@ func stmtFmt(n Node, s fmt.State) {
fmt.Fprintf(s, "%v statement", n.Op())
break
}
fmt.Fprintf(s, "select { %v }", n.List())
fmt.Fprintf(s, "select { %v }", n.Cases)
case OSWITCH:
n := n.(*SwitchStmt)
@ -467,33 +467,33 @@ func stmtFmt(n Node, s fmt.State) {
}
fmt.Fprintf(s, "switch")
if simpleinit {
fmt.Fprintf(s, " %v;", n.Init().First())
fmt.Fprintf(s, " %v;", n.Init()[0])
}
if n.Left() != nil {
fmt.Fprintf(s, " %v ", n.Left())
if n.Tag != nil {
fmt.Fprintf(s, " %v ", n.Tag)
}
fmt.Fprintf(s, " { %v }", n.List())
fmt.Fprintf(s, " { %v }", n.Cases)
case OCASE:
n := n.(*CaseStmt)
if n.List().Len() != 0 {
fmt.Fprintf(s, "case %.v", n.List())
if len(n.List) != 0 {
fmt.Fprintf(s, "case %.v", n.List)
} else {
fmt.Fprint(s, "default")
}
fmt.Fprintf(s, ": %v", n.Body())
fmt.Fprintf(s, ": %v", n.Body)
case OBREAK, OCONTINUE, OGOTO, OFALL:
n := n.(*BranchStmt)
if n.Sym() != nil {
fmt.Fprintf(s, "%v %v", n.Op(), n.Sym())
if n.Label != nil {
fmt.Fprintf(s, "%v %v", n.Op(), n.Label)
} else {
fmt.Fprintf(s, "%v", n.Op())
}
case OLABEL:
n := n.(*LabelStmt)
fmt.Fprintf(s, "%v: ", n.Sym())
fmt.Fprintf(s, "%v: ", n.Label)
}
if extrablock {
@ -527,19 +527,19 @@ func exprFmt(n Node, s fmt.State, prec int) {
case OADDR:
nn := nn.(*AddrExpr)
if nn.Implicit() {
n = nn.Left()
n = nn.X
continue
}
case ODEREF:
nn := nn.(*StarExpr)
if nn.Implicit() {
n = nn.Left()
n = nn.X
continue
}
case OCONV, OCONVNOP, OCONVIFACE:
nn := nn.(*ConvExpr)
if nn.Implicit() {
n = nn.Left()
n = nn.X
continue
}
}
@ -560,7 +560,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
switch n.Op() {
case OPAREN:
n := n.(*ParenExpr)
fmt.Fprintf(s, "(%v)", n.Left())
fmt.Fprintf(s, "(%v)", n.X)
case ONIL:
fmt.Fprint(s, "nil")
@ -694,7 +694,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
fmt.Fprint(s, "func literal")
return
}
fmt.Fprintf(s, "%v { %v }", n.Type(), n.Func().Body())
fmt.Fprintf(s, "%v { %v }", n.Type(), n.Func.Body)
case OCOMPLIT:
n := n.(*CompLitExpr)
@ -703,84 +703,84 @@ func exprFmt(n Node, s fmt.State, prec int) {
fmt.Fprintf(s, "... argument")
return
}
if n.Right() != nil {
fmt.Fprintf(s, "%v{%s}", n.Right(), ellipsisIf(n.List().Len() != 0))
if n.Ntype != nil {
fmt.Fprintf(s, "%v{%s}", n.Ntype, ellipsisIf(len(n.List) != 0))
return
}
fmt.Fprint(s, "composite literal")
return
}
fmt.Fprintf(s, "(%v{ %.v })", n.Right(), n.List())
fmt.Fprintf(s, "(%v{ %.v })", n.Ntype, n.List)
case OPTRLIT:
n := n.(*AddrExpr)
fmt.Fprintf(s, "&%v", n.Left())
fmt.Fprintf(s, "&%v", n.X)
case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
n := n.(*CompLitExpr)
if !exportFormat {
fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(n.List().Len() != 0))
fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(len(n.List) != 0))
return
}
fmt.Fprintf(s, "(%v{ %.v })", n.Type(), n.List())
fmt.Fprintf(s, "(%v{ %.v })", n.Type(), n.List)
case OKEY:
n := n.(*KeyExpr)
if n.Left() != nil && n.Right() != nil {
fmt.Fprintf(s, "%v:%v", n.Left(), n.Right())
if n.Key != nil && n.Value != nil {
fmt.Fprintf(s, "%v:%v", n.Key, n.Value)
return
}
if n.Left() == nil && n.Right() != nil {
fmt.Fprintf(s, ":%v", n.Right())
if n.Key == nil && n.Value != nil {
fmt.Fprintf(s, ":%v", n.Value)
return
}
if n.Left() != nil && n.Right() == nil {
fmt.Fprintf(s, "%v:", n.Left())
if n.Key != nil && n.Value == nil {
fmt.Fprintf(s, "%v:", n.Key)
return
}
fmt.Fprint(s, ":")
case OSTRUCTKEY:
n := n.(*StructKeyExpr)
fmt.Fprintf(s, "%v:%v", n.Sym(), n.Left())
fmt.Fprintf(s, "%v:%v", n.Field, n.Value)
case OCALLPART:
n := n.(*CallPartExpr)
exprFmt(n.Left(), s, nprec)
if n.Sym() == nil {
exprFmt(n.X, s, nprec)
if n.Method.Sym == nil {
fmt.Fprint(s, ".<nil>")
return
}
fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sym()))
fmt.Fprintf(s, ".%s", types.SymMethodName(n.Method.Sym))
case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH:
n := n.(*SelectorExpr)
exprFmt(n.Left(), s, nprec)
if n.Sym() == nil {
exprFmt(n.X, s, nprec)
if n.Sel == nil {
fmt.Fprint(s, ".<nil>")
return
}
fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sym()))
fmt.Fprintf(s, ".%s", types.SymMethodName(n.Sel))
case ODOTTYPE, ODOTTYPE2:
n := n.(*TypeAssertExpr)
exprFmt(n.Left(), s, nprec)
if n.Right() != nil {
fmt.Fprintf(s, ".(%v)", n.Right())
exprFmt(n.X, s, nprec)
if n.Ntype != nil {
fmt.Fprintf(s, ".(%v)", n.Ntype)
return
}
fmt.Fprintf(s, ".(%v)", n.Type())
case OINDEX, OINDEXMAP:
n := n.(*IndexExpr)
exprFmt(n.Left(), s, nprec)
fmt.Fprintf(s, "[%v]", n.Right())
exprFmt(n.X, s, nprec)
fmt.Fprintf(s, "[%v]", n.Index)
case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
n := n.(*SliceExpr)
exprFmt(n.Left(), s, nprec)
exprFmt(n.X, s, nprec)
fmt.Fprint(s, "[")
low, high, max := n.SliceBounds()
if low != nil {
@ -800,14 +800,14 @@ func exprFmt(n Node, s fmt.State, prec int) {
case OSLICEHEADER:
n := n.(*SliceHeaderExpr)
if n.List().Len() != 2 {
base.Fatalf("bad OSLICEHEADER list length %d", n.List().Len())
if len(n.LenCap) != 2 {
base.Fatalf("bad OSLICEHEADER list length %d", len(n.LenCap))
}
fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Left(), n.List().First(), n.List().Second())
fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Ptr, n.LenCap[0], n.LenCap[1])
case OCOMPLEX, OCOPY:
n := n.(*BinaryExpr)
fmt.Fprintf(s, "%v(%v, %v)", n.Op(), n.Left(), n.Right())
fmt.Fprintf(s, "%v(%v, %v)", n.Op(), n.X, n.Y)
case OCONV,
OCONVIFACE,
@ -823,7 +823,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
} else {
fmt.Fprintf(s, "%v", n.Type())
}
fmt.Fprintf(s, "(%v)", n.Left())
fmt.Fprintf(s, "(%v)", n.X)
case OREAL,
OIMAG,
@ -836,7 +836,7 @@ func exprFmt(n Node, s fmt.State, prec int) {
OOFFSETOF,
OSIZEOF:
n := n.(*UnaryExpr)
fmt.Fprintf(s, "%v(%v)", n.Op(), n.Left())
fmt.Fprintf(s, "%v(%v)", n.Op(), n.X)
case OAPPEND,
ODELETE,
@ -845,58 +845,58 @@ func exprFmt(n Node, s fmt.State, prec int) {
OPRINT,
OPRINTN:
n := n.(*CallExpr)
if n.IsDDD() {
fmt.Fprintf(s, "%v(%.v...)", n.Op(), n.List())
if n.IsDDD {
fmt.Fprintf(s, "%v(%.v...)", n.Op(), n.Args)
return
}
fmt.Fprintf(s, "%v(%.v)", n.Op(), n.List())
fmt.Fprintf(s, "%v(%.v)", n.Op(), n.Args)
case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
n := n.(*CallExpr)
exprFmt(n.Left(), s, nprec)
if n.IsDDD() {
fmt.Fprintf(s, "(%.v...)", n.List())
exprFmt(n.X, s, nprec)
if n.IsDDD {
fmt.Fprintf(s, "(%.v...)", n.Args)
return
}
fmt.Fprintf(s, "(%.v)", n.List())
fmt.Fprintf(s, "(%.v)", n.Args)
case OMAKEMAP, OMAKECHAN, OMAKESLICE:
n := n.(*MakeExpr)
if n.Right() != nil {
fmt.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Left(), n.Right())
if n.Cap != nil {
fmt.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Len, n.Cap)
return
}
if n.Left() != nil && (n.Op() == OMAKESLICE || !n.Left().Type().IsUntyped()) {
fmt.Fprintf(s, "make(%v, %v)", n.Type(), n.Left())
if n.Len != nil && (n.Op() == OMAKESLICE || !n.Len.Type().IsUntyped()) {
fmt.Fprintf(s, "make(%v, %v)", n.Type(), n.Len)
return
}
fmt.Fprintf(s, "make(%v)", n.Type())
case OMAKESLICECOPY:
n := n.(*MakeExpr)
fmt.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Left(), n.Right())
fmt.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Len, n.Cap)
case OPLUS, ONEG, OBITNOT, ONOT, ORECV:
// Unary
n := n.(*UnaryExpr)
fmt.Fprintf(s, "%v", n.Op())
if n.Left() != nil && n.Left().Op() == n.Op() {
if n.X != nil && n.X.Op() == n.Op() {
fmt.Fprint(s, " ")
}
exprFmt(n.Left(), s, nprec+1)
exprFmt(n.X, s, nprec+1)
case OADDR:
n := n.(*AddrExpr)
fmt.Fprintf(s, "%v", n.Op())
if n.Left() != nil && n.Left().Op() == n.Op() {
if n.X != nil && n.X.Op() == n.Op() {
fmt.Fprint(s, " ")
}
exprFmt(n.Left(), s, nprec+1)
exprFmt(n.X, s, nprec+1)
case ODEREF:
n := n.(*StarExpr)
fmt.Fprintf(s, "%v", n.Op())
exprFmt(n.Left(), s, nprec+1)
exprFmt(n.X, s, nprec+1)
// Binary
case OADD,
@ -917,26 +917,26 @@ func exprFmt(n Node, s fmt.State, prec int) {
OSUB,
OXOR:
n := n.(*BinaryExpr)
exprFmt(n.Left(), s, nprec)
exprFmt(n.X, s, nprec)
fmt.Fprintf(s, " %v ", n.Op())
exprFmt(n.Right(), s, nprec+1)
exprFmt(n.Y, s, nprec+1)
case OANDAND,
OOROR:
n := n.(*LogicalExpr)
exprFmt(n.Left(), s, nprec)
exprFmt(n.X, s, nprec)
fmt.Fprintf(s, " %v ", n.Op())
exprFmt(n.Right(), s, nprec+1)
exprFmt(n.Y, s, nprec+1)
case OSEND:
n := n.(*SendStmt)
exprFmt(n.Left(), s, nprec)
exprFmt(n.Chan, s, nprec)
fmt.Fprintf(s, " <- ")
exprFmt(n.Right(), s, nprec+1)
exprFmt(n.Value, s, nprec+1)
case OADDSTR:
n := n.(*AddStringExpr)
for i, n1 := range n.List().Slice() {
for i, n1 := range n.List {
if i != 0 {
fmt.Fprint(s, " + ")
}
@ -980,9 +980,9 @@ func (l Nodes) Format(s fmt.State, verb rune) {
sep = ", "
}
for i, n := range l.Slice() {
for i, n := range l {
fmt.Fprint(s, n)
if i+1 < l.Len() {
if i+1 < len(l) {
fmt.Fprint(s, sep)
}
}
@ -1098,7 +1098,7 @@ func dumpNodeHeader(w io.Writer, n Node) {
if n.Op() == OCLOSURE {
n := n.(*ClosureExpr)
if fn := n.Func(); fn != nil && fn.Nname.Sym() != nil {
if fn := n.Func; fn != nil && fn.Nname.Sym() != nil {
fmt.Fprintf(w, " fnName(%+v)", fn.Nname.Sym())
}
}
@ -1131,7 +1131,7 @@ func dumpNode(w io.Writer, n Node, depth int) {
return
}
if n.Init().Len() != 0 {
if len(n.Init()) != 0 {
fmt.Fprintf(w, "%+v-init", n.Op())
dumpNodes(w, n.Init(), depth+1)
indent(w, depth)
@ -1169,7 +1169,7 @@ func dumpNode(w io.Writer, n Node, depth int) {
case OASOP:
n := n.(*AssignOpStmt)
fmt.Fprintf(w, "%+v-%+v", n.Op(), n.SubOp())
fmt.Fprintf(w, "%+v-%+v", n.Op(), n.AsOp)
dumpNodeHeader(w, n)
case OTYPE:
@ -1192,18 +1192,18 @@ func dumpNode(w io.Writer, n Node, depth int) {
n := n.(*Func)
fmt.Fprintf(w, "%+v", n.Op())
dumpNodeHeader(w, n)
fn := n.Func()
fn := n
if len(fn.Dcl) > 0 {
indent(w, depth)
fmt.Fprintf(w, "%+v-Dcl", n.Op())
for _, dcl := range n.Func().Dcl {
for _, dcl := range n.Dcl {
dumpNode(w, dcl, depth+1)
}
}
if fn.Body().Len() > 0 {
if len(fn.Body) > 0 {
indent(w, depth)
fmt.Fprintf(w, "%+v-body", n.Op())
dumpNodes(w, fn.Body(), depth+1)
dumpNodes(w, fn.Body, depth+1)
}
return
}
@ -1247,7 +1247,7 @@ func dumpNode(w io.Writer, n Node, depth int) {
}
dumpNode(w, val, depth+1)
case Nodes:
if val.Len() == 0 {
if len(val) == 0 {
continue
}
if name != "" {
@ -1260,12 +1260,12 @@ func dumpNode(w io.Writer, n Node, depth int) {
}
func dumpNodes(w io.Writer, list Nodes, depth int) {
if list.Len() == 0 {
if len(list) == 0 {
fmt.Fprintf(w, " <nil>")
return
}
for _, n := range list.Slice() {
for _, n := range list {
dumpNode(w, n, depth)
}
}

View file

@ -50,8 +50,8 @@ import (
type Func struct {
miniNode
typ *types.Type
Body_ Nodes
iota int64
Body Nodes
Iota int64
Nname *Name // ONAME node
OClosure *ClosureExpr // OCLOSURE node
@ -110,20 +110,14 @@ func NewFunc(pos src.XPos) *Func {
f := new(Func)
f.pos = pos
f.op = ODCLFUNC
f.iota = -1
f.Iota = -1
return f
}
func (f *Func) isStmt() {}
func (f *Func) Func() *Func { return f }
func (f *Func) Body() Nodes { return f.Body_ }
func (f *Func) PtrBody() *Nodes { return &f.Body_ }
func (f *Func) SetBody(x Nodes) { f.Body_ = x }
func (f *Func) Type() *types.Type { return f.typ }
func (f *Func) SetType(x *types.Type) { f.typ = x }
func (f *Func) Iota() int64 { return f.iota }
func (f *Func) SetIota(x int64) { f.iota = x }
func (f *Func) Sym() *types.Sym {
if f.Nname != nil {
@ -218,11 +212,11 @@ func FuncName(n Node) string {
case *Func:
f = n
case *Name:
f = n.Func()
f = n.Func
case *CallPartExpr:
f = n.Func()
f = n.Func
case *ClosureExpr:
f = n.Func()
f = n.Func
}
if f == nil || f.Nname == nil {
return "<nil>"
@ -245,9 +239,9 @@ func PkgFuncName(n Node) string {
var f *Func
switch n := n.(type) {
case *CallPartExpr:
f = n.Func()
f = n.Func
case *ClosureExpr:
f = n.Func()
f = n.Func
case *Func:
f = n
}
@ -267,3 +261,51 @@ func PkgFuncName(n Node) string {
}
return p + "." + s.Name
}
var CurFunc *Func
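// FuncSymName returns the name of the funcsym for s, that is, s's own
// name with the "·f" suffix appended.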
func FuncSymName(s *types.Sym) string {
return s.Name + "·f"
}
// NewFuncNameAt generates a new name node for a function or method.
func NewFuncNameAt(pos src.XPos, s *types.Sym, fn *Func) *Name {
if fn.Nname != nil {
base.Fatalf("newFuncName - already have name")
}
n := NewNameAt(pos, s)
n.SetFunc(fn)
fn.Nname = n
return n
}
// MarkFunc marks a node as a function.
func MarkFunc(n *Name) {
if n.Op() != ONAME || n.Class_ != Pxxx {
base.Fatalf("expected ONAME/Pxxx node, got %v", n)
}
n.Class_ = PFUNC
n.Sym().SetFunc(true)
}
// ClosureDebugRuntimeCheck applies boilerplate checks for debug flags
// and for compiling the runtime.
func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
if base.Debug.Closure > 0 {
if clo.Esc() == EscHeap {
base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func.ClosureVars)
} else {
base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars)
}
}
if base.Flag.CompilingRuntime && clo.Esc() == EscHeap {
base.ErrorfAt(clo.Pos(), "heap-allocated closure, not allowed in runtime")
}
}
// IsTrivialClosure reports whether closure clo has an
// empty list of captured vars.
func IsTrivialClosure(clo *ClosureExpr) bool {
return len(clo.Func.ClosureVars) == 0
}
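// A sketch, assuming ordinary closures:
//
//	func() int { return 1 }    // no captures: trivial
//	func() int { return x }    // captures x:  not trivial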

View file

@ -35,11 +35,6 @@ type miniNode struct {
esc uint16
}
func (n *miniNode) Format(s fmt.State, verb rune) { panic(1) }
func (n *miniNode) copy() Node { panic(1) }
func (n *miniNode) doChildren(do func(Node) error) error { panic(1) }
func (n *miniNode) editChildren(edit func(Node) Node) { panic(1) }
// posOr returns pos if known, or else n.pos.
// For use in DeepCopy.
func (n *miniNode) posOr(pos src.XPos) src.XPos {
@ -85,106 +80,27 @@ func (n *miniNode) SetDiag(x bool) { n.bits.set(miniDiag, x) }
// Empty, immutable graph structure.
func (n *miniNode) Left() Node { return nil }
func (n *miniNode) Right() Node { return nil }
func (n *miniNode) Init() Nodes { return Nodes{} }
func (n *miniNode) PtrInit() *Nodes { return &immutableEmptyNodes }
func (n *miniNode) Body() Nodes { return Nodes{} }
func (n *miniNode) PtrBody() *Nodes { return &immutableEmptyNodes }
func (n *miniNode) List() Nodes { return Nodes{} }
func (n *miniNode) PtrList() *Nodes { return &immutableEmptyNodes }
func (n *miniNode) Rlist() Nodes { return Nodes{} }
func (n *miniNode) PtrRlist() *Nodes { return &immutableEmptyNodes }
func (n *miniNode) SetLeft(x Node) {
if x != nil {
panic(n.no("SetLeft"))
}
}
func (n *miniNode) SetRight(x Node) {
if x != nil {
panic(n.no("SetRight"))
}
}
func (n *miniNode) SetInit(x Nodes) {
if x != nil {
panic(n.no("SetInit"))
}
}
func (n *miniNode) SetBody(x Nodes) {
if x != nil {
panic(n.no("SetBody"))
}
}
func (n *miniNode) SetList(x Nodes) {
if x != nil {
panic(n.no("SetList"))
}
}
func (n *miniNode) SetRlist(x Nodes) {
if x != nil {
panic(n.no("SetRlist"))
}
}
// Additional functionality unavailable.
func (n *miniNode) no(name string) string { return "cannot " + name + " on " + n.op.String() }
func (n *miniNode) SetOp(Op) { panic(n.no("SetOp")) }
func (n *miniNode) SubOp() Op { panic(n.no("SubOp")) }
func (n *miniNode) SetSubOp(Op) { panic(n.no("SetSubOp")) }
func (n *miniNode) Type() *types.Type { return nil }
func (n *miniNode) SetType(*types.Type) { panic(n.no("SetType")) }
func (n *miniNode) Func() *Func { return nil }
func (n *miniNode) Name() *Name { return nil }
func (n *miniNode) Sym() *types.Sym { return nil }
func (n *miniNode) SetSym(*types.Sym) { panic(n.no("SetSym")) }
func (n *miniNode) Offset() int64 { return types.BADWIDTH }
func (n *miniNode) SetOffset(x int64) { panic(n.no("SetOffset")) }
func (n *miniNode) Class() Class { return Pxxx }
func (n *miniNode) SetClass(Class) { panic(n.no("SetClass")) }
func (n *miniNode) Likely() bool { panic(n.no("Likely")) }
func (n *miniNode) SetLikely(bool) { panic(n.no("SetLikely")) }
func (n *miniNode) SliceBounds() (low, high, max Node) {
panic(n.no("SliceBounds"))
}
func (n *miniNode) SetSliceBounds(low, high, max Node) {
panic(n.no("SetSliceBounds"))
}
func (n *miniNode) Iota() int64 { panic(n.no("Iota")) }
func (n *miniNode) SetIota(int64) { panic(n.no("SetIota")) }
func (n *miniNode) Colas() bool { return false }
func (n *miniNode) SetColas(bool) { panic(n.no("SetColas")) }
func (n *miniNode) NoInline() bool { panic(n.no("NoInline")) }
func (n *miniNode) SetNoInline(bool) { panic(n.no("SetNoInline")) }
func (n *miniNode) Transient() bool { panic(n.no("Transient")) }
func (n *miniNode) SetTransient(bool) { panic(n.no("SetTransient")) }
func (n *miniNode) Implicit() bool { return false }
func (n *miniNode) SetImplicit(bool) { panic(n.no("SetImplicit")) }
func (n *miniNode) IsDDD() bool { return false }
func (n *miniNode) SetIsDDD(bool) { panic(n.no("SetIsDDD")) }
func (n *miniNode) Embedded() bool { return false }
func (n *miniNode) SetEmbedded(bool) { panic(n.no("SetEmbedded")) }
func (n *miniNode) IndexMapLValue() bool { panic(n.no("IndexMapLValue")) }
func (n *miniNode) SetIndexMapLValue(bool) { panic(n.no("SetIndexMapLValue")) }
func (n *miniNode) ResetAux() { panic(n.no("ResetAux")) }
func (n *miniNode) HasBreak() bool { panic(n.no("HasBreak")) }
func (n *miniNode) SetHasBreak(bool) { panic(n.no("SetHasBreak")) }
func (n *miniNode) Val() constant.Value { panic(n.no("Val")) }
func (n *miniNode) SetVal(v constant.Value) { panic(n.no("SetVal")) }
func (n *miniNode) Int64Val() int64 { panic(n.no("Int64Val")) }
func (n *miniNode) Uint64Val() uint64 { panic(n.no("Uint64Val")) }
func (n *miniNode) CanInt64() bool { panic(n.no("CanInt64")) }
func (n *miniNode) BoolVal() bool { panic(n.no("BoolVal")) }
func (n *miniNode) StringVal() string { panic(n.no("StringVal")) }
func (n *miniNode) HasCall() bool { return false }
func (n *miniNode) SetHasCall(bool) { panic(n.no("SetHasCall")) }
func (n *miniNode) NonNil() bool { return false }
func (n *miniNode) MarkNonNil() { panic(n.no("MarkNonNil")) }
func (n *miniNode) Bounded() bool { return false }
func (n *miniNode) SetBounded(bool) { panic(n.no("SetBounded")) }
func (n *miniNode) Opt() interface{} { return nil }
func (n *miniNode) SetOpt(interface{}) { panic(n.no("SetOpt")) }
func (n *miniNode) MarkReadonly() { panic(n.no("MarkReadonly")) }
func (n *miniNode) TChanDir() types.ChanDir { panic(n.no("TChanDir")) }
func (n *miniNode) SetTChanDir(types.ChanDir) { panic(n.no("SetTChanDir")) }

View file

@ -39,7 +39,7 @@ type Name struct {
flags bitset16
pragma PragmaFlag // int16
sym *types.Sym
fn *Func
Func *Func
Offset_ int64
val constant.Value
orig Node
@ -225,8 +225,7 @@ func (n *Name) SubOp() Op { return n.BuiltinOp }
func (n *Name) SetSubOp(x Op) { n.BuiltinOp = x }
func (n *Name) Class() Class { return n.Class_ }
func (n *Name) SetClass(x Class) { n.Class_ = x }
func (n *Name) Func() *Func { return n.fn }
func (n *Name) SetFunc(x *Func) { n.fn = x }
func (n *Name) SetFunc(x *Func) { n.Func = x }
func (n *Name) Offset() int64 { panic("Name.Offset") }
func (n *Name) SetOffset(x int64) {
if x != 0 {
@ -414,3 +413,25 @@ func NewPkgName(pos src.XPos, sym *types.Sym, pkg *types.Pkg) *PkgName {
p.pos = pos
return p
}
// IsParamStackCopy reports whether this is the on-stack copy of a
// function parameter that moved to the heap.
func IsParamStackCopy(n Node) bool {
if n.Op() != ONAME {
return false
}
name := n.(*Name)
return (name.Class_ == PPARAM || name.Class_ == PPARAMOUT) && name.Heapaddr != nil
}
// IsParamHeapCopy reports whether this is the on-heap copy of
// a function parameter that moved to the heap.
func IsParamHeapCopy(n Node) bool {
if n.Op() != ONAME {
return false
}
name := n.(*Name)
return name.Class_ == PAUTOHEAP && name.Name().Stackcopy != nil
}
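// A sketch, assuming a parameter that escapes:
//
//	func f(x int) *int { return &x }
//
// x is moved to the heap, leaving a PPARAM name with Heapaddr set (the
// stack copy, per IsParamStackCopy) and a PAUTOHEAP name whose
// Stackcopy points back at it (the heap copy, per IsParamHeapCopy).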
var RegFP *Name

View file

@ -33,59 +33,15 @@ type Node interface {
// Abstract graph structure, for generic traversals.
Op() Op
SetOp(x Op)
SubOp() Op
SetSubOp(x Op)
Left() Node
SetLeft(x Node)
Right() Node
SetRight(x Node)
Init() Nodes
PtrInit() *Nodes
SetInit(x Nodes)
Body() Nodes
PtrBody() *Nodes
SetBody(x Nodes)
List() Nodes
SetList(x Nodes)
PtrList() *Nodes
Rlist() Nodes
SetRlist(x Nodes)
PtrRlist() *Nodes
// Fields specific to certain Ops only.
Type() *types.Type
SetType(t *types.Type)
Func() *Func
Name() *Name
Sym() *types.Sym
SetSym(x *types.Sym)
Offset() int64
SetOffset(x int64)
Class() Class
SetClass(x Class)
Likely() bool
SetLikely(x bool)
SliceBounds() (low, high, max Node)
SetSliceBounds(low, high, max Node)
Iota() int64
SetIota(x int64)
Colas() bool
SetColas(x bool)
NoInline() bool
SetNoInline(x bool)
Transient() bool
SetTransient(x bool)
Implicit() bool
SetImplicit(x bool)
IsDDD() bool
SetIsDDD(x bool)
IndexMapLValue() bool
SetIndexMapLValue(x bool)
ResetAux()
HasBreak() bool
SetHasBreak(x bool)
MarkReadonly()
Val() constant.Value
SetVal(v constant.Value)
@ -98,8 +54,6 @@ type Node interface {
SetOpt(x interface{})
Diag() bool
SetDiag(x bool)
Bounded() bool
SetBounded(x bool)
Typecheck() uint8
SetTypecheck(x uint8)
NonNil() bool
@ -363,41 +317,6 @@ type Nodes []Node
// immutableEmptyNodes is an immutable, empty Nodes list.
// The methods that would modify it panic instead.
var immutableEmptyNodes = Nodes{}
// AsNodes returns a slice of Node as a Nodes value.
func AsNodes(s []Node) Nodes {
return s
}
// Slice returns the entries in Nodes as a slice.
// Changes to the slice entries (as in s[i] = n) will be reflected in
// the Nodes.
func (n Nodes) Slice() []Node {
return n
}
// Len returns the number of entries in Nodes.
func (n Nodes) Len() int {
return len(n)
}
// Index returns the i'th element of Nodes.
// It panics if n does not have at least i+1 elements.
func (n Nodes) Index(i int) Node {
return n[i]
}
// First returns the first element of Nodes (same as n.Index(0)).
// It panics if n has no elements.
func (n Nodes) First() Node {
return n[0]
}
// Second returns the second element of Nodes (same as n.Index(1)).
// It panics if n has fewer than two elements.
func (n Nodes) Second() Node {
return n[1]
}
func (n *Nodes) mutate() {
if n == &immutableEmptyNodes {
panic("immutable Nodes.Set")
@ -417,55 +336,6 @@ func (n *Nodes) Set(s []Node) {
*n = s
}
// Set1 sets n to a slice containing a single node.
func (n *Nodes) Set1(n1 Node) {
n.mutate()
*n = []Node{n1}
}
// Set2 sets n to a slice containing two nodes.
func (n *Nodes) Set2(n1, n2 Node) {
n.mutate()
*n = []Node{n1, n2}
}
// Set3 sets n to a slice containing three nodes.
func (n *Nodes) Set3(n1, n2, n3 Node) {
n.mutate()
*n = []Node{n1, n2, n3}
}
// MoveNodes sets n to the contents of n2, then clears n2.
func (n *Nodes) MoveNodes(n2 *Nodes) {
n.mutate()
*n = *n2
*n2 = nil
}
// SetIndex sets the i'th element of Nodes to node.
// It panics if n does not have at least i+1 elements.
func (n Nodes) SetIndex(i int, node Node) {
n[i] = node
}
// SetFirst sets the first element of Nodes to node.
// It panics if n does not have at least one element.
func (n Nodes) SetFirst(node Node) {
n[0] = node
}
// SetSecond sets the second element of Nodes to node.
// It panics if n does not have at least two elements.
func (n Nodes) SetSecond(node Node) {
n[1] = node
}
// Addr returns the address of the i'th element of Nodes.
// It panics if n does not have at least i+1 elements.
func (n Nodes) Addr(i int) *Node {
return &n[i]
}
// Append appends entries to Nodes.
func (n *Nodes) Append(a ...Node) {
if len(a) == 0 {
@ -492,18 +362,12 @@ func (n *Nodes) Take() []Node {
return ret
}
// AppendNodes appends the contents of *n2 to n, then clears n2.
func (n *Nodes) AppendNodes(n2 *Nodes) {
n.mutate()
*n = append(*n, n2.Take()...)
}
// Copy returns a copy of the content of the slice.
func (n Nodes) Copy() Nodes {
if n == nil {
return nil
}
c := make(Nodes, n.Len())
c := make(Nodes, len(n))
copy(c, n)
return c
}
@ -641,122 +505,98 @@ func IsMethod(n Node) bool {
return n.Type().Recv() != nil
}
func Nod(op Op, nleft, nright Node) Node {
return NodAt(base.Pos, op, nleft, nright)
func HasNamedResults(fn *Func) bool {
typ := fn.Type()
return typ.NumResults() > 0 && types.OrigSym(typ.Results().Field(0).Sym) != nil
}
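// For example, HasNamedResults is true for func() (err error) and
// false for func() error, whose result field has no original symbol.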
func NodAt(pos src.XPos, op Op, nleft, nright Node) Node {
switch op {
default:
panic("NodAt " + op.String())
case OADD, OAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE,
OLSH, OLT, OMOD, OMUL, ONE, OOR, ORSH, OSUB, OXOR,
OCOPY, OCOMPLEX,
OEFACE:
return NewBinaryExpr(pos, op, nleft, nright)
case OADDR:
return NewAddrExpr(pos, nleft)
case OADDSTR:
return NewAddStringExpr(pos, nil)
case OANDAND, OOROR:
return NewLogicalExpr(pos, op, nleft, nright)
case OARRAYLIT, OCOMPLIT, OMAPLIT, OSTRUCTLIT, OSLICELIT:
var typ Ntype
if nright != nil {
typ = nright.(Ntype)
// HasUniquePos reports whether n has a unique position that can be
// used for reporting error messages.
//
// It's primarily used to distinguish references to named objects,
// whose Pos will point back to their declaration position rather than
// their usage position.
func HasUniquePos(n Node) bool {
switch n.Op() {
case ONAME, OPACK:
return false
case OLITERAL, ONIL, OTYPE:
if n.Sym() != nil {
return false
}
return NewCompLitExpr(pos, op, typ, nil)
case OAS:
return NewAssignStmt(pos, nleft, nright)
case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV, OSELRECV2:
n := NewAssignListStmt(pos, op, nil, nil)
}
if !n.Pos().IsKnown() {
if base.Flag.K != 0 {
base.Warn("setlineno: unknown position (line 0)")
}
return false
}
return true
}
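// A sketch: if x is declared on line 5 and referenced on line 10, the
// ONAME's Pos still says line 5, so HasUniquePos reports false and
// callers such as SetPos keep the surrounding statement's position.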
func SetPos(n Node) src.XPos {
lno := base.Pos
if n != nil && HasUniquePos(n) {
base.Pos = n.Pos()
}
return lno
}
// The result of InitExpr MUST be assigned back to n, e.g.
// n.X = InitExpr(init, n.X)
func InitExpr(init []Node, n Node) Node {
if len(init) == 0 {
return n
case OASOP:
return NewAssignOpStmt(pos, OXXX, nleft, nright)
case OBITNOT, ONEG, ONOT, OPLUS, ORECV,
OALIGNOF, OCAP, OCLOSE, OIMAG, OLEN, ONEW, ONEWOBJ,
OOFFSETOF, OPANIC, OREAL, OSIZEOF,
OCHECKNIL, OCFUNC, OIDATA, OITAB, OSPTR, OVARDEF, OVARKILL, OVARLIVE:
if nright != nil {
panic("unary nright")
}
return NewUnaryExpr(pos, op, nleft)
case OBLOCK:
return NewBlockStmt(pos, nil)
case OBREAK, OCONTINUE, OFALL, OGOTO, ORETJMP:
return NewBranchStmt(pos, op, nil)
case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
OAPPEND, ODELETE, OGETG, OMAKE, OPRINT, OPRINTN, ORECOVER:
return NewCallExpr(pos, op, nleft, nil)
case OCASE:
return NewCaseStmt(pos, nil, nil)
case OCONV, OCONVIFACE, OCONVNOP, ORUNESTR:
return NewConvExpr(pos, op, nil, nleft)
case ODCL, ODCLCONST, ODCLTYPE:
return NewDecl(pos, op, nleft)
case ODCLFUNC:
return NewFunc(pos)
case ODEFER, OGO:
return NewGoDeferStmt(pos, op, nleft)
case ODEREF:
return NewStarExpr(pos, nleft)
case ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OXDOT:
return NewSelectorExpr(pos, op, nleft, nil)
case ODOTTYPE, ODOTTYPE2:
var typ Ntype
if nright != nil {
typ = nright.(Ntype)
}
n := NewTypeAssertExpr(pos, nleft, typ)
if op != ODOTTYPE {
n.SetOp(op)
if MayBeShared(n) {
// Introduce OCONVNOP to hold init list.
old := n
n = NewConvExpr(base.Pos, OCONVNOP, nil, old)
n.SetType(old.Type())
n.SetTypecheck(1)
}
n.PtrInit().Prepend(init...)
n.SetHasCall(true)
return n
case OFOR:
return NewForStmt(pos, nil, nleft, nright, nil)
case OIF:
return NewIfStmt(pos, nleft, nil, nil)
case OINDEX, OINDEXMAP:
n := NewIndexExpr(pos, nleft, nright)
if op != OINDEX {
n.SetOp(op)
}
return n
case OINLMARK:
return NewInlineMarkStmt(pos, types.BADWIDTH)
case OKEY:
return NewKeyExpr(pos, nleft, nright)
case OSTRUCTKEY:
return NewStructKeyExpr(pos, nil, nleft)
case OLABEL:
return NewLabelStmt(pos, nil)
case OLITERAL, OTYPE, OIOTA:
return newNameAt(pos, op, nil)
case OMAKECHAN, OMAKEMAP, OMAKESLICE, OMAKESLICECOPY:
return NewMakeExpr(pos, op, nleft, nright)
case ONIL:
return NewNilExpr(pos)
case OPACK:
return NewPkgName(pos, nil, nil)
// OuterValue returns the outer value that a write to n affects:
// the containing struct or array.
func OuterValue(n Node) Node {
for {
switch nn := n; nn.Op() {
case OXDOT:
base.Fatalf("OXDOT in walk")
case ODOT:
nn := nn.(*SelectorExpr)
n = nn.X
continue
case OPAREN:
return NewParenExpr(pos, nleft)
case ORANGE:
return NewRangeStmt(pos, nil, nright, nil)
case ORESULT:
return NewResultExpr(pos, nil, types.BADWIDTH)
case ORETURN:
return NewReturnStmt(pos, nil)
case OSELECT:
return NewSelectStmt(pos, nil)
case OSEND:
return NewSendStmt(pos, nleft, nright)
case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
return NewSliceExpr(pos, op, nleft)
case OSLICEHEADER:
return NewSliceHeaderExpr(pos, nil, nleft, nil, nil)
case OSWITCH:
return NewSwitchStmt(pos, nleft, nil)
case OINLCALL:
return NewInlinedCallExpr(pos, nil, nil)
nn := nn.(*ParenExpr)
n = nn.X
continue
case OCONVNOP:
nn := nn.(*ConvExpr)
n = nn.X
continue
case OINDEX:
nn := nn.(*IndexExpr)
if nn.X.Type() != nil && nn.X.Type().IsArray() {
n = nn.X
continue
}
}
return n
}
}
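// A sketch, assuming b is an array field:
//
//	a.b[i].c = 0
//
// OuterValue steps through the ODOT and array OINDEX nodes and returns
// the ONAME a, since the store mutates a's contents; had b been a
// slice, the walk would stop at the index expression instead.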
const (
EscUnknown = iota
EscNone // Does not escape to heap, result, or parameters.
EscHeap // Reachable from the heap
EscNever // By construction will not escape.
)

View file

@ -8,18 +8,18 @@ func (n *AddStringExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *AddStringExpr) copy() Node {
c := *n
c.init = c.init.Copy()
c.List_ = c.List_.Copy()
c.List = c.List.Copy()
return &c
}
func (n *AddStringExpr) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.init, err, do)
err = maybeDoList(n.List_, err, do)
err = maybeDoList(n.List, err, do)
return err
}
func (n *AddStringExpr) editChildren(edit func(Node) Node) {
editList(n.init, edit)
editList(n.List_, edit)
editList(n.List, edit)
}
func (n *AddrExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
@ -154,18 +154,18 @@ func (n *BlockStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *BlockStmt) copy() Node {
c := *n
c.init = c.init.Copy()
c.List_ = c.List_.Copy()
c.List = c.List.Copy()
return &c
}
func (n *BlockStmt) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.init, err, do)
err = maybeDoList(n.List_, err, do)
err = maybeDoList(n.List, err, do)
return err
}
func (n *BlockStmt) editChildren(edit func(Node) Node) {
editList(n.init, edit)
editList(n.List_, edit)
editList(n.List, edit)
}
func (n *BranchStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
@ -189,7 +189,7 @@ func (n *CallExpr) copy() Node {
c.init = c.init.Copy()
c.Args = c.Args.Copy()
c.Rargs = c.Rargs.Copy()
c.Body_ = c.Body_.Copy()
c.Body = c.Body.Copy()
return &c
}
func (n *CallExpr) doChildren(do func(Node) error) error {
@ -198,7 +198,7 @@ func (n *CallExpr) doChildren(do func(Node) error) error {
err = maybeDo(n.X, err, do)
err = maybeDoList(n.Args, err, do)
err = maybeDoList(n.Rargs, err, do)
err = maybeDoList(n.Body_, err, do)
err = maybeDoList(n.Body, err, do)
return err
}
func (n *CallExpr) editChildren(edit func(Node) Node) {
@ -206,7 +206,7 @@ func (n *CallExpr) editChildren(edit func(Node) Node) {
n.X = maybeEdit(n.X, edit)
editList(n.Args, edit)
editList(n.Rargs, edit)
editList(n.Body_, edit)
editList(n.Body, edit)
}
func (n *CallPartExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
@ -231,25 +231,25 @@ func (n *CaseStmt) copy() Node {
c := *n
c.init = c.init.Copy()
c.Vars = c.Vars.Copy()
c.List_ = c.List_.Copy()
c.Body_ = c.Body_.Copy()
c.List = c.List.Copy()
c.Body = c.Body.Copy()
return &c
}
func (n *CaseStmt) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.init, err, do)
err = maybeDoList(n.Vars, err, do)
err = maybeDoList(n.List_, err, do)
err = maybeDoList(n.List, err, do)
err = maybeDo(n.Comm, err, do)
err = maybeDoList(n.Body_, err, do)
err = maybeDoList(n.Body, err, do)
return err
}
func (n *CaseStmt) editChildren(edit func(Node) Node) {
editList(n.init, edit)
editList(n.Vars, edit)
editList(n.List_, edit)
editList(n.List, edit)
n.Comm = maybeEdit(n.Comm, edit)
editList(n.Body_, edit)
editList(n.Body, edit)
}
func (n *ChanType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
@ -300,20 +300,20 @@ func (n *CompLitExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *CompLitExpr) copy() Node {
c := *n
c.init = c.init.Copy()
c.List_ = c.List_.Copy()
c.List = c.List.Copy()
return &c
}
func (n *CompLitExpr) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.init, err, do)
err = maybeDo(n.Ntype, err, do)
err = maybeDoList(n.List_, err, do)
err = maybeDoList(n.List, err, do)
return err
}
func (n *CompLitExpr) editChildren(edit func(Node) Node) {
editList(n.init, edit)
n.Ntype = toNtype(maybeEdit(n.Ntype, edit))
editList(n.List_, edit)
editList(n.List, edit)
}
func (n *ConstExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
@ -367,7 +367,7 @@ func (n *ForStmt) copy() Node {
c := *n
c.init = c.init.Copy()
c.Late = c.Late.Copy()
c.Body_ = c.Body_.Copy()
c.Body = c.Body.Copy()
return &c
}
func (n *ForStmt) doChildren(do func(Node) error) error {
@ -376,7 +376,7 @@ func (n *ForStmt) doChildren(do func(Node) error) error {
err = maybeDo(n.Cond, err, do)
err = maybeDoList(n.Late, err, do)
err = maybeDo(n.Post, err, do)
err = maybeDoList(n.Body_, err, do)
err = maybeDoList(n.Body, err, do)
return err
}
func (n *ForStmt) editChildren(edit func(Node) Node) {
@ -384,22 +384,22 @@ func (n *ForStmt) editChildren(edit func(Node) Node) {
n.Cond = maybeEdit(n.Cond, edit)
editList(n.Late, edit)
n.Post = maybeEdit(n.Post, edit)
editList(n.Body_, edit)
editList(n.Body, edit)
}
func (n *Func) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *Func) copy() Node {
c := *n
c.Body_ = c.Body_.Copy()
c.Body = c.Body.Copy()
return &c
}
func (n *Func) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.Body_, err, do)
err = maybeDoList(n.Body, err, do)
return err
}
func (n *Func) editChildren(edit func(Node) Node) {
editList(n.Body_, edit)
editList(n.Body, edit)
}
func (n *FuncType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
@ -461,7 +461,7 @@ func (n *IfStmt) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *IfStmt) copy() Node {
c := *n
c.init = c.init.Copy()
c.Body_ = c.Body_.Copy()
c.Body = c.Body.Copy()
c.Else = c.Else.Copy()
return &c
}
@ -469,14 +469,14 @@ func (n *IfStmt) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.init, err, do)
err = maybeDo(n.Cond, err, do)
err = maybeDoList(n.Body_, err, do)
err = maybeDoList(n.Body, err, do)
err = maybeDoList(n.Else, err, do)
return err
}
func (n *IfStmt) editChildren(edit func(Node) Node) {
editList(n.init, edit)
n.Cond = maybeEdit(n.Cond, edit)
editList(n.Body_, edit)
editList(n.Body, edit)
editList(n.Else, edit)
}
@ -518,20 +518,20 @@ func (n *InlinedCallExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *InlinedCallExpr) copy() Node {
c := *n
c.init = c.init.Copy()
c.Body_ = c.Body_.Copy()
c.Body = c.Body.Copy()
c.ReturnVars = c.ReturnVars.Copy()
return &c
}
func (n *InlinedCallExpr) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.init, err, do)
err = maybeDoList(n.Body_, err, do)
err = maybeDoList(n.Body, err, do)
err = maybeDoList(n.ReturnVars, err, do)
return err
}
func (n *InlinedCallExpr) editChildren(edit func(Node) Node) {
editList(n.init, edit)
editList(n.Body_, edit)
editList(n.Body, edit)
editList(n.ReturnVars, edit)
}
@ -726,7 +726,7 @@ func (n *RangeStmt) copy() Node {
c := *n
c.init = c.init.Copy()
c.Vars = c.Vars.Copy()
c.Body_ = c.Body_.Copy()
c.Body = c.Body.Copy()
return &c
}
func (n *RangeStmt) doChildren(do func(Node) error) error {
@ -734,14 +734,14 @@ func (n *RangeStmt) doChildren(do func(Node) error) error {
err = maybeDoList(n.init, err, do)
err = maybeDoList(n.Vars, err, do)
err = maybeDo(n.X, err, do)
err = maybeDoList(n.Body_, err, do)
err = maybeDoList(n.Body, err, do)
return err
}
func (n *RangeStmt) editChildren(edit func(Node) Node) {
editList(n.init, edit)
editList(n.Vars, edit)
n.X = maybeEdit(n.X, edit)
editList(n.Body_, edit)
editList(n.Body, edit)
}
func (n *ResultExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
@ -838,40 +838,40 @@ func (n *SliceExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *SliceExpr) copy() Node {
c := *n
c.init = c.init.Copy()
c.List_ = c.List_.Copy()
c.List = c.List.Copy()
return &c
}
func (n *SliceExpr) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.init, err, do)
err = maybeDo(n.X, err, do)
err = maybeDoList(n.List_, err, do)
err = maybeDoList(n.List, err, do)
return err
}
func (n *SliceExpr) editChildren(edit func(Node) Node) {
editList(n.init, edit)
n.X = maybeEdit(n.X, edit)
editList(n.List_, edit)
editList(n.List, edit)
}
func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *SliceHeaderExpr) copy() Node {
c := *n
c.init = c.init.Copy()
c.LenCap_ = c.LenCap_.Copy()
c.LenCap = c.LenCap.Copy()
return &c
}
func (n *SliceHeaderExpr) doChildren(do func(Node) error) error {
var err error
err = maybeDoList(n.init, err, do)
err = maybeDo(n.Ptr, err, do)
err = maybeDoList(n.LenCap_, err, do)
err = maybeDoList(n.LenCap, err, do)
return err
}
func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) {
editList(n.init, edit)
n.Ptr = maybeEdit(n.Ptr, edit)
editList(n.LenCap_, edit)
editList(n.LenCap, edit)
}
func (n *SliceType) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }

View file

@ -2,9 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import "cmd/compile/internal/ir"
package ir
// Strongly connected components.
//
@ -32,13 +30,13 @@ import "cmd/compile/internal/ir"
// when analyzing a set of mutually recursive functions.
type bottomUpVisitor struct {
analyze func([]*ir.Func, bool)
analyze func([]*Func, bool)
visitgen uint32
nodeID map[*ir.Func]uint32
stack []*ir.Func
nodeID map[*Func]uint32
stack []*Func
}
// visitBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
// VisitFuncsBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
// It calls analyze with successive groups of functions, working from
// the bottom of the call graph upward. Each time analyze is called with
// a list of functions, every function on that list only calls other functions
@ -51,21 +49,21 @@ type bottomUpVisitor struct {
// If recursive is false, the list consists of only a single function and its closures.
// If recursive is true, the list may still contain only a single function,
// if that function is itself recursive.
func visitBottomUp(list []ir.Node, analyze func(list []*ir.Func, recursive bool)) {
func VisitFuncsBottomUp(list []Node, analyze func(list []*Func, recursive bool)) {
var v bottomUpVisitor
v.analyze = analyze
v.nodeID = make(map[*ir.Func]uint32)
v.nodeID = make(map[*Func]uint32)
for _, n := range list {
if n.Op() == ir.ODCLFUNC {
n := n.(*ir.Func)
if !n.Func().IsHiddenClosure() {
if n.Op() == ODCLFUNC {
n := n.(*Func)
if !n.IsHiddenClosure() {
v.visit(n)
}
}
}
}
func (v *bottomUpVisitor) visit(n *ir.Func) uint32 {
func (v *bottomUpVisitor) visit(n *Func) uint32 {
if id := v.nodeID[n]; id > 0 {
// already visited
return id
@ -78,42 +76,46 @@ func (v *bottomUpVisitor) visit(n *ir.Func) uint32 {
min := v.visitgen
v.stack = append(v.stack, n)
ir.Visit(n, func(n ir.Node) {
Visit(n, func(n Node) {
switch n.Op() {
case ir.ONAME:
if n.Class() == ir.PFUNC {
case ONAME:
n := n.(*Name)
if n.Class_ == PFUNC {
if n != nil && n.Name().Defn != nil {
if m := v.visit(n.Name().Defn.(*ir.Func)); m < min {
if m := v.visit(n.Name().Defn.(*Func)); m < min {
min = m
}
}
}
case ir.OMETHEXPR:
fn := methodExprName(n)
case OMETHEXPR:
n := n.(*MethodExpr)
fn := MethodExprName(n)
if fn != nil && fn.Defn != nil {
if m := v.visit(fn.Defn.(*ir.Func)); m < min {
if m := v.visit(fn.Defn.(*Func)); m < min {
min = m
}
}
case ir.ODOTMETH:
fn := methodExprName(n)
if fn != nil && fn.Op() == ir.ONAME && fn.Class() == ir.PFUNC && fn.Defn != nil {
if m := v.visit(fn.Defn.(*ir.Func)); m < min {
case ODOTMETH:
n := n.(*SelectorExpr)
fn := MethodExprName(n)
if fn != nil && fn.Op() == ONAME && fn.Class_ == PFUNC && fn.Defn != nil {
if m := v.visit(fn.Defn.(*Func)); m < min {
min = m
}
}
case ir.OCALLPART:
fn := ir.AsNode(callpartMethod(n).Nname)
if fn != nil && fn.Op() == ir.ONAME {
if fn := fn.(*ir.Name); fn.Class() == ir.PFUNC && fn.Name().Defn != nil {
if m := v.visit(fn.Name().Defn.(*ir.Func)); m < min {
case OCALLPART:
n := n.(*CallPartExpr)
fn := AsNode(n.Method.Nname)
if fn != nil && fn.Op() == ONAME {
if fn := fn.(*Name); fn.Class_ == PFUNC && fn.Name().Defn != nil {
if m := v.visit(fn.Name().Defn.(*Func)); m < min {
min = m
}
}
}
case ir.OCLOSURE:
n := n.(*ir.ClosureExpr)
if m := v.visit(n.Func()); m < min {
case OCLOSURE:
n := n.(*ClosureExpr)
if m := v.visit(n.Func); m < min {
min = m
}
}
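For callers outside package ir, usage after this rename changes only in the package qualifier. A minimal sketch, assuming hypothetical names typecheckedFuncs and analyzeFunc:

	func analyzeAll(typecheckedFuncs []ir.Node) {
		ir.VisitFuncsBottomUp(typecheckedFuncs, func(list []*ir.Func, recursive bool) {
			// Everything a function in list calls is either in list itself
			// (recursive == true) or was analyzed in an earlier batch.
			for _, fn := range list {
				analyzeFunc(fn, recursive)
			}
		})
	}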

View file

@ -30,9 +30,6 @@ func NewDecl(pos src.XPos, op Op, x Node) *Decl {
func (*Decl) isStmt() {}
func (n *Decl) Left() Node { return n.X }
func (n *Decl) SetLeft(x Node) { n.X = x }
// A Stmt is a Node that can appear as a statement.
// This includes statement-like expressions such as f().
//
@ -78,15 +75,6 @@ func NewAssignListStmt(pos src.XPos, op Op, lhs, rhs []Node) *AssignListStmt {
return n
}
func (n *AssignListStmt) List() Nodes { return n.Lhs }
func (n *AssignListStmt) PtrList() *Nodes { return &n.Lhs }
func (n *AssignListStmt) SetList(x Nodes) { n.Lhs = x }
func (n *AssignListStmt) Rlist() Nodes { return n.Rhs }
func (n *AssignListStmt) PtrRlist() *Nodes { return &n.Rhs }
func (n *AssignListStmt) SetRlist(x Nodes) { n.Rhs = x }
func (n *AssignListStmt) Colas() bool { return n.Def }
func (n *AssignListStmt) SetColas(x bool) { n.Def = x }
func (n *AssignListStmt) SetOp(op Op) {
switch op {
default:
@ -112,13 +100,6 @@ func NewAssignStmt(pos src.XPos, x, y Node) *AssignStmt {
return n
}
func (n *AssignStmt) Left() Node { return n.X }
func (n *AssignStmt) SetLeft(x Node) { n.X = x }
func (n *AssignStmt) Right() Node { return n.Y }
func (n *AssignStmt) SetRight(y Node) { n.Y = y }
func (n *AssignStmt) Colas() bool { return n.Def }
func (n *AssignStmt) SetColas(x bool) { n.Def = x }
func (n *AssignStmt) SetOp(op Op) {
switch op {
default:
@ -145,21 +126,13 @@ func NewAssignOpStmt(pos src.XPos, asOp Op, x, y Node) *AssignOpStmt {
return n
}
func (n *AssignOpStmt) Left() Node { return n.X }
func (n *AssignOpStmt) SetLeft(x Node) { n.X = x }
func (n *AssignOpStmt) Right() Node { return n.Y }
func (n *AssignOpStmt) SetRight(y Node) { n.Y = y }
func (n *AssignOpStmt) SubOp() Op { return n.AsOp }
func (n *AssignOpStmt) SetSubOp(x Op) { n.AsOp = x }
func (n *AssignOpStmt) Implicit() bool { return n.IncDec }
func (n *AssignOpStmt) SetImplicit(b bool) { n.IncDec = b }
func (n *AssignOpStmt) Type() *types.Type { return n.typ }
func (n *AssignOpStmt) SetType(x *types.Type) { n.typ = x }
// A BlockStmt is a block: { List }.
type BlockStmt struct {
miniStmt
List_ Nodes
List Nodes
}
func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt {
@ -172,14 +145,10 @@ func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt {
}
}
n.op = OBLOCK
n.List_.Set(list)
n.List.Set(list)
return n
}
func (n *BlockStmt) List() Nodes { return n.List_ }
func (n *BlockStmt) PtrList() *Nodes { return &n.List_ }
func (n *BlockStmt) SetList(x Nodes) { n.List_ = x }
// A BranchStmt is a break, continue, fallthrough, or goto statement.
//
// For back-end code generation, Op may also be RETJMP (return+jump),
@ -203,38 +172,25 @@ func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt {
}
func (n *BranchStmt) Sym() *types.Sym { return n.Label }
func (n *BranchStmt) SetSym(sym *types.Sym) { n.Label = sym }
// A CaseStmt is a case statement in a switch or select: case List: Body.
type CaseStmt struct {
miniStmt
Vars Nodes // declared variable for this case in type switch
List_ Nodes // list of expressions for switch, early select
List Nodes // list of expressions for switch, early select
Comm Node // communication case (Exprs[0]) after select is type-checked
Body_ Nodes
Body Nodes
}
func NewCaseStmt(pos src.XPos, list, body []Node) *CaseStmt {
n := &CaseStmt{}
n.pos = pos
n.op = OCASE
n.List_.Set(list)
n.Body_.Set(body)
n.List.Set(list)
n.Body.Set(body)
return n
}
func (n *CaseStmt) List() Nodes { return n.List_ }
func (n *CaseStmt) PtrList() *Nodes { return &n.List_ }
func (n *CaseStmt) SetList(x Nodes) { n.List_ = x }
func (n *CaseStmt) Body() Nodes { return n.Body_ }
func (n *CaseStmt) PtrBody() *Nodes { return &n.Body_ }
func (n *CaseStmt) SetBody(x Nodes) { n.Body_ = x }
func (n *CaseStmt) Rlist() Nodes { return n.Vars }
func (n *CaseStmt) PtrRlist() *Nodes { return &n.Vars }
func (n *CaseStmt) SetRlist(x Nodes) { n.Vars = x }
func (n *CaseStmt) Left() Node { return n.Comm }
func (n *CaseStmt) SetLeft(x Node) { n.Comm = x }
// A ForStmt is a non-range for loop: for Init; Cond; Post { Body }
// Op can be OFOR or OFORUNTIL (!Cond).
type ForStmt struct {
@ -243,8 +199,8 @@ type ForStmt struct {
Cond Node
Late Nodes
Post Node
Body_ Nodes
HasBreak_ bool
Body Nodes
HasBreak bool
}
func NewForStmt(pos src.XPos, init []Node, cond, post Node, body []Node) *ForStmt {
@ -252,25 +208,10 @@ func NewForStmt(pos src.XPos, init []Node, cond, post Node, body []Node) *ForStm
n.pos = pos
n.op = OFOR
n.init.Set(init)
n.Body_.Set(body)
n.Body.Set(body)
return n
}
func (n *ForStmt) Sym() *types.Sym { return n.Label }
func (n *ForStmt) SetSym(x *types.Sym) { n.Label = x }
func (n *ForStmt) Left() Node { return n.Cond }
func (n *ForStmt) SetLeft(x Node) { n.Cond = x }
func (n *ForStmt) Right() Node { return n.Post }
func (n *ForStmt) SetRight(x Node) { n.Post = x }
func (n *ForStmt) Body() Nodes { return n.Body_ }
func (n *ForStmt) PtrBody() *Nodes { return &n.Body_ }
func (n *ForStmt) SetBody(x Nodes) { n.Body_ = x }
func (n *ForStmt) List() Nodes { return n.Late }
func (n *ForStmt) PtrList() *Nodes { return &n.Late }
func (n *ForStmt) SetList(x Nodes) { n.Late = x }
func (n *ForStmt) HasBreak() bool { return n.HasBreak_ }
func (n *ForStmt) SetHasBreak(b bool) { n.HasBreak_ = b }
func (n *ForStmt) SetOp(op Op) {
if op != OFOR && op != OFORUNTIL {
panic(n.no("SetOp " + op.String()))
@ -300,38 +241,24 @@ func NewGoDeferStmt(pos src.XPos, op Op, call Node) *GoDeferStmt {
return n
}
func (n *GoDeferStmt) Left() Node { return n.Call }
func (n *GoDeferStmt) SetLeft(x Node) { n.Call = x }
// An IfStmt is an if statement: if Init; Cond { Then } else { Else }.
type IfStmt struct {
miniStmt
Cond Node
Body_ Nodes
Body Nodes
Else Nodes
Likely_ bool // code layout hint
Likely bool // code layout hint
}
func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt {
n := &IfStmt{Cond: cond}
n.pos = pos
n.op = OIF
n.Body_.Set(body)
n.Body.Set(body)
n.Else.Set(els)
return n
}
func (n *IfStmt) Left() Node { return n.Cond }
func (n *IfStmt) SetLeft(x Node) { n.Cond = x }
func (n *IfStmt) Body() Nodes { return n.Body_ }
func (n *IfStmt) PtrBody() *Nodes { return &n.Body_ }
func (n *IfStmt) SetBody(x Nodes) { n.Body_ = x }
func (n *IfStmt) Rlist() Nodes { return n.Else }
func (n *IfStmt) PtrRlist() *Nodes { return &n.Else }
func (n *IfStmt) SetRlist(x Nodes) { n.Else = x }
func (n *IfStmt) Likely() bool { return n.Likely_ }
func (n *IfStmt) SetLikely(x bool) { n.Likely_ = x }
// An InlineMarkStmt is a marker placed just before an inlined body.
type InlineMarkStmt struct {
miniStmt
@ -362,7 +289,6 @@ func NewLabelStmt(pos src.XPos, label *types.Sym) *LabelStmt {
}
func (n *LabelStmt) Sym() *types.Sym { return n.Label }
func (n *LabelStmt) SetSym(x *types.Sym) { n.Label = x }
// A RangeStmt is a range loop: for Vars = range X { Body }
// Op is ORANGE.
@ -372,8 +298,8 @@ type RangeStmt struct {
Vars Nodes // TODO(rsc): Replace with Key, Value Node
Def bool
X Node
Body_ Nodes
HasBreak_ bool
Body Nodes
HasBreak bool
typ *types.Type // TODO(rsc): Remove - use X.Type() instead
Prealloc *Name
}
@ -383,24 +309,10 @@ func NewRangeStmt(pos src.XPos, vars []Node, x Node, body []Node) *RangeStmt {
n.pos = pos
n.op = ORANGE
n.Vars.Set(vars)
n.Body_.Set(body)
n.Body.Set(body)
return n
}
func (n *RangeStmt) Sym() *types.Sym { return n.Label }
func (n *RangeStmt) SetSym(x *types.Sym) { n.Label = x }
func (n *RangeStmt) Right() Node { return n.X }
func (n *RangeStmt) SetRight(x Node) { n.X = x }
func (n *RangeStmt) Body() Nodes { return n.Body_ }
func (n *RangeStmt) PtrBody() *Nodes { return &n.Body_ }
func (n *RangeStmt) SetBody(x Nodes) { n.Body_ = x }
func (n *RangeStmt) List() Nodes { return n.Vars }
func (n *RangeStmt) PtrList() *Nodes { return &n.Vars }
func (n *RangeStmt) SetList(x Nodes) { n.Vars = x }
func (n *RangeStmt) HasBreak() bool { return n.HasBreak_ }
func (n *RangeStmt) SetHasBreak(b bool) { n.HasBreak_ = b }
func (n *RangeStmt) Colas() bool { return n.Def }
func (n *RangeStmt) SetColas(b bool) { n.Def = b }
func (n *RangeStmt) Type() *types.Type { return n.typ }
func (n *RangeStmt) SetType(x *types.Type) { n.typ = x }
@ -422,17 +334,13 @@ func NewReturnStmt(pos src.XPos, results []Node) *ReturnStmt {
func (n *ReturnStmt) Orig() Node { return n.orig }
func (n *ReturnStmt) SetOrig(x Node) { n.orig = x }
func (n *ReturnStmt) List() Nodes { return n.Results }
func (n *ReturnStmt) PtrList() *Nodes { return &n.Results }
func (n *ReturnStmt) SetList(x Nodes) { n.Results = x }
func (n *ReturnStmt) IsDDD() bool { return false } // typecheckargs asks
// A SelectStmt is a select statement: select { Cases }.
type SelectStmt struct {
miniStmt
Label *types.Sym
Cases Nodes
HasBreak_ bool
HasBreak bool
// TODO(rsc): Instead of recording here, replace with a block?
Compiled Nodes // compiled form, after walkswitch
@ -446,17 +354,6 @@ func NewSelectStmt(pos src.XPos, cases []Node) *SelectStmt {
return n
}
func (n *SelectStmt) List() Nodes { return n.Cases }
func (n *SelectStmt) PtrList() *Nodes { return &n.Cases }
func (n *SelectStmt) SetList(x Nodes) { n.Cases = x }
func (n *SelectStmt) Sym() *types.Sym { return n.Label }
func (n *SelectStmt) SetSym(x *types.Sym) { n.Label = x }
func (n *SelectStmt) HasBreak() bool { return n.HasBreak_ }
func (n *SelectStmt) SetHasBreak(x bool) { n.HasBreak_ = x }
func (n *SelectStmt) Body() Nodes { return n.Compiled }
func (n *SelectStmt) PtrBody() *Nodes { return &n.Compiled }
func (n *SelectStmt) SetBody(x Nodes) { n.Compiled = x }
// A SendStmt is a send statement: X <- Y.
type SendStmt struct {
miniStmt
@ -471,18 +368,13 @@ func NewSendStmt(pos src.XPos, ch, value Node) *SendStmt {
return n
}
func (n *SendStmt) Left() Node { return n.Chan }
func (n *SendStmt) SetLeft(x Node) { n.Chan = x }
func (n *SendStmt) Right() Node { return n.Value }
func (n *SendStmt) SetRight(y Node) { n.Value = y }
// A SwitchStmt is a switch statement: switch Init; Expr { Cases }.
type SwitchStmt struct {
miniStmt
Tag Node
Cases Nodes // list of *CaseStmt
Label *types.Sym
HasBreak_ bool
HasBreak bool
// TODO(rsc): Instead of recording here, replace with a block?
Compiled Nodes // compiled form, after walkswitch
@ -496,19 +388,6 @@ func NewSwitchStmt(pos src.XPos, tag Node, cases []Node) *SwitchStmt {
return n
}
func (n *SwitchStmt) Left() Node { return n.Tag }
func (n *SwitchStmt) SetLeft(x Node) { n.Tag = x }
func (n *SwitchStmt) List() Nodes { return n.Cases }
func (n *SwitchStmt) PtrList() *Nodes { return &n.Cases }
func (n *SwitchStmt) SetList(x Nodes) { n.Cases = x }
func (n *SwitchStmt) Body() Nodes { return n.Compiled }
func (n *SwitchStmt) PtrBody() *Nodes { return &n.Compiled }
func (n *SwitchStmt) SetBody(x Nodes) { n.Compiled = x }
func (n *SwitchStmt) Sym() *types.Sym { return n.Label }
func (n *SwitchStmt) SetSym(x *types.Sym) { n.Label = x }
func (n *SwitchStmt) HasBreak() bool { return n.HasBreak_ }
func (n *SwitchStmt) SetHasBreak(x bool) { n.HasBreak_ = x }
// A TypeSwitchGuard is the [Name :=] X.(type) in a type switch.
type TypeSwitchGuard struct {
miniNode
@ -523,19 +402,3 @@ func NewTypeSwitchGuard(pos src.XPos, tag *Ident, x Node) *TypeSwitchGuard {
n.op = OTYPESW
return n
}
func (n *TypeSwitchGuard) Left() Node {
if n.Tag == nil {
return nil
}
return n.Tag
}
func (n *TypeSwitchGuard) SetLeft(x Node) {
if x == nil {
n.Tag = nil
return
}
n.Tag = x.(*Ident)
}
func (n *TypeSwitchGuard) Right() Node { return n.X }
func (n *TypeSwitchGuard) SetRight(x Node) { n.X = x }

View file

@ -0,0 +1,82 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ir
import (
"cmd/compile/internal/types"
"cmd/internal/obj"
)
// Names holds known names.
var Names struct {
Staticuint64s *Name
Zerobase *Name
}
// Syms holds known symbols.
var Syms struct {
AssertE2I *obj.LSym
AssertE2I2 *obj.LSym
AssertI2I *obj.LSym
AssertI2I2 *obj.LSym
Deferproc *obj.LSym
DeferprocStack *obj.LSym
Deferreturn *obj.LSym
Duffcopy *obj.LSym
Duffzero *obj.LSym
GCWriteBarrier *obj.LSym
Goschedguarded *obj.LSym
Growslice *obj.LSym
Msanread *obj.LSym
Msanwrite *obj.LSym
Msanmove *obj.LSym
Newobject *obj.LSym
Newproc *obj.LSym
Panicdivide *obj.LSym
Panicshift *obj.LSym
PanicdottypeE *obj.LSym
PanicdottypeI *obj.LSym
Panicnildottype *obj.LSym
Panicoverflow *obj.LSym
Raceread *obj.LSym
Racereadrange *obj.LSym
Racewrite *obj.LSym
Racewriterange *obj.LSym
// Wasm
SigPanic *obj.LSym
Typedmemclr *obj.LSym
Typedmemmove *obj.LSym
Udiv *obj.LSym
WriteBarrier *obj.LSym
Zerobase *obj.LSym
ARM64HasATOMICS *obj.LSym
ARMHasVFPv4 *obj.LSym
X86HasFMA *obj.LSym
X86HasPOPCNT *obj.LSym
X86HasSSE41 *obj.LSym
// Wasm
WasmDiv *obj.LSym
// Wasm
WasmMove *obj.LSym
// Wasm
WasmZero *obj.LSym
// Wasm
WasmTruncS *obj.LSym
// Wasm
WasmTruncU *obj.LSym
}
// Pkgs holds known packages.
var Pkgs struct {
Go *types.Pkg
Itab *types.Pkg
Itablink *types.Pkg
Map *types.Pkg
Msan *types.Pkg
Race *types.Pkg
Runtime *types.Pkg
Track *types.Pkg
Unsafe *types.Pkg
}

View file

@ -106,7 +106,7 @@ func DoChildren(n Node, do func(Node) error) error {
// Note that DoList only calls do on the nodes in the list, not their children.
// If x's children should be processed, do(x) must call DoChildren(x, do) itself.
func DoList(list Nodes, do func(Node) error) error {
for _, x := range list.Slice() {
for _, x := range list {
if x != nil {
if err := do(x); err != nil {
return err
@ -131,7 +131,7 @@ func Visit(n Node, visit func(Node)) {
// VisitList calls Visit(x, visit) for each node x in the list.
func VisitList(list Nodes, visit func(Node)) {
for _, x := range list.Slice() {
for _, x := range list {
Visit(x, visit)
}
}
@ -163,7 +163,7 @@ func Any(n Node, cond func(Node) bool) bool {
// Otherwise, AnyList returns false after calling Any(x, cond)
// for every x in the list.
func AnyList(list Nodes, cond func(Node) bool) bool {
for _, x := range list.Slice() {
for _, x := range list {
if Any(x, cond) {
return true
}
@ -217,8 +217,8 @@ func EditChildren(n Node, edit func(Node) Node) {
// Note that editList only calls edit on the nodes in the list, not their children.
// If x's children should be processed, edit(x) must call EditChildren(x, edit) itself.
func editList(list Nodes, edit func(Node) Node) {
s := list.Slice()
for i, x := range list.Slice() {
s := list
for i, x := range list {
if x != nil {
s[i] = edit(x)
}
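Because DoList is shallow, a full traversal is built by having the callback recurse through DoChildren. A sketch of that pattern (countNodes is a hypothetical helper, not part of this CL):

	func countNodes(root ir.Node) int {
		n := 0
		var do func(ir.Node) error
		do = func(x ir.Node) error {
			n++
			return ir.DoChildren(x, do) // recurse; DoList and DoChildren alone are shallow
		}
		do(root)
		return n
	}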

View file

@ -0,0 +1,97 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package liveness
import "cmd/compile/internal/bitvec"
// FNV-1 hash function constants.
const (
h0 = 2166136261
hp = 16777619
)
// bvecSet is a set of bvecs, in initial insertion order.
type bvecSet struct {
index []int // hash -> uniq index. -1 indicates empty slot.
uniq []bitvec.BitVec // unique bvecs, in insertion order
}
func (m *bvecSet) grow() {
// Allocate new index.
n := len(m.index) * 2
if n == 0 {
n = 32
}
newIndex := make([]int, n)
for i := range newIndex {
newIndex[i] = -1
}
// Rehash into newIndex.
for i, bv := range m.uniq {
h := hashbitmap(h0, bv) % uint32(len(newIndex))
for {
j := newIndex[h]
if j < 0 {
newIndex[h] = i
break
}
h++
if h == uint32(len(newIndex)) {
h = 0
}
}
}
m.index = newIndex
}
// add adds bv to the set and returns its index in m.extractUnique.
// The caller must not modify bv after this.
func (m *bvecSet) add(bv bitvec.BitVec) int {
if len(m.uniq)*4 >= len(m.index) {
m.grow()
}
index := m.index
h := hashbitmap(h0, bv) % uint32(len(index))
for {
j := index[h]
if j < 0 {
// New bvec.
index[h] = len(m.uniq)
m.uniq = append(m.uniq, bv)
return len(m.uniq) - 1
}
jlive := m.uniq[j]
if bv.Eq(jlive) {
// Existing bvec.
return j
}
h++
if h == uint32(len(index)) {
h = 0
}
}
}
// extractUnique returns the slice of unique bit vectors in m, as
// indexed by the result of bvecSet.add.
func (m *bvecSet) extractUnique() []bitvec.BitVec {
return m.uniq
}
func hashbitmap(h uint32, bv bitvec.BitVec) uint32 {
n := int((bv.N + 31) / 32)
for i := 0; i < n; i++ {
w := bv.B[i]
h = (h * hp) ^ (w & 0xff)
h = (h * hp) ^ ((w >> 8) & 0xff)
h = (h * hp) ^ ((w >> 16) & 0xff)
h = (h * hp) ^ ((w >> 24) & 0xff)
}
return h
}
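For reference, hashbitmap is plain FNV-1: multiply by the prime, then XOR in the next byte. The same step over a []byte, using the h0 and hp constants above (standalone sketch, not part of this CL):

	func fnv1(data []byte) uint32 {
		h := uint32(2166136261) // h0, the FNV offset basis
		for _, b := range data {
			h = (h * 16777619) ^ uint32(b) // hp, the FNV prime: multiply, then XOR
		}
		return h
	}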

View file

@ -12,18 +12,21 @@
//
// Each level includes the earlier output as well.
package gc
package liveness
import (
"crypto/md5"
"fmt"
"strings"
"cmd/compile/internal/base"
"cmd/compile/internal/bitvec"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"crypto/md5"
"fmt"
"strings"
)
// OpVarDef is an annotation for the liveness analysis, marking a place
@ -81,117 +84,87 @@ import (
// so the compiler can allocate two temps to the same location. Here it's now
// useless, since the implementation of stack objects.
// BlockEffects summarizes the liveness effects on an SSA block.
type BlockEffects struct {
// blockEffects summarizes the liveness effects on an SSA block.
type blockEffects struct {
// Computed during Liveness.prologue using only the content of
// individual blocks:
//
// uevar: upward exposed variables (used before set in block)
// varkill: killed variables (set in block)
uevar bvec
varkill bvec
uevar bitvec.BitVec
varkill bitvec.BitVec
// Computed during Liveness.solve using control flow information:
//
// livein: variables live at block entry
// liveout: variables live at block exit
livein bvec
liveout bvec
livein bitvec.BitVec
liveout bitvec.BitVec
}
// A collection of global state used by liveness analysis.
type Liveness struct {
type liveness struct {
fn *ir.Func
f *ssa.Func
vars []*ir.Name
idx map[*ir.Name]int32
stkptrsize int64
be []BlockEffects
be []blockEffects
// allUnsafe indicates that all points in this function are
// unsafe-points.
allUnsafe bool
// unsafePoints bit i is set if Value ID i is an unsafe-point
// (preemption is not allowed). Only valid if !allUnsafe.
unsafePoints bvec
unsafePoints bitvec.BitVec
// An array with a bit vector for each safe point in the
// current Block during Liveness.epilogue. Indexed in Value
// order for that block. Additionally, for the entry block
// livevars[0] is the entry bitmap. Liveness.compact moves
// these to stackMaps.
livevars []bvec
livevars []bitvec.BitVec
// livenessMap maps from safe points (i.e., CALLs) to their
// liveness map indexes.
livenessMap LivenessMap
livenessMap Map
stackMapSet bvecSet
stackMaps []bvec
stackMaps []bitvec.BitVec
cache progeffectscache
}
// LivenessMap maps from *ssa.Value to LivenessIndex.
type LivenessMap struct {
vals map[ssa.ID]LivenessIndex
// The set of live, pointer-containing variables at the deferreturn
// Map maps from *ssa.Value to LivenessIndex.
type Map struct {
Vals map[ssa.ID]objw.LivenessIndex
// The set of live, pointer-containing variables at the DeferReturn
// call (only set when open-coded defers are used).
deferreturn LivenessIndex
DeferReturn objw.LivenessIndex
}
func (m *LivenessMap) reset() {
if m.vals == nil {
m.vals = make(map[ssa.ID]LivenessIndex)
func (m *Map) reset() {
if m.Vals == nil {
m.Vals = make(map[ssa.ID]objw.LivenessIndex)
} else {
for k := range m.vals {
delete(m.vals, k)
for k := range m.Vals {
delete(m.Vals, k)
}
}
m.deferreturn = LivenessDontCare
m.DeferReturn = objw.LivenessDontCare
}
func (m *LivenessMap) set(v *ssa.Value, i LivenessIndex) {
m.vals[v.ID] = i
func (m *Map) set(v *ssa.Value, i objw.LivenessIndex) {
m.Vals[v.ID] = i
}
func (m LivenessMap) Get(v *ssa.Value) LivenessIndex {
func (m Map) Get(v *ssa.Value) objw.LivenessIndex {
// If v isn't in the map, then it's a "don't care" and not an
// unsafe-point.
if idx, ok := m.vals[v.ID]; ok {
if idx, ok := m.Vals[v.ID]; ok {
return idx
}
return LivenessIndex{StackMapDontCare, false}
}
// LivenessIndex stores the liveness map information for a Value.
type LivenessIndex struct {
stackMapIndex int
// isUnsafePoint indicates that this is an unsafe-point.
//
// Note that it's possible for a call Value to have a stack
// map while also being an unsafe-point. This means it cannot
// be preempted at this instruction, but that a preemption or
// stack growth may happen in the called function.
isUnsafePoint bool
}
// LivenessDontCare indicates that the liveness information doesn't
// matter. Currently it is used in deferreturn liveness when we don't
// actually need it. It should never be emitted to the PCDATA stream.
var LivenessDontCare = LivenessIndex{StackMapDontCare, true}
// StackMapDontCare indicates that the stack map index at a Value
// doesn't matter.
//
// This is a sentinel value that should never be emitted to the PCDATA
// stream. We use -1000 because that's obviously never a valid stack
// index (but -1 is).
const StackMapDontCare = -1000
func (idx LivenessIndex) StackMapValid() bool {
return idx.stackMapIndex != StackMapDontCare
return objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: false}
}
type progeffectscache struct {
@ -200,18 +173,18 @@ type progeffectscache struct {
initialized bool
}
// livenessShouldTrack reports whether the liveness analysis
// ShouldTrack reports whether the liveness analysis
// should track the variable n.
// We don't care about variables that have no pointers,
// nor do we care about non-local variables,
// nor do we care about empty structs (handled by the pointer check),
// nor do we care about the fake PAUTOHEAP variables.
func livenessShouldTrack(nn ir.Node) bool {
func ShouldTrack(nn ir.Node) bool {
if nn.Op() != ir.ONAME {
return false
}
n := nn.(*ir.Name)
return (n.Class() == ir.PAUTO || n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) && n.Type().HasPointers()
return (n.Class_ == ir.PAUTO || n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT) && n.Type().HasPointers()
}
// getvariables returns the list of on-stack variables that we need to track
@ -219,7 +192,7 @@ func livenessShouldTrack(nn ir.Node) bool {
func getvariables(fn *ir.Func) ([]*ir.Name, map[*ir.Name]int32) {
var vars []*ir.Name
for _, n := range fn.Dcl {
if livenessShouldTrack(n) {
if ShouldTrack(n) {
vars = append(vars, n)
}
}
@ -230,7 +203,7 @@ func getvariables(fn *ir.Func) ([]*ir.Name, map[*ir.Name]int32) {
return vars, idx
}
func (lv *Liveness) initcache() {
func (lv *liveness) initcache() {
if lv.cache.initialized {
base.Fatalf("liveness cache initialized twice")
return
@ -238,7 +211,7 @@ func (lv *Liveness) initcache() {
lv.cache.initialized = true
for i, node := range lv.vars {
switch node.Class() {
switch node.Class_ {
case ir.PPARAM:
// A return instruction with a p.to is a tail return, which brings
// the stack pointer back up (if it ever went down) and then jumps
@ -274,7 +247,7 @@ const (
// valueEffects returns the index of a variable in lv.vars and the
// liveness effects v has on that variable.
// If v does not affect any tracked variables, it returns -1, 0.
func (lv *Liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
func (lv *liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
n, e := affectedNode(v)
if e == 0 || n == nil || n.Op() != ir.ONAME { // cheapest checks first
return -1, 0
@ -321,10 +294,10 @@ func affectedNode(v *ssa.Value) (ir.Node, ssa.SymEffect) {
// Special cases.
switch v.Op {
case ssa.OpLoadReg:
n, _ := AutoVar(v.Args[0])
n, _ := ssa.AutoVar(v.Args[0])
return n, ssa.SymRead
case ssa.OpStoreReg:
n, _ := AutoVar(v)
n, _ := ssa.AutoVar(v)
return n, ssa.SymWrite
case ssa.OpVarLive:
@ -332,7 +305,7 @@ func affectedNode(v *ssa.Value) (ir.Node, ssa.SymEffect) {
case ssa.OpVarDef, ssa.OpVarKill:
return v.Aux.(*ir.Name), ssa.SymWrite
case ssa.OpKeepAlive:
n, _ := AutoVar(v.Args[0])
n, _ := ssa.AutoVar(v.Args[0])
return n, ssa.SymRead
}
@ -354,15 +327,15 @@ func affectedNode(v *ssa.Value) (ir.Node, ssa.SymEffect) {
}
type livenessFuncCache struct {
be []BlockEffects
livenessMap LivenessMap
be []blockEffects
livenessMap Map
}
// Constructs a new liveness structure used to hold the global state of the
// liveness computation. The vars argument is the slice of *ir.Name being
// tracked, and idx maps each of those names to its index in vars.
func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int32, stkptrsize int64) *Liveness {
lv := &Liveness{
func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int32, stkptrsize int64) *liveness {
lv := &liveness{
fn: fn,
f: f,
vars: vars,
@ -380,23 +353,23 @@ func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int
if cap(lc.be) >= f.NumBlocks() {
lv.be = lc.be[:f.NumBlocks()]
}
lv.livenessMap = LivenessMap{vals: lc.livenessMap.vals, deferreturn: LivenessDontCare}
lc.livenessMap.vals = nil
lv.livenessMap = Map{Vals: lc.livenessMap.Vals, DeferReturn: objw.LivenessDontCare}
lc.livenessMap.Vals = nil
}
if lv.be == nil {
lv.be = make([]BlockEffects, f.NumBlocks())
lv.be = make([]blockEffects, f.NumBlocks())
}
nblocks := int32(len(f.Blocks))
nvars := int32(len(vars))
bulk := bvbulkalloc(nvars, nblocks*7)
bulk := bitvec.NewBulk(nvars, nblocks*7)
for _, b := range f.Blocks {
be := lv.blockEffects(b)
be.uevar = bulk.next()
be.varkill = bulk.next()
be.livein = bulk.next()
be.liveout = bulk.next()
be.uevar = bulk.Next()
be.varkill = bulk.Next()
be.livein = bulk.Next()
be.liveout = bulk.Next()
}
lv.livenessMap.reset()
@ -404,14 +377,14 @@ func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int
return lv
}
func (lv *Liveness) blockEffects(b *ssa.Block) *BlockEffects {
func (lv *liveness) blockEffects(b *ssa.Block) *blockEffects {
return &lv.be[b.ID]
}
// NOTE: The bitmap for a specific type t could be cached in t after
// the first run and then simply copied into bv at the correct offset
// on future calls with the same type t.
func onebitwalktype1(t *types.Type, off int64, bv bvec) {
func SetTypeBits(t *types.Type, off int64, bv bitvec.BitVec) {
if t.Align > 0 && off&int64(t.Align-1) != 0 {
base.Fatalf("onebitwalktype1: invalid initial alignment: type %v has alignment %d, but offset is %v", t, t.Align, off)
}
@ -423,23 +396,23 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
switch t.Kind() {
case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP:
if off&int64(Widthptr-1) != 0 {
if off&int64(types.PtrSize-1) != 0 {
base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
bv.Set(int32(off / int64(Widthptr))) // pointer
bv.Set(int32(off / int64(types.PtrSize))) // pointer
case types.TSTRING:
// struct { byte *str; intgo len; }
if off&int64(Widthptr-1) != 0 {
if off&int64(types.PtrSize-1) != 0 {
base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
bv.Set(int32(off / int64(Widthptr))) //pointer in first slot
bv.Set(int32(off / int64(types.PtrSize))) //pointer in first slot
case types.TINTER:
// struct { Itab *tab; void *data; }
// or, when isnilinter(t)==true:
// struct { Type *type; void *data; }
if off&int64(Widthptr-1) != 0 {
if off&int64(types.PtrSize-1) != 0 {
base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
}
// The first word of an interface is a pointer, but we don't
@ -454,14 +427,14 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
// the underlying type so it won't be GCd.
// If we ever have a moving GC, we need to change this for 2b (as
// well as scan itabs to update their itab._type fields).
bv.Set(int32(off/int64(Widthptr) + 1)) // pointer in second slot
bv.Set(int32(off/int64(types.PtrSize) + 1)) // pointer in second slot
case types.TSLICE:
// struct { byte *array; uintgo len; uintgo cap; }
if off&int64(Widthptr-1) != 0 {
if off&int64(types.PtrSize-1) != 0 {
base.Fatalf("onebitwalktype1: invalid TARRAY alignment, %v", t)
}
bv.Set(int32(off / int64(Widthptr))) // pointer in first slot (BitsPointer)
bv.Set(int32(off / int64(types.PtrSize))) // pointer in first slot (BitsPointer)
case types.TARRAY:
elt := t.Elem()
@ -470,13 +443,13 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
break
}
for i := int64(0); i < t.NumElem(); i++ {
onebitwalktype1(elt, off, bv)
SetTypeBits(elt, off, bv)
off += elt.Width
}
case types.TSTRUCT:
for _, f := range t.Fields().Slice() {
onebitwalktype1(f.Type, off+f.Offset, bv)
SetTypeBits(f.Type, off+f.Offset, bv)
}
default:
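A worked illustration (not part of this CL) of the bitmap SetTypeBits builds on a 64-bit target, where types.PtrSize == 8:

	// type T struct { p *int; s string; b []byte }
	//
	//	word 0: p       -> bit 0 (pointer)
	//	word 1: s.str   -> bit 1 (string data pointer)
	//	word 2: s.len   -> no bit (scalar)
	//	word 3: b.array -> bit 3 (slice backing-array pointer)
	//	word 4: b.len   -> no bit
	//	word 5: b.cap   -> no bit
	//
	// Recorded bitmap: 0b001011 (bits 0, 1, and 3 set).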
@ -487,26 +460,26 @@ func onebitwalktype1(t *types.Type, off int64, bv bvec) {
// Generates live pointer value maps for arguments and local variables. The
// this argument and the in arguments are always assumed live. The vars
// argument is a slice of *ir.Name.
func (lv *Liveness) pointerMap(liveout bvec, vars []*ir.Name, args, locals bvec) {
func (lv *liveness) pointerMap(liveout bitvec.BitVec, vars []*ir.Name, args, locals bitvec.BitVec) {
for i := int32(0); ; i++ {
i = liveout.Next(i)
if i < 0 {
break
}
node := vars[i]
switch node.Class() {
switch node.Class_ {
case ir.PAUTO:
onebitwalktype1(node.Type(), node.FrameOffset()+lv.stkptrsize, locals)
SetTypeBits(node.Type(), node.FrameOffset()+lv.stkptrsize, locals)
case ir.PPARAM, ir.PPARAMOUT:
onebitwalktype1(node.Type(), node.FrameOffset(), args)
SetTypeBits(node.Type(), node.FrameOffset(), args)
}
}
}
// allUnsafe indicates that all points in this function are
// IsUnsafe reports whether all points in this function are
// unsafe-points.
func allUnsafe(f *ssa.Func) bool {
func IsUnsafe(f *ssa.Func) bool {
// The runtime assumes the only safe-points are function
// prologues (because that's how it used to be). We could and
// should improve that, but for now keep considering all points
@ -520,14 +493,14 @@ func allUnsafe(f *ssa.Func) bool {
}
// markUnsafePoints finds unsafe points and computes lv.unsafePoints.
func (lv *Liveness) markUnsafePoints() {
if allUnsafe(lv.f) {
func (lv *liveness) markUnsafePoints() {
if IsUnsafe(lv.f) {
// No complex analysis necessary.
lv.allUnsafe = true
return
}
lv.unsafePoints = bvalloc(int32(lv.f.NumValues()))
lv.unsafePoints = bitvec.New(int32(lv.f.NumValues()))
// Mark architecture-specific unsafe points.
for _, b := range lv.f.Blocks {
@ -571,7 +544,7 @@ func (lv *Liveness) markUnsafePoints() {
var load *ssa.Value
v := wbBlock.Controls[0]
for {
if sym, ok := v.Aux.(*obj.LSym); ok && sym == writeBarrier {
if sym, ok := v.Aux.(*obj.LSym); ok && sym == ir.Syms.WriteBarrier {
load = v
break
}
@ -638,11 +611,11 @@ func (lv *Liveness) markUnsafePoints() {
// nice to only flood as far as the unsafe.Pointer -> uintptr
// conversion, but it's hard to know which argument of an Add
// or Sub to follow.
var flooded bvec
var flooded bitvec.BitVec
var flood func(b *ssa.Block, vi int)
flood = func(b *ssa.Block, vi int) {
if flooded.n == 0 {
flooded = bvalloc(int32(lv.f.NumBlocks()))
if flooded.N == 0 {
flooded = bitvec.New(int32(lv.f.NumBlocks()))
}
if flooded.Get(int32(b.ID)) {
return
@ -683,14 +656,14 @@ func (lv *Liveness) markUnsafePoints() {
// This does not necessarily mean the instruction is a safe-point. In
// particular, call Values can have a stack map in case the callee
// grows the stack, but not themselves be a safe-point.
func (lv *Liveness) hasStackMap(v *ssa.Value) bool {
func (lv *liveness) hasStackMap(v *ssa.Value) bool {
if !v.Op.IsCall() {
return false
}
// typedmemclr and typedmemmove are write barriers and
// deeply non-preemptible. They are unsafe points and
// hence should not have liveness maps.
if sym, ok := v.Aux.(*ssa.AuxCall); ok && (sym.Fn == typedmemclr || sym.Fn == typedmemmove) {
if sym, ok := v.Aux.(*ssa.AuxCall); ok && (sym.Fn == ir.Syms.Typedmemclr || sym.Fn == ir.Syms.Typedmemmove) {
return false
}
return true
@ -699,7 +672,7 @@ func (lv *Liveness) hasStackMap(v *ssa.Value) bool {
// Initializes the sets for solving the live variables. Visits all the
// instructions in each basic block to summarize the information at each basic
// block.
func (lv *Liveness) prologue() {
func (lv *liveness) prologue() {
lv.initcache()
for _, b := range lv.f.Blocks {
@ -721,12 +694,12 @@ func (lv *Liveness) prologue() {
}
// Solve the liveness dataflow equations.
func (lv *Liveness) solve() {
func (lv *liveness) solve() {
// These temporary bitvectors exist to avoid successive allocations and
// frees within the loop.
nvars := int32(len(lv.vars))
newlivein := bvalloc(nvars)
newliveout := bvalloc(nvars)
newlivein := bitvec.New(nvars)
newliveout := bitvec.New(nvars)
// Walk blocks in postorder ordering. This improves convergence.
po := lv.f.Postorder()
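The equations being iterated here are the standard backward liveness dataflow: livein = uevar | (liveout &^ varkill), with liveout the union of the successors' livein. One relaxation step, sketched with the Or/AndNot/Copy bit-vector methods this file already relies on (treat exact signatures as assumptions):

	newliveout.Clear()
	for _, e := range b.Succs {
		newliveout.Or(newliveout, lv.blockEffects(e.Block()).livein)
	}
	be := lv.blockEffects(b)
	be.liveout.Copy(newliveout)
	newlivein.AndNot(be.liveout, be.varkill)
	be.livein.Or(newlivein, be.uevar) // repeat over postorder until no vector changes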
@ -781,10 +754,10 @@ func (lv *Liveness) solve() {
// Visits all instructions in a basic block and computes a bit vector of live
// variables at each safe point locations.
func (lv *Liveness) epilogue() {
func (lv *liveness) epilogue() {
nvars := int32(len(lv.vars))
liveout := bvalloc(nvars)
livedefer := bvalloc(nvars) // always-live variables
liveout := bitvec.New(nvars)
livedefer := bitvec.New(nvars) // always-live variables
// If there is a defer (that could recover), then all output
// parameters are live all the time. In addition, any locals
@ -795,7 +768,7 @@ func (lv *Liveness) epilogue() {
// don't need to keep the stack copy live?
if lv.fn.HasDefer() {
for i, n := range lv.vars {
if n.Class() == ir.PPARAMOUT {
if n.Class_ == ir.PPARAMOUT {
if n.Name().IsOutputParamHeapAddr() {
// Just to be paranoid. Heap addresses are PAUTOs.
base.Fatalf("variable %v both output param and heap output param", n)
@ -838,7 +811,7 @@ func (lv *Liveness) epilogue() {
{
// Reserve an entry for function entry.
live := bvalloc(nvars)
live := bitvec.New(nvars)
lv.livevars = append(lv.livevars, live)
}
@ -852,7 +825,7 @@ func (lv *Liveness) epilogue() {
continue
}
live := bvalloc(nvars)
live := bitvec.New(nvars)
lv.livevars = append(lv.livevars, live)
}
@ -893,7 +866,7 @@ func (lv *Liveness) epilogue() {
if !liveout.Get(int32(i)) {
continue
}
if n.Class() == ir.PPARAM {
if n.Class_ == ir.PPARAM {
continue // ok
}
base.Fatalf("bad live variable at entry of %v: %L", lv.fn.Nname, n)
@ -910,23 +883,23 @@ func (lv *Liveness) epilogue() {
// If we have an open-coded deferreturn call, make a liveness map for it.
if lv.fn.OpenCodedDeferDisallowed() {
lv.livenessMap.deferreturn = LivenessDontCare
lv.livenessMap.DeferReturn = objw.LivenessDontCare
} else {
lv.livenessMap.deferreturn = LivenessIndex{
stackMapIndex: lv.stackMapSet.add(livedefer),
isUnsafePoint: false,
lv.livenessMap.DeferReturn = objw.LivenessIndex{
StackMapIndex: lv.stackMapSet.add(livedefer),
IsUnsafePoint: false,
}
}
// Done compacting. Throw out the stack map set.
lv.stackMaps = lv.stackMapSet.extractUniqe()
lv.stackMaps = lv.stackMapSet.extractUnique()
lv.stackMapSet = bvecSet{}
// Useful sanity check: on entry to the function,
// the only things that can possibly be live are the
// input parameters.
for j, n := range lv.vars {
if n.Class() != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) {
if n.Class_ != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) {
lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Nname, n)
}
}
@ -948,7 +921,7 @@ func (lv *Liveness) epilogue() {
// is actually a net loss: we save about 50k of argument bitmaps but the new
// PCDATA tables cost about 100k. So for now we keep using a single index for
// both bitmap lists.
func (lv *Liveness) compact(b *ssa.Block) {
func (lv *liveness) compact(b *ssa.Block) {
pos := 0
if b == lv.f.Entry {
// Handle entry stack map.
@ -958,9 +931,9 @@ func (lv *Liveness) compact(b *ssa.Block) {
for _, v := range b.Values {
hasStackMap := lv.hasStackMap(v)
isUnsafePoint := lv.allUnsafe || lv.unsafePoints.Get(int32(v.ID))
idx := LivenessIndex{StackMapDontCare, isUnsafePoint}
idx := objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: isUnsafePoint}
if hasStackMap {
idx.stackMapIndex = lv.stackMapSet.add(lv.livevars[pos])
idx.StackMapIndex = lv.stackMapSet.add(lv.livevars[pos])
pos++
}
if hasStackMap || isUnsafePoint {
@ -972,7 +945,7 @@ func (lv *Liveness) compact(b *ssa.Block) {
lv.livevars = lv.livevars[:0]
}
func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
func (lv *liveness) showlive(v *ssa.Value, live bitvec.BitVec) {
if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") {
return
}
@ -1012,7 +985,7 @@ func (lv *Liveness) showlive(v *ssa.Value, live bvec) {
base.WarnfAt(pos, s)
}
func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool {
func (lv *liveness) printbvec(printed bool, name string, live bitvec.BitVec) bool {
if live.IsEmpty() {
return printed
}
@ -1036,7 +1009,7 @@ func (lv *Liveness) printbvec(printed bool, name string, live bvec) bool {
}
// printeffect is like printbvec, but for valueEffects.
func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bool {
func (lv *liveness) printeffect(printed bool, name string, pos int32, x bool) bool {
if !x {
return printed
}
@ -1056,7 +1029,7 @@ func (lv *Liveness) printeffect(printed bool, name string, pos int32, x bool) bo
// Prints the computed liveness information and inputs, for debugging.
// This format synthesizes the information used during the multiple passes
// into a single presentation.
func (lv *Liveness) printDebug() {
func (lv *liveness) printDebug() {
fmt.Printf("liveness: %s\n", ir.FuncName(lv.fn))
for i, b := range lv.f.Blocks {
@ -1128,7 +1101,7 @@ func (lv *Liveness) printDebug() {
fmt.Printf("\tlive=")
printed = false
if pcdata.StackMapValid() {
live := lv.stackMaps[pcdata.stackMapIndex]
live := lv.stackMaps[pcdata.StackMapIndex]
for j, n := range lv.vars {
if !live.Get(int32(j)) {
continue
@ -1143,7 +1116,7 @@ func (lv *Liveness) printDebug() {
fmt.Printf("\n")
}
if pcdata.isUnsafePoint {
if pcdata.IsUnsafePoint {
fmt.Printf("\tunsafe-point\n")
}
}
@ -1165,13 +1138,13 @@ func (lv *Liveness) printDebug() {
// first word dumped is the total number of bitmaps. The second word is the
// length of the bitmaps. All bitmaps are assumed to be of equal length. The
// remaining bytes are the raw bitmaps.
func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
func (lv *liveness) emit() (argsSym, liveSym *obj.LSym) {
// Size args bitmaps to be just large enough to hold the largest pointer.
// First, find the largest Xoffset node we care about.
// (Nodes without pointers aren't in lv.vars; see livenessShouldTrack.)
var maxArgNode *ir.Name
for _, n := range lv.vars {
switch n.Class() {
switch n.Class_ {
case ir.PPARAM, ir.PPARAMOUT:
if maxArgNode == nil || n.FrameOffset() > maxArgNode.FrameOffset() {
maxArgNode = n
@ -1181,7 +1154,7 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
// Next, find the offset of the largest pointer in the largest node.
var maxArgs int64
if maxArgNode != nil {
maxArgs = maxArgNode.FrameOffset() + typeptrdata(maxArgNode.Type())
maxArgs = maxArgNode.FrameOffset() + types.PtrDataSize(maxArgNode.Type())
}
// Size locals bitmaps to be stkptrsize sized.
@ -1196,13 +1169,13 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
// Temporary symbols for encoding bitmaps.
var argsSymTmp, liveSymTmp obj.LSym
args := bvalloc(int32(maxArgs / int64(Widthptr)))
aoff := duint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
aoff = duint32(&argsSymTmp, aoff, uint32(args.n)) // number of bits in each bitmap
args := bitvec.New(int32(maxArgs / int64(types.PtrSize)))
aoff := objw.Uint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
aoff = objw.Uint32(&argsSymTmp, aoff, uint32(args.N)) // number of bits in each bitmap
locals := bvalloc(int32(maxLocals / int64(Widthptr)))
loff := duint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
loff = duint32(&liveSymTmp, loff, uint32(locals.n)) // number of bits in each bitmap
locals := bitvec.New(int32(maxLocals / int64(types.PtrSize)))
loff := objw.Uint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
loff = objw.Uint32(&liveSymTmp, loff, uint32(locals.N)) // number of bits in each bitmap
for _, live := range lv.stackMaps {
args.Clear()
@ -1210,8 +1183,8 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
lv.pointerMap(live, lv.vars, args, locals)
aoff = dbvec(&argsSymTmp, aoff, args)
loff = dbvec(&liveSymTmp, loff, locals)
aoff = objw.BitVec(&argsSymTmp, aoff, args)
loff = objw.BitVec(&liveSymTmp, loff, locals)
}
// Give these LSyms content-addressable names,
@ -1229,11 +1202,11 @@ func (lv *Liveness) emit() (argsSym, liveSym *obj.LSym) {
return makeSym(&argsSymTmp), makeSym(&liveSymTmp)
}
// Entry pointer for liveness analysis. Solves for the liveness of
// Compute is the entry point for liveness analysis. It solves for the liveness of
// pointer variables in the function and emits a runtime data
// structure read by the garbage collector.
// Returns a map from GC safe points to their corresponding stack map index.
func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *Progs) LivenessMap {
func Compute(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) Map {
// Construct the global liveness state.
vars, idx := getvariables(curfn)
lv := newliveness(curfn, f, vars, idx, stkptrsize)
@ -1247,7 +1220,7 @@ func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *Progs) Liveness
for _, b := range f.Blocks {
for _, val := range b.Values {
if idx := lv.livenessMap.Get(val); idx.StackMapValid() {
lv.showlive(val, lv.stackMaps[idx.stackMapIndex])
lv.showlive(val, lv.stackMaps[idx.StackMapIndex])
}
}
}
@ -1261,11 +1234,11 @@ func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *Progs) Liveness
cache := f.Cache.Liveness.(*livenessFuncCache)
if cap(lv.be) < 2000 { // Threshold from ssa.Cache slices.
for i := range lv.be {
lv.be[i] = BlockEffects{}
lv.be[i] = blockEffects{}
}
cache.be = lv.be
}
if len(lv.livenessMap.vals) < 2000 {
if len(lv.livenessMap.Vals) < 2000 {
cache.livenessMap = lv.livenessMap
}
}
@ -1276,13 +1249,13 @@ func liveness(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *Progs) Liveness
fninfo.GCArgs, fninfo.GCLocals = lv.emit()
p := pp.Prog(obj.AFUNCDATA)
Addrconst(&p.From, objabi.FUNCDATA_ArgsPointerMaps)
p.From.SetConst(objabi.FUNCDATA_ArgsPointerMaps)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = fninfo.GCArgs
p = pp.Prog(obj.AFUNCDATA)
Addrconst(&p.From, objabi.FUNCDATA_LocalsPointerMaps)
p.From.SetConst(objabi.FUNCDATA_LocalsPointerMaps)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = fninfo.GCLocals
@ -1326,3 +1299,33 @@ func isfat(t *types.Type) bool {
return false
}
func WriteFuncMap(fn *ir.Func) {
if ir.FuncName(fn) == "_" || fn.Sym().Linkname != "" {
return
}
lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap")
nptr := int(fn.Type().ArgWidth() / int64(types.PtrSize))
bv := bitvec.New(int32(nptr) * 2)
nbitmap := 1
if fn.Type().NumResults() > 0 {
nbitmap = 2
}
off := objw.Uint32(lsym, 0, uint32(nbitmap))
off = objw.Uint32(lsym, off, uint32(bv.N))
if ir.IsMethod(fn) {
SetTypeBits(fn.Type().Recvs(), 0, bv)
}
if fn.Type().NumParams() > 0 {
SetTypeBits(fn.Type().Params(), 0, bv)
}
off = objw.BitVec(lsym, off, bv)
if fn.Type().NumResults() > 0 {
SetTypeBits(fn.Type().Results(), 0, bv)
off = objw.BitVec(lsym, off, bv)
}
objw.Global(lsym, int32(off), obj.RODATA|obj.LOCAL)
}
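The record this emits, as inferred from the objw.Uint32 and objw.BitVec calls above (layout sketch; field names are illustrative):

	// <fn>.args_stackmap:
	//	uint32 nbitmap // 1, or 2 when the function has results
	//	uint32 nbit    // bits per bitmap (bv.N)
	//	bitmap 0       // pointer words in receiver + parameters
	//	bitmap 1       // same bits, plus pointer words in results (if present)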

View file

@ -5,13 +5,13 @@
package mips
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/mips"
"cmd/internal/objabi"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &mips.Linkmips
if objabi.GOARCH == "mipsle" {
arch.LinkArch = &mips.Linkmipsle
@ -22,7 +22,7 @@ func Init(arch *gc.Arch) {
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}

View file

@ -6,20 +6,21 @@ package mips
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/mips"
)
// TODO(mips): implement DUFFZERO
func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i)
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i)
}
} else {
//fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi)
@ -29,22 +30,22 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
// MOVW R0, (Widthptr)r1
// ADD $Widthptr, r1
// BNE r1, r2, loop
p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
p.Reg = mips.REGSP
p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
p.Reg = mips.REGRT1
p = pp.Appendpp(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(gc.Widthptr))
p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
p1 := p
p = pp.Appendpp(p, mips.AADD, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, mips.REGRT1, 0)
p = pp.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
p.Reg = mips.REGRT2
gc.Patch(p, p1)
p.To.SetTarget(p1)
}
return p
}
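For clarity, the loop the large-cnt branch above emits, written out as the pseudo-assembly the comments describe (mips32, 4-byte words; register names illustrative):

	//	ADD  $(frame+off-4), SP, r1  // r1 = one word before the range
	//	ADD  $cnt, r1, r2            // r2 = end of the range
	//	loop:
	//	MOVW R0, 4(r1)               // zero the next word
	//	ADD  $4, r1
	//	BNE  r1, r2, loop            // p.To.SetTarget(p1) wires this back edge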
func ginsnop(pp *gc.Progs) *obj.Prog {
func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(mips.ANOR)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REG_R0

View file

@ -8,10 +8,10 @@ import (
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/mips"
@ -77,7 +77,7 @@ func storeByType(t *types.Type, r int16) obj.As {
panic("bad store type")
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpMIPSMOVWreg:
t := v.Type
@ -123,7 +123,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
r := v.Reg()
p := s.Prog(loadByType(v.Type, r))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = r
if isHILO(r) {
@ -153,7 +153,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type, r))
p.From.Type = obj.TYPE_REG
p.From.Reg = r
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpMIPSADD,
ssa.OpMIPSSUB,
ssa.OpMIPSAND,
@ -288,10 +288,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case *ir.Name:
wantreg = "SP"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case nil:
// No sym, just MOVW $off(SP), R
wantreg = "SP"
@ -312,7 +312,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpMIPSMOVBstore,
@ -325,7 +325,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpMIPSMOVBstorezero,
ssa.OpMIPSMOVHstorezero,
ssa.OpMIPSMOVWstorezero:
@ -334,7 +334,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpMIPSMOVBreg,
ssa.OpMIPSMOVBUreg,
ssa.OpMIPSMOVHreg,
@ -427,7 +427,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p4.From.Reg = v.Args[1].Reg()
p4.Reg = mips.REG_R1
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p2)
p4.To.SetTarget(p2)
case ssa.OpMIPSLoweredMove:
// SUBU $4, R1
// MOVW 4(R1), Rtmp
@ -480,7 +480,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p6.From.Reg = v.Args[2].Reg()
p6.Reg = mips.REG_R1
p6.To.Type = obj.TYPE_BRANCH
gc.Patch(p6, p2)
p6.To.SetTarget(p2)
case ssa.OpMIPSCALLstatic, ssa.OpMIPSCALLclosure, ssa.OpMIPSCALLinter:
s.Call(v)
case ssa.OpMIPSLoweredWB:
@ -492,13 +492,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(8) // space used in callee args area by assembly stubs
case ssa.OpMIPSLoweredPanicExtendA, ssa.OpMIPSLoweredPanicExtendB, ssa.OpMIPSLoweredPanicExtendC:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
s.UseArgs(12) // space used in callee args area by assembly stubs
case ssa.OpMIPSLoweredAtomicLoad8,
ssa.OpMIPSLoweredAtomicLoad32:
@ -577,7 +577,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
case ssa.OpMIPSLoweredAtomicAdd:
@ -613,7 +613,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
@ -657,7 +657,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
@ -701,7 +701,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
@ -750,19 +750,19 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p5.From.Type = obj.TYPE_REG
p5.From.Reg = v.Reg0()
p5.To.Type = obj.TYPE_BRANCH
gc.Patch(p5, p1)
p5.To.SetTarget(p1)
s.Prog(mips.ASYNC)
p6 := s.Prog(obj.ANOP)
gc.Patch(p2, p6)
p2.To.SetTarget(p6)
case ssa.OpMIPSLoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(mips.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP
if logopt.Enabled() {
@ -793,7 +793,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpMIPSLoweredGetClosurePtr:
// Closure pointer is R22 (mips.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpMIPSLoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(mips.AMOVW)
@ -826,13 +826,13 @@ var blockJump = map[ssa.BlockKind]struct {
ssa.BlockMIPSFPF: {mips.ABFPF, mips.ABFPT},
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
// defer returns in R1:
@ -843,11 +843,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.From.Reg = mips.REGZERO
p.Reg = mips.REG_R1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:


@ -5,13 +5,13 @@
package mips64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/mips"
"cmd/internal/objabi"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &mips.Linkmips64
if objabi.GOARCH == "mips64le" {
arch.LinkArch = &mips.Linkmips64le
@ -23,7 +23,7 @@ func Init(arch *gc.Arch) {
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}


@ -5,26 +5,28 @@
package mips64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/mips"
)
func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = pp.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i)
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i)
}
} else if cnt <= int64(128*gc.Widthptr) {
p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
} else if cnt <= int64(128*types.PtrSize) {
p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
p.Reg = mips.REGSP
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr))
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
} else {
// ADDV $(8+frame+lo-8), SP, r1
// ADDV $cnt, r1, r2
@ -32,22 +34,22 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
// MOVV R0, (Widthptr)r1
// ADDV $Widthptr, r1
// BNE r1, r2, loop
p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
p.Reg = mips.REGSP
p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
p.Reg = mips.REGRT1
p = pp.Appendpp(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(gc.Widthptr))
p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
p1 := p
p = pp.Appendpp(p, mips.AADDV, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, mips.REGRT1, 0)
p = pp.Appendpp(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
p.Reg = mips.REGRT2
gc.Patch(p, p1)
p.To.SetTarget(p1)
}
return p
}
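// In summary, zerorange picks a strategy by size: up to 4 pointer-words it
// emits unrolled MOVV stores; up to 128 pointer-words it branches into
// duffzero at an offset chosen so that exactly cnt/PtrSize words are
// cleared; anything larger gets the explicit ADDV/MOVV/BNE loop sketched
// in the comment above.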
func ginsnop(pp *gc.Progs) *obj.Prog {
func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(mips.ANOR)
p.From.Type = obj.TYPE_REG
p.From.Reg = mips.REG_R0


@ -8,10 +8,10 @@ import (
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/mips"
@ -85,7 +85,7 @@ func storeByType(t *types.Type, r int16) obj.As {
panic("bad store type")
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpMIPS64MOVVreg:
if v.Type.IsMemory() {
@ -126,7 +126,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
r := v.Reg()
p := s.Prog(loadByType(v.Type, r))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = r
if isHILO(r) {
@ -156,7 +156,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type, r))
p.From.Type = obj.TYPE_REG
p.From.Reg = r
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpMIPS64ADDV,
ssa.OpMIPS64SUBV,
ssa.OpMIPS64AND,
@ -262,10 +262,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case *ir.Name:
wantreg = "SP"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case nil:
// No sym, just MOVV $off(SP), R
wantreg = "SP"
@ -288,7 +288,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpMIPS64MOVBstore,
@ -302,7 +302,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpMIPS64MOVBstorezero,
ssa.OpMIPS64MOVHstorezero,
ssa.OpMIPS64MOVWstorezero,
@ -312,7 +312,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpMIPS64MOVBreg,
ssa.OpMIPS64MOVBUreg,
ssa.OpMIPS64MOVHreg,
@ -383,7 +383,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p = s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpMIPS64LoweredZero:
// SUBV $8, R1
@ -428,12 +428,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p4.From.Reg = v.Args[1].Reg()
p4.Reg = mips.REG_R1
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p2)
p4.To.SetTarget(p2)
case ssa.OpMIPS64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffcopy
p.To.Sym = ir.Syms.Duffcopy
p.To.Offset = v.AuxInt
case ssa.OpMIPS64LoweredMove:
// SUBV $8, R1
@ -490,7 +490,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p6.From.Reg = v.Args[2].Reg()
p6.Reg = mips.REG_R1
p6.To.Type = obj.TYPE_BRANCH
gc.Patch(p6, p2)
p6.To.SetTarget(p2)
case ssa.OpMIPS64CALLstatic, ssa.OpMIPS64CALLclosure, ssa.OpMIPS64CALLinter:
s.Call(v)
case ssa.OpMIPS64LoweredWB:
@ -502,7 +502,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpMIPS64LoweredAtomicLoad8, ssa.OpMIPS64LoweredAtomicLoad32, ssa.OpMIPS64LoweredAtomicLoad64:
as := mips.AMOVV
@ -579,7 +579,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
case ssa.OpMIPS64LoweredAtomicAdd32, ssa.OpMIPS64LoweredAtomicAdd64:
// SYNC
@ -616,7 +616,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
p4 := s.Prog(mips.AADDVU)
p4.From.Type = obj.TYPE_REG
@ -659,7 +659,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.From.Type = obj.TYPE_REG
p3.From.Reg = mips.REGTMP
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
p3.To.SetTarget(p)
s.Prog(mips.ASYNC)
p4 := s.Prog(mips.AADDVU)
p4.From.Type = obj.TYPE_CONST
@ -712,15 +712,15 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p5.From.Type = obj.TYPE_REG
p5.From.Reg = v.Reg0()
p5.To.Type = obj.TYPE_BRANCH
gc.Patch(p5, p1)
p5.To.SetTarget(p1)
p6 := s.Prog(mips.ASYNC)
gc.Patch(p2, p6)
p2.To.SetTarget(p6)
case ssa.OpMIPS64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(mips.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP
if logopt.Enabled() {
@ -751,10 +751,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.To.Type = obj.TYPE_REG
p3.To.Reg = v.Reg()
p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land
gc.Patch(p2, p4)
p2.To.SetTarget(p4)
case ssa.OpMIPS64LoweredGetClosurePtr:
// Closure pointer is R22 (mips.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpMIPS64LoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(mips.AMOVV)
@ -787,13 +787,13 @@ var blockJump = map[ssa.BlockKind]struct {
ssa.BlockMIPS64FPF: {mips.ABFPF, mips.ABFPT},
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
// defer returns in R1:
@ -804,11 +804,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.From.Reg = mips.REGZERO
p.Reg = mips.REG_R1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:


@ -0,0 +1,493 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run mkbuiltin.go
package noder
import (
"fmt"
"go/constant"
"os"
"path"
"runtime"
"sort"
"strings"
"unicode"
"unicode/utf8"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/archive"
"cmd/internal/bio"
"cmd/internal/goobj"
"cmd/internal/objabi"
"cmd/internal/src"
)
func isDriveLetter(b byte) bool {
return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z'
}
// is this path a local name? begins with ./ or ../ or /, or (on Windows) a drive letter like c:/
func islocalname(name string) bool {
return strings.HasPrefix(name, "/") ||
runtime.GOOS == "windows" && len(name) >= 3 && isDriveLetter(name[0]) && name[1] == ':' && name[2] == '/' ||
strings.HasPrefix(name, "./") || name == "." ||
strings.HasPrefix(name, "../") || name == ".."
}
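// A few illustrative cases (assuming a non-Windows host): "./x", "../x",
// ".", "..", and "/usr/x" are all local names; "net/http" is not. On
// Windows, rooted paths such as "c:/go/src" also count as local.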
func findpkg(name string) (file string, ok bool) {
if islocalname(name) {
if base.Flag.NoLocalImports {
return "", false
}
if base.Flag.Cfg.PackageFile != nil {
file, ok = base.Flag.Cfg.PackageFile[name]
return file, ok
}
// try .a before .o. important for building libraries:
// if there is an array.o in the array.a library,
// want to find all of array.a, not just array.o.
file = fmt.Sprintf("%s.a", name)
if _, err := os.Stat(file); err == nil {
return file, true
}
file = fmt.Sprintf("%s.o", name)
if _, err := os.Stat(file); err == nil {
return file, true
}
return "", false
}
// local imports should be canonicalized already.
// don't want to see "encoding/../encoding/base64"
// as different from "encoding/base64".
if q := path.Clean(name); q != name {
base.Errorf("non-canonical import path %q (should be %q)", name, q)
return "", false
}
if base.Flag.Cfg.PackageFile != nil {
file, ok = base.Flag.Cfg.PackageFile[name]
return file, ok
}
for _, dir := range base.Flag.Cfg.ImportDirs {
file = fmt.Sprintf("%s/%s.a", dir, name)
if _, err := os.Stat(file); err == nil {
return file, true
}
file = fmt.Sprintf("%s/%s.o", dir, name)
if _, err := os.Stat(file); err == nil {
return file, true
}
}
if objabi.GOROOT != "" {
suffix := ""
suffixsep := ""
if base.Flag.InstallSuffix != "" {
suffixsep = "_"
suffix = base.Flag.InstallSuffix
} else if base.Flag.Race {
suffixsep = "_"
suffix = "race"
} else if base.Flag.MSan {
suffixsep = "_"
suffix = "msan"
}
file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.a", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffixsep, suffix, name)
if _, err := os.Stat(file); err == nil {
return file, true
}
file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.o", objabi.GOROOT, objabi.GOOS, objabi.GOARCH, suffixsep, suffix, name)
if _, err := os.Stat(file); err == nil {
return file, true
}
}
return "", false
}
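// In short, findpkg resolves a non-local import in this order: the
// -importcfg packagefile map if one was supplied, then each -I import
// directory, then $GOROOT/pkg/$GOOS_$GOARCH (with an optional _race,
// _msan, or -installsuffix suffix); at every step name.a is tried before
// name.o.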
// myheight tracks the local package's height based on packages
// imported so far.
var myheight int
func importfile(f constant.Value) *types.Pkg {
if f.Kind() != constant.String {
base.Errorf("import path must be a string")
return nil
}
path_ := constant.StringVal(f)
if len(path_) == 0 {
base.Errorf("import path is empty")
return nil
}
if isbadimport(path_, false) {
return nil
}
// The package name main is no longer reserved,
// but we reserve the import path "main" to identify
// the main package, just as we reserve the import
// path "math" to identify the standard math package.
if path_ == "main" {
base.Errorf("cannot import \"main\"")
base.ErrorExit()
}
if base.Ctxt.Pkgpath != "" && path_ == base.Ctxt.Pkgpath {
base.Errorf("import %q while compiling that package (import cycle)", path_)
base.ErrorExit()
}
if mapped, ok := base.Flag.Cfg.ImportMap[path_]; ok {
path_ = mapped
}
if path_ == "unsafe" {
return ir.Pkgs.Unsafe
}
if islocalname(path_) {
if path_[0] == '/' {
base.Errorf("import path cannot be absolute path")
return nil
}
prefix := base.Ctxt.Pathname
if base.Flag.D != "" {
prefix = base.Flag.D
}
path_ = path.Join(prefix, path_)
if isbadimport(path_, true) {
return nil
}
}
file, found := findpkg(path_)
if !found {
base.Errorf("can't find import: %q", path_)
base.ErrorExit()
}
importpkg := types.NewPkg(path_, "")
if importpkg.Imported {
return importpkg
}
importpkg.Imported = true
imp, err := bio.Open(file)
if err != nil {
base.Errorf("can't open import: %q: %v", path_, err)
base.ErrorExit()
}
defer imp.Close()
// check object header
p, err := imp.ReadString('\n')
if err != nil {
base.Errorf("import %s: reading input: %v", file, err)
base.ErrorExit()
}
if p == "!<arch>\n" { // package archive
// package export block should be first
sz := archive.ReadHeader(imp.Reader, "__.PKGDEF")
if sz <= 0 {
base.Errorf("import %s: not a package file", file)
base.ErrorExit()
}
p, err = imp.ReadString('\n')
if err != nil {
base.Errorf("import %s: reading input: %v", file, err)
base.ErrorExit()
}
}
if !strings.HasPrefix(p, "go object ") {
base.Errorf("import %s: not a go object file: %s", file, p)
base.ErrorExit()
}
q := fmt.Sprintf("%s %s %s %s\n", objabi.GOOS, objabi.GOARCH, objabi.Version, objabi.Expstring())
if p[10:] != q {
base.Errorf("import %s: object is [%s] expected [%s]", file, p[10:], q)
base.ErrorExit()
}
// process header lines
for {
p, err = imp.ReadString('\n')
if err != nil {
base.Errorf("import %s: reading input: %v", file, err)
base.ErrorExit()
}
if p == "\n" {
break // header ends with blank line
}
}
// Expect $$B\n to signal binary import format.
// look for $$
var c byte
for {
c, err = imp.ReadByte()
if err != nil {
break
}
if c == '$' {
c, err = imp.ReadByte()
if c == '$' || err != nil {
break
}
}
}
// get character after $$
if err == nil {
c, _ = imp.ReadByte()
}
var fingerprint goobj.FingerprintType
switch c {
case '\n':
base.Errorf("cannot import %s: old export format no longer supported (recompile library)", path_)
return nil
case 'B':
if base.Debug.Export != 0 {
fmt.Printf("importing %s (%s)\n", path_, file)
}
imp.ReadByte() // skip \n after $$B
c, err = imp.ReadByte()
if err != nil {
base.Errorf("import %s: reading input: %v", file, err)
base.ErrorExit()
}
// Indexed format is distinguished by an 'i' byte,
// whereas previous export formats started with 'c', 'd', or 'v'.
if c != 'i' {
base.Errorf("import %s: unexpected package format byte: %v", file, c)
base.ErrorExit()
}
fingerprint = typecheck.ReadImports(importpkg, imp)
default:
base.Errorf("no import in %q", path_)
base.ErrorExit()
}
// assume files move (get installed) so don't record the full path
if base.Flag.Cfg.PackageFile != nil {
// If using a packageFile map, assume path_ can be recorded directly.
base.Ctxt.AddImport(path_, fingerprint)
} else {
// For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a".
base.Ctxt.AddImport(file[len(file)-len(path_)-len(".a"):], fingerprint)
}
if importpkg.Height >= myheight {
myheight = importpkg.Height + 1
}
return importpkg
}
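// The header checks above, in brief: for an archive ("!<arch>\n") the
// __.PKGDEF entry must come first; the "go object ..." line must match
// this toolchain's GOOS/GOARCH/version/experiment string exactly; and the
// export data must begin with "$$B" followed by 'i', the indexed format.
// Anything else is rejected as stale or unsupported.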
// The linker uses the magic symbol prefixes "go." and "type."
// Avoid potential confusion between import paths and symbols
// by rejecting these reserved imports for now. Also, people
// "can do weird things in GOPATH and we'd prefer they didn't
// do _that_ weird thing" (per rsc). See also #4257.
var reservedimports = []string{
"go",
"type",
}
func isbadimport(path string, allowSpace bool) bool {
if strings.Contains(path, "\x00") {
base.Errorf("import path contains NUL")
return true
}
for _, ri := range reservedimports {
if path == ri {
base.Errorf("import path %q is reserved and cannot be used", path)
return true
}
}
for _, r := range path {
if r == utf8.RuneError {
base.Errorf("import path contains invalid UTF-8 sequence: %q", path)
return true
}
if r < 0x20 || r == 0x7f {
base.Errorf("import path contains control character: %q", path)
return true
}
if r == '\\' {
base.Errorf("import path contains backslash; use slash: %q", path)
return true
}
if !allowSpace && unicode.IsSpace(r) {
base.Errorf("import path contains space character: %q", path)
return true
}
if strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r) {
base.Errorf("import path contains invalid character '%c': %q", r, path)
return true
}
}
return false
}
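// Illustrative rejects (hypothetical inputs): "go" and "type" (reserved),
// "a\x00b" (NUL byte), `a\b` (backslash), "a b" (space, unless allowSpace
// is set), and any path containing control characters, invalid UTF-8, or
// one of the punctuation characters listed above.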
func pkgnotused(lineno src.XPos, path string, name string) {
// If the package was imported with a name other than the final
// import path element, show it explicitly in the error message.
// Note that this handles both renamed imports and imports of
// packages containing unconventional package declarations.
// Note that this uses / always, even on Windows, because Go import
// paths always use forward slashes.
elem := path
if i := strings.LastIndex(elem, "/"); i >= 0 {
elem = elem[i+1:]
}
if name == "" || elem == name {
base.ErrorfAt(lineno, "imported and not used: %q", path)
} else {
base.ErrorfAt(lineno, "imported and not used: %q as %s", path, name)
}
}
func mkpackage(pkgname string) {
if types.LocalPkg.Name == "" {
if pkgname == "_" {
base.Errorf("invalid package name _")
}
types.LocalPkg.Name = pkgname
} else {
if pkgname != types.LocalPkg.Name {
base.Errorf("package %s; expected %s", pkgname, types.LocalPkg.Name)
}
}
}
func clearImports() {
type importedPkg struct {
pos src.XPos
path string
name string
}
var unused []importedPkg
for _, s := range types.LocalPkg.Syms {
n := ir.AsNode(s.Def)
if n == nil {
continue
}
if n.Op() == ir.OPACK {
// throw away top-level package name left over
// from previous file.
// leave s.Block set to cause redeclaration
// errors if a conflicting top-level name is
// introduced by a different file.
p := n.(*ir.PkgName)
if !p.Used && base.SyntaxErrors() == 0 {
unused = append(unused, importedPkg{p.Pos(), p.Pkg.Path, s.Name})
}
s.Def = nil
continue
}
if types.IsDotAlias(s) {
// throw away top-level name left over
// from previous import . "x"
// We'll report errors after type checking in checkDotImports.
s.Def = nil
continue
}
}
sort.Slice(unused, func(i, j int) bool { return unused[i].pos.Before(unused[j].pos) })
for _, pkg := range unused {
pkgnotused(pkg.pos, pkg.path, pkg.name)
}
}
// CheckDotImports reports errors for any unused dot imports.
func CheckDotImports() {
for _, pack := range dotImports {
if !pack.Used {
base.ErrorfAt(pack.Pos(), "imported and not used: %q", pack.Pkg.Path)
}
}
// No longer needed; release memory.
dotImports = nil
typecheck.DotImportRefs = nil
}
// dotImports tracks all PkgNames that have been dot-imported.
var dotImports []*ir.PkgName
// find all the exported symbols in the package referenced by PkgName,
// and make them available in the current package
func importDot(pack *ir.PkgName) {
if typecheck.DotImportRefs == nil {
typecheck.DotImportRefs = make(map[*ir.Ident]*ir.PkgName)
}
opkg := pack.Pkg
for _, s := range opkg.Syms {
if s.Def == nil {
if _, ok := typecheck.DeclImporter[s]; !ok {
continue
}
}
if !types.IsExported(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot
continue
}
s1 := typecheck.Lookup(s.Name)
if s1.Def != nil {
pkgerror := fmt.Sprintf("during import %q", opkg.Path)
typecheck.Redeclared(base.Pos, s1, pkgerror)
continue
}
id := ir.NewIdent(src.NoXPos, s)
typecheck.DotImportRefs[id] = pack
s1.Def = id
s1.Block = 1
}
dotImports = append(dotImports, pack)
}
// importName is like oldname,
// but it reports an error if sym is from another package and not exported.
func importName(sym *types.Sym) ir.Node {
n := oldname(sym)
if !types.IsExported(sym.Name) && sym.Pkg != types.LocalPkg {
n.SetDiag(true)
base.Errorf("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name)
}
return n
}


@ -2,22 +2,17 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package noder
import (
"cmd/compile/internal/base"
"fmt"
"strings"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/internal/objabi"
"cmd/internal/src"
"fmt"
"strings"
)
func makePos(b *src.PosBase, line, col uint) src.XPos {
return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col))
}
func isSpace(c rune) bool {
return c == ' ' || c == '\t' || c == '\n' || c == '\r'
}
@ -27,7 +22,7 @@ func isQuoted(s string) bool {
}
const (
FuncPragmas = ir.Nointerface |
funcPragmas = ir.Nointerface |
ir.Noescape |
ir.Norace |
ir.Nosplit |
@ -40,7 +35,7 @@ const (
ir.Nowritebarrierrec |
ir.Yeswritebarrierrec
TypePragmas = ir.NotInHeap
typePragmas = ir.NotInHeap
)
func pragmaFlag(verb string) ir.PragmaFlag {


@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package noder
import (
"cmd/compile/internal/syntax"
"reflect"
"runtime"
"testing"
"cmd/compile/internal/syntax"
)
func eq(a, b []string) bool {


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package noder
import (
"fmt"
@ -21,17 +21,18 @@ import (
"cmd/compile/internal/importer"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/compile/internal/types2"
"cmd/internal/objabi"
"cmd/internal/src"
)
// parseFiles concurrently parses files into *syntax.File structures.
// ParseFiles concurrently parses files into *syntax.File structures.
// Each declaration in every *syntax.File is converted to a syntax tree
// and its root node is appended to Target.Decls.
// Returns the total count of parsed lines.
func parseFiles(filenames []string) (lines uint) {
func ParseFiles(filenames []string) (lines uint) {
noders := make([]*noder, 0, len(filenames))
// Limit the number of simultaneously open files.
sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10)
@ -137,7 +138,7 @@ func parseFiles(filenames []string) (lines uint) {
base.ExitIfErrors()
// Always run CheckDclstack here, even when debug_dclstack is not set, as a sanity measure.
testdclstack()
types.CheckDclstack()
}
types.LocalPkg.Height = myheight
@ -158,7 +159,7 @@ func parseFiles(filenames []string) (lines uint) {
}
// Always run CheckDclstack here, even when debug_dclstack is not set, as a sanity measure.
testdclstack()
types.CheckDclstack()
}
for _, p := range noders {
@ -293,20 +294,20 @@ func (p *noder) sel(x *syntax.SelectorExpr) *types2.Selection {
func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) {
oldScope := p.scope
p.scope = 0
funchdr(fn)
typecheck.StartFuncBody(fn)
if block != nil {
body := p.stmts(block.List)
if body == nil {
body = []ir.Node{ir.Nod(ir.OBLOCK, nil, nil)}
body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)}
}
fn.PtrBody().Set(body)
fn.Body.Set(body)
base.Pos = p.makeXPos(block.Rbrace)
fn.Endlineno = base.Pos
}
funcbody()
typecheck.FinishFuncBody()
p.scope = oldScope
}
@ -314,9 +315,9 @@ func (p *noder) openScope(pos syntax.Pos) {
types.Markdcl()
if p.trackScopes {
Curfn.Parents = append(Curfn.Parents, p.scope)
p.scopeVars = append(p.scopeVars, len(Curfn.Dcl))
p.scope = ir.ScopeID(len(Curfn.Parents))
ir.CurFunc.Parents = append(ir.CurFunc.Parents, p.scope)
p.scopeVars = append(p.scopeVars, len(ir.CurFunc.Dcl))
p.scope = ir.ScopeID(len(ir.CurFunc.Parents))
p.markScope(pos)
}
@ -329,29 +330,29 @@ func (p *noder) closeScope(pos syntax.Pos) {
if p.trackScopes {
scopeVars := p.scopeVars[len(p.scopeVars)-1]
p.scopeVars = p.scopeVars[:len(p.scopeVars)-1]
if scopeVars == len(Curfn.Dcl) {
if scopeVars == len(ir.CurFunc.Dcl) {
// no variables were declared in this scope, so we can retract it.
if int(p.scope) != len(Curfn.Parents) {
if int(p.scope) != len(ir.CurFunc.Parents) {
base.Fatalf("scope tracking inconsistency, no variables declared but scopes were not retracted")
}
p.scope = Curfn.Parents[p.scope-1]
Curfn.Parents = Curfn.Parents[:len(Curfn.Parents)-1]
p.scope = ir.CurFunc.Parents[p.scope-1]
ir.CurFunc.Parents = ir.CurFunc.Parents[:len(ir.CurFunc.Parents)-1]
nmarks := len(Curfn.Marks)
Curfn.Marks[nmarks-1].Scope = p.scope
nmarks := len(ir.CurFunc.Marks)
ir.CurFunc.Marks[nmarks-1].Scope = p.scope
prevScope := ir.ScopeID(0)
if nmarks >= 2 {
prevScope = Curfn.Marks[nmarks-2].Scope
prevScope = ir.CurFunc.Marks[nmarks-2].Scope
}
if Curfn.Marks[nmarks-1].Scope == prevScope {
Curfn.Marks = Curfn.Marks[:nmarks-1]
if ir.CurFunc.Marks[nmarks-1].Scope == prevScope {
ir.CurFunc.Marks = ir.CurFunc.Marks[:nmarks-1]
}
return
}
p.scope = Curfn.Parents[p.scope-1]
p.scope = ir.CurFunc.Parents[p.scope-1]
p.markScope(pos)
}
@ -359,10 +360,10 @@ func (p *noder) closeScope(pos syntax.Pos) {
func (p *noder) markScope(pos syntax.Pos) {
xpos := p.makeXPos(pos)
if i := len(Curfn.Marks); i > 0 && Curfn.Marks[i-1].Pos == xpos {
Curfn.Marks[i-1].Scope = p.scope
if i := len(ir.CurFunc.Marks); i > 0 && ir.CurFunc.Marks[i-1].Pos == xpos {
ir.CurFunc.Marks[i-1].Scope = p.scope
} else {
Curfn.Marks = append(Curfn.Marks, ir.Mark{Pos: xpos, Scope: p.scope})
ir.CurFunc.Marks = append(ir.CurFunc.Marks, ir.Mark{Pos: xpos, Scope: p.scope})
}
}
@ -389,12 +390,12 @@ func (p *noder) node() {
p.setlineno(p.file.PkgName)
mkpackage(p.file.PkgName.Value)
if pragma, ok := p.file.Pragma.(*Pragma); ok {
if pragma, ok := p.file.Pragma.(*pragmas); ok {
pragma.Flag &^= ir.GoBuildPragma
p.checkUnused(pragma)
}
Target.Decls = append(Target.Decls, p.decls(p.file.DeclList)...)
typecheck.Target.Decls = append(typecheck.Target.Decls, p.decls(p.file.DeclList)...)
base.Pos = src.NoXPos
clearImports()
@ -406,7 +407,7 @@ func (p *noder) processPragmas() {
p.errorAt(l.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
continue
}
n := ir.AsNode(lookup(l.local).Def)
n := ir.AsNode(typecheck.Lookup(l.local).Def)
if n == nil || n.Op() != ir.ONAME {
// TODO(mdempsky): Change to p.errorAt before Go 1.17 release.
// base.WarnfAt(p.makeXPos(l.pos), "//go:linkname must refer to declared function or variable (will be an error in Go 1.17)")
@ -418,7 +419,7 @@ func (p *noder) processPragmas() {
}
n.Sym().Linkname = l.remote
}
Target.CgoPragmas = append(Target.CgoPragmas, p.pragcgobuf...)
typecheck.Target.CgoPragmas = append(typecheck.Target.CgoPragmas, p.pragcgobuf...)
}
func (p *noder) decls(decls []syntax.Decl) (l []ir.Node) {
@ -455,7 +456,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
return // avoid follow-on errors if there was a syntax error
}
if pragma, ok := imp.Pragma.(*Pragma); ok {
if pragma, ok := imp.Pragma.(*pragmas); ok {
p.checkUnused(pragma)
}
@ -467,7 +468,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
return
}
if ipkg == unsafepkg {
if ipkg == ir.Pkgs.Unsafe {
p.importedUnsafe = true
}
if ipkg.Path == "embed" {
@ -475,7 +476,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
}
if !ipkg.Direct {
Target.Imports = append(Target.Imports, ipkg)
typecheck.Target.Imports = append(typecheck.Target.Imports, ipkg)
}
ipkg.Direct = true
@ -483,7 +484,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
if imp.LocalPkgName != nil {
my = p.name(imp.LocalPkgName)
} else {
my = lookup(ipkg.Name)
my = typecheck.Lookup(ipkg.Name)
}
pack := ir.NewPkgName(p.pos(imp), my, ipkg)
@ -499,7 +500,7 @@ func (p *noder) importDecl(imp *syntax.ImportDecl) {
return
}
if my.Def != nil {
redeclare(pack.Pos(), my, "as imported package name")
typecheck.Redeclared(pack.Pos(), my, "as imported package name")
}
my.Def = pack
my.Lastlineno = pack.Pos()
@ -515,7 +516,7 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node {
exprs = p.exprList(decl.Values)
}
if pragma, ok := decl.Pragma.(*Pragma); ok {
if pragma, ok := decl.Pragma.(*pragmas); ok {
if len(pragma.Embeds) > 0 {
if !p.importedEmbed {
// This check can't be done when building the list pragma.Embeds
@ -534,7 +535,7 @@ func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node {
}
p.setlineno(decl)
return variter(names, typ, exprs)
return typecheck.DeclVars(names, typ, exprs)
}
// constState tracks state between constant specifiers within a
@ -554,7 +555,7 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
}
}
if pragma, ok := decl.Pragma.(*Pragma); ok {
if pragma, ok := decl.Pragma.(*pragmas); ok {
p.checkUnused(pragma)
}
@ -582,13 +583,13 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
if decl.Values == nil {
v = ir.DeepCopy(n.Pos(), v)
}
declare(n, dclcontext)
typecheck.Declare(n, typecheck.DeclContext)
n.Ntype = typ
n.Defn = v
n.SetIota(cs.iota)
nn = append(nn, p.nod(decl, ir.ODCLCONST, n, nil))
nn = append(nn, ir.NewDecl(p.pos(decl), ir.ODCLCONST, n))
}
if len(values) > len(names) {
@ -602,23 +603,23 @@ func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node {
n := p.declName(ir.OTYPE, decl.Name)
declare(n, dclcontext)
typecheck.Declare(n, typecheck.DeclContext)
// decl.Type may be nil but in that case we got a syntax error during parsing
typ := p.typeExprOrNil(decl.Type)
n.Ntype = typ
n.SetAlias(decl.Alias)
if pragma, ok := decl.Pragma.(*Pragma); ok {
if pragma, ok := decl.Pragma.(*pragmas); ok {
if !decl.Alias {
n.SetPragma(pragma.Flag & TypePragmas)
pragma.Flag &^= TypePragmas
n.SetPragma(pragma.Flag & typePragmas)
pragma.Flag &^= typePragmas
}
p.checkUnused(pragma)
}
nod := p.nod(decl, ir.ODCLTYPE, n, nil)
if n.Alias() && !langSupported(1, 9, types.LocalPkg) {
nod := ir.NewDecl(p.pos(decl), ir.ODCLTYPE, n)
if n.Alias() && !types.AllowsGoVersion(types.LocalPkg, 1, 9) {
base.ErrorfAt(nod.Pos(), "type aliases only supported as of -lang=go1.9")
}
return nod
@ -647,7 +648,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node {
if len(t.Params) > 0 || len(t.Results) > 0 {
base.ErrorfAt(f.Pos(), "func init must have no arguments and no return values")
}
Target.Inits = append(Target.Inits, f)
typecheck.Target.Inits = append(typecheck.Target.Inits, f)
}
if types.LocalPkg.Name == "main" && name.Name == "main" {
@ -660,21 +661,21 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node {
name = ir.BlankNode.Sym() // filled in by typecheckfunc
}
f.Nname = newFuncNameAt(p.pos(fun.Name), name, f)
f.Nname = ir.NewFuncNameAt(p.pos(fun.Name), name, f)
f.Nname.Defn = f
f.Nname.Ntype = t
if pragma, ok := fun.Pragma.(*Pragma); ok {
f.Pragma = pragma.Flag & FuncPragmas
if pragma, ok := fun.Pragma.(*pragmas); ok {
f.Pragma = pragma.Flag & funcPragmas
if pragma.Flag&ir.Systemstack != 0 && pragma.Flag&ir.Nosplit != 0 {
base.ErrorfAt(f.Pos(), "go:nosplit and go:systemstack cannot be combined")
}
pragma.Flag &^= FuncPragmas
pragma.Flag &^= funcPragmas
p.checkUnused(pragma)
}
if fun.Recv == nil {
declare(f.Nname, ir.PFUNC)
typecheck.Declare(f.Nname, ir.PFUNC)
}
p.funcBody(f, fun.Body)
@ -781,24 +782,24 @@ func (p *noder) expr(expr syntax.Expr) ir.Node {
n.SetDiag(expr.Bad) // avoid follow-on errors if there was a syntax error
return n
case *syntax.CompositeLit:
n := p.nod(expr, ir.OCOMPLIT, nil, nil)
n := ir.NewCompLitExpr(p.pos(expr), ir.OCOMPLIT, nil, nil)
if expr.Type != nil {
n.SetRight(p.expr(expr.Type))
n.Ntype = ir.Node(p.expr(expr.Type)).(ir.Ntype)
}
l := p.exprs(expr.ElemList)
for i, e := range l {
l[i] = p.wrapname(expr.ElemList[i], e)
}
n.PtrList().Set(l)
n.List.Set(l)
base.Pos = p.makeXPos(expr.Rbrace)
return n
case *syntax.KeyValueExpr:
// use position of expr.Key rather than of expr (which has position of ':')
return p.nod(expr.Key, ir.OKEY, p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value)))
return ir.NewKeyExpr(p.pos(expr.Key), p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value)))
case *syntax.FuncLit:
return p.funcLit(expr)
case *syntax.ParenExpr:
return p.nod(expr, ir.OPAREN, p.expr(expr.X), nil)
return ir.NewParenExpr(p.pos(expr), p.expr(expr.X))
case *syntax.SelectorExpr:
// parser.new_dotname
obj := p.expr(expr.X)
@ -807,11 +808,11 @@ func (p *noder) expr(expr syntax.Expr) ir.Node {
pack.Used = true
return importName(pack.Pkg.Lookup(expr.Sel.Value))
}
n := nodSym(ir.OXDOT, obj, p.name(expr.Sel))
n := ir.NewSelectorExpr(base.Pos, ir.OXDOT, obj, p.name(expr.Sel))
n.SetPos(p.pos(expr)) // lineno may have been changed by p.expr(expr.X)
return n
case *syntax.IndexExpr:
return p.nod(expr, ir.OINDEX, p.expr(expr.X), p.expr(expr.Index))
return ir.NewIndexExpr(p.pos(expr), p.expr(expr.X), p.expr(expr.Index))
case *syntax.SliceExpr:
op := ir.OSLICE
if expr.Full {
@ -827,7 +828,7 @@ func (p *noder) expr(expr syntax.Expr) ir.Node {
n.SetSliceBounds(index[0], index[1], index[2])
return n
case *syntax.AssertExpr:
return p.nod(expr, ir.ODOTTYPE, p.expr(expr.X), p.typeExpr(expr.Type))
return ir.NewTypeAssertExpr(p.pos(expr), p.expr(expr.X), p.typeExpr(expr.Type).(ir.Ntype))
case *syntax.Operation:
if expr.Op == syntax.Add && expr.Y != nil {
return p.sum(expr)
@ -837,7 +838,7 @@ func (p *noder) expr(expr syntax.Expr) ir.Node {
pos, op := p.pos(expr), p.unOp(expr.Op)
switch op {
case ir.OADDR:
return nodAddrAt(pos, x)
return typecheck.NodAddrAt(pos, x)
case ir.ODEREF:
return ir.NewStarExpr(pos, x)
}
@ -851,9 +852,9 @@ func (p *noder) expr(expr syntax.Expr) ir.Node {
}
return ir.NewBinaryExpr(pos, op, x, y)
case *syntax.CallExpr:
n := p.nod(expr, ir.OCALL, p.expr(expr.Fun), nil)
n.PtrList().Set(p.exprs(expr.ArgList))
n.SetIsDDD(expr.HasDots)
n := ir.NewCallExpr(p.pos(expr), ir.OCALL, p.expr(expr.Fun), nil)
n.Args.Set(p.exprs(expr.ArgList))
n.IsDDD = expr.HasDots
return n
case *syntax.ArrayType:
@ -961,7 +962,7 @@ func (p *noder) sum(x syntax.Expr) ir.Node {
nstr = nil
chunks = chunks[:0]
}
n = p.nod(add, ir.OADD, n, r)
n = ir.NewBinaryExpr(p.pos(add), ir.OADD, n, r)
}
if len(chunks) > 1 {
nstr.SetVal(constant.MakeString(strings.Join(chunks, "")))
@ -1083,7 +1084,7 @@ func (p *noder) embedded(typ syntax.Expr) *ir.Field {
}
sym := p.packname(typ)
n := ir.NewField(p.pos(typ), lookup(sym.Name), importName(sym).(ir.Ntype), nil)
n := ir.NewField(p.pos(typ), typecheck.Lookup(sym.Name), importName(sym).(ir.Ntype), nil)
n.Embedded = true
if isStar {
@ -1101,10 +1102,10 @@ func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []ir.Node {
for i, stmt := range stmts {
s := p.stmtFall(stmt, fallOK && i+1 == len(stmts))
if s == nil {
} else if s.Op() == ir.OBLOCK && s.(*ir.BlockStmt).List().Len() > 0 {
} else if s.Op() == ir.OBLOCK && len(s.(*ir.BlockStmt).List) > 0 {
// Inline non-empty block.
// Empty blocks must be preserved for checkreturn.
nodes = append(nodes, s.(*ir.BlockStmt).List().Slice()...)
nodes = append(nodes, s.(*ir.BlockStmt).List...)
} else {
nodes = append(nodes, s)
}
@ -1127,35 +1128,35 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node {
l := p.blockStmt(stmt)
if len(l) == 0 {
// TODO(mdempsky): Line number?
return ir.Nod(ir.OBLOCK, nil, nil)
return ir.NewBlockStmt(base.Pos, nil)
}
return liststmt(l)
return ir.NewBlockStmt(src.NoXPos, l)
case *syntax.ExprStmt:
return p.wrapname(stmt, p.expr(stmt.X))
case *syntax.SendStmt:
return p.nod(stmt, ir.OSEND, p.expr(stmt.Chan), p.expr(stmt.Value))
return ir.NewSendStmt(p.pos(stmt), p.expr(stmt.Chan), p.expr(stmt.Value))
case *syntax.DeclStmt:
return liststmt(p.decls(stmt.DeclList))
return ir.NewBlockStmt(src.NoXPos, p.decls(stmt.DeclList))
case *syntax.AssignStmt:
if stmt.Op != 0 && stmt.Op != syntax.Def {
n := ir.NewAssignOpStmt(p.pos(stmt), p.binOp(stmt.Op), p.expr(stmt.Lhs), p.expr(stmt.Rhs))
n.SetImplicit(stmt.Rhs == syntax.ImplicitOne)
n.IncDec = stmt.Rhs == syntax.ImplicitOne
return n
}
rhs := p.exprList(stmt.Rhs)
if list, ok := stmt.Lhs.(*syntax.ListExpr); ok && len(list.ElemList) != 1 || len(rhs) != 1 {
n := p.nod(stmt, ir.OAS2, nil, nil)
n.SetColas(stmt.Op == syntax.Def)
n.PtrList().Set(p.assignList(stmt.Lhs, n, n.Colas()))
n.PtrRlist().Set(rhs)
n := ir.NewAssignListStmt(p.pos(stmt), ir.OAS2, nil, nil)
n.Def = stmt.Op == syntax.Def
n.Lhs.Set(p.assignList(stmt.Lhs, n, n.Def))
n.Rhs.Set(rhs)
return n
}
n := p.nod(stmt, ir.OAS, nil, nil)
n.SetColas(stmt.Op == syntax.Def)
n.SetLeft(p.assignList(stmt.Lhs, n, n.Colas())[0])
n.SetRight(rhs[0])
n := ir.NewAssignStmt(p.pos(stmt), nil, nil)
n.Def = stmt.Op == syntax.Def
n.X = p.assignList(stmt.Lhs, n, n.Def)[0]
n.Y = rhs[0]
return n
case *syntax.BranchStmt:
@ -1196,14 +1197,14 @@ func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node {
if stmt.Results != nil {
results = p.exprList(stmt.Results)
}
n := p.nod(stmt, ir.ORETURN, nil, nil)
n.PtrList().Set(results)
if n.List().Len() == 0 && Curfn != nil {
for _, ln := range Curfn.Dcl {
if ln.Class() == ir.PPARAM {
n := ir.NewReturnStmt(p.pos(stmt), nil)
n.Results.Set(results)
if len(n.Results) == 0 && ir.CurFunc != nil {
for _, ln := range ir.CurFunc.Dcl {
if ln.Class_ == ir.PPARAM {
continue
}
if ln.Class() != ir.PPARAMOUT {
if ln.Class_ != ir.PPARAMOUT {
break
}
if ln.Sym().Def != ln {
@ -1269,10 +1270,10 @@ func (p *noder) assignList(expr syntax.Expr, defn ir.Node, colas bool) []ir.Node
}
newOrErr = true
n := NewName(sym)
declare(n, dclcontext)
n := typecheck.NewName(sym)
typecheck.Declare(n, typecheck.DeclContext)
n.Defn = defn
defn.PtrInit().Append(ir.Nod(ir.ODCL, n, nil))
defn.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n))
res[i] = n
}
@ -1291,20 +1292,21 @@ func (p *noder) blockStmt(stmt *syntax.BlockStmt) []ir.Node {
func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node {
p.openScope(stmt.Pos())
n := p.nod(stmt, ir.OIF, nil, nil)
n := ir.NewIfStmt(p.pos(stmt), nil, nil, nil)
if stmt.Init != nil {
n.PtrInit().Set1(p.stmt(stmt.Init))
*n.PtrInit() = []ir.Node{p.stmt(stmt.Init)}
}
if stmt.Cond != nil {
n.SetLeft(p.expr(stmt.Cond))
n.Cond = p.expr(stmt.Cond)
}
n.PtrBody().Set(p.blockStmt(stmt.Then))
n.Body.Set(p.blockStmt(stmt.Then))
if stmt.Else != nil {
e := p.stmt(stmt.Else)
if e.Op() == ir.OBLOCK {
n.PtrRlist().Set(e.List().Slice())
e := e.(*ir.BlockStmt)
n.Else.Set(e.List)
} else {
n.PtrRlist().Set1(e)
n.Else = []ir.Node{e}
}
}
p.closeAnotherScope()
@ -1318,46 +1320,46 @@ func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node {
panic("unexpected RangeClause")
}
n := p.nod(r, ir.ORANGE, nil, p.expr(r.X))
n := ir.NewRangeStmt(p.pos(r), nil, p.expr(r.X), nil)
if r.Lhs != nil {
n.SetColas(r.Def)
n.PtrList().Set(p.assignList(r.Lhs, n, n.Colas()))
n.Def = r.Def
n.Vars.Set(p.assignList(r.Lhs, n, n.Def))
}
n.PtrBody().Set(p.blockStmt(stmt.Body))
n.Body.Set(p.blockStmt(stmt.Body))
p.closeAnotherScope()
return n
}
n := p.nod(stmt, ir.OFOR, nil, nil)
n := ir.NewForStmt(p.pos(stmt), nil, nil, nil, nil)
if stmt.Init != nil {
n.PtrInit().Set1(p.stmt(stmt.Init))
*n.PtrInit() = []ir.Node{p.stmt(stmt.Init)}
}
if stmt.Cond != nil {
n.SetLeft(p.expr(stmt.Cond))
n.Cond = p.expr(stmt.Cond)
}
if stmt.Post != nil {
n.SetRight(p.stmt(stmt.Post))
n.Post = p.stmt(stmt.Post)
}
n.PtrBody().Set(p.blockStmt(stmt.Body))
n.Body.Set(p.blockStmt(stmt.Body))
p.closeAnotherScope()
return n
}
func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node {
p.openScope(stmt.Pos())
n := p.nod(stmt, ir.OSWITCH, nil, nil)
n := ir.NewSwitchStmt(p.pos(stmt), nil, nil)
if stmt.Init != nil {
n.PtrInit().Set1(p.stmt(stmt.Init))
*n.PtrInit() = []ir.Node{p.stmt(stmt.Init)}
}
if stmt.Tag != nil {
n.SetLeft(p.expr(stmt.Tag))
n.Tag = p.expr(stmt.Tag)
}
var tswitch *ir.TypeSwitchGuard
if l := n.Left(); l != nil && l.Op() == ir.OTYPESW {
if l := n.Tag; l != nil && l.Op() == ir.OTYPESW {
tswitch = l.(*ir.TypeSwitchGuard)
}
n.PtrList().Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace))
n.Cases.Set(p.caseClauses(stmt.Body, tswitch, stmt.Rbrace))
p.closeScope(stmt.Rbrace)
return n
@ -1372,14 +1374,14 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitch
}
p.openScope(clause.Pos())
n := p.nod(clause, ir.OCASE, nil, nil)
n := ir.NewCaseStmt(p.pos(clause), nil, nil)
if clause.Cases != nil {
n.PtrList().Set(p.exprList(clause.Cases))
n.List.Set(p.exprList(clause.Cases))
}
if tswitch != nil && tswitch.Left() != nil {
nn := NewName(tswitch.Left().Sym())
declare(nn, dclcontext)
n.PtrRlist().Set1(nn)
if tswitch != nil && tswitch.Tag != nil {
nn := typecheck.NewName(tswitch.Tag.Sym())
typecheck.Declare(nn, typecheck.DeclContext)
n.Vars = []ir.Node{nn}
// keep track of the instances for reporting unused
nn.Defn = tswitch
}
@ -1395,8 +1397,8 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitch
body = body[:len(body)-1]
}
n.PtrBody().Set(p.stmtsFall(body, true))
if l := n.Body().Len(); l > 0 && n.Body().Index(l-1).Op() == ir.OFALL {
n.Body.Set(p.stmtsFall(body, true))
if l := len(n.Body); l > 0 && n.Body[l-1].Op() == ir.OFALL {
if tswitch != nil {
base.Errorf("cannot fallthrough in type switch")
}
@ -1414,8 +1416,8 @@ func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitch
}
func (p *noder) selectStmt(stmt *syntax.SelectStmt) ir.Node {
n := p.nod(stmt, ir.OSELECT, nil, nil)
n.PtrList().Set(p.commClauses(stmt.Body, stmt.Rbrace))
n := ir.NewSelectStmt(p.pos(stmt), nil)
n.Cases.Set(p.commClauses(stmt.Body, stmt.Rbrace))
return n
}
@ -1428,11 +1430,11 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []i
}
p.openScope(clause.Pos())
n := p.nod(clause, ir.OCASE, nil, nil)
n := ir.NewCaseStmt(p.pos(clause), nil, nil)
if clause.Comm != nil {
n.PtrList().Set1(p.stmt(clause.Comm))
n.List = []ir.Node{p.stmt(clause.Comm)}
}
n.PtrBody().Set(p.stmts(clause.Body))
n.Body.Set(p.stmts(clause.Body))
nodes = append(nodes, n)
}
if len(clauses) > 0 {
@ -1443,7 +1445,7 @@ func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []i
func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node {
sym := p.name(label.Label)
lhs := p.nodSym(label, ir.OLABEL, nil, sym)
lhs := ir.NewLabelStmt(p.pos(label), sym)
var ls ir.Node
if label.Stmt != nil { // TODO(mdempsky): Should always be present.
@ -1452,13 +1454,17 @@ func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node {
if ls != nil {
switch ls.Op() {
case ir.OFOR:
ls.SetSym(sym)
ls := ls.(*ir.ForStmt)
ls.Label = sym
case ir.ORANGE:
ls.SetSym(sym)
ls := ls.(*ir.RangeStmt)
ls.Label = sym
case ir.OSWITCH:
ls.SetSym(sym)
ls := ls.(*ir.SwitchStmt)
ls.Label = sym
case ir.OSELECT:
ls.SetSym(sym)
ls := ls.(*ir.SelectStmt)
ls.Label = sym
}
}
}
@ -1466,12 +1472,13 @@ func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node {
l := []ir.Node{lhs}
if ls != nil {
if ls.Op() == ir.OBLOCK {
l = append(l, ls.List().Slice()...)
ls := ls.(*ir.BlockStmt)
l = append(l, ls.List...)
} else {
l = append(l, ls)
}
}
return liststmt(l)
return ir.NewBlockStmt(src.NoXPos, l)
}
var unOps = [...]ir.Op{
@ -1528,7 +1535,7 @@ func (p *noder) binOp(op syntax.Operator) ir.Op {
// literal is not compatible with the current language version.
func checkLangCompat(lit *syntax.BasicLit) {
s := lit.Value
if len(s) <= 2 || langSupported(1, 13, types.LocalPkg) {
if len(s) <= 2 || types.AllowsGoVersion(types.LocalPkg, 1, 13) {
return
}
// len(s) > 2
@ -1578,7 +1585,7 @@ func (p *noder) basicLit(lit *syntax.BasicLit) constant.Value {
// to big.Float to match cmd/compile's historical precision.
// TODO(mdempsky): Remove.
if v.Kind() == constant.Float {
v = constant.Make(bigFloatVal(v))
v = constant.Make(ir.BigFloat(v))
}
return v
@ -1593,7 +1600,7 @@ var tokenForLitKind = [...]token.Token{
}
func (p *noder) name(name *syntax.Name) *types.Sym {
return lookup(name.Value)
return typecheck.Lookup(name.Value)
}
func (p *noder) mkname(name *syntax.Name) ir.Node {
@ -1611,23 +1618,13 @@ func (p *noder) wrapname(n syntax.Node, x ir.Node) ir.Node {
}
fallthrough
case ir.ONAME, ir.ONONAME, ir.OPACK:
p := p.nod(n, ir.OPAREN, x, nil)
p := ir.NewParenExpr(p.pos(n), x)
p.SetImplicit(true)
return p
}
return x
}
func (p *noder) nod(orig syntax.Node, op ir.Op, left, right ir.Node) ir.Node {
return ir.NodAt(p.pos(orig), op, left, right)
}
func (p *noder) nodSym(orig syntax.Node, op ir.Op, left ir.Node, sym *types.Sym) ir.Node {
n := nodSym(op, left, sym)
n.SetPos(p.pos(orig))
return n
}
func (p *noder) pos(n syntax.Node) src.XPos {
// TODO(gri): orig.Pos() should always be known - fix package syntax
xpos := base.Pos
@ -1661,24 +1658,24 @@ var allowedStdPragmas = map[string]bool{
"go:generate": true,
}
// *Pragma is the value stored in a syntax.Pragma during parsing.
type Pragma struct {
// *pragmas is the value stored in a syntax.Pragma during parsing.
type pragmas struct {
Flag ir.PragmaFlag // collected bits
Pos []PragmaPos // position of each individual flag
Embeds []PragmaEmbed
Pos []pragmaPos // position of each individual flag
Embeds []pragmaEmbed
}
type PragmaPos struct {
type pragmaPos struct {
Flag ir.PragmaFlag
Pos syntax.Pos
}
type PragmaEmbed struct {
type pragmaEmbed struct {
Pos syntax.Pos
Patterns []string
}
func (p *noder) checkUnused(pragma *Pragma) {
func (p *noder) checkUnused(pragma *pragmas) {
for _, pos := range pragma.Pos {
if pos.Flag&pragma.Flag != 0 {
p.errorAt(pos.Pos, "misplaced compiler directive")
@ -1691,7 +1688,7 @@ func (p *noder) checkUnused(pragma *Pragma) {
}
}
func (p *noder) checkUnusedDuringParse(pragma *Pragma) {
func (p *noder) checkUnusedDuringParse(pragma *pragmas) {
for _, pos := range pragma.Pos {
if pos.Flag&pragma.Flag != 0 {
p.error(syntax.Error{Pos: pos.Pos, Msg: "misplaced compiler directive"})
@ -1706,9 +1703,9 @@ func (p *noder) checkUnusedDuringParse(pragma *Pragma) {
// pragma is called concurrently if files are parsed concurrently.
func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.Pragma) syntax.Pragma {
pragma, _ := old.(*Pragma)
pragma, _ := old.(*pragmas)
if pragma == nil {
pragma = new(Pragma)
pragma = new(pragmas)
}
if text == "" {
@ -1762,7 +1759,7 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P
p.error(syntax.Error{Pos: pos, Msg: "usage: //go:embed pattern..."})
break
}
pragma.Embeds = append(pragma.Embeds, PragmaEmbed{pos, args})
pragma.Embeds = append(pragma.Embeds, pragmaEmbed{pos, args})
case strings.HasPrefix(text, "go:cgo_import_dynamic "):
// This is permitted for general use because Solaris
@ -1801,7 +1798,7 @@ func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.P
p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s is not allowed in the standard library", verb)})
}
pragma.Flag |= flag
pragma.Pos = append(pragma.Pos, PragmaPos{flag, pos})
pragma.Pos = append(pragma.Pos, pragmaPos{flag, pos})
}
return pragma
@ -1897,3 +1894,178 @@ func parseGoEmbed(args string) ([]string, error) {
}
return list, nil
}
func fakeRecv() *ir.Field {
return ir.NewField(base.Pos, nil, nil, types.FakeRecvType())
}
func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
xtype := p.typeExpr(expr.Type)
ntype := p.typeExpr(expr.Type)
fn := ir.NewFunc(p.pos(expr))
fn.SetIsHiddenClosure(ir.CurFunc != nil)
fn.Nname = ir.NewFuncNameAt(p.pos(expr), ir.BlankNode.Sym(), fn) // filled in by typecheckclosure
fn.Nname.Ntype = xtype
fn.Nname.Defn = fn
clo := ir.NewClosureExpr(p.pos(expr), fn)
fn.ClosureType = ntype
fn.OClosure = clo
p.funcBody(fn, expr.Body)
// closure-specific variables are hanging off the
// ordinary ones in the symbol table; see oldname.
// unhook them.
// make the list of pointers for the closure call.
for _, v := range fn.ClosureVars {
// Unlink from v1; see comment in syntax.go type Param for these fields.
v1 := v.Defn
v1.Name().Innermost = v.Outer
// If the closure usage of v is not dense,
// we need to make it dense; now that we're out
// of the function in which v appeared,
// look up v.Sym in the enclosing function
// and keep it around for use in the compiled code.
//
// That is, suppose we just finished parsing the innermost
// closure f4 in this code:
//
// func f() {
// v := 1
// func() { // f2
// use(v)
// func() { // f3
// func() { // f4
// use(v)
// }()
// }()
// }()
// }
//
// At this point v.Outer is f2's v; there is no f3's v.
// To construct the closure f4 from within f3,
// we need to use f3's v and in this case we need to create f3's v.
// We are now in the context of f3, so calling oldname(v.Sym)
// obtains f3's v, creating it if necessary (as it is in the example).
//
// capturevars will decide whether to use v directly or &v.
v.Outer = oldname(v.Sym()).(*ir.Name)
}
return clo
}
// A function named init is a special case.
// It is called by the initialization before main is run.
// To make it unique within a package and also uncallable,
// the name, normally "pkg.init", is altered to "pkg.init.0".
var renameinitgen int
func renameinit() *types.Sym {
s := typecheck.LookupNum("init.", renameinitgen)
renameinitgen++
return s
}
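// For example, the first init function in a package becomes "init.0", the
// next "init.1", and so on.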
// oldname returns the Node that declares symbol s in the current scope.
// If no such Node currently exists, an ONONAME Node is returned instead.
// Automatically creates a new closure variable if the referenced symbol was
// declared in a different (containing) function.
func oldname(s *types.Sym) ir.Node {
if s.Pkg != types.LocalPkg {
return ir.NewIdent(base.Pos, s)
}
n := ir.AsNode(s.Def)
if n == nil {
// Maybe a top-level declaration will come along later to
// define s. resolve will check s.Def again once all input
// source has been processed.
return ir.NewIdent(base.Pos, s)
}
if ir.CurFunc != nil && n.Op() == ir.ONAME && n.Name().Curfn != nil && n.Name().Curfn != ir.CurFunc {
// Inner func is referring to var in outer func.
//
// TODO(rsc): If there is an outer variable x and we
// are parsing x := 5 inside the closure, until we get to
// the := it looks like a reference to the outer x so we'll
// make x a closure variable unnecessarily.
n := n.(*ir.Name)
c := n.Name().Innermost
if c == nil || c.Curfn != ir.CurFunc {
// Do not have a closure var for the active closure yet; make one.
c = typecheck.NewName(s)
c.Class_ = ir.PAUTOHEAP
c.SetIsClosureVar(true)
c.SetIsDDD(n.IsDDD())
c.Defn = n
// Link into list of active closure variables.
// Popped from list in func funcLit.
c.Outer = n.Name().Innermost
n.Name().Innermost = c
ir.CurFunc.ClosureVars = append(ir.CurFunc.ClosureVars, c)
}
// return ref to closure var, not original
return c
}
return n
}
func varEmbed(p *noder, names []*ir.Name, typ ir.Ntype, exprs []ir.Node, embeds []pragmaEmbed) (newExprs []ir.Node) {
haveEmbed := false
for _, decl := range p.file.DeclList {
imp, ok := decl.(*syntax.ImportDecl)
if !ok {
// imports always come first
break
}
path, _ := strconv.Unquote(imp.Path.Value)
if path == "embed" {
haveEmbed = true
break
}
}
pos := embeds[0].Pos
if !haveEmbed {
p.errorAt(pos, "invalid go:embed: missing import \"embed\"")
return exprs
}
if base.Flag.Cfg.Embed.Patterns == nil {
p.errorAt(pos, "invalid go:embed: build system did not supply embed configuration")
return exprs
}
if len(names) > 1 {
p.errorAt(pos, "go:embed cannot apply to multiple vars")
return exprs
}
if len(exprs) > 0 {
p.errorAt(pos, "go:embed cannot apply to var with initializer")
return exprs
}
if typ == nil {
// Should not happen, since len(exprs) == 0 now.
p.errorAt(pos, "go:embed cannot apply to var without type")
return exprs
}
if typecheck.DeclContext != ir.PEXTERN {
p.errorAt(pos, "go:embed cannot apply to var inside func")
return exprs
}
v := names[0]
typecheck.Target.Embeds = append(typecheck.Target.Embeds, v)
v.Embed = new([]ir.Embed)
for _, e := range embeds {
*v.Embed = append(*v.Embed, ir.Embed{Pos: p.makeXPos(e.Pos), Patterns: e.Patterns})
}
return exprs
}
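// To recap the checks above: //go:embed requires that "embed" be imported,
// that the build system supplied an embed configuration, and that the
// directive apply to exactly one package-scope var with an explicit type
// and no initializer; each accepted directive is recorded on the var and
// in Target.Embeds.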


@ -0,0 +1,72 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package objw
import (
"cmd/compile/internal/base"
"cmd/compile/internal/bitvec"
"cmd/compile/internal/types"
"cmd/internal/obj"
)
func Uint8(s *obj.LSym, off int, v uint8) int {
return UintN(s, off, uint64(v), 1)
}
func Uint16(s *obj.LSym, off int, v uint16) int {
return UintN(s, off, uint64(v), 2)
}
func Uint32(s *obj.LSym, off int, v uint32) int {
return UintN(s, off, uint64(v), 4)
}
func Uintptr(s *obj.LSym, off int, v uint64) int {
return UintN(s, off, v, types.PtrSize)
}
func UintN(s *obj.LSym, off int, v uint64, wid int) int {
if off&(wid-1) != 0 {
base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
}
s.WriteInt(base.Ctxt, int64(off), wid, int64(v))
return off + wid
}
func SymPtr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
off = int(types.Rnd(int64(off), int64(types.PtrSize)))
s.WriteAddr(base.Ctxt, int64(off), types.PtrSize, x, int64(xoff))
off += types.PtrSize
return off
}
func SymPtrOff(s *obj.LSym, off int, x *obj.LSym) int {
s.WriteOff(base.Ctxt, int64(off), x, 0)
off += 4
return off
}
func SymPtrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
s.WriteWeakOff(base.Ctxt, int64(off), x, 0)
off += 4
return off
}
func Global(s *obj.LSym, width int32, flags int16) {
if flags&obj.LOCAL != 0 {
s.Set(obj.AttrLocal, true)
flags &^= obj.LOCAL
}
base.Ctxt.Globl(s, int64(width), int(flags))
}
func BitVec(s *obj.LSym, off int, bv bitvec.BitVec) int {
// Runtime reads the bitmaps as byte arrays. Oblige.
for j := 0; int32(j) < bv.N; j += 8 {
word := bv.B[j/32]
off = Uint8(s, off, uint8(word>>(uint(j)%32)))
}
return off
}
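// A minimal usage sketch for the writers above (lsym, size, kind, and elem
// are hypothetical): callers thread the running offset through each call,
// and each helper returns the next free offset.
//
// off := 0
// off = objw.Uint32(lsym, off, size) // 4 bytes
// off = objw.Uint8(lsym, off, kind) // 1 byte
// off = objw.SymPtr(lsym, off, elem, 0) // aligned to PtrSize, then PtrSize bytes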


@ -0,0 +1,218 @@
// Derived from Inferno utils/6c/txt.c
// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package objw
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
)
var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839
// NewProgs returns a new Progs for fn.
// worker indicates which of the backend workers will use the Progs.
func NewProgs(fn *ir.Func, worker int) *Progs {
pp := new(Progs)
if base.Ctxt.CanReuseProgs() {
sz := len(sharedProgArray) / base.Flag.LowerC
pp.Cache = sharedProgArray[sz*worker : sz*(worker+1)]
}
pp.CurFunc = fn
// prime the pump
pp.Next = pp.NewProg()
pp.Clear(pp.Next)
pp.Pos = fn.Pos()
pp.SetText(fn)
// PCDATA tables implicitly start with index -1.
pp.PrevLive = LivenessIndex{-1, false}
pp.NextLive = pp.PrevLive
return pp
}
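Sharding the shared array by worker index keeps Prog reuse lock-free: with -c=4 (base.Flag.LowerC == 4), sz is 10000/4 = 2500 and worker 2 owns sharedProgArray[5000:7500] (illustrative arithmetic using the array size declared above).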
// Progs accumulates Progs for a function and converts them into machine code.
type Progs struct {
Text *obj.Prog // ATEXT Prog for this function
Next *obj.Prog // next Prog
PC int64 // virtual PC; count of Progs
Pos src.XPos // position to use for new Progs
CurFunc *ir.Func // fn these Progs are for
Cache []obj.Prog // local progcache
CacheIndex int // first free element of progcache
NextLive LivenessIndex // liveness index for the next Prog
PrevLive LivenessIndex // last emitted liveness index
}
// LivenessIndex stores the liveness map information for a Value.
type LivenessIndex struct {
StackMapIndex int
// IsUnsafePoint indicates that this is an unsafe-point.
//
// Note that it's possible for a call Value to have a stack
// map while also being an unsafe-point. This means it cannot
// be preempted at this instruction, but that a preemption or
// stack growth may happen in the called function.
IsUnsafePoint bool
}
// StackMapDontCare indicates that the stack map index at a Value
// doesn't matter.
//
// This is a sentinel value that should never be emitted to the PCDATA
// stream. We use -1000 because that's obviously never a valid stack
// index (but -1 is).
const StackMapDontCare = -1000
// LivenessDontCare indicates that the liveness information doesn't
// matter. Currently it is used in deferreturn liveness when we don't
// actually need it. It should never be emitted to the PCDATA stream.
var LivenessDontCare = LivenessIndex{StackMapDontCare, true}
func (idx LivenessIndex) StackMapValid() bool {
return idx.StackMapIndex != StackMapDontCare
}
func (pp *Progs) NewProg() *obj.Prog {
var p *obj.Prog
if pp.CacheIndex < len(pp.Cache) {
p = &pp.Cache[pp.CacheIndex]
pp.CacheIndex++
} else {
p = new(obj.Prog)
}
p.Ctxt = base.Ctxt
return p
}
// Flush converts from pp to machine code.
func (pp *Progs) Flush() {
plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.CurFunc}
obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath)
}
// Free clears pp and any associated resources.
func (pp *Progs) Free() {
if base.Ctxt.CanReuseProgs() {
// Clear progs to enable GC and avoid abuse.
s := pp.Cache[:pp.CacheIndex]
for i := range s {
s[i] = obj.Prog{}
}
}
// Clear pp to avoid abuse.
*pp = Progs{}
}
// Prog adds a Prog with instruction As to pp.
func (pp *Progs) Prog(as obj.As) *obj.Prog {
if pp.NextLive.StackMapValid() && pp.NextLive.StackMapIndex != pp.PrevLive.StackMapIndex {
// Emit stack map index change.
idx := pp.NextLive.StackMapIndex
pp.PrevLive.StackMapIndex = idx
p := pp.Prog(obj.APCDATA)
p.From.SetConst(objabi.PCDATA_StackMapIndex)
p.To.SetConst(int64(idx))
}
if pp.NextLive.IsUnsafePoint != pp.PrevLive.IsUnsafePoint {
// Emit unsafe-point marker.
pp.PrevLive.IsUnsafePoint = pp.NextLive.IsUnsafePoint
p := pp.Prog(obj.APCDATA)
p.From.SetConst(objabi.PCDATA_UnsafePoint)
if pp.NextLive.IsUnsafePoint {
p.To.SetConst(objabi.PCDATA_UnsafePointUnsafe)
} else {
p.To.SetConst(objabi.PCDATA_UnsafePointSafe)
}
}
p := pp.Next
pp.Next = pp.NewProg()
pp.Clear(pp.Next)
p.Link = pp.Next
if !pp.Pos.IsKnown() && base.Flag.K != 0 {
base.Warn("prog: unknown position (line 0)")
}
p.As = as
p.Pos = pp.Pos
if pp.Pos.IsStmt() == src.PosIsStmt {
// Clear IsStmt for later Progs at this pos, provided that as can be marked as a stmt
if ssa.LosesStmtMark(as) {
return p
}
pp.Pos = pp.Pos.WithNotStmt()
}
return p
}
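So when the liveness index changes between instructions, the PCDATA markers land immediately before the instruction that requested them. The emitted stream looks roughly like this (an illustrative sketch; the call target and index value are invented, symbolic names used for readability):

	PCDATA $PCDATA_StackMapIndex, $2
	PCDATA $PCDATA_UnsafePoint, $PCDATA_UnsafePointUnsafe
	CALL someFunc(SB)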
func (pp *Progs) Clear(p *obj.Prog) {
obj.Nopout(p)
p.As = obj.AEND
p.Pc = pp.PC
pp.PC++
}
func (pp *Progs) Append(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
q := pp.NewProg()
pp.Clear(q)
q.As = as
q.Pos = p.Pos
q.From.Type = ftype
q.From.Reg = freg
q.From.Offset = foffset
q.To.Type = ttype
q.To.Reg = treg
q.To.Offset = toffset
q.Link = p.Link
p.Link = q
return q
}
func (pp *Progs) SetText(fn *ir.Func) {
if pp.Text != nil {
base.Fatalf("Progs.settext called twice")
}
ptxt := pp.Prog(obj.ATEXT)
pp.Text = ptxt
fn.LSym.Func().Text = ptxt
ptxt.From.Type = obj.TYPE_MEM
ptxt.From.Name = obj.NAME_EXTERN
ptxt.From.Sym = fn.LSym
}

View file

@ -2,50 +2,36 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package pkginit
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
)
// A function named init is a special case.
// It is called by the initialization before main is run.
// To make it unique within a package and also uncallable,
// the name, normally "pkg.init", is altered to "pkg.init.0".
var renameinitgen int
// Function collecting autotmps generated during typechecking,
// to be included in the package-level init function.
var initTodo = ir.NewFunc(base.Pos)
func renameinit() *types.Sym {
s := lookupN("init.", renameinitgen)
renameinitgen++
return s
}
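Successive calls mint init.0, init.1, init.2, and so on, so a package with three init functions ends up with three uniquely named, uncallable symbols that are later recorded in the package's .inittask (see Task below).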
// fninit makes and returns an initialization record for the package.
// Task makes and returns an initialization record for the package.
// See runtime/proc.go:initTask for its layout.
// The 3 tasks for initialization are:
// 1) Initialize all of the packages the current package depends on.
// 2) Initialize all the variables that have initializers.
// 3) Run any init functions.
func fninit() *ir.Name {
nf := initOrder(Target.Decls)
func Task() *ir.Name {
nf := initOrder(typecheck.Target.Decls)
var deps []*obj.LSym // initTask records for packages the current package depends on
var fns []*obj.LSym // functions to call for package initialization
// Find imported packages with init tasks.
for _, pkg := range Target.Imports {
n := resolve(ir.NewIdent(base.Pos, pkg.Lookup(".inittask")))
for _, pkg := range typecheck.Target.Imports {
n := typecheck.Resolve(ir.NewIdent(base.Pos, pkg.Lookup(".inittask")))
if n.Op() == ir.ONONAME {
continue
}
if n.Op() != ir.ONAME || n.(*ir.Name).Class() != ir.PEXTERN {
if n.Op() != ir.ONAME || n.(*ir.Name).Class_ != ir.PEXTERN {
base.Fatalf("bad inittask: %v", n)
}
deps = append(deps, n.(*ir.Name).Sym().Linksym())
@ -54,37 +40,37 @@ func fninit() *ir.Name {
// Make a function that contains all the initialization statements.
if len(nf) > 0 {
base.Pos = nf[0].Pos() // prolog/epilog gets line number of first init stmt
initializers := lookup("init")
fn := dclfunc(initializers, ir.NewFuncType(base.Pos, nil, nil, nil))
for _, dcl := range initTodo.Dcl {
initializers := typecheck.Lookup("init")
fn := typecheck.DeclFunc(initializers, ir.NewFuncType(base.Pos, nil, nil, nil))
for _, dcl := range typecheck.InitTodoFunc.Dcl {
dcl.Curfn = fn
}
fn.Dcl = append(fn.Dcl, initTodo.Dcl...)
initTodo.Dcl = nil
fn.Dcl = append(fn.Dcl, typecheck.InitTodoFunc.Dcl...)
typecheck.InitTodoFunc.Dcl = nil
fn.PtrBody().Set(nf)
funcbody()
fn.Body.Set(nf)
typecheck.FinishFuncBody()
typecheckFunc(fn)
Curfn = fn
typecheckslice(nf, ctxStmt)
Curfn = nil
Target.Decls = append(Target.Decls, fn)
typecheck.Func(fn)
ir.CurFunc = fn
typecheck.Stmts(nf)
ir.CurFunc = nil
typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
fns = append(fns, initializers.Linksym())
}
if initTodo.Dcl != nil {
if typecheck.InitTodoFunc.Dcl != nil {
// We only generate temps using initTodo if there
// are package-scope initialization statements, so
// something's weird if we get here.
base.Fatalf("initTodo still has declarations")
}
initTodo = nil
typecheck.InitTodoFunc = nil
// Record user init functions.
for _, fn := range Target.Inits {
for _, fn := range typecheck.Target.Inits {
// Skip init functions with empty bodies.
if fn.Body().Len() == 1 {
if stmt := fn.Body().First(); stmt.Op() == ir.OBLOCK && stmt.(*ir.BlockStmt).List().Len() == 0 {
if len(fn.Body) == 1 {
if stmt := fn.Body[0]; stmt.Op() == ir.OBLOCK && len(stmt.(*ir.BlockStmt).List) == 0 {
continue
}
}
@ -96,24 +82,24 @@ func fninit() *ir.Name {
}
// Make an .inittask structure.
sym := lookup(".inittask")
task := NewName(sym)
sym := typecheck.Lookup(".inittask")
task := typecheck.NewName(sym)
task.SetType(types.Types[types.TUINT8]) // fake type
task.SetClass(ir.PEXTERN)
task.Class_ = ir.PEXTERN
sym.Def = task
lsym := sym.Linksym()
ot := 0
ot = duintptr(lsym, ot, 0) // state: not initialized yet
ot = duintptr(lsym, ot, uint64(len(deps)))
ot = duintptr(lsym, ot, uint64(len(fns)))
ot = objw.Uintptr(lsym, ot, 0) // state: not initialized yet
ot = objw.Uintptr(lsym, ot, uint64(len(deps)))
ot = objw.Uintptr(lsym, ot, uint64(len(fns)))
for _, d := range deps {
ot = dsymptr(lsym, ot, d, 0)
ot = objw.SymPtr(lsym, ot, d, 0)
}
for _, f := range fns {
ot = dsymptr(lsym, ot, f, 0)
ot = objw.SymPtr(lsym, ot, f, 0)
}
// An initTask has pointers, but none into the Go heap.
// It's not quite read-only; the state field must be modifiable.
ggloblsym(lsym, int32(ot), obj.NOPTR)
objw.Global(lsym, int32(ot), obj.NOPTR)
return task
}
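Concretely, for a package with two dependencies and one init function on a 64-bit target, the emitted ..inittask data works out to the following layout (a sketch following runtime/proc.go:initTask, assuming PtrSize == 8):

	offset  0: state = 0 (not initialized yet)
	offset  8: ndeps = 2
	offset 16: nfns  = 1
	offset 24: deps[0] (a dependency's .inittask symbol)
	offset 32: deps[1]
	offset 40: fns[0]  (the package's init function)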


@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package pkginit
import (
"bytes"
@ -11,6 +11,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/staticinit"
)
// Package initialization
@ -77,9 +78,9 @@ type InitOrder struct {
// corresponding list of statements to include in the init() function
// body.
func initOrder(l []ir.Node) []ir.Node {
s := InitSchedule{
initplans: make(map[ir.Node]*InitPlan),
inittemps: make(map[ir.Node]*ir.Name),
s := staticinit.Schedule{
Plans: make(map[ir.Node]*staticinit.Plan),
Temps: make(map[ir.Node]*ir.Name),
}
o := InitOrder{
blocking: make(map[ir.Node][]ir.Node),
@ -91,7 +92,7 @@ func initOrder(l []ir.Node) []ir.Node {
switch n.Op() {
case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
o.processAssign(n)
o.flushReady(s.staticInit)
o.flushReady(s.StaticInit)
case ir.ODCLCONST, ir.ODCLFUNC, ir.ODCLTYPE:
// nop
default:
@ -124,7 +125,7 @@ func initOrder(l []ir.Node) []ir.Node {
base.Fatalf("expected empty map: %v", o.blocking)
}
return s.out
return s.Out
}
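As an illustration (not from this diff): given var a = b + 1 and var b = f() at package scope, processAssign blocks a's assignment on b's, flushReady emits b's first, and both statements end up in s.Out for the generated init body.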
func (o *InitOrder) processAssign(n ir.Node) {
@ -139,7 +140,7 @@ func (o *InitOrder) processAssign(n ir.Node) {
defn := dep.Defn
// Skip dependencies on functions (PFUNC) and
// variables already initialized (InitDone).
if dep.Class() != ir.PEXTERN || o.order[defn] == orderDone {
if dep.Class_ != ir.PEXTERN || o.order[defn] == orderDone {
continue
}
o.order[n]++
@ -203,7 +204,7 @@ func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name) {
*path = append(*path, n)
for _, ref := range refers {
// Short-circuit variables that were initialized.
if ref.Class() == ir.PEXTERN && o.order[ref.Defn] == orderDone {
if ref.Class_ == ir.PEXTERN && o.order[ref.Defn] == orderDone {
continue
}
@ -220,7 +221,7 @@ func reportInitLoopAndExit(l []*ir.Name) {
// the start.
i := -1
for j, n := range l {
if n.Class() == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) {
if n.Class_ == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) {
i = j
}
}
@ -254,11 +255,14 @@ func collectDeps(n ir.Node, transitive bool) ir.NameSet {
d := initDeps{transitive: transitive}
switch n.Op() {
case ir.OAS:
d.inspect(n.Right())
n := n.(*ir.AssignStmt)
d.inspect(n.Y)
case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
d.inspect(n.Rlist().First())
n := n.(*ir.AssignListStmt)
d.inspect(n.Rhs[0])
case ir.ODCLFUNC:
d.inspectList(n.Body())
n := n.(*ir.Func)
d.inspectList(n.Body)
default:
base.Fatalf("unexpected Op: %v", n.Op())
}
@ -286,21 +290,22 @@ func (d *initDeps) inspectList(l ir.Nodes) { ir.VisitList(l, d.cachedVisit()) }
func (d *initDeps) visit(n ir.Node) {
switch n.Op() {
case ir.OMETHEXPR:
d.foundDep(methodExprName(n))
n := n.(*ir.MethodExpr)
d.foundDep(ir.MethodExprName(n))
case ir.ONAME:
n := n.(*ir.Name)
switch n.Class() {
switch n.Class_ {
case ir.PEXTERN, ir.PFUNC:
d.foundDep(n)
}
case ir.OCLOSURE:
n := n.(*ir.ClosureExpr)
d.inspectList(n.Func().Body())
d.inspectList(n.Func.Body)
case ir.ODOTMETH, ir.OCALLPART:
d.foundDep(methodExprName(n))
d.foundDep(ir.MethodExprName(n))
}
}
@ -323,8 +328,8 @@ func (d *initDeps) foundDep(n *ir.Name) {
return
}
d.seen.Add(n)
if d.transitive && n.Class() == ir.PFUNC {
d.inspectList(n.Defn.(*ir.Func).Body())
if d.transitive && n.Class_ == ir.PFUNC {
d.inspectList(n.Defn.(*ir.Func).Body)
}
}
@ -355,9 +360,11 @@ func (s *declOrder) Pop() interface{} {
func firstLHS(n ir.Node) *ir.Name {
switch n.Op() {
case ir.OAS:
return n.Left().Name()
n := n.(*ir.AssignStmt)
return n.X.Name()
case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2RECV, ir.OAS2MAPR:
return n.List().First().Name()
n := n.(*ir.AssignListStmt)
return n.Lhs[0].Name()
}
base.Fatalf("unexpected Op: %v", n.Op())


@ -5,12 +5,12 @@
package ppc64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/ppc64"
"cmd/internal/objabi"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &ppc64.Linkppc64
if objabi.GOARCH == "ppc64le" {
arch.LinkArch = &ppc64.Linkppc64le


@ -6,44 +6,46 @@ package ppc64
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
)
func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i)
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
p = pp.Append(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i)
}
} else if cnt <= int64(128*gc.Widthptr) {
p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
} else if cnt <= int64(128*types.PtrSize) {
p = pp.Append(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
} else {
p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
p.Reg = ppc64.REGSP
p = pp.Appendpp(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
p = pp.Appendpp(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
p.Reg = ppc64.REGRT1
p = pp.Appendpp(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(gc.Widthptr))
p = pp.Append(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(types.PtrSize))
p1 := p
p = pp.Appendpp(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
p = pp.Appendpp(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
gc.Patch(p, p1)
p = pp.Append(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
p = pp.Append(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
p.To.SetTarget(p1)
}
return p
}
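The strategy selects on cnt; with PtrSize == 8 the cutovers above work out to: below 32 bytes, unrolled MOVD stores; up to 1024 bytes, a jump into Duffzero at offset 4*(128-cnt/8) so that exactly cnt bytes are cleared; anything larger, the explicit store loop (worked arithmetic from the code above).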
func ginsnop(pp *gc.Progs) *obj.Prog {
func ginsnop(pp *objw.Progs) *obj.Prog {
p := pp.Prog(ppc64.AOR)
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REG_R0
@ -52,7 +54,7 @@ func ginsnop(pp *gc.Progs) *obj.Prog {
return p
}
func ginsnopdefer(pp *gc.Progs) *obj.Prog {
func ginsnopdefer(pp *objw.Progs) *obj.Prog {
// On PPC64 two nops are required in the defer case.
//
// (see gc/cgen.go, gc/plive.go -- copy of comment below)


@ -6,10 +6,10 @@ package ppc64
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
@ -19,7 +19,7 @@ import (
)
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
// flive := b.FlagsLiveAtEnd
// if b.Control != nil && b.Control.Type.IsFlags() {
// flive = true
@ -101,7 +101,7 @@ func storeByType(t *types.Type) obj.As {
panic("bad store type")
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy:
t := v.Type
@ -210,7 +210,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// BNE retry
p3 := s.Prog(ppc64.ABNE)
p3.To.Type = obj.TYPE_BRANCH
gc.Patch(p3, p)
p3.To.SetTarget(p)
case ssa.OpPPC64LoweredAtomicAdd32,
ssa.OpPPC64LoweredAtomicAdd64:
@ -254,7 +254,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// BNE retry
p4 := s.Prog(ppc64.ABNE)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
p4.To.SetTarget(p)
// Ensure a 32 bit result
if v.Op == ssa.OpPPC64LoweredAtomicAdd32 {
@ -300,7 +300,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// BNE retry
p2 := s.Prog(ppc64.ABNE)
p2.To.Type = obj.TYPE_BRANCH
gc.Patch(p2, p)
p2.To.SetTarget(p)
// ISYNC
pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE
@ -348,7 +348,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// ISYNC
pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE
gc.Patch(p2, pisync)
p2.To.SetTarget(pisync)
case ssa.OpPPC64LoweredAtomicStore8,
ssa.OpPPC64LoweredAtomicStore32,
@ -439,7 +439,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// BNE retry
p4 := s.Prog(ppc64.ABNE)
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p)
p4.To.SetTarget(p)
// LWSYNC - Assuming shared data not write-through-required nor
// caching-inhibited. See Appendix B.2.1.1 in the ISA 2.07b.
// If the operation is a CAS-Release, then synchronization is not necessary.
@ -462,14 +462,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p7.From.Offset = 0
p7.To.Type = obj.TYPE_REG
p7.To.Reg = out
gc.Patch(p2, p7)
p2.To.SetTarget(p7)
// done (label)
p8 := s.Prog(obj.ANOP)
gc.Patch(p6, p8)
p6.To.SetTarget(p8)
case ssa.OpPPC64LoweredGetClosurePtr:
// Closure pointer is R11 (already)
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpPPC64LoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
@ -491,7 +491,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpLoadReg:
loadOp := loadByType(v.Type)
p := s.Prog(loadOp)
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@ -500,7 +500,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeOp)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpPPC64DIVD:
// For now,
@ -539,10 +539,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = r
p.From.Type = obj.TYPE_REG
p.From.Reg = r0
gc.Patch(pbahead, p)
pbahead.To.SetTarget(p)
p = s.Prog(obj.ANOP)
gc.Patch(pbover, p)
pbover.To.SetTarget(p)
case ssa.OpPPC64DIVW:
// word-width version of above
@ -574,10 +574,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = r
p.From.Type = obj.TYPE_REG
p.From.Reg = r0
gc.Patch(pbahead, p)
pbahead.To.SetTarget(p)
p = s.Prog(obj.ANOP)
gc.Patch(pbover, p)
pbover.To.SetTarget(p)
case ssa.OpPPC64CLRLSLWI:
r := v.Reg()
@ -758,7 +758,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
}
@ -819,7 +819,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
// Load go.string using 0 offset
@ -837,7 +837,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@ -871,7 +871,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = ppc64.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore, ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
p := s.Prog(v.Op.Asm())
@ -879,7 +879,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpPPC64MOVDstoreidx, ssa.OpPPC64MOVWstoreidx, ssa.OpPPC64MOVHstoreidx, ssa.OpPPC64MOVBstoreidx,
ssa.OpPPC64FMOVDstoreidx, ssa.OpPPC64FMOVSstoreidx, ssa.OpPPC64MOVDBRstoreidx, ssa.OpPPC64MOVWBRstoreidx,
@ -1028,7 +1028,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = ppc64.BO_BCTR
p.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_BRANCH
gc.Patch(p, top)
p.To.SetTarget(top)
}
// When ctr == 1 the loop was not generated but
// there are at least 64 bytes to clear, so add
@ -1228,7 +1228,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = ppc64.BO_BCTR
p.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_BRANCH
gc.Patch(p, top)
p.To.SetTarget(top)
}
// when ctr == 1 the loop was not generated but
@ -1407,7 +1407,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = ppc64.BO_BCTR
p.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_BRANCH
gc.Patch(p, top)
p.To.SetTarget(top)
// srcReg and dstReg were incremented in the loop, so
// later instructions start with offset 0.
@ -1654,7 +1654,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = ppc64.BO_BCTR
p.Reg = ppc64.REG_R0
p.To.Type = obj.TYPE_BRANCH
gc.Patch(p, top)
p.To.SetTarget(top)
// srcReg and dstReg were incremented in the loop, so
// later instructions start with offset 0.
@ -1809,7 +1809,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpPPC64LoweredNilCheck:
@ -1840,14 +1840,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// NOP (so the BNE has somewhere to land)
nop := s.Prog(obj.ANOP)
gc.Patch(p2, nop)
p2.To.SetTarget(nop)
} else {
// Issue a load which will fault if arg is nil.
p := s.Prog(ppc64.AMOVBZ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP
}
@ -1893,7 +1893,7 @@ var blockJump = [...]struct {
ssa.BlockPPC64FGT: {ppc64.ABGT, ppc64.ABLE, false, false},
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockDefer:
// defer returns in R3:
@ -1907,18 +1907,18 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p = s.Prog(ppc64.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:


@ -5,11 +5,11 @@
package riscv64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/riscv"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &riscv.LinkRISCV64
arch.REGSP = riscv.REG_SP


@ -6,12 +6,14 @@ package riscv64
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/riscv"
)
func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
@ -19,20 +21,20 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
// Adjust the frame to account for LR.
off += base.Ctxt.FixedFrameSize()
if cnt < int64(4*gc.Widthptr) {
for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i)
if cnt < int64(4*types.PtrSize) {
for i := int64(0); i < cnt; i += int64(types.PtrSize) {
p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i)
}
return p
}
if cnt <= int64(128*gc.Widthptr) {
p = pp.Appendpp(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0)
if cnt <= int64(128*types.PtrSize) {
p = pp.Append(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0)
p.Reg = riscv.REG_SP
p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Offset = 8 * (128 - cnt/int64(gc.Widthptr))
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
return p
}
@ -43,15 +45,15 @@ func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
// MOV ZERO, (T0)
// ADD $Widthptr, T0
// BNE T0, T1, loop
p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0)
p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0)
p.Reg = riscv.REG_SP
p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0)
p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0)
p.Reg = riscv.REG_T0
p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0)
p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0)
loop := p
p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, riscv.REG_T0, 0)
p = pp.Appendpp(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0)
p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, riscv.REG_T0, 0)
p = pp.Append(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0)
p.Reg = riscv.REG_T1
gc.Patch(p, loop)
p.To.SetTarget(loop)
return p
}


@ -5,12 +5,12 @@
package riscv64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/objw"
"cmd/internal/obj"
"cmd/internal/obj/riscv"
)
func ginsnop(pp *gc.Progs) *obj.Prog {
func ginsnop(pp *objw.Progs) *obj.Prog {
// Hardware nop is ADD $0, ZERO
p := pp.Prog(riscv.AADD)
p.From.Type = obj.TYPE_CONST


@ -6,9 +6,9 @@ package riscv64
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/riscv"
@ -180,9 +180,9 @@ func largestMove(alignment int64) (obj.As, int64) {
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
// RISC-V has no flags, so this is a no-op.
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {}
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
s.SetPos(v.Pos)
switch v.Op {
@ -191,7 +191,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpArg:
// input args need no code
case ssa.OpPhi:
gc.CheckLoweredPhi(v)
ssagen.CheckLoweredPhi(v)
case ssa.OpCopy, ssa.OpRISCV64MOVconvert, ssa.OpRISCV64MOVDreg:
if v.Type.IsMemory() {
return
@ -221,7 +221,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
@ -232,7 +232,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
// nothing to do
case ssa.OpRISCV64MOVBreg, ssa.OpRISCV64MOVHreg, ssa.OpRISCV64MOVWreg,
@ -323,10 +323,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case *ir.Name:
wantreg = "SP"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case nil:
// No sym, just MOVW $off(SP), R
wantreg = "SP"
@ -342,7 +342,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpRISCV64MOVBstore, ssa.OpRISCV64MOVHstore, ssa.OpRISCV64MOVWstore, ssa.OpRISCV64MOVDstore,
@ -352,14 +352,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = riscv.REG_ZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
@ -377,7 +377,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpRISCV64LoweredAtomicLoad8:
@ -502,7 +502,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p4.From.Reg = riscv.REG_TMP
p4.Reg = riscv.REG_ZERO
p4.To.Type = obj.TYPE_BRANCH
gc.Patch(p4, p1)
p4.To.SetTarget(p1)
p5 := s.Prog(riscv.AMOV)
p5.From.Type = obj.TYPE_CONST
@ -511,7 +511,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p5.To.Reg = out
p6 := s.Prog(obj.ANOP)
gc.Patch(p2, p6)
p2.To.SetTarget(p6)
case ssa.OpRISCV64LoweredZero:
mov, sz := largestMove(v.AuxInt)
@ -537,7 +537,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p3.Reg = v.Args[0].Reg()
p3.From.Type = obj.TYPE_REG
p3.From.Reg = v.Args[1].Reg()
gc.Patch(p3, p)
p3.To.SetTarget(p)
case ssa.OpRISCV64LoweredMove:
mov, sz := largestMove(v.AuxInt)
@ -577,7 +577,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p5.Reg = v.Args[1].Reg()
p5.From.Type = obj.TYPE_REG
p5.From.Reg = v.Args[2].Reg()
gc.Patch(p5, p)
p5.To.SetTarget(p)
case ssa.OpRISCV64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
@ -585,7 +585,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(riscv.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = riscv.REG_ZERO
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers
@ -594,7 +594,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpRISCV64LoweredGetClosurePtr:
// Closure pointer is S4 (riscv.REG_CTXT).
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpRISCV64LoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
@ -614,14 +614,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ADUFFZERO)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffzero
p.To.Sym = ir.Syms.Duffzero
p.To.Offset = v.AuxInt
case ssa.OpRISCV64DUFFCOPY:
p := s.Prog(obj.ADUFFCOPY)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.Duffcopy
p.To.Sym = ir.Syms.Duffcopy
p.To.Offset = v.AuxInt
default:
@ -644,7 +644,7 @@ var blockBranch = [...]obj.As{
ssa.BlockRISCV64BNEZ: riscv.ABNEZ,
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
s.SetPos(b.Pos)
switch b.Kind {
@ -657,17 +657,17 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.From.Type = obj.TYPE_REG
p.From.Reg = riscv.REG_ZERO
p.Reg = riscv.REG_A0
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:


@ -5,11 +5,11 @@
package s390x
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/s390x"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &s390x.Links390x
arch.REGSP = s390x.REGSP
arch.MAXWIDTH = 1 << 50


@ -6,7 +6,7 @@ package s390x
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/objw"
"cmd/internal/obj"
"cmd/internal/obj/s390x"
)
@ -18,7 +18,7 @@ import (
const clearLoopCutoff = 1024
// zerorange clears the stack in the given range.
func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
if cnt == 0 {
return p
}
@ -31,7 +31,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
// need to create a copy of the stack pointer that we can adjust.
// We also need to do this if we are going to loop.
if off < 0 || off > 4096-clearLoopCutoff || cnt > clearLoopCutoff {
p = pp.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, s390x.REGRT1, 0)
p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, s390x.REGRT1, 0)
p.Reg = int16(s390x.REGSP)
reg = s390x.REGRT1
off = 0
@ -40,12 +40,12 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
// Generate a loop of large clears.
if cnt > clearLoopCutoff {
ireg := int16(s390x.REGRT2) // register holds number of remaining loop iterations
p = pp.Appendpp(p, s390x.AMOVD, obj.TYPE_CONST, 0, cnt/256, obj.TYPE_REG, ireg, 0)
p = pp.Appendpp(p, s390x.ACLEAR, obj.TYPE_CONST, 0, 256, obj.TYPE_MEM, reg, off)
p = pp.Append(p, s390x.AMOVD, obj.TYPE_CONST, 0, cnt/256, obj.TYPE_REG, ireg, 0)
p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, 256, obj.TYPE_MEM, reg, off)
pl := p
p = pp.Appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0)
p = pp.Appendpp(p, s390x.ABRCTG, obj.TYPE_REG, ireg, 0, obj.TYPE_BRANCH, 0, 0)
gc.Patch(p, pl)
p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0)
p = pp.Append(p, s390x.ABRCTG, obj.TYPE_REG, ireg, 0, obj.TYPE_BRANCH, 0, 0)
p.To.SetTarget(pl)
cnt = cnt % 256
}
@ -70,11 +70,11 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
case 2:
ins = s390x.AMOVH
}
p = pp.Appendpp(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, off)
p = pp.Append(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, off)
// Handle clears that would require multiple move instructions with CLEAR (assembled as XC).
default:
p = pp.Appendpp(p, s390x.ACLEAR, obj.TYPE_CONST, 0, n, obj.TYPE_MEM, reg, off)
p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, n, obj.TYPE_MEM, reg, off)
}
cnt -= n
@ -84,6 +84,6 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
return p
}
func ginsnop(pp *gc.Progs) *obj.Prog {
func ginsnop(pp *objw.Progs) *obj.Prog {
return pp.Prog(s390x.ANOPH)
}


@ -8,16 +8,16 @@ import (
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/s390x"
)
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
flive := b.FlagsLiveAtEnd
for _, c := range b.ControlValues() {
flive = c.Type.IsFlags() || flive
@ -135,7 +135,7 @@ func moveByType(t *types.Type) obj.As {
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
@ -148,7 +148,7 @@ func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
// dest := src(From) op off
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregregimm(s *gc.SSAGenState, op obj.As, dest, src int16, off int64) *obj.Prog {
func opregregimm(s *ssagen.State, op obj.As, dest, src int16, off int64) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_CONST
p.From.Offset = off
@ -158,7 +158,7 @@ func opregregimm(s *gc.SSAGenState, op obj.As, dest, src int16, off int64) *obj.
return p
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpS390XSLD, ssa.OpS390XSLW,
ssa.OpS390XSRD, ssa.OpS390XSRW,
@ -395,14 +395,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_ADDR
p.From.Reg = r
p.From.Index = i
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpS390XMOVDaddr:
p := s.Prog(s390x.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU:
@ -448,7 +448,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpS390XMOVDload,
@ -459,7 +459,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx,
@ -476,7 +476,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = r
p.From.Scale = 1
p.From.Index = i
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
@ -487,7 +487,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpS390XMOVBstoreidx, ssa.OpS390XMOVHstoreidx, ssa.OpS390XMOVWstoreidx, ssa.OpS390XMOVDstoreidx,
ssa.OpS390XMOVHBRstoreidx, ssa.OpS390XMOVWBRstoreidx, ssa.OpS390XMOVDBRstoreidx,
ssa.OpS390XFMOVSstoreidx, ssa.OpS390XFMOVDstoreidx:
@ -503,7 +503,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = r
p.To.Scale = 1
p.To.Index = i
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
@ -511,7 +511,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, sc.Off())
ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg,
ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg,
ssa.OpS390XLDGR, ssa.OpS390XLGDR,
@ -530,7 +530,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, sc.Off())
ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.OpCopy:
if v.Type.IsMemory() {
return
@ -546,7 +546,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
@ -557,10 +557,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpS390XLoweredGetClosurePtr:
// Closure pointer is R12 (already)
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpS390XLoweredRound32F, ssa.OpS390XLoweredRound64F:
// input is already rounded
case ssa.OpS390XLoweredGetG:
@ -593,7 +593,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpS390XFLOGR, ssa.OpS390XPOPCNT,
ssa.OpS390XNEG, ssa.OpS390XNEGW,
@ -637,7 +637,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(s390x.AMOVBZ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = s390x.REGTMP
if logopt.Enabled() {
@ -672,7 +672,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.Reg = v.Args[len(v.Args)-2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpS390XLoweredMove:
// Inputs must be valid pointers to memory,
// so adjust arg0 and arg1 as part of the expansion.
@ -709,7 +709,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
bne := s.Prog(s390x.ABLT)
bne.To.Type = obj.TYPE_BRANCH
gc.Patch(bne, mvc)
bne.To.SetTarget(mvc)
if v.AuxInt > 0 {
mvc := s.Prog(s390x.AMVC)
@ -751,7 +751,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
bne := s.Prog(s390x.ABLT)
bne.To.Type = obj.TYPE_BRANCH
gc.Patch(bne, clear)
bne.To.SetTarget(clear)
if v.AuxInt > 0 {
clear := s.Prog(s390x.ACLEAR)
@ -764,7 +764,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpS390XMOVBatomicstore, ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore:
@ -773,7 +773,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpS390XLAN, ssa.OpS390XLAO:
// LA(N|O) Ry, TMP, 0(Rx)
op := s.Prog(v.Op.Asm())
@ -808,7 +808,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpS390XLoweredAtomicCas32, ssa.OpS390XLoweredAtomicCas64:
// Convert the flags output of CS{,G} into a bool.
// CS{,G} arg1, arg2, arg0
@ -824,7 +824,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
cs.Reg = v.Args[2].Reg() // new
cs.To.Type = obj.TYPE_MEM
cs.To.Reg = v.Args[0].Reg()
gc.AddAux(&cs.To, v)
ssagen.AddAux(&cs.To, v)
// MOVD $0, ret
movd := s.Prog(s390x.AMOVD)
@ -846,7 +846,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
// NOP (so the BNE has somewhere to land)
nop := s.Prog(obj.ANOP)
gc.Patch(bne, nop)
bne.To.SetTarget(nop)
case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64:
// Loop until the CS{,G} succeeds.
// MOV{WZ,D} arg0, ret
@ -859,7 +859,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
load.From.Reg = v.Args[0].Reg()
load.To.Type = obj.TYPE_REG
load.To.Reg = v.Reg0()
gc.AddAux(&load.From, v)
ssagen.AddAux(&load.From, v)
// CS{,G} ret, arg1, arg0
cs := s.Prog(v.Op.Asm())
@ -868,12 +868,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
cs.Reg = v.Args[1].Reg() // new
cs.To.Type = obj.TYPE_MEM
cs.To.Reg = v.Args[0].Reg()
gc.AddAux(&cs.To, v)
ssagen.AddAux(&cs.To, v)
// BNE cs
bne := s.Prog(s390x.ABNE)
bne.To.Type = obj.TYPE_BRANCH
gc.Patch(bne, cs)
bne.To.SetTarget(cs)
case ssa.OpS390XSYNC:
s.Prog(s390x.ASYNC)
case ssa.OpClobber:
@ -908,14 +908,14 @@ func blockAsm(b *ssa.Block) obj.As {
panic("unreachable")
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
// Handle generic blocks first.
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(s390x.ABR)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
return
case ssa.BlockDefer:

View file

@ -137,7 +137,6 @@ func init() {
// Initialize just enough of the universe and the types package to make our tests function.
// TODO(josharian): move universe initialization to the types package,
// so this test setup can share it.
types.Dowidth = func(t *types.Type) {}
for _, typ := range [...]struct {
width int64


@ -5,6 +5,7 @@
package ssa
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
@ -495,3 +496,13 @@ func (v *Value) removeable() bool {
// TODO(mdempsky): Shouldn't be necessary; see discussion at golang.org/cl/275756
func (*Value) CanBeAnSSAAux() {}
// AutoVar returns a *Name and int64 representing the auto variable and offset within it
// where v should be spilled.
func AutoVar(v *Value) (*ir.Name, int64) {
loc := v.Block.Func.RegAlloc[v.ID].(LocalSlot)
if v.Type.Size() > loc.Type.Size() {
v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
}
return loc.N, loc.Off
}


@ -0,0 +1,367 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run mkbuiltin.go
package ssagen
import (
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"cmd/compile/internal/base"
"cmd/compile/internal/escape"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
)
// useABIWrapGen returns TRUE if the compiler should generate an
// ABI wrapper for the function 'f'.
func useABIWrapGen(f *ir.Func) bool {
if !base.Flag.ABIWrap {
return false
}
// Support limit option for bisecting.
if base.Flag.ABIWrapLimit == 1 {
return false
}
if base.Flag.ABIWrapLimit < 1 {
return true
}
base.Flag.ABIWrapLimit--
if base.Debug.ABIWrap != 0 && base.Flag.ABIWrapLimit == 1 {
fmt.Fprintf(os.Stderr, "=-= limit reached after new wrapper for %s\n",
f.LSym.Name)
}
return true
}
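The countdown semantics mean a limit of N allows roughly the first N-1 candidates: with ABIWrapLimit starting at 3, the first two calls decrement 3 to 2 to 1 and return true, and every later call hits the ==1 check and returns false (a worked trace of the code above; the exact flag spelling is not shown in this diff).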
// symabiDefs and symabiRefs record the defined and referenced ABIs of
// symbols required by non-Go code. These are keyed by link symbol
// name, where the local package prefix is always `"".`
var symabiDefs, symabiRefs map[string]obj.ABI
func CgoSymABIs() {
// The linker expects an ABI0 wrapper for all cgo-exported
// functions.
for _, prag := range typecheck.Target.CgoPragmas {
switch prag[0] {
case "cgo_export_static", "cgo_export_dynamic":
if symabiRefs == nil {
symabiRefs = make(map[string]obj.ABI)
}
symabiRefs[prag[1]] = obj.ABI0
}
}
}
// ReadSymABIs reads a symabis file that specifies definitions and
// references of text symbols by ABI.
//
// The symabis format is a set of lines, where each line is a sequence
// of whitespace-separated fields. The first field is a verb and is
// either "def" for defining a symbol ABI or "ref" for referencing a
// symbol using an ABI. For both "def" and "ref", the second field is
// the symbol name and the third field is the ABI name, as one of the
// named cmd/internal/obj.ABI constants.
func ReadSymABIs(file, myimportpath string) {
data, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("-symabis: %v", err)
}
symabiDefs = make(map[string]obj.ABI)
symabiRefs = make(map[string]obj.ABI)
localPrefix := ""
if myimportpath != "" {
// Symbols in this package may be written either as
// "".X or with the package's import path already in
// the symbol.
localPrefix = objabi.PathToPrefix(myimportpath) + "."
}
for lineNum, line := range strings.Split(string(data), "\n") {
lineNum++ // 1-based
line = strings.TrimSpace(line)
if line == "" || strings.HasPrefix(line, "#") {
continue
}
parts := strings.Fields(line)
switch parts[0] {
case "def", "ref":
// Parse line.
if len(parts) != 3 {
log.Fatalf(`%s:%d: invalid symabi: syntax is "%s sym abi"`, file, lineNum, parts[0])
}
sym, abistr := parts[1], parts[2]
abi, valid := obj.ParseABI(abistr)
if !valid {
log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abistr)
}
// If the symbol is already prefixed with
// myimportpath, rewrite it to start with ""
// so it matches the compiler's internal
// symbol names.
if localPrefix != "" && strings.HasPrefix(sym, localPrefix) {
sym = `"".` + sym[len(localPrefix):]
}
// Record for later.
if parts[0] == "def" {
symabiDefs[sym] = abi
} else {
symabiRefs[sym] = abi
}
default:
log.Fatalf(`%s:%d: invalid symabi type "%s"`, file, lineNum, parts[0])
}
}
}
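A minimal symabis file in the format described above might read (hypothetical symbols):

	# assembly definitions and references, one per line
	def main.aesEncrypt ABI0
	ref runtime.memmove ABIInternal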
// InitLSym defines f's obj.LSym and initializes it based on the
// properties of f. This includes setting the symbol flags and ABI and
// creating and initializing related DWARF symbols.
//
// InitLSym must be called exactly once per function and must be
// called for both functions with bodies and functions without bodies.
// For body-less functions, we only create the LSym; for functions
// with bodies we call a helper to set up and populate the LSym.
func InitLSym(f *ir.Func, hasBody bool) {
// FIXME: for new-style ABI wrappers, we set up the lsym at the
// point the wrapper is created.
if f.LSym != nil && base.Flag.ABIWrap {
return
}
selectLSym(f, hasBody)
if hasBody {
setupTextLSym(f, 0)
}
}
// selectLSym sets up the LSym for a given function, and
// makes calls to helpers to create ABI wrappers if needed.
func selectLSym(f *ir.Func, hasBody bool) {
if f.LSym != nil {
base.Fatalf("Func.initLSym called twice")
}
if nam := f.Nname; !ir.IsBlank(nam) {
var wrapperABI obj.ABI
needABIWrapper := false
defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()]
if hasDefABI && defABI == obj.ABI0 {
// Symbol is defined as ABI0. Create an
// Internal -> ABI0 wrapper.
f.LSym = nam.Sym().LinksymABI0()
needABIWrapper, wrapperABI = true, obj.ABIInternal
} else {
f.LSym = nam.Sym().Linksym()
// No ABI override. Check that the symbol is
// using the expected ABI.
want := obj.ABIInternal
if f.LSym.ABI() != want {
base.Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.LSym.Name, f.LSym.ABI(), want)
}
}
if f.Pragma&ir.Systemstack != 0 {
f.LSym.Set(obj.AttrCFunc, true)
}
isLinknameExported := nam.Sym().Linkname != "" && (hasBody || hasDefABI)
if abi, ok := symabiRefs[f.LSym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
// Either 1) this symbol is definitely
// referenced as ABI0 from this package; or 2)
// this symbol is defined in this package but
// given a linkname, indicating that it may be
// referenced from another package. Create an
// ABI0 -> Internal wrapper so it can be
// called as ABI0. In case 2, it's important
// that we know it's defined in this package
// since other packages may "pull" symbols
// using linkname and we don't want to create
// duplicate ABI wrappers.
if f.LSym.ABI() != obj.ABI0 {
needABIWrapper, wrapperABI = true, obj.ABI0
}
}
if needABIWrapper {
if !useABIWrapGen(f) {
// Fallback: use alias instead. FIXME.
// These LSyms have the same name as the
// native function, so we create them directly
// rather than looking them up. The uniqueness
// of f.lsym ensures uniqueness of asym.
asym := &obj.LSym{
Name: f.LSym.Name,
Type: objabi.SABIALIAS,
R: []obj.Reloc{{Sym: f.LSym}}, // 0 size, so "informational"
}
asym.SetABI(wrapperABI)
asym.Set(obj.AttrDuplicateOK, true)
base.Ctxt.ABIAliases = append(base.Ctxt.ABIAliases, asym)
} else {
if base.Debug.ABIWrap != 0 {
fmt.Fprintf(os.Stderr, "=-= %v to %v wrapper for %s.%s\n",
wrapperABI, 1-wrapperABI, types.LocalPkg.Path, f.LSym.Name)
}
makeABIWrapper(f, wrapperABI)
}
}
}
}
// makeABIWrapper creates a new function that wraps a cross-ABI call
// to "f". The wrapper is marked as an ABIWRAPPER.
func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
// Q: is this needed?
savepos := base.Pos
savedclcontext := typecheck.DeclContext
savedcurfn := ir.CurFunc
base.Pos = base.AutogeneratedPos
typecheck.DeclContext = ir.PEXTERN
// At the moment we don't support wrapping a method; we'd need machinery
// below to handle the receiver. Panic if we see this scenario.
ft := f.Nname.Ntype.Type()
if ft.NumRecvs() != 0 {
panic("makeABIWrapper support for wrapping methods not implemented")
}
// Manufacture a new func type to use for the wrapper.
var noReceiver *ir.Field
tfn := ir.NewFuncType(base.Pos,
noReceiver,
typecheck.NewFuncParams(ft.Params(), true),
typecheck.NewFuncParams(ft.Results(), false))
// Reuse f's types.Sym to create a new ODCLFUNC/function.
fn := typecheck.DeclFunc(f.Nname.Sym(), tfn)
fn.SetDupok(true)
fn.SetWrapper(true) // ignore frame for panic+recover matching
// Select LSYM now.
asym := base.Ctxt.LookupABI(f.LSym.Name, wrapperABI)
asym.Type = objabi.STEXT
if fn.LSym != nil {
panic("unexpected")
}
fn.LSym = asym
// ABI0-to-ABIInternal wrappers will be mainly loading params from
// stack into registers (and/or storing stack locations back to
// registers after the wrapped call); in most cases they won't
// need to allocate stack space, so it should be OK to mark them
// as NOSPLIT in these cases. In addition, my assumption is that
// functions written in assembly are NOSPLIT in most (but not all)
// cases. In the case of an ABIInternal target that has too many
// parameters to fit into registers, the wrapper would need to
// allocate stack space, but this seems like an unlikely scenario.
// Hence: mark these wrappers NOSPLIT.
//
// ABIInternal-to-ABI0 wrappers on the other hand will be taking
// things in registers and pushing them onto the stack prior to
// the ABI0 call, meaning that they will always need to allocate
// stack space. If the compiler marks them as NOSPLIT this seems
// as though it could lead to situations where the linker's
// nosplit-overflow analysis would trigger a link failure. On the
// other hand, if they are not tagged NOSPLIT then this could cause
// problems when building the runtime (since there may be calls to
// asm routines in cases where it's not safe to grow the stack). In
// most cases the wrapper would be (in effect) inlined, but there
// may be indirect calls from the runtime that could run into
// trouble here.
// FIXME: at the moment all.bash does not pass when I leave out
// NOSPLIT for these wrappers, so all are currently tagged with NOSPLIT.
setupTextLSym(fn, obj.NOSPLIT|obj.ABIWRAPPER)
// Generate call. Use tail call if no params and no returns,
// but a regular call otherwise.
//
// Note: ideally we would be using a tail call in cases where
// there are params but no returns for ABI0->ABIInternal wrappers,
// provided that all params fit into registers (e.g. we don't have
// to allocate any stack space). Doing this will require some
// extra work in typecheck/walk/ssa; we might want to add a new node
// OTAILCALL or something to this effect.
var tail ir.Node
if tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0 {
tail = ir.NewBranchStmt(base.Pos, ir.ORETJMP, f.Nname.Sym())
} else {
call := ir.NewCallExpr(base.Pos, ir.OCALL, f.Nname, nil)
call.Args.Set(ir.ParamNames(tfn.Type()))
call.IsDDD = tfn.Type().IsVariadic()
tail = call
if tfn.Type().NumResults() > 0 {
n := ir.NewReturnStmt(base.Pos, nil)
n.Results = []ir.Node{call}
tail = n
}
}
fn.Body.Append(tail)
typecheck.FinishFuncBody()
if base.Debug.DclStack != 0 {
types.CheckDclstack()
}
typecheck.Func(fn)
ir.CurFunc = fn
typecheck.Stmts(fn.Body)
escape.Batch([]*ir.Func{fn}, false)
typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
// Restore previous context.
base.Pos = savepos
typecheck.DeclContext = savedclcontext
ir.CurFunc = savedcurfn
}
// setupTextLSym initializes the LSym for a with-body text symbol.
func setupTextLSym(f *ir.Func, flag int) {
if f.Dupok() {
flag |= obj.DUPOK
}
if f.Wrapper() {
flag |= obj.WRAPPER
}
if f.Needctxt() {
flag |= obj.NEEDCTXT
}
if f.Pragma&ir.Nosplit != 0 {
flag |= obj.NOSPLIT
}
if f.ReflectMethod() {
flag |= obj.REFLECTMETHOD
}
// Clumsy but important.
// See test/recover.go for test cases and src/reflect/value.go
// for the actual functions being considered.
if base.Ctxt.Pkgpath == "reflect" {
switch f.Sym().Name {
case "callReflect", "callMethod":
flag |= obj.WRAPPER
}
}
base.Ctxt.InitTextSym(f.LSym, flag)
}


@ -0,0 +1,42 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssagen
import (
"cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/internal/obj"
)
var Arch ArchInfo
// interface to back end
type ArchInfo struct {
LinkArch *obj.LinkArch
REGSP int
MAXWIDTH int64
SoftFloat bool
PadFrame func(int64) int64
// ZeroRange zeroes a range of memory on stack. It is only inserted
// at function entry, and it is ok to clobber registers.
ZeroRange func(*objw.Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
Ginsnop func(*objw.Progs) *obj.Prog
Ginsnopdefer func(*objw.Progs) *obj.Prog // special ginsnop for deferreturn
// SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
SSAMarkMoves func(*State, *ssa.Block)
// SSAGenValue emits Prog(s) for the Value.
SSAGenValue func(*State, *ssa.Value)
// SSAGenBlock emits end-of-block Progs. SSAGenValue should be called
// for all values in the block before SSAGenBlock.
SSAGenBlock func(s *State, b, next *ssa.Block)
}
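// What follows is a minimal, self-contained sketch (not part of the
// compiler) of the pattern ArchInfo uses: a struct of function hooks
// that each back end fills in once at startup, so the portable SSA
// code can call into architecture-specific code without import
// cycles. All names below (miniArch, initArch, "toy64") are
// hypothetical.
package main

import "fmt"

type miniArch struct {
	Name    string
	REGSP   int
	Ginsnop func() string // emit a no-op instruction
}

var arch miniArch

// initArch plays the role of an arch package's Init(&arch) call.
func initArch(a *miniArch) {
	a.Name = "toy64"
	a.REGSP = 4
	a.Ginsnop = func() string { return "NOP" }
}

func main() {
	initArch(&arch)
	// Portable code consults the hooks without knowing the back end.
	fmt.Println(arch.Name, arch.REGSP, arch.Ginsnop())
}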


@ -0,0 +1,200 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssagen
import (
"bytes"
"fmt"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
)
func EnableNoWriteBarrierRecCheck() {
nowritebarrierrecCheck = newNowritebarrierrecChecker()
}
func NoWriteBarrierRecCheck() {
// Write barriers are now known. Check the
// call graph.
nowritebarrierrecCheck.check()
nowritebarrierrecCheck = nil
}
var nowritebarrierrecCheck *nowritebarrierrecChecker
type nowritebarrierrecChecker struct {
// extraCalls contains extra function calls that may not be
// visible during later analysis. It maps from the ODCLFUNC of
// the caller to a list of callees.
extraCalls map[*ir.Func][]nowritebarrierrecCall
// curfn is the current function during AST walks.
curfn *ir.Func
}
type nowritebarrierrecCall struct {
target *ir.Func // caller or callee
lineno src.XPos // line of call
}
// newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It
// must be called before transformclosure and walk.
func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
c := &nowritebarrierrecChecker{
extraCalls: make(map[*ir.Func][]nowritebarrierrecCall),
}
// Find all systemstack calls and record their targets. In
// general, flow analysis can't see into systemstack, but it's
// important to handle it for this check, so we model it
// directly. This has to happen before transformclosure since
// it's a lot harder to work out the argument after.
for _, n := range typecheck.Target.Decls {
if n.Op() != ir.ODCLFUNC {
continue
}
c.curfn = n.(*ir.Func)
ir.Visit(n, c.findExtraCalls)
}
c.curfn = nil
return c
}
func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) {
if nn.Op() != ir.OCALLFUNC {
return
}
n := nn.(*ir.CallExpr)
if n.X == nil || n.X.Op() != ir.ONAME {
return
}
fn := n.X.(*ir.Name)
if fn.Class_ != ir.PFUNC || fn.Name().Defn == nil {
return
}
if !types.IsRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" {
return
}
var callee *ir.Func
arg := n.Args[0]
switch arg.Op() {
case ir.ONAME:
arg := arg.(*ir.Name)
callee = arg.Name().Defn.(*ir.Func)
case ir.OCLOSURE:
arg := arg.(*ir.ClosureExpr)
callee = arg.Func
default:
base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
}
if callee.Op() != ir.ODCLFUNC {
base.Fatalf("expected ODCLFUNC node, got %+v", callee)
}
c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos()})
}
// recordCall records a call from ODCLFUNC node "from", to function
// symbol "to" at position pos.
//
// This should be done as late as possible during compilation to
// capture precise call graphs. The target of the call is an LSym
// because that's all we know after we start SSA.
//
// This can be called concurrently for different "from" Nodes.
func (c *nowritebarrierrecChecker) recordCall(fn *ir.Func, to *obj.LSym, pos src.XPos) {
// We record this information on the *Func so this is concurrent-safe.
if fn.NWBRCalls == nil {
fn.NWBRCalls = new([]ir.SymAndPos)
}
*fn.NWBRCalls = append(*fn.NWBRCalls, ir.SymAndPos{Sym: to, Pos: pos})
}
func (c *nowritebarrierrecChecker) check() {
// We walk the call graph as late as possible so we can
// capture all calls created by lowering, but this means we
// only get to see the obj.LSyms of calls. symToFunc lets us
// get back to the ODCLFUNCs.
symToFunc := make(map[*obj.LSym]*ir.Func)
// funcs records the back-edges of the BFS call graph walk. It
// maps from the ODCLFUNC of each function that must not have
// write barriers to the call that inhibits them. Functions
// that are directly marked go:nowritebarrierrec are in this
// map with a zero-valued nowritebarrierrecCall. This also
// acts as the set of marks for the BFS of the call graph.
funcs := make(map[*ir.Func]nowritebarrierrecCall)
// q is the queue of ODCLFUNC Nodes to visit in BFS order.
var q ir.NameQueue
for _, n := range typecheck.Target.Decls {
if n.Op() != ir.ODCLFUNC {
continue
}
fn := n.(*ir.Func)
symToFunc[fn.LSym] = fn
// Make nowritebarrierrec functions BFS roots.
if fn.Pragma&ir.Nowritebarrierrec != 0 {
funcs[fn] = nowritebarrierrecCall{}
q.PushRight(fn.Nname)
}
// Check go:nowritebarrier functions.
if fn.Pragma&ir.Nowritebarrier != 0 && fn.WBPos.IsKnown() {
base.ErrorfAt(fn.WBPos, "write barrier prohibited")
}
}
// Perform a BFS of the call graph from all
// go:nowritebarrierrec functions.
enqueue := func(src, target *ir.Func, pos src.XPos) {
if target.Pragma&ir.Yeswritebarrierrec != 0 {
// Don't flow into this function.
return
}
if _, ok := funcs[target]; ok {
// Already found a path to target.
return
}
// Record the path.
funcs[target] = nowritebarrierrecCall{target: src, lineno: pos}
q.PushRight(target.Nname)
}
for !q.Empty() {
fn := q.PopLeft().Func
// Check fn.
if fn.WBPos.IsKnown() {
var err bytes.Buffer
call := funcs[fn]
for call.target != nil {
fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Nname)
call = funcs[call.target]
}
base.ErrorfAt(fn.WBPos, "write barrier prohibited by caller; %v%s", fn.Nname, err.String())
continue
}
// Enqueue fn's calls.
for _, callee := range c.extraCalls[fn] {
enqueue(fn, callee.target, callee.lineno)
}
if fn.NWBRCalls == nil {
continue
}
for _, callee := range *fn.NWBRCalls {
target := symToFunc[callee.Sym]
if target != nil {
enqueue(fn, target, callee.Pos)
}
}
}
}
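// A minimal, runnable sketch (separate from the compiler) of the BFS
// technique check() uses: walk the call graph from the "no write
// barrier" roots, record a back-edge for each newly reached function,
// and replay the back-edges to print the offending call chain when a
// function with a write barrier is reached. The graph below is
// hypothetical.
package main

import "fmt"

func main() {
	calls := map[string][]string{
		"root": {"a"},
		"a":    {"b"},
	}
	hasWriteBarrier := map[string]bool{"b": true}

	from := map[string]string{"root": ""} // back-edges; doubles as the visited set
	queue := []string{"root"}
	for len(queue) > 0 {
		fn := queue[0]
		queue = queue[1:]
		if hasWriteBarrier[fn] {
			// Replay back-edges to reconstruct the path.
			path := fn
			for p := from[fn]; p != ""; p = from[p] {
				path = p + " -> " + path
			}
			fmt.Println("write barrier prohibited by caller:", path)
			continue
		}
		for _, callee := range calls[fn] {
			if _, ok := from[callee]; ok {
				continue // already found a path to callee
			}
			from[callee] = fn
			queue = append(queue, callee)
		}
	}
}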


@ -0,0 +1,279 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssagen
import (
"internal/race"
"math/rand"
"sort"
"sync"
"time"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
"cmd/internal/sys"
)
// cmpstackvarlt reports whether the stack variable a sorts before b.
//
// The ordering puts autos after anything else; within autos, unused
// variables after used ones; and within used autos, things with
// pointers first, zeroed things first, and then decreasing size.
// Because autos are laid out at decreasing addresses
// on the stack, "pointers first, zeroed things first, decreasing size"
// really means, in memory, things with pointers needing zeroing at
// the top of the stack, increasing in size toward the bottom.
// Non-autos sort on frame offset.
func cmpstackvarlt(a, b *ir.Name) bool {
if (a.Class_ == ir.PAUTO) != (b.Class_ == ir.PAUTO) {
return b.Class_ == ir.PAUTO
}
if a.Class_ != ir.PAUTO {
return a.FrameOffset() < b.FrameOffset()
}
if a.Used() != b.Used() {
return a.Used()
}
ap := a.Type().HasPointers()
bp := b.Type().HasPointers()
if ap != bp {
return ap
}
ap = a.Needzero()
bp = b.Needzero()
if ap != bp {
return ap
}
if a.Type().Width != b.Type().Width {
return a.Type().Width > b.Type().Width
}
return a.Sym().Name < b.Sym().Name
}
// byStackvar implements sort.Interface for []*Node using cmpstackvarlt.
type byStackVar []*ir.Name
func (s byStackVar) Len() int { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
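// A runnable toy version (outside the compiler) of the multi-key
// ordering cmpstackvarlt implements for autos: used before unused,
// pointers before non-pointers, needs-zeroing first, then larger
// sizes first, with the name as a final tiebreaker. The stackVar
// type is invented for illustration.
package main

import (
	"fmt"
	"sort"
)

type stackVar struct {
	name            string
	used, ptr, zero bool
	size            int64
}

func less(a, b stackVar) bool {
	if a.used != b.used {
		return a.used
	}
	if a.ptr != b.ptr {
		return a.ptr
	}
	if a.zero != b.zero {
		return a.zero
	}
	if a.size != b.size {
		return a.size > b.size
	}
	return a.name < b.name
}

func main() {
	vars := []stackVar{
		{"x", true, false, false, 8},
		{"p", true, true, true, 8},
		{"dead", false, true, true, 16},
		{"big", true, false, false, 64},
	}
	sort.Slice(vars, func(i, j int) bool { return less(vars[i], vars[j]) })
	fmt.Println(vars) // p first (used pointer needing zeroing), dead last (unused)
}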
func (s *ssafn) AllocFrame(f *ssa.Func) {
s.stksize = 0
s.stkptrsize = 0
fn := s.curfn
// Mark the PAUTO's unused.
for _, ln := range fn.Dcl {
if ln.Class_ == ir.PAUTO {
ln.SetUsed(false)
}
}
for _, l := range f.RegAlloc {
if ls, ok := l.(ssa.LocalSlot); ok {
ls.N.Name().SetUsed(true)
}
}
scratchUsed := false
for _, b := range f.Blocks {
for _, v := range b.Values {
if n, ok := v.Aux.(*ir.Name); ok {
switch n.Class_ {
case ir.PPARAM, ir.PPARAMOUT:
// Don't modify nodfp; it is a global.
if n != ir.RegFP {
n.Name().SetUsed(true)
}
case ir.PAUTO:
n.Name().SetUsed(true)
}
}
if !scratchUsed {
scratchUsed = v.Op.UsesScratch()
}
}
}
if f.Config.NeedsFpScratch && scratchUsed {
s.scratchFpMem = typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT64])
}
sort.Sort(byStackVar(fn.Dcl))
// Reassign stack offsets of the locals that are used.
lastHasPtr := false
for i, n := range fn.Dcl {
if n.Op() != ir.ONAME || n.Class_ != ir.PAUTO {
continue
}
if !n.Used() {
fn.Dcl = fn.Dcl[:i]
break
}
types.CalcSize(n.Type())
w := n.Type().Width
if w >= types.MaxWidth || w < 0 {
base.Fatalf("bad width")
}
if w == 0 && lastHasPtr {
// Pad between a pointer-containing object and a zero-sized object.
// This prevents a pointer to the zero-sized object from being interpreted
// as a pointer to the pointer-containing object (and causing it
// to be scanned when it shouldn't be). See issue 24993.
w = 1
}
s.stksize += w
s.stksize = types.Rnd(s.stksize, int64(n.Type().Align))
if n.Type().HasPointers() {
s.stkptrsize = s.stksize
lastHasPtr = true
} else {
lastHasPtr = false
}
if Arch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
s.stksize = types.Rnd(s.stksize, int64(types.PtrSize))
}
n.SetFrameOffset(-s.stksize)
}
s.stksize = types.Rnd(s.stksize, int64(types.RegSize))
s.stkptrsize = types.Rnd(s.stkptrsize, int64(types.RegSize))
}
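// A self-contained sketch of the offset-assignment loop above: grow
// the frame by each variable's size, round up to its alignment, and
// track stkptrsize as a high-water mark so the GC need only scan the
// pointer-containing prefix of the frame. Sizes and alignments here
// are made up; rnd assumes a power-of-two alignment.
package main

import "fmt"

func rnd(x, a int64) int64 { return (x + a - 1) &^ (a - 1) } // round x up to a multiple of a

func main() {
	type auto struct {
		name        string
		size, align int64
		hasPtr      bool
	}
	// Already sorted: pointers first, then decreasing size.
	autos := []auto{{"p", 8, 8, true}, {"buf", 24, 8, false}, {"n", 4, 4, false}}

	var stksize, stkptrsize int64
	for _, a := range autos {
		stksize = rnd(stksize+a.size, a.align)
		if a.hasPtr {
			stkptrsize = stksize // high-water mark of pointer data
		}
		fmt.Printf("%s at frame offset %d\n", a.name, -stksize)
	}
	fmt.Println("frame:", rnd(stksize, 8), "scanned:", rnd(stkptrsize, 8))
}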
const maxStackSize = 1 << 30
// Compile builds an SSA backend function,
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
func Compile(fn *ir.Func, worker int) {
f := buildssa(fn, worker)
// Note: check arg size to fix issue 25507.
if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize {
largeStackFramesMu.Lock()
largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type().ArgWidth(), pos: fn.Pos()})
largeStackFramesMu.Unlock()
return
}
pp := objw.NewProgs(fn, worker)
defer pp.Free()
genssa(f, pp)
// Check frame size again.
// The check above included only the space needed for local variables.
// After genssa, the space needed includes local variables and the callee arg region.
// We must do this check prior to calling pp.Flush.
// If there are any oversized stack frames,
// the assembler may emit inscrutable complaints about invalid instructions.
if pp.Text.To.Offset >= maxStackSize {
largeStackFramesMu.Lock()
locals := f.Frontend().(*ssafn).stksize
largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type().ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()})
largeStackFramesMu.Unlock()
return
}
pp.Flush() // assemble, fill in boilerplate, etc.
// fieldtrack must be called after pp.Flush. See issue 20014.
fieldtrack(pp.Text.From.Sym, fn.FieldTrack)
}
func init() {
if race.Enabled {
rand.Seed(time.Now().UnixNano())
}
}
// StackOffset returns the stack location of a LocalSlot relative to the
// stack pointer, suitable for use in a DWARF location entry. This has nothing
// to do with its offset in the user variable.
func StackOffset(slot ssa.LocalSlot) int32 {
n := slot.N
var off int64
switch n.Class_ {
case ir.PAUTO:
off = n.FrameOffset()
if base.Ctxt.FixedFrameSize() == 0 {
off -= int64(types.PtrSize)
}
if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
// There is a word space for FP on ARM64 even if the frame pointer is disabled
off -= int64(types.PtrSize)
}
case ir.PPARAM, ir.PPARAMOUT:
off = n.FrameOffset() + base.Ctxt.FixedFrameSize()
}
return int32(off + slot.Off)
}
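// A worked instance of the arithmetic above, under the assumptions of
// 8-byte pointers, a zero fixed frame size, and the frame pointer
// enabled: both adjustments fire, so a PAUTO at FrameOffset -16 gets
// the SP-relative DWARF offset -16 - 8 - 8 = -32.
package main

import "fmt"

func main() {
	const ptrSize = 8
	off := int64(-16) // hypothetical PAUTO frame offset
	off -= ptrSize    // FixedFrameSize() == 0 case
	off -= ptrSize    // word reserved for the frame pointer
	fmt.Println(off)  // -32
}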
// fieldtrack adds R_USEFIELD relocations to fnsym to record any
// struct fields that it used.
func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
if fnsym == nil {
return
}
if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
return
}
trackSyms := make([]*types.Sym, 0, len(tracked))
for sym := range tracked {
trackSyms = append(trackSyms, sym)
}
sort.Sort(symByName(trackSyms))
for _, sym := range trackSyms {
r := obj.Addrel(fnsym)
r.Sym = sym.Linksym()
r.Type = objabi.R_USEFIELD
}
}
type symByName []*types.Sym
func (a symByName) Len() int { return len(a) }
func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func (a symByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// largeStack is info about a function whose stack frame is too large (rare).
type largeStack struct {
locals int64
args int64
callee int64
pos src.XPos
}
var (
largeStackFramesMu sync.Mutex // protects largeStackFrames
largeStackFrames []largeStack
)
func CheckLargeStacks() {
// Check whether any of the functions we have compiled have gigantic stack frames.
sort.Slice(largeStackFrames, func(i, j int) bool {
return largeStackFrames[i].pos.Before(largeStackFrames[j].pos)
})
for _, large := range largeStackFrames {
if large.callee != 0 {
base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
} else {
base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
}
}
}


@ -2,15 +2,17 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package ssagen
import (
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
"cmd/internal/src"
"reflect"
"sort"
"testing"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/src"
)
func typeWithoutPointers() *types.Type {
@ -41,10 +43,10 @@ func TestCmpstackvar(t *testing.T) {
if s == nil {
s = &types.Sym{Name: "."}
}
n := NewName(s)
n := typecheck.NewName(s)
n.SetType(t)
n.SetFrameOffset(xoffset)
n.SetClass(cl)
n.Class_ = cl
return n
}
testdata := []struct {
@ -156,10 +158,10 @@ func TestCmpstackvar(t *testing.T) {
func TestStackvarSort(t *testing.T) {
nod := func(xoffset int64, t *types.Type, s *types.Sym, cl ir.Class) *ir.Name {
n := NewName(s)
n := typecheck.NewName(s)
n.SetType(t)
n.SetFrameOffset(xoffset)
n.SetClass(cl)
n.Class_ = cl
return n
}
inp := []*ir.Name{


@ -2,15 +2,16 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package ssagen
import (
"container/heap"
"fmt"
"cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/src"
"container/heap"
"fmt"
)
// This file contains the algorithm to place phi nodes in a function.
@ -23,13 +24,13 @@ const smallBlocks = 500
const debugPhi = false
// FwdRefAux wraps an arbitrary ir.Node as an ssa.Aux for use with OpFwdref.
type FwdRefAux struct {
// fwdRefAux wraps an arbitrary ir.Node as an ssa.Aux for use with OpFwdref.
type fwdRefAux struct {
_ [0]func() // ensure ir.Node isn't compared for equality
N ir.Node
}
func (FwdRefAux) CanBeAnSSAAux() {}
func (fwdRefAux) CanBeAnSSAAux() {}
// insertPhis finds all the places in the function where a phi is
// necessary and inserts them.
@ -87,7 +88,7 @@ func (s *phiState) insertPhis() {
if v.Op != ssa.OpFwdRef {
continue
}
var_ := v.Aux.(FwdRefAux).N
var_ := v.Aux.(fwdRefAux).N
// Optimization: look back 1 block for the definition.
if len(b.Preds) == 1 {
@ -334,7 +335,7 @@ func (s *phiState) resolveFwdRefs() {
if v.Op != ssa.OpFwdRef {
continue
}
n := s.varnum[v.Aux.(FwdRefAux).N]
n := s.varnum[v.Aux.(fwdRefAux).N]
v.Op = ssa.OpCopy
v.Aux = nil
v.AddArg(values[n])
@ -465,7 +466,7 @@ func (s *simplePhiState) insertPhis() {
continue
}
s.fwdrefs = append(s.fwdrefs, v)
var_ := v.Aux.(FwdRefAux).N
var_ := v.Aux.(fwdRefAux).N
if _, ok := s.defvars[b.ID][var_]; !ok {
s.defvars[b.ID][var_] = v // treat FwdDefs as definitions.
}
@ -479,7 +480,7 @@ loop:
v := s.fwdrefs[len(s.fwdrefs)-1]
s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1]
b := v.Block
var_ := v.Aux.(FwdRefAux).N
var_ := v.Aux.(fwdRefAux).N
if b == s.f.Entry {
// No variable should be live at entry.
s.s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, var_, v)
@ -546,7 +547,7 @@ func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir.
}
}
// Generate a FwdRef for the variable and return that.
v := b.NewValue0A(line, ssa.OpFwdRef, t, FwdRefAux{N: var_})
v := b.NewValue0A(line, ssa.OpFwdRef, t, fwdRefAux{N: var_})
s.defvars[b.ID][var_] = v
if var_.Op() == ir.ONAME {
s.s.addNamedValue(var_.(*ir.Name), v)

View file

@ -0,0 +1,353 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package staticdata
import (
"crypto/sha256"
"fmt"
"go/constant"
"io"
"io/ioutil"
"os"
"sort"
"strconv"
"sync"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
)
// InitAddr writes the static address of a to n. a must be an ONAME.
// Neither n nor a is modified.
func InitAddr(n *ir.Name, noff int64, a *ir.Name, aoff int64) {
if n.Op() != ir.ONAME {
base.Fatalf("addrsym n op %v", n.Op())
}
if n.Sym() == nil {
base.Fatalf("addrsym nil n sym")
}
if a.Op() != ir.ONAME {
base.Fatalf("addrsym a op %v", a.Op())
}
s := n.Sym().Linksym()
s.WriteAddr(base.Ctxt, noff, types.PtrSize, a.Sym().Linksym(), aoff)
}
// InitFunc writes the static address of f to n. f must be a global function.
// Neither n nor f is modified.
func InitFunc(n *ir.Name, noff int64, f *ir.Name) {
if n.Op() != ir.ONAME {
base.Fatalf("pfuncsym n op %v", n.Op())
}
if n.Sym() == nil {
base.Fatalf("pfuncsym nil n sym")
}
if f.Class_ != ir.PFUNC {
base.Fatalf("pfuncsym class not PFUNC %d", f.Class_)
}
s := n.Sym().Linksym()
s.WriteAddr(base.Ctxt, noff, types.PtrSize, FuncSym(f.Sym()).Linksym(), 0)
}
// InitSlice writes a static slice symbol {&arr, lencap, lencap} to n+noff.
// InitSlice does not modify n.
func InitSlice(n *ir.Name, noff int64, arr *ir.Name, lencap int64) {
s := n.Sym().Linksym()
if arr.Op() != ir.ONAME {
base.Fatalf("slicesym non-name arr %v", arr)
}
s.WriteAddr(base.Ctxt, noff, types.PtrSize, arr.Sym().Linksym(), 0)
s.WriteInt(base.Ctxt, noff+types.SliceLenOffset, types.PtrSize, lencap)
s.WriteInt(base.Ctxt, noff+types.SliceCapOffset, types.PtrSize, lencap)
}
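// A minimal illustration (not compiler code) of the three words
// InitSlice writes: a static slice header is {data pointer, len, cap},
// with len and cap sharing the same value here. This assumes len sits
// one pointer-sized word and cap two words past the data pointer, as
// on 64-bit targets; the address below is hypothetical.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const ptrSize = 8
	hdr := make([]byte, 3*ptrSize)
	addrOfArr := uint64(0x1000) // pretend address of the backing array
	lencap := uint64(5)
	binary.LittleEndian.PutUint64(hdr[0:], addrOfArr)      // data
	binary.LittleEndian.PutUint64(hdr[ptrSize:], lencap)   // len
	binary.LittleEndian.PutUint64(hdr[2*ptrSize:], lencap) // cap
	fmt.Printf("% x\n", hdr)
}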
func InitSliceBytes(nam *ir.Name, off int64, s string) {
if nam.Op() != ir.ONAME {
base.Fatalf("slicebytes %v", nam)
}
InitSlice(nam, off, slicedata(nam.Pos(), s), int64(len(s)))
}
const (
stringSymPrefix = "go.string."
stringSymPattern = ".gostring.%d.%x"
)
// StringSym returns a symbol containing the string s.
// The symbol contains the string data, not a string header.
func StringSym(pos src.XPos, s string) (data *obj.LSym) {
var symname string
if len(s) > 100 {
// Huge strings are hashed to avoid long names in object files.
// Indulge in some paranoia by writing the length of s, too,
// as protection against length extension attacks.
// The same pattern is known to fileStringSym below.
h := sha256.New()
io.WriteString(h, s)
symname = fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil))
} else {
// Small strings get named directly by their contents.
symname = strconv.Quote(s)
}
symdata := base.Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
off := dstringdata(symdata, 0, s, pos, "string")
objw.Global(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
symdata.Set(obj.AttrContentAddressable, true)
}
return symdata
}
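// A runnable sketch of the naming scheme above: short strings become
// go.string. symbols named by their quoted contents, while long ones
// are named by their length plus a SHA-256 digest so object-file
// symbol names stay bounded. This reuses the prefix and pattern
// constants shown above, outside the compiler.
package main

import (
	"crypto/sha256"
	"fmt"
	"strconv"
	"strings"
)

func stringSymName(s string) string {
	const prefix = "go.string."
	if len(s) > 100 {
		h := sha256.New()
		h.Write([]byte(s))
		return prefix + fmt.Sprintf(".gostring.%d.%x", len(s), h.Sum(nil))
	}
	return prefix + strconv.Quote(s)
}

func main() {
	fmt.Println(stringSymName("hi"))
	fmt.Println(stringSymName(strings.Repeat("x", 200)))
}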
// fileStringSym returns a symbol for the contents and the size of file.
// If readonly is true, the symbol shares storage with any literal string
// or other file with the same content and is placed in a read-only section.
// If readonly is false, the symbol is a read-write copy separate from any other,
// for use as the backing store of a []byte.
// The content hash of file is copied into hash. (If hash is nil, nothing is copied.)
// The returned symbol contains the data itself, not a string header.
func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) {
f, err := os.Open(file)
if err != nil {
return nil, 0, err
}
defer f.Close()
info, err := f.Stat()
if err != nil {
return nil, 0, err
}
if !info.Mode().IsRegular() {
return nil, 0, fmt.Errorf("not a regular file")
}
size := info.Size()
if size <= 1*1024 {
data, err := ioutil.ReadAll(f)
if err != nil {
return nil, 0, err
}
if int64(len(data)) != size {
return nil, 0, fmt.Errorf("file changed between reads")
}
var sym *obj.LSym
if readonly {
sym = StringSym(pos, string(data))
} else {
sym = slicedata(pos, string(data)).Sym().Linksym()
}
if len(hash) > 0 {
sum := sha256.Sum256(data)
copy(hash, sum[:])
}
return sym, size, nil
}
if size > 2e9 {
// objw.Global takes an int32,
// and probably the rest of the toolchain
// can't handle such big symbols either.
// See golang.org/issue/9862.
return nil, 0, fmt.Errorf("file too large")
}
// File is too big to read and keep in memory.
// Compute hash if needed for read-only content hashing or if the caller wants it.
var sum []byte
if readonly || len(hash) > 0 {
h := sha256.New()
n, err := io.Copy(h, f)
if err != nil {
return nil, 0, err
}
if n != size {
return nil, 0, fmt.Errorf("file changed between reads")
}
sum = h.Sum(nil)
copy(hash, sum)
}
var symdata *obj.LSym
if readonly {
symname := fmt.Sprintf(stringSymPattern, size, sum)
symdata = base.Ctxt.Lookup(stringSymPrefix + symname)
if !symdata.OnList() {
info := symdata.NewFileInfo()
info.Name = file
info.Size = size
objw.Global(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
// Note: AttrContentAddressable cannot be set here,
// because the content-addressable-handling code
// does not know about file symbols.
}
} else {
// Emit a zero-length data symbol
// and then fix up length and content to use file.
symdata = slicedata(pos, "").Sym().Linksym()
symdata.Size = size
symdata.Type = objabi.SNOPTRDATA
info := symdata.NewFileInfo()
info.Name = file
info.Size = size
}
return symdata, size, nil
}
var slicedataGen int
func slicedata(pos src.XPos, s string) *ir.Name {
slicedataGen++
symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
sym := types.LocalPkg.Lookup(symname)
symnode := typecheck.NewName(sym)
sym.Def = symnode
lsym := sym.Linksym()
off := dstringdata(lsym, 0, s, pos, "slice")
objw.Global(lsym, int32(off), obj.NOPTR|obj.LOCAL)
return symnode
}
func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
// Objects that are too large will cause the data section to overflow right away,
// causing a cryptic error message by the linker. Check for oversize objects here
// and provide a useful error message instead.
if int64(len(t)) > 2e9 {
base.ErrorfAt(pos, "%v with length %v is too big", what, len(t))
return 0
}
s.WriteString(base.Ctxt, int64(off), len(t), t)
return off + len(t)
}
var (
funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func FuncSym)
funcsyms []*types.Sym
)
// FuncSym returns s·f.
func FuncSym(s *types.Sym) *types.Sym {
// funcsymsmu here serves to protect not just mutations of funcsyms (below),
// but also the package lookup of the func sym name,
// since this function gets called concurrently from the backend.
// There are no other concurrent package lookups in the backend,
// except for the types package, which is protected separately.
// Reusing funcsymsmu to also cover this package lookup
// avoids a general, broader, expensive package lookup mutex.
// Note that makefuncsym also does package look-up of func sym names,
// but it is only called serially, from the front end.
funcsymsmu.Lock()
sf, existed := s.Pkg.LookupOK(ir.FuncSymName(s))
// Don't export s·f when compiling for dynamic linking.
// When dynamically linking, the necessary function
// symbols will be created explicitly with makefuncsym.
// See the makefuncsym comment for details.
if !base.Ctxt.Flag_dynlink && !existed {
funcsyms = append(funcsyms, s)
}
funcsymsmu.Unlock()
return sf
}
// NeedFuncSym ensures that s·f is exported.
// It is only used with -dynlink.
// When not compiling for dynamic linking,
// the funcsyms are created as needed by
// the packages that use them.
// Normally we emit the s·f stubs as DUPOK syms,
// but DUPOK doesn't work across shared library boundaries.
// So instead, when dynamic linking, we only create
// the s·f stubs in s's package.
func NeedFuncSym(s *types.Sym) {
if !base.Ctxt.Flag_dynlink {
base.Fatalf("makefuncsym dynlink")
}
if s.IsBlank() {
return
}
if base.Flag.CompilingRuntime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") {
// runtime.getg(), getclosureptr(), getcallerpc(), and
// getcallersp() are not real functions and so do not
// get funcsyms.
return
}
if _, existed := s.Pkg.LookupOK(ir.FuncSymName(s)); !existed {
funcsyms = append(funcsyms, s)
}
}
func WriteFuncSyms() {
sort.Slice(funcsyms, func(i, j int) bool {
return funcsyms[i].LinksymName() < funcsyms[j].LinksymName()
})
for _, s := range funcsyms {
sf := s.Pkg.Lookup(ir.FuncSymName(s)).Linksym()
objw.SymPtr(sf, 0, s.Linksym(), 0)
objw.Global(sf, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
}
}
// InitConst writes the static literal c to n.
// Neither n nor c is modified.
func InitConst(n *ir.Name, noff int64, c ir.Node, wid int) {
if n.Op() != ir.ONAME {
base.Fatalf("litsym n op %v", n.Op())
}
if n.Sym() == nil {
base.Fatalf("litsym nil n sym")
}
if c.Op() == ir.ONIL {
return
}
if c.Op() != ir.OLITERAL {
base.Fatalf("litsym c op %v", c.Op())
}
s := n.Sym().Linksym()
switch u := c.Val(); u.Kind() {
case constant.Bool:
i := int64(obj.Bool2int(constant.BoolVal(u)))
s.WriteInt(base.Ctxt, noff, wid, i)
case constant.Int:
s.WriteInt(base.Ctxt, noff, wid, ir.IntVal(c.Type(), u))
case constant.Float:
f, _ := constant.Float64Val(u)
switch c.Type().Kind() {
case types.TFLOAT32:
s.WriteFloat32(base.Ctxt, noff, float32(f))
case types.TFLOAT64:
s.WriteFloat64(base.Ctxt, noff, f)
}
case constant.Complex:
re, _ := constant.Float64Val(constant.Real(u))
im, _ := constant.Float64Val(constant.Imag(u))
switch c.Type().Kind() {
case types.TCOMPLEX64:
s.WriteFloat32(base.Ctxt, noff, float32(re))
s.WriteFloat32(base.Ctxt, noff+4, float32(im))
case types.TCOMPLEX128:
s.WriteFloat64(base.Ctxt, noff, re)
s.WriteFloat64(base.Ctxt, noff+8, im)
}
case constant.String:
i := constant.StringVal(u)
symdata := StringSym(n.Pos(), i)
s.WriteAddr(base.Ctxt, noff, types.PtrSize, symdata, 0)
s.WriteInt(base.Ctxt, noff+int64(types.PtrSize), types.PtrSize, int64(len(i)))
default:
base.Fatalf("litsym unhandled OLITERAL %v", c)
}
}
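// A small illustration of one of the constant layouts InitConst
// emits: a complex128 is written as two adjacent float64 words, real
// part at offset 0 and imaginary part at offset 8, matching the
// TCOMPLEX128 case above. The buffer stands in for the symbol data.
package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func main() {
	c := complex(1.5, -2.0)
	buf := make([]byte, 16)
	binary.LittleEndian.PutUint64(buf[0:], math.Float64bits(real(c)))
	binary.LittleEndian.PutUint64(buf[8:], math.Float64bits(imag(c)))
	fmt.Printf("% x\n", buf)
}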


@ -2,19 +2,18 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package staticdata
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/syntax"
"cmd/compile/internal/types"
"cmd/internal/obj"
"path"
"sort"
"strconv"
"strings"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/types"
"cmd/internal/obj"
)
const (
@ -24,57 +23,6 @@ const (
embedFiles
)
func varEmbed(p *noder, names []*ir.Name, typ ir.Ntype, exprs []ir.Node, embeds []PragmaEmbed) (newExprs []ir.Node) {
haveEmbed := false
for _, decl := range p.file.DeclList {
imp, ok := decl.(*syntax.ImportDecl)
if !ok {
// imports always come first
break
}
path, _ := strconv.Unquote(imp.Path.Value)
if path == "embed" {
haveEmbed = true
break
}
}
pos := embeds[0].Pos
if !haveEmbed {
p.errorAt(pos, "invalid go:embed: missing import \"embed\"")
return exprs
}
if base.Flag.Cfg.Embed.Patterns == nil {
p.errorAt(pos, "invalid go:embed: build system did not supply embed configuration")
return exprs
}
if len(names) > 1 {
p.errorAt(pos, "go:embed cannot apply to multiple vars")
return exprs
}
if len(exprs) > 0 {
p.errorAt(pos, "go:embed cannot apply to var with initializer")
return exprs
}
if typ == nil {
// Should not happen, since len(exprs) == 0 now.
p.errorAt(pos, "go:embed cannot apply to var without type")
return exprs
}
if dclcontext != ir.PEXTERN {
p.errorAt(pos, "go:embed cannot apply to var inside func")
return exprs
}
v := names[0]
Target.Embeds = append(Target.Embeds, v)
v.Embed = new([]ir.Embed)
for _, e := range embeds {
*v.Embed = append(*v.Embed, ir.Embed{Pos: p.makeXPos(e.Pos), Patterns: e.Patterns})
}
return exprs
}
func embedFileList(v *ir.Name) []string {
kind := embedKind(v.Type())
if kind == embedUnknown {
@ -183,15 +131,9 @@ func embedFileLess(x, y string) bool {
return xdir < ydir || xdir == ydir && xelem < yelem
}
func dumpembeds() {
for _, v := range Target.Embeds {
initEmbed(v)
}
}
// initEmbed emits the init data for a //go:embed variable,
// WriteEmbed emits the init data for a //go:embed variable,
// which is either a string, a []byte, or an embed.FS.
func initEmbed(v *ir.Name) {
func WriteEmbed(v *ir.Name) {
files := embedFileList(v)
switch kind := embedKind(v.Type()); kind {
case embedUnknown:
@ -205,19 +147,19 @@ func initEmbed(v *ir.Name) {
}
sym := v.Sym().Linksym()
off := 0
off = dsymptr(sym, off, fsym, 0) // data string
off = duintptr(sym, off, uint64(size)) // len
off = objw.SymPtr(sym, off, fsym, 0) // data string
off = objw.Uintptr(sym, off, uint64(size)) // len
if kind == embedBytes {
duintptr(sym, off, uint64(size)) // cap for slice
objw.Uintptr(sym, off, uint64(size)) // cap for slice
}
case embedFiles:
slicedata := base.Ctxt.Lookup(`"".` + v.Sym().Name + `.files`)
off := 0
// []files pointed at by Files
off = dsymptr(slicedata, off, slicedata, 3*Widthptr) // []file, pointing just past slice
off = duintptr(slicedata, off, uint64(len(files)))
off = duintptr(slicedata, off, uint64(len(files)))
off = objw.SymPtr(slicedata, off, slicedata, 3*types.PtrSize) // []file, pointing just past slice
off = objw.Uintptr(slicedata, off, uint64(len(files)))
off = objw.Uintptr(slicedata, off, uint64(len(files)))
// embed/embed.go type file is:
// name string
@ -227,25 +169,25 @@ func initEmbed(v *ir.Name) {
const hashSize = 16
hash := make([]byte, hashSize)
for _, file := range files {
off = dsymptr(slicedata, off, stringsym(v.Pos(), file), 0) // file string
off = duintptr(slicedata, off, uint64(len(file)))
off = objw.SymPtr(slicedata, off, StringSym(v.Pos(), file), 0) // file string
off = objw.Uintptr(slicedata, off, uint64(len(file)))
if strings.HasSuffix(file, "/") {
// entry for directory - no data
off = duintptr(slicedata, off, 0)
off = duintptr(slicedata, off, 0)
off = objw.Uintptr(slicedata, off, 0)
off = objw.Uintptr(slicedata, off, 0)
off += hashSize
} else {
fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], true, hash)
if err != nil {
base.ErrorfAt(v.Pos(), "embed %s: %v", file, err)
}
off = dsymptr(slicedata, off, fsym, 0) // data string
off = duintptr(slicedata, off, uint64(size))
off = objw.SymPtr(slicedata, off, fsym, 0) // data string
off = objw.Uintptr(slicedata, off, uint64(size))
off = int(slicedata.WriteBytes(base.Ctxt, int64(off), hash))
}
}
ggloblsym(slicedata, int32(off), obj.RODATA|obj.LOCAL)
objw.Global(slicedata, int32(off), obj.RODATA|obj.LOCAL)
sym := v.Sym().Linksym()
dsymptr(sym, 0, slicedata, 0)
objw.SymPtr(sym, 0, slicedata, 0)
}
}


@ -0,0 +1,596 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package staticinit
import (
"fmt"
"go/constant"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/staticdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
)
type Entry struct {
Xoffset int64 // struct, array only
Expr ir.Node // bytes of run-time computed expressions
}
type Plan struct {
E []Entry
}
// An Schedule is used to decompose assignment statements into
// static and dynamic initialization parts. Static initializations are
// handled by populating variables' linker symbol data, while dynamic
// initializations are accumulated to be executed in order.
type Schedule struct {
// Out is the ordered list of dynamic initialization
// statements.
Out []ir.Node
Plans map[ir.Node]*Plan
Temps map[ir.Node]*ir.Name
}
func (s *Schedule) append(n ir.Node) {
s.Out = append(s.Out, n)
}
// StaticInit adds an initialization statement n to the schedule.
func (s *Schedule) StaticInit(n ir.Node) {
if !s.tryStaticInit(n) {
if base.Flag.Percent != 0 {
ir.Dump("nonstatic", n)
}
s.append(n)
}
}
// tryStaticInit attempts to statically execute an initialization
// statement and reports whether it succeeded.
func (s *Schedule) tryStaticInit(nn ir.Node) bool {
// Only worry about simple "l = r" assignments. Multiple
// variable/expression OAS2 assignments have already been
// replaced by multiple simple OAS assignments, and the other
// OAS2* assignments mostly necessitate dynamic execution
// anyway.
if nn.Op() != ir.OAS {
return false
}
n := nn.(*ir.AssignStmt)
if ir.IsBlank(n.X) && !AnySideEffects(n.Y) {
// Discard.
return true
}
lno := ir.SetPos(n)
defer func() { base.Pos = lno }()
nam := n.X.(*ir.Name)
return s.StaticAssign(nam, 0, n.Y, nam.Type())
}
// staticcopy is like StaticAssign, but copies an already-
// initialized value r.
func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool {
if rn.Class_ == ir.PFUNC {
// TODO if roff != 0 { panic }
staticdata.InitFunc(l, loff, rn)
return true
}
if rn.Class_ != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg {
return false
}
if rn.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
return false
}
if rn.Defn.Op() != ir.OAS {
return false
}
if rn.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675)
return false
}
orig := rn
r := rn.Defn.(*ir.AssignStmt).Y
for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), typ) {
r = r.(*ir.ConvExpr).X
}
switch r.Op() {
case ir.OMETHEXPR:
r = r.(*ir.MethodExpr).FuncName()
fallthrough
case ir.ONAME:
r := r.(*ir.Name)
if s.staticcopy(l, loff, r, typ) {
return true
}
// We may have skipped past one or more OCONVNOPs, so
// use conv to ensure r is assignable to l (#13263).
dst := ir.Node(l)
if loff != 0 || !types.Identical(typ, l.Type()) {
dst = ir.NewNameOffsetExpr(base.Pos, l, loff, typ)
}
s.append(ir.NewAssignStmt(base.Pos, dst, typecheck.Conv(r, typ)))
return true
case ir.ONIL:
return true
case ir.OLITERAL:
if ir.IsZero(r) {
return true
}
staticdata.InitConst(l, loff, r, int(typ.Width))
return true
case ir.OADDR:
r := r.(*ir.AddrExpr)
if a := r.X; a.Op() == ir.ONAME {
a := a.(*ir.Name)
staticdata.InitAddr(l, loff, a, 0)
return true
}
case ir.OPTRLIT:
r := r.(*ir.AddrExpr)
switch r.X.Op() {
case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT:
// copy pointer
staticdata.InitAddr(l, loff, s.Temps[r], 0)
return true
}
case ir.OSLICELIT:
r := r.(*ir.CompLitExpr)
// copy slice
staticdata.InitSlice(l, loff, s.Temps[r], r.Len)
return true
case ir.OARRAYLIT, ir.OSTRUCTLIT:
r := r.(*ir.CompLitExpr)
p := s.Plans[r]
for i := range p.E {
e := &p.E[i]
typ := e.Expr.Type()
if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
staticdata.InitConst(l, loff+e.Xoffset, e.Expr, int(typ.Width))
continue
}
x := e.Expr
if x.Op() == ir.OMETHEXPR {
x = x.(*ir.MethodExpr).FuncName()
}
if x.Op() == ir.ONAME && s.staticcopy(l, loff+e.Xoffset, x.(*ir.Name), typ) {
continue
}
// Requires computation, but we're
// copying someone else's computation.
ll := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, typ)
rr := ir.NewNameOffsetExpr(base.Pos, orig, e.Xoffset, typ)
ir.SetPos(rr)
s.append(ir.NewAssignStmt(base.Pos, ll, rr))
}
return true
}
return false
}
func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Type) bool {
for r.Op() == ir.OCONVNOP {
r = r.(*ir.ConvExpr).X
}
switch r.Op() {
case ir.ONAME:
r := r.(*ir.Name)
return s.staticcopy(l, loff, r, typ)
case ir.OMETHEXPR:
r := r.(*ir.MethodExpr)
return s.staticcopy(l, loff, r.FuncName(), typ)
case ir.ONIL:
return true
case ir.OLITERAL:
if ir.IsZero(r) {
return true
}
staticdata.InitConst(l, loff, r, int(typ.Width))
return true
case ir.OADDR:
r := r.(*ir.AddrExpr)
if name, offset, ok := StaticLoc(r.X); ok {
staticdata.InitAddr(l, loff, name, offset)
return true
}
fallthrough
case ir.OPTRLIT:
r := r.(*ir.AddrExpr)
switch r.X.Op() {
case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT:
// Init pointer.
a := StaticName(r.X.Type())
s.Temps[r] = a
staticdata.InitAddr(l, loff, a, 0)
// Init underlying literal.
if !s.StaticAssign(a, 0, r.X, a.Type()) {
s.append(ir.NewAssignStmt(base.Pos, a, r.X))
}
return true
}
//dump("not static ptrlit", r);
case ir.OSTR2BYTES:
r := r.(*ir.ConvExpr)
if l.Class_ == ir.PEXTERN && r.X.Op() == ir.OLITERAL {
sval := ir.StringVal(r.X)
staticdata.InitSliceBytes(l, loff, sval)
return true
}
case ir.OSLICELIT:
r := r.(*ir.CompLitExpr)
s.initplan(r)
// Init slice.
ta := types.NewArray(r.Type().Elem(), r.Len)
ta.SetNoalg(true)
a := StaticName(ta)
s.Temps[r] = a
staticdata.InitSlice(l, loff, a, r.Len)
// Fall through to init underlying array.
l = a
loff = 0
fallthrough
case ir.OARRAYLIT, ir.OSTRUCTLIT:
r := r.(*ir.CompLitExpr)
s.initplan(r)
p := s.Plans[r]
for i := range p.E {
e := &p.E[i]
if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
staticdata.InitConst(l, loff+e.Xoffset, e.Expr, int(e.Expr.Type().Width))
continue
}
ir.SetPos(e.Expr)
if !s.StaticAssign(l, loff+e.Xoffset, e.Expr, e.Expr.Type()) {
a := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, e.Expr.Type())
s.append(ir.NewAssignStmt(base.Pos, a, e.Expr))
}
}
return true
case ir.OMAPLIT:
break
case ir.OCLOSURE:
r := r.(*ir.ClosureExpr)
if ir.IsTrivialClosure(r) {
if base.Debug.Closure > 0 {
base.WarnfAt(r.Pos(), "closure converted to global")
}
// Closures with no captured variables are globals,
// so the assignment can be done at link time.
// TODO if roff != 0 { panic }
staticdata.InitFunc(l, loff, r.Func.Nname)
return true
}
ir.ClosureDebugRuntimeCheck(r)
case ir.OCONVIFACE:
// This logic is mirrored in isStaticCompositeLiteral.
// If you change something here, change it there, and vice versa.
// Determine the underlying concrete type and value we are converting from.
r := r.(*ir.ConvExpr)
val := ir.Node(r)
for val.Op() == ir.OCONVIFACE {
val = val.(*ir.ConvExpr).X
}
if val.Type().IsInterface() {
// val is an interface type.
// If val is nil, we can statically initialize l;
// both words are zero and there is no work to do, so report success.
// If val is non-nil, we have no concrete type to record,
// and we won't be able to statically initialize its value, so report failure.
return val.Op() == ir.ONIL
}
reflectdata.MarkTypeUsedInInterface(val.Type(), l.Sym().Linksym())
var itab *ir.AddrExpr
if typ.IsEmptyInterface() {
itab = reflectdata.TypePtr(val.Type())
} else {
itab = reflectdata.ITabAddr(val.Type(), typ)
}
// Create a copy of l to modify while we emit data.
// Emit itab, advance offset.
staticdata.InitAddr(l, loff, itab.X.(*ir.Name), 0)
// Emit data.
if types.IsDirectIface(val.Type()) {
if val.Op() == ir.ONIL {
// Nil is zero, nothing to do.
return true
}
// Copy val directly into n.
ir.SetPos(val)
if !s.StaticAssign(l, loff+int64(types.PtrSize), val, val.Type()) {
a := ir.NewNameOffsetExpr(base.Pos, l, loff+int64(types.PtrSize), val.Type())
s.append(ir.NewAssignStmt(base.Pos, a, val))
}
} else {
// Construct temp to hold val, write pointer to temp into n.
a := StaticName(val.Type())
s.Temps[val] = a
if !s.StaticAssign(a, 0, val, val.Type()) {
s.append(ir.NewAssignStmt(base.Pos, a, val))
}
staticdata.InitAddr(l, loff+int64(types.PtrSize), a, 0)
}
return true
}
//dump("not static", r);
return false
}
func (s *Schedule) initplan(n ir.Node) {
if s.Plans[n] != nil {
return
}
p := new(Plan)
s.Plans[n] = p
switch n.Op() {
default:
base.Fatalf("initplan")
case ir.OARRAYLIT, ir.OSLICELIT:
n := n.(*ir.CompLitExpr)
var k int64
for _, a := range n.List {
if a.Op() == ir.OKEY {
kv := a.(*ir.KeyExpr)
k = typecheck.IndexConst(kv.Key)
if k < 0 {
base.Fatalf("initplan arraylit: invalid index %v", kv.Key)
}
a = kv.Value
}
s.addvalue(p, k*n.Type().Elem().Width, a)
k++
}
case ir.OSTRUCTLIT:
n := n.(*ir.CompLitExpr)
for _, a := range n.List {
if a.Op() != ir.OSTRUCTKEY {
base.Fatalf("initplan structlit")
}
a := a.(*ir.StructKeyExpr)
if a.Field.IsBlank() {
continue
}
s.addvalue(p, a.Offset, a.Value)
}
case ir.OMAPLIT:
n := n.(*ir.CompLitExpr)
for _, a := range n.List {
if a.Op() != ir.OKEY {
base.Fatalf("initplan maplit")
}
a := a.(*ir.KeyExpr)
s.addvalue(p, -1, a.Value)
}
}
}
func (s *Schedule) addvalue(p *Plan, xoffset int64, n ir.Node) {
// special case: zero can be dropped entirely
if ir.IsZero(n) {
return
}
// special case: inline struct and array (not slice) literals
if isvaluelit(n) {
s.initplan(n)
q := s.Plans[n]
for _, qe := range q.E {
// qe is a copy; we are not modifying entries in q.E
qe.Xoffset += xoffset
p.E = append(p.E, qe)
}
return
}
// add to plan
p.E = append(p.E, Entry{Xoffset: xoffset, Expr: n})
}
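// A toy, runnable version of the flattening done by initplan and
// addvalue above: a nested composite literal is reduced to a flat
// list of (offset, value) entries, with inner offsets shifted by the
// offset of the enclosing field (the xoffset parameter), and zero
// values dropped. The field and entry types are invented.
package main

import "fmt"

type entry struct {
	off int64
	val int
}

type field struct {
	off int64
	val interface{} // int, or []field for a nested literal
}

func flatten(base int64, fields []field, out *[]entry) {
	for _, f := range fields {
		switch v := f.val.(type) {
		case []field:
			flatten(base+f.off, v, out) // inline the nested literal, shifting offsets
		case int:
			if v == 0 {
				continue // zeros can be dropped entirely
			}
			*out = append(*out, entry{base + f.off, v})
		}
	}
}

func main() {
	lit := []field{
		{0, 7},
		{8, []field{{0, 1}, {8, 0}}}, // nested struct literal at offset 8
	}
	var plan []entry
	flatten(0, lit, &plan)
	fmt.Println(plan) // [{0 7} {8 1}]
}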
// from here down is the walk analysis
// of composite literals.
// most of the work is to generate
// data statements for the constant
// part of the composite literal.
var statuniqgen int // name generator for static temps
// StaticName returns a name backed by a (writable) static data symbol.
// Use readonlystaticname for a read-only node.
func StaticName(t *types.Type) *ir.Name {
// Don't use lookupN; it interns the resulting string, but these are all unique.
n := typecheck.NewName(typecheck.Lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
statuniqgen++
typecheck.Declare(n, ir.PEXTERN)
n.SetType(t)
n.Sym().Linksym().Set(obj.AttrLocal, true)
return n
}
// StaticLoc returns the static address of n, if n has one, or else nil.
func StaticLoc(n ir.Node) (name *ir.Name, offset int64, ok bool) {
if n == nil {
return nil, 0, false
}
switch n.Op() {
case ir.ONAME:
n := n.(*ir.Name)
return n, 0, true
case ir.OMETHEXPR:
n := n.(*ir.MethodExpr)
return StaticLoc(n.FuncName())
case ir.ODOT:
n := n.(*ir.SelectorExpr)
if name, offset, ok = StaticLoc(n.X); !ok {
break
}
offset += n.Offset
return name, offset, true
case ir.OINDEX:
n := n.(*ir.IndexExpr)
if n.X.Type().IsSlice() {
break
}
if name, offset, ok = StaticLoc(n.X); !ok {
break
}
l := getlit(n.Index)
if l < 0 {
break
}
// Check for overflow.
if n.Type().Width != 0 && types.MaxWidth/n.Type().Width <= int64(l) {
break
}
offset += int64(l) * n.Type().Width
return name, offset, true
}
return nil, 0, false
}
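// A compact, standalone analogue of StaticLoc's offset arithmetic:
// walking an expression like x.f[i].g bottoms out at a named symbol,
// and the final address is name + fieldOffset + index*elemWidth +
// innerFieldOffset, provided every index is constant and the product
// cannot overflow. All numbers here are hypothetical.
package main

import "fmt"

func main() {
	const (
		fOffset   = 16 // offset of field f within x
		elemWidth = 8  // width of f's element type
		gOffset   = 4  // offset of field g within an element
		index     = 3  // constant index
	)
	offset := int64(fOffset + index*elemWidth + gOffset)
	fmt.Println("x +", offset) // x + 44
}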
// AnySideEffects reports whether n contains any operations that could have observable side effects.
func AnySideEffects(n ir.Node) bool {
return ir.Any(n, func(n ir.Node) bool {
switch n.Op() {
// Assume side effects unless we know otherwise.
default:
return true
// No side effects here (arguments are checked separately).
case ir.ONAME,
ir.ONONAME,
ir.OTYPE,
ir.OPACK,
ir.OLITERAL,
ir.ONIL,
ir.OADD,
ir.OSUB,
ir.OOR,
ir.OXOR,
ir.OADDSTR,
ir.OADDR,
ir.OANDAND,
ir.OBYTES2STR,
ir.ORUNES2STR,
ir.OSTR2BYTES,
ir.OSTR2RUNES,
ir.OCAP,
ir.OCOMPLIT,
ir.OMAPLIT,
ir.OSTRUCTLIT,
ir.OARRAYLIT,
ir.OSLICELIT,
ir.OPTRLIT,
ir.OCONV,
ir.OCONVIFACE,
ir.OCONVNOP,
ir.ODOT,
ir.OEQ,
ir.ONE,
ir.OLT,
ir.OLE,
ir.OGT,
ir.OGE,
ir.OKEY,
ir.OSTRUCTKEY,
ir.OLEN,
ir.OMUL,
ir.OLSH,
ir.ORSH,
ir.OAND,
ir.OANDNOT,
ir.ONEW,
ir.ONOT,
ir.OBITNOT,
ir.OPLUS,
ir.ONEG,
ir.OOROR,
ir.OPAREN,
ir.ORUNESTR,
ir.OREAL,
ir.OIMAG,
ir.OCOMPLEX:
return false
// Only possible side effect is division by zero.
case ir.ODIV, ir.OMOD:
n := n.(*ir.BinaryExpr)
if n.Y.Op() != ir.OLITERAL || constant.Sign(n.Y.Val()) == 0 {
return true
}
// Only possible side effect is panic on invalid size,
// but many makechan and makemap use size zero, which is definitely OK.
case ir.OMAKECHAN, ir.OMAKEMAP:
n := n.(*ir.MakeExpr)
if !ir.IsConst(n.Len, constant.Int) || constant.Sign(n.Len.Val()) != 0 {
return true
}
// Only possible side effect is panic on invalid size.
// TODO(rsc): Merge with previous case (probably breaks toolstash -cmp).
case ir.OMAKESLICE, ir.OMAKESLICECOPY:
return true
}
return false
})
}
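// A tiny standalone demonstration of the conservative pattern above:
// when classifying expression nodes, default to "has side effects"
// and whitelist only the operators known to be pure, recursing into
// children of whitelisted nodes. The node type and operator names are
// made up.
package main

import "fmt"

type node struct {
	op   string
	kids []*node
}

func anySideEffects(n *node) bool {
	switch n.op {
	case "name", "literal", "add", "len": // known pure
	default:
		return true // assume side effects unless we know otherwise
	}
	for _, k := range n.kids {
		if anySideEffects(k) {
			return true
		}
	}
	return false
}

func main() {
	pure := &node{op: "add", kids: []*node{{op: "name"}, {op: "literal"}}}
	impure := &node{op: "add", kids: []*node{{op: "call"}}}
	fmt.Println(anySideEffects(pure), anySideEffects(impure)) // false true
}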
func getlit(lit ir.Node) int {
if ir.IsSmallIntConst(lit) {
return int(ir.Int64Val(lit))
}
return -1
}
func isvaluelit(n ir.Node) bool {
return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT
}