[dev.regabi] cmd/compile: split out package ssagen [generated]

[git-generate]

cd src/cmd/compile/internal/gc
rf '
	# maxOpenDefers is declared in ssa.go but used only by walk.
	mv maxOpenDefers walk.go

	# gc.Arch -> ssagen.Arch
	# It is not as nice but will do for now.
	mv Arch ArchInfo
	mv thearch Arch
	mv Arch ArchInfo arch.go

	# Pull dwarf out of pgen.go.
	mv debuginfo declPos createDwarfVars preInliningDcls \
		createSimpleVars createSimpleVar \
		createComplexVars createComplexVar \
		dwarf.go

	# Pull high-level compilation out of pgen.go,
	# leaving only the SSA code.
	mv compilequeue funccompile compile compilenow \
		compileFunctions isInlinableButNotInlined \
		initLSym \
		compile.go

	mv BoundsCheckFunc GCWriteBarrierReg ssa.go
	mv largeStack largeStackFrames CheckLargeStacks pgen.go

	# All that is left in dcl.go is the nowritebarrierrecCheck
	mv dcl.go nowb.go

	# Export API and unexport non-API.
	mv initssaconfig InitConfig
	mv isIntrinsicCall IsIntrinsicCall
	mv ssaDumpInline DumpInline
	mv initSSATables InitTables
	mv initSSAEnv InitEnv
	mv compileSSA Compile
	mv stackOffset StackOffset
	mv canSSAType TypeOK
	mv SSAGenState State
	mv FwdRefAux fwdRefAux

	mv cgoSymABIs CgoSymABIs
	mv readSymABIs ReadSymABIs
	mv initLSym InitLSym
	mv useABIWrapGen symabiDefs CgoSymABIs ReadSymABIs InitLSym selectLSym makeABIWrapper setupTextLSym abi.go

	mv arch.go abi.go nowb.go phi.go pgen.go pgen_test.go ssa.go cmd/compile/internal/ssagen
'
rm go.go gsubr.go

Change-Id: I47fad6cbf1d1e583fd9139003a08401d7cd048a1
Reviewed-on: https://go-review.googlesource.com/c/go/+/279476
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
Russ Cox <rsc@golang.org> 2020-12-23 00:57:10 -05:00
parent de65151e50
commit 6c34d2f420
35 changed files with 1202 additions and 1181 deletions
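For orientation, the net effect of the rf script above is that architecture backends now configure the SSA code generator through ssagen instead of gc. A minimal sketch of the new registration pattern, modeled on the arm galign.go diff below (the package name is hypothetical, the empty function bodies are placeholders, and this only builds inside cmd/compile):

	package toyarch // hypothetical backend, for illustration only

	import (
		"cmd/compile/internal/ssa"
		"cmd/compile/internal/ssagen"
		"cmd/internal/obj/arm"
	)

	// Init is the callback that gc.Main receives as archInit and
	// invokes on the global ssagen.Arch.
	func Init(arch *ssagen.ArchInfo) {
		arch.LinkArch = &arm.Linkarm
		arch.REGSP = arm.REGSP
		arch.MAXWIDTH = (1 << 32) - 1
		arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}      // no flag-clobber fixups needed
		arch.SSAGenValue = func(s *ssagen.State, v *ssa.Value) {}       // per-op machine code emission
		arch.SSAGenBlock = func(s *ssagen.State, b, next *ssa.Block) {} // per-block control flow emission
	}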


@@ -5,13 +5,13 @@
package amd64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/x86"
)
var leaptr = x86.ALEAQ
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &x86.Linkamd64
arch.REGSP = x86.REGSP
arch.MAXWIDTH = 1 << 50


@@ -9,17 +9,17 @@ import (
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
)
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
flive := b.FlagsLiveAtEnd
for _, c := range b.ControlValues() {
flive = c.Type.IsFlags() || flive
@@ -112,7 +112,7 @@ func moveByType(t *types.Type) obj.As {
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
@@ -166,7 +166,7 @@ func duff(size int64) (int64, int64) {
return off, adj
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpAMD64VFMADD231SD:
p := s.Prog(v.Op.Asm())
@@ -632,12 +632,12 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_REG
p.To.Reg = o
}
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL, ssa.OpAMD64LEAW:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
@@ -673,7 +673,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[1].Reg()
case ssa.OpAMD64CMPQconstload, ssa.OpAMD64CMPLconstload, ssa.OpAMD64CMPWconstload, ssa.OpAMD64CMPBconstload:
@@ -681,20 +681,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux2(&p.From, v, sc.Off())
ssagen.AddAux2(&p.From, v, sc.Off())
p.To.Type = obj.TYPE_CONST
p.To.Offset = sc.Val()
case ssa.OpAMD64CMPQloadidx8, ssa.OpAMD64CMPQloadidx1, ssa.OpAMD64CMPLloadidx4, ssa.OpAMD64CMPLloadidx1, ssa.OpAMD64CMPWloadidx2, ssa.OpAMD64CMPWloadidx1, ssa.OpAMD64CMPBloadidx1:
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[2].Reg()
case ssa.OpAMD64CMPQconstloadidx8, ssa.OpAMD64CMPQconstloadidx1, ssa.OpAMD64CMPLconstloadidx4, ssa.OpAMD64CMPLconstloadidx1, ssa.OpAMD64CMPWconstloadidx2, ssa.OpAMD64CMPWconstloadidx1, ssa.OpAMD64CMPBconstloadidx1:
sc := v.AuxValAndOff()
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
gc.AddAux2(&p.From, v, sc.Off())
ssagen.AddAux2(&p.From, v, sc.Off())
p.To.Type = obj.TYPE_CONST
p.To.Offset = sc.Val()
case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
@@ -734,14 +734,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1,
ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2:
p := s.Prog(v.Op.Asm())
memIdx(&p.From, v)
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore,
@@ -753,7 +753,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1,
ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2,
ssa.OpAMD64ADDLmodifyidx1, ssa.OpAMD64ADDLmodifyidx4, ssa.OpAMD64ADDLmodifyidx8, ssa.OpAMD64ADDQmodifyidx1, ssa.OpAMD64ADDQmodifyidx8,
@@ -765,7 +765,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[2].Reg()
memIdx(&p.To, v)
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify:
sc := v.AuxValAndOff()
off := sc.Off()
@@ -788,7 +788,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(asm)
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, off)
ssagen.AddAux2(&p.To, v, off)
break
}
fallthrough
@@ -803,7 +803,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = val
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, off)
ssagen.AddAux2(&p.To, v, off)
case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
p := s.Prog(v.Op.Asm())
@@ -812,7 +812,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, sc.Off())
ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1,
ssa.OpAMD64ADDLconstmodifyidx1, ssa.OpAMD64ADDLconstmodifyidx4, ssa.OpAMD64ADDLconstmodifyidx8, ssa.OpAMD64ADDQconstmodifyidx1, ssa.OpAMD64ADDQconstmodifyidx8,
ssa.OpAMD64ANDLconstmodifyidx1, ssa.OpAMD64ANDLconstmodifyidx4, ssa.OpAMD64ANDLconstmodifyidx8, ssa.OpAMD64ANDQconstmodifyidx1, ssa.OpAMD64ANDQconstmodifyidx8,
@@ -837,7 +837,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_NONE
}
memIdx(&p.To, v)
gc.AddAux2(&p.To, v, sc.Off())
ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
@@ -867,7 +867,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if v.Reg() != v.Args[0].Reg() {
@@ -893,7 +893,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = r
p.From.Index = i
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if v.Reg() != v.Args[0].Reg() {
@@ -951,7 +951,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@@ -963,16 +963,16 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpAMD64LoweredHasCPUFeature:
p := s.Prog(x86.AMOVBQZX)
p.From.Type = obj.TYPE_MEM
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpAMD64LoweredGetClosurePtr:
// Closure pointer is DX.
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpAMD64LoweredGetG:
r := v.Reg()
// See the comments in cmd/internal/obj/x86/obj6.go
@@ -1029,13 +1029,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
// arg0 is in DI. Set sym to match where regalloc put arg1.
p.To.Sym = gc.GCWriteBarrierReg[v.Args[1].Reg()]
p.To.Sym = ssagen.GCWriteBarrierReg[v.Args[1].Reg()]
case ssa.OpAMD64LoweredPanicBoundsA, ssa.OpAMD64LoweredPanicBoundsB, ssa.OpAMD64LoweredPanicBoundsC:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(int64(2 * types.PtrSize)) // space used in callee args area by assembly stubs
case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
@@ -1117,7 +1117,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpAMD64SETNEF:
p := s.Prog(v.Op.Asm())
@@ -1173,7 +1173,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpAMD64XCHGB, ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ:
@@ -1186,7 +1186,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = r
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[1].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock:
r := v.Reg0()
if r != v.Args[0].Reg() {
@@ -1198,7 +1198,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = r
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[1].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpAMD64CMPXCHGLlock, ssa.OpAMD64CMPXCHGQlock:
if v.Args[1].Reg() != x86.REG_AX {
v.Fatalf("input[1] not in AX %s", v.LongString())
@@ -1209,7 +1209,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
p = s.Prog(x86.ASETEQ)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
@@ -1220,20 +1220,20 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpClobber:
p := s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0xdeaddead
p.To.Type = obj.TYPE_MEM
p.To.Reg = x86.REG_SP
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
p = s.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0xdeaddead
p.To.Type = obj.TYPE_MEM
p.To.Reg = x86.REG_SP
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
p.To.Offset += 4
default:
v.Fatalf("genValue not implemented: %s", v.LongString())
@@ -1259,22 +1259,22 @@ var blockJump = [...]struct {
ssa.BlockAMD64NAN: {x86.AJPS, x86.AJPC},
}
var eqfJumps = [2][2]gc.IndexJump{
var eqfJumps = [2][2]ssagen.IndexJump{
{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
}
var nefJumps = [2][2]gc.IndexJump{
var nefJumps = [2][2]ssagen.IndexJump{
{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
// defer returns in rax:
@@ -1287,11 +1287,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.To.Reg = x86.REG_AX
p = s.Prog(x86.AJNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:


@@ -5,13 +5,13 @@
package arm
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/arm"
"cmd/internal/objabi"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &arm.Linkarm
arch.REGSP = arm.REGSP
arch.MAXWIDTH = (1 << 32) - 1
@@ -20,7 +20,7 @@ func Init(arch *gc.Arch) {
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}


@@ -10,10 +10,10 @@ import (
"math/bits"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm"
@@ -93,7 +93,7 @@ func makeshift(reg int16, typ int64, s int64) shift {
}
// genshift generates a Prog for r = r0 op (r1 shifted by n)
func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(makeshift(r1, typ, n))
@@ -111,7 +111,7 @@ func makeregshift(r1 int16, typ int64, r2 int16) shift {
}
// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
func genregshift(s *gc.SSAGenState, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
func genregshift(s *ssagen.State, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(makeregshift(r1, typ, r2))
@@ -145,7 +145,7 @@ func getBFC(v uint32) (uint32, uint32) {
return 0xffffffff, 0
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpARMMOVWreg:
if v.Type.IsMemory() {
@@ -183,7 +183,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
@@ -194,7 +194,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpARMADD,
ssa.OpARMADC,
ssa.OpARMSUB,
@@ -545,10 +545,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case *ir.Name:
wantreg = "SP"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case nil:
// No sym, just MOVW $off(SP), R
wantreg = "SP"
@@ -568,7 +568,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARMMOVBstore,
@@ -581,7 +581,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx:
// this is just shift 0 bits
fallthrough
@@ -712,13 +712,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(8) // space used in callee args area by assembly stubs
case ssa.OpARMLoweredPanicExtendA, ssa.OpARMLoweredPanicExtendB, ssa.OpARMLoweredPanicExtendC:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
s.UseArgs(12) // space used in callee args area by assembly stubs
case ssa.OpARMDUFFZERO:
p := s.Prog(obj.ADUFFZERO)
@@ -737,7 +737,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(arm.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = arm.REGTMP
if logopt.Enabled() {
@@ -846,7 +846,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpARMLoweredGetClosurePtr:
// Closure pointer is R7 (arm.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpARMLoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm.AMOVW)
@@ -901,24 +901,24 @@ var blockJump = map[ssa.BlockKind]struct {
}
// To model a 'LEnoov' ('<=' without overflow checking) branching
var leJumps = [2][2]gc.IndexJump{
var leJumps = [2][2]ssagen.IndexJump{
{{Jump: arm.ABEQ, Index: 0}, {Jump: arm.ABPL, Index: 1}}, // next == b.Succs[0]
{{Jump: arm.ABMI, Index: 0}, {Jump: arm.ABEQ, Index: 0}}, // next == b.Succs[1]
}
// To model a 'GTnoov' ('>' without overflow checking) branching
var gtJumps = [2][2]gc.IndexJump{
var gtJumps = [2][2]ssagen.IndexJump{
{{Jump: arm.ABMI, Index: 1}, {Jump: arm.ABEQ, Index: 1}}, // next == b.Succs[0]
{{Jump: arm.ABEQ, Index: 1}, {Jump: arm.ABPL, Index: 0}}, // next == b.Succs[1]
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
@@ -931,11 +931,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.Reg = arm.REG_R0
p = s.Prog(arm.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:


@@ -5,12 +5,12 @@
package arm64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/arm64"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &arm64.Linkarm64
arch.REGSP = arm64.REGSP
arch.MAXWIDTH = 1 << 50
@@ -20,7 +20,7 @@ func Init(arch *gc.Arch) {
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}


@@ -8,10 +8,10 @@ import (
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/arm64"
@@ -83,7 +83,7 @@ func makeshift(reg int16, typ int64, s int64) int64 {
}
// genshift generates a Prog for r = r0 op (r1 shifted by n)
func genshift(s *gc.SSAGenState, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
func genshift(s *ssagen.State, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = makeshift(r1, typ, n)
@@ -112,7 +112,7 @@ func genIndexedOperand(v *ssa.Value) obj.Addr {
return mop
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpARM64MOVDreg:
if v.Type.IsMemory() {
@@ -150,7 +150,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
@@ -161,7 +161,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpARM64ADD,
ssa.OpARM64SUB,
ssa.OpARM64AND,
@@ -395,10 +395,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case *ir.Name:
wantreg = "SP"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case nil:
// No sym, just MOVD $off(SP), R
wantreg = "SP"
@@ -419,7 +419,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARM64MOVBloadidx,
@@ -446,7 +446,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpARM64MOVBstore,
@@ -463,7 +463,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpARM64MOVBstoreidx,
ssa.OpARM64MOVHstoreidx,
ssa.OpARM64MOVWstoreidx,
@@ -484,7 +484,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = int64(v.Args[2].Reg())
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpARM64MOVBstorezero,
ssa.OpARM64MOVHstorezero,
ssa.OpARM64MOVWstorezero,
@@ -494,7 +494,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = arm64.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpARM64MOVBstorezeroidx,
ssa.OpARM64MOVHstorezeroidx,
ssa.OpARM64MOVWstorezeroidx,
@@ -513,7 +513,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = int64(arm64.REGZERO)
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpARM64BFI,
ssa.OpARM64BFXIL:
r := v.Reg()
@@ -1027,14 +1027,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpARM64LoweredNilCheck:
// Issue a load which will fault if arg is nil.
p := s.Prog(arm64.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = arm64.REGTMP
if logopt.Enabled() {
@@ -1065,7 +1065,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Reg()
case ssa.OpARM64LoweredGetClosurePtr:
// Closure pointer is R26 (arm64.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpARM64LoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(arm64.AMOVD)
@@ -1134,24 +1134,24 @@ var blockJump = map[ssa.BlockKind]struct {
}
// To model a 'LEnoov' ('<=' without overflow checking) branching
var leJumps = [2][2]gc.IndexJump{
var leJumps = [2][2]ssagen.IndexJump{
{{Jump: arm64.ABEQ, Index: 0}, {Jump: arm64.ABPL, Index: 1}}, // next == b.Succs[0]
{{Jump: arm64.ABMI, Index: 0}, {Jump: arm64.ABEQ, Index: 0}}, // next == b.Succs[1]
}
// To model a 'GTnoov' ('>' without overflow checking) branching
var gtJumps = [2][2]gc.IndexJump{
var gtJumps = [2][2]ssagen.IndexJump{
{{Jump: arm64.ABMI, Index: 1}, {Jump: arm64.ABEQ, Index: 1}}, // next == b.Succs[0]
{{Jump: arm64.ABEQ, Index: 1}, {Jump: arm64.ABPL, Index: 0}}, // next == b.Succs[1]
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
@@ -1164,11 +1164,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.Reg = arm64.REG_R0
p = s.Prog(arm64.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:


@@ -8,6 +8,7 @@ import (
"bufio"
"cmd/compile/internal/base"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
@@ -28,16 +29,16 @@ var configAMD64 = ABIConfig{
}
func TestMain(m *testing.M) {
thearch.LinkArch = &x86.Linkamd64
thearch.REGSP = x86.REGSP
thearch.MAXWIDTH = 1 << 50
types.MaxWidth = thearch.MAXWIDTH
base.Ctxt = obj.Linknew(thearch.LinkArch)
ssagen.Arch.LinkArch = &x86.Linkamd64
ssagen.Arch.REGSP = x86.REGSP
ssagen.Arch.MAXWIDTH = 1 << 50
types.MaxWidth = ssagen.Arch.MAXWIDTH
base.Ctxt = obj.Linknew(ssagen.Arch.LinkArch)
base.Ctxt.DiagFunc = base.Errorf
base.Ctxt.DiagFlush = base.FlushErrors
base.Ctxt.Bso = bufio.NewWriter(os.Stdout)
types.PtrSize = thearch.LinkArch.PtrSize
types.RegSize = thearch.LinkArch.RegSize
types.PtrSize = ssagen.Arch.LinkArch.PtrSize
types.RegSize = ssagen.Arch.LinkArch.RegSize
types.TypeLinkSym = func(t *types.Type) *obj.LSym {
return reflectdata.TypeSym(t).Linksym()
}


@@ -0,0 +1,177 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
import (
"internal/race"
"math/rand"
"sort"
"sync"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/liveness"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
)
// "Portable" code generation.
var (
compilequeue []*ir.Func // functions waiting to be compiled
)
func funccompile(fn *ir.Func) {
if ir.CurFunc != nil {
base.Fatalf("funccompile %v inside %v", fn.Sym(), ir.CurFunc.Sym())
}
if fn.Type() == nil {
if base.Errors() == 0 {
base.Fatalf("funccompile missing type")
}
return
}
// assign parameter offsets
types.CalcSize(fn.Type())
if len(fn.Body) == 0 {
// Initialize ABI wrappers if necessary.
ssagen.InitLSym(fn, false)
liveness.WriteFuncMap(fn)
return
}
typecheck.DeclContext = ir.PAUTO
ir.CurFunc = fn
compile(fn)
ir.CurFunc = nil
typecheck.DeclContext = ir.PEXTERN
}
func compile(fn *ir.Func) {
// Set up the function's LSym early to avoid data races with the assemblers.
// Do this before walk, as walk needs the LSym to set attributes/relocations
// (e.g. in markTypeUsedInInterface).
ssagen.InitLSym(fn, true)
errorsBefore := base.Errors()
walk(fn)
if base.Errors() > errorsBefore {
return
}
// From this point, there should be no uses of Curfn. Enforce that.
ir.CurFunc = nil
if ir.FuncName(fn) == "_" {
// We don't need to generate code for this function, just report errors in its body.
// At this point we've generated any errors needed.
// (Beyond here we generate only non-spec errors, like "stack frame too large".)
// See issue 29870.
return
}
// Make sure type syms are declared for all types that might
// be types of stack objects. We need to do this here
// because symbols must be allocated before the parallel
// phase of the compiler.
for _, n := range fn.Dcl {
switch n.Class_ {
case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO:
if liveness.ShouldTrack(n) && n.Addrtaken() {
reflectdata.WriteType(n.Type())
// Also make sure we allocate a linker symbol
// for the stack object data, for the same reason.
if fn.LSym.Func().StackObjects == nil {
fn.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.LSym.Name + ".stkobj")
}
}
}
}
if compilenow(fn) {
ssagen.Compile(fn, 0)
} else {
compilequeue = append(compilequeue, fn)
}
}
// compilenow reports whether to compile immediately.
// If functions are not compiled immediately,
// they are enqueued in compilequeue,
// which is drained by compileFunctions.
func compilenow(fn *ir.Func) bool {
// Issue 38068: if this function is a method AND an inline
// candidate AND was not inlined (yet), put it onto the compile
// queue instead of compiling it immediately. This is in case we
// wind up inlining it into a method wrapper that is generated by
// compiling a function later on in the Target.Decls list.
if ir.IsMethod(fn) && isInlinableButNotInlined(fn) {
return false
}
return base.Flag.LowerC == 1 && base.Debug.CompileLater == 0
}
// compileFunctions compiles all functions in compilequeue.
// It fans out nBackendWorkers to do the work
// and waits for them to complete.
func compileFunctions() {
if len(compilequeue) != 0 {
types.CalcSizeDisabled = true // not safe to calculate sizes concurrently
if race.Enabled {
// Randomize compilation order to try to shake out races.
tmp := make([]*ir.Func, len(compilequeue))
perm := rand.Perm(len(compilequeue))
for i, v := range perm {
tmp[v] = compilequeue[i]
}
copy(compilequeue, tmp)
} else {
// Compile the longest functions first,
// since they're most likely to be the slowest.
// This helps avoid stragglers.
sort.Slice(compilequeue, func(i, j int) bool {
return len(compilequeue[i].Body) > len(compilequeue[j].Body)
})
}
var wg sync.WaitGroup
base.Ctxt.InParallel = true
c := make(chan *ir.Func, base.Flag.LowerC)
for i := 0; i < base.Flag.LowerC; i++ {
wg.Add(1)
go func(worker int) {
for fn := range c {
ssagen.Compile(fn, worker)
}
wg.Done()
}(i)
}
for _, fn := range compilequeue {
c <- fn
}
close(c)
compilequeue = nil
wg.Wait()
base.Ctxt.InParallel = false
types.CalcSizeDisabled = false
}
}
// isInlinableButNotInlined returns true if 'fn' was marked as an
// inline candidate but then never inlined (presumably because we
// found no call sites).
func isInlinableButNotInlined(fn *ir.Func) bool {
if fn.Inl == nil {
return false
}
if fn.Sym() == nil {
return true
}
return !fn.Sym().Linksym().WasInlined()
}

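For readers less familiar with the pattern used by compileFunctions above, here is a minimal standalone sketch of the same bounded fan-out (every name here is illustrative; nothing below comes from the compiler itself):

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		queue := []string{"f1", "f2", "f3", "f4"} // stands in for compilequeue
		const nWorkers = 2                        // stands in for base.Flag.LowerC

		c := make(chan string, nWorkers)
		var wg sync.WaitGroup
		for i := 0; i < nWorkers; i++ {
			wg.Add(1)
			go func(worker int) {
				defer wg.Done()
				for fn := range c {
					fmt.Printf("worker %d compiling %s\n", worker, fn)
				}
			}(i)
		}
		for _, fn := range queue {
			c <- fn
		}
		close(c) // lets each worker's range loop end
		wg.Wait()
	}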

@@ -5,361 +5,19 @@
package gc
import (
"sort"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/liveness"
"cmd/compile/internal/objw"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssa"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/dwarf"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
"cmd/internal/sys"
"internal/race"
"math/rand"
"sort"
"sync"
"time"
)
// "Portable" code generation.
var (
compilequeue []*ir.Func // functions waiting to be compiled
)
// cmpstackvarlt reports whether the stack variable a sorts before b.
//
// Sort the list of stack variables. Autos after anything else,
// within autos, unused after used, within used, things with
// pointers first, zeroed things first, and then decreasing size.
// Because autos are laid out in decreasing addresses
// on the stack, pointers first, zeroed things first and decreasing size
// really means, in memory, things with pointers needing zeroing at
// the top of the stack and increasing in size.
// Non-autos sort on offset.
func cmpstackvarlt(a, b *ir.Name) bool {
if (a.Class_ == ir.PAUTO) != (b.Class_ == ir.PAUTO) {
return b.Class_ == ir.PAUTO
}
if a.Class_ != ir.PAUTO {
return a.FrameOffset() < b.FrameOffset()
}
if a.Used() != b.Used() {
return a.Used()
}
ap := a.Type().HasPointers()
bp := b.Type().HasPointers()
if ap != bp {
return ap
}
ap = a.Needzero()
bp = b.Needzero()
if ap != bp {
return ap
}
if a.Type().Width != b.Type().Width {
return a.Type().Width > b.Type().Width
}
return a.Sym().Name < b.Sym().Name
}
// byStackvar implements sort.Interface for []*Node using cmpstackvarlt.
type byStackVar []*ir.Name
func (s byStackVar) Len() int { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s *ssafn) AllocFrame(f *ssa.Func) {
s.stksize = 0
s.stkptrsize = 0
fn := s.curfn
// Mark the PAUTO's unused.
for _, ln := range fn.Dcl {
if ln.Class_ == ir.PAUTO {
ln.SetUsed(false)
}
}
for _, l := range f.RegAlloc {
if ls, ok := l.(ssa.LocalSlot); ok {
ls.N.Name().SetUsed(true)
}
}
scratchUsed := false
for _, b := range f.Blocks {
for _, v := range b.Values {
if n, ok := v.Aux.(*ir.Name); ok {
switch n.Class_ {
case ir.PPARAM, ir.PPARAMOUT:
// Don't modify nodfp; it is a global.
if n != ir.RegFP {
n.Name().SetUsed(true)
}
case ir.PAUTO:
n.Name().SetUsed(true)
}
}
if !scratchUsed {
scratchUsed = v.Op.UsesScratch()
}
}
}
if f.Config.NeedsFpScratch && scratchUsed {
s.scratchFpMem = typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT64])
}
sort.Sort(byStackVar(fn.Dcl))
// Reassign stack offsets of the locals that are used.
lastHasPtr := false
for i, n := range fn.Dcl {
if n.Op() != ir.ONAME || n.Class_ != ir.PAUTO {
continue
}
if !n.Used() {
fn.Dcl = fn.Dcl[:i]
break
}
types.CalcSize(n.Type())
w := n.Type().Width
if w >= types.MaxWidth || w < 0 {
base.Fatalf("bad width")
}
if w == 0 && lastHasPtr {
// Pad between a pointer-containing object and a zero-sized object.
// This prevents a pointer to the zero-sized object from being interpreted
// as a pointer to the pointer-containing object (and causing it
// to be scanned when it shouldn't be). See issue 24993.
w = 1
}
s.stksize += w
s.stksize = types.Rnd(s.stksize, int64(n.Type().Align))
if n.Type().HasPointers() {
s.stkptrsize = s.stksize
lastHasPtr = true
} else {
lastHasPtr = false
}
if thearch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
s.stksize = types.Rnd(s.stksize, int64(types.PtrSize))
}
n.SetFrameOffset(-s.stksize)
}
s.stksize = types.Rnd(s.stksize, int64(types.RegSize))
s.stkptrsize = types.Rnd(s.stkptrsize, int64(types.RegSize))
}
func funccompile(fn *ir.Func) {
if ir.CurFunc != nil {
base.Fatalf("funccompile %v inside %v", fn.Sym(), ir.CurFunc.Sym())
}
if fn.Type() == nil {
if base.Errors() == 0 {
base.Fatalf("funccompile missing type")
}
return
}
// assign parameter offsets
types.CalcSize(fn.Type())
if len(fn.Body) == 0 {
// Initialize ABI wrappers if necessary.
initLSym(fn, false)
liveness.WriteFuncMap(fn)
return
}
typecheck.DeclContext = ir.PAUTO
ir.CurFunc = fn
compile(fn)
ir.CurFunc = nil
typecheck.DeclContext = ir.PEXTERN
}
func compile(fn *ir.Func) {
// Set up the function's LSym early to avoid data races with the assemblers.
// Do this before walk, as walk needs the LSym to set attributes/relocations
// (e.g. in markTypeUsedInInterface).
initLSym(fn, true)
errorsBefore := base.Errors()
walk(fn)
if base.Errors() > errorsBefore {
return
}
// From this point, there should be no uses of Curfn. Enforce that.
ir.CurFunc = nil
if ir.FuncName(fn) == "_" {
// We don't need to generate code for this function, just report errors in its body.
// At this point we've generated any errors needed.
// (Beyond here we generate only non-spec errors, like "stack frame too large".)
// See issue 29870.
return
}
// Make sure type syms are declared for all types that might
// be types of stack objects. We need to do this here
// because symbols must be allocated before the parallel
// phase of the compiler.
for _, n := range fn.Dcl {
switch n.Class_ {
case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO:
if liveness.ShouldTrack(n) && n.Addrtaken() {
reflectdata.WriteType(n.Type())
// Also make sure we allocate a linker symbol
// for the stack object data, for the same reason.
if fn.LSym.Func().StackObjects == nil {
fn.LSym.Func().StackObjects = base.Ctxt.Lookup(fn.LSym.Name + ".stkobj")
}
}
}
}
if compilenow(fn) {
compileSSA(fn, 0)
} else {
compilequeue = append(compilequeue, fn)
}
}
// compilenow reports whether to compile immediately.
// If functions are not compiled immediately,
// they are enqueued in compilequeue,
// which is drained by compileFunctions.
func compilenow(fn *ir.Func) bool {
// Issue 38068: if this function is a method AND an inline
// candidate AND was not inlined (yet), put it onto the compile
// queue instead of compiling it immediately. This is in case we
// wind up inlining it into a method wrapper that is generated by
// compiling a function later on in the Target.Decls list.
if ir.IsMethod(fn) && isInlinableButNotInlined(fn) {
return false
}
return base.Flag.LowerC == 1 && base.Debug.CompileLater == 0
}
// isInlinableButNotInlined returns true if 'fn' was marked as an
// inline candidate but then never inlined (presumably because we
// found no call sites).
func isInlinableButNotInlined(fn *ir.Func) bool {
if fn.Inl == nil {
return false
}
if fn.Sym() == nil {
return true
}
return !fn.Sym().Linksym().WasInlined()
}
const maxStackSize = 1 << 30
// compileSSA builds an SSA backend function,
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
func compileSSA(fn *ir.Func, worker int) {
f := buildssa(fn, worker)
// Note: check arg size to fix issue 25507.
if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize {
largeStackFramesMu.Lock()
largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type().ArgWidth(), pos: fn.Pos()})
largeStackFramesMu.Unlock()
return
}
pp := objw.NewProgs(fn, worker)
defer pp.Free()
genssa(f, pp)
// Check frame size again.
// The check above included only the space needed for local variables.
// After genssa, the space needed includes local variables and the callee arg region.
// We must do this check prior to calling pp.Flush.
// If there are any oversized stack frames,
// the assembler may emit inscrutable complaints about invalid instructions.
if pp.Text.To.Offset >= maxStackSize {
largeStackFramesMu.Lock()
locals := f.Frontend().(*ssafn).stksize
largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type().ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()})
largeStackFramesMu.Unlock()
return
}
pp.Flush() // assemble, fill in boilerplate, etc.
// fieldtrack must be called after pp.Flush. See issue 20014.
fieldtrack(pp.Text.From.Sym, fn.FieldTrack)
}
func init() {
if race.Enabled {
rand.Seed(time.Now().UnixNano())
}
}
// compileFunctions compiles all functions in compilequeue.
// It fans out nBackendWorkers to do the work
// and waits for them to complete.
func compileFunctions() {
if len(compilequeue) != 0 {
types.CalcSizeDisabled = true // not safe to calculate sizes concurrently
if race.Enabled {
// Randomize compilation order to try to shake out races.
tmp := make([]*ir.Func, len(compilequeue))
perm := rand.Perm(len(compilequeue))
for i, v := range perm {
tmp[v] = compilequeue[i]
}
copy(compilequeue, tmp)
} else {
// Compile the longest functions first,
// since they're most likely to be the slowest.
// This helps avoid stragglers.
sort.Slice(compilequeue, func(i, j int) bool {
return len(compilequeue[i].Body) > len(compilequeue[j].Body)
})
}
var wg sync.WaitGroup
base.Ctxt.InParallel = true
c := make(chan *ir.Func, base.Flag.LowerC)
for i := 0; i < base.Flag.LowerC; i++ {
wg.Add(1)
go func(worker int) {
for fn := range c {
compileSSA(fn, worker)
}
wg.Done()
}(i)
}
for _, fn := range compilequeue {
c <- fn
}
close(c)
compilequeue = nil
wg.Wait()
base.Ctxt.InParallel = false
types.CalcSizeDisabled = false
}
}
func debuginfo(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
fn := curfn.(*ir.Func)
@@ -485,6 +143,126 @@ func declPos(decl *ir.Name) src.XPos {
return decl.Pos()
}
// createDwarfVars process fn, returning a list of DWARF variables and the
// Nodes they represent.
func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var) {
// Collect a raw list of DWARF vars.
var vars []*dwarf.Var
var decls []*ir.Name
var selected map[*ir.Name]bool
if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
decls, vars, selected = createComplexVars(fnsym, fn)
} else {
decls, vars, selected = createSimpleVars(fnsym, apDecls)
}
dcl := apDecls
if fnsym.WasInlined() {
dcl = preInliningDcls(fnsym)
}
// If optimization is enabled, the list above will typically be
// missing some of the original pre-optimization variables in the
// function (they may have been promoted to registers, folded into
// constants, dead-coded away, etc). Input arguments not eligible
// for SSA optimization are also missing. Here we add back in entries
// for selected missing vars. Note that the recipe below creates a
// conservative location. The idea here is that we want to
// communicate to the user that "yes, there is a variable named X
// in this function, but no, I don't have enough information to
// reliably report its contents."
// For non-SSA-able arguments, however, the correct information
// is known -- they have a single home on the stack.
for _, n := range dcl {
if _, found := selected[n]; found {
continue
}
c := n.Sym().Name[0]
if c == '.' || n.Type().IsUntyped() {
continue
}
if n.Class_ == ir.PPARAM && !ssagen.TypeOK(n.Type()) {
// SSA-able args get location lists, and may move in and
// out of registers, so those are handled elsewhere.
// Autos and named output params seem to get handled
// with VARDEF, which creates location lists.
// Args not of SSA-able type are treated here; they
// are homed on the stack in a single place for the
// entire call.
vars = append(vars, createSimpleVar(fnsym, n))
decls = append(decls, n)
continue
}
typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
decls = append(decls, n)
abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
isReturnValue := (n.Class_ == ir.PPARAMOUT)
if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
} else if n.Class_ == ir.PAUTOHEAP {
// If dcl in question has been promoted to heap, do a bit
// of extra work to recover original class (auto or param);
// see issue 30908. This insures that we get the proper
// signature in the abstract function DIE, but leaves a
// misleading location for the param (we want pointer-to-heap
// and not stack).
// TODO(thanm): generate a better location expression
stackcopy := n.Name().Stackcopy
if stackcopy != nil && (stackcopy.Class_ == ir.PPARAM || stackcopy.Class_ == ir.PPARAMOUT) {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
isReturnValue = (stackcopy.Class_ == ir.PPARAMOUT)
}
}
inlIndex := 0
if base.Flag.GenDwarfInl > 1 {
if n.Name().InlFormal() || n.Name().InlLocal() {
inlIndex = posInlIndex(n.Pos()) + 1
if n.Name().InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
}
declpos := base.Ctxt.InnermostPos(n.Pos())
vars = append(vars, &dwarf.Var{
Name: n.Sym().Name,
IsReturnValue: isReturnValue,
Abbrev: abbrev,
StackOffset: int32(n.FrameOffset()),
Type: base.Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
})
// Record go type of to insure that it gets emitted by the linker.
fnsym.Func().RecordAutoType(ngotype(n).Linksym())
}
return decls, vars
}
// Given a function that was inlined at some point during the
// compilation, return a sorted list of nodes corresponding to the
// autos/locals in that function prior to inlining. If this is a
// function that is not local to the package being compiled, then the
// names of the variables may have been "versioned" to avoid conflicts
// with local vars; disregard this versioning when sorting.
func preInliningDcls(fnsym *obj.LSym) []*ir.Name {
fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Func)
var rdcl []*ir.Name
for _, n := range fn.Inl.Dcl {
c := n.Sym().Name[0]
// Avoid reporting "_" parameters, since if there are more than
// one, it can result in a collision later on, as in #23179.
if unversion(n.Sym().Name) == "_" || c == '.' || n.Type().IsUntyped() {
continue
}
rdcl = append(rdcl, n)
}
return rdcl
}
// createSimpleVars creates a DWARF entry for every variable declared in the
// function, claiming that they are permanently on the stack.
func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, map[*ir.Name]bool) {
@@ -579,148 +357,6 @@ func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var,
return decls, vars, ssaVars
}
// createDwarfVars process fn, returning a list of DWARF variables and the
// Nodes they represent.
func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var) {
// Collect a raw list of DWARF vars.
var vars []*dwarf.Var
var decls []*ir.Name
var selected map[*ir.Name]bool
if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
decls, vars, selected = createComplexVars(fnsym, fn)
} else {
decls, vars, selected = createSimpleVars(fnsym, apDecls)
}
dcl := apDecls
if fnsym.WasInlined() {
dcl = preInliningDcls(fnsym)
}
// If optimization is enabled, the list above will typically be
// missing some of the original pre-optimization variables in the
// function (they may have been promoted to registers, folded into
// constants, dead-coded away, etc). Input arguments not eligible
// for SSA optimization are also missing. Here we add back in entries
// for selected missing vars. Note that the recipe below creates a
// conservative location. The idea here is that we want to
// communicate to the user that "yes, there is a variable named X
// in this function, but no, I don't have enough information to
// reliably report its contents."
// For non-SSA-able arguments, however, the correct information
// is known -- they have a single home on the stack.
for _, n := range dcl {
if _, found := selected[n]; found {
continue
}
c := n.Sym().Name[0]
if c == '.' || n.Type().IsUntyped() {
continue
}
if n.Class_ == ir.PPARAM && !canSSAType(n.Type()) {
// SSA-able args get location lists, and may move in and
// out of registers, so those are handled elsewhere.
// Autos and named output params seem to get handled
// with VARDEF, which creates location lists.
// Args not of SSA-able type are treated here; they
// are homed on the stack in a single place for the
// entire call.
vars = append(vars, createSimpleVar(fnsym, n))
decls = append(decls, n)
continue
}
typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
decls = append(decls, n)
abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
isReturnValue := (n.Class_ == ir.PPARAMOUT)
if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
} else if n.Class_ == ir.PAUTOHEAP {
// If dcl in question has been promoted to heap, do a bit
// of extra work to recover original class (auto or param);
// see issue 30908. This insures that we get the proper
// signature in the abstract function DIE, but leaves a
// misleading location for the param (we want pointer-to-heap
// and not stack).
// TODO(thanm): generate a better location expression
stackcopy := n.Name().Stackcopy
if stackcopy != nil && (stackcopy.Class_ == ir.PPARAM || stackcopy.Class_ == ir.PPARAMOUT) {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
isReturnValue = (stackcopy.Class_ == ir.PPARAMOUT)
}
}
inlIndex := 0
if base.Flag.GenDwarfInl > 1 {
if n.Name().InlFormal() || n.Name().InlLocal() {
inlIndex = posInlIndex(n.Pos()) + 1
if n.Name().InlFormal() {
abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
}
}
}
declpos := base.Ctxt.InnermostPos(n.Pos())
vars = append(vars, &dwarf.Var{
Name: n.Sym().Name,
IsReturnValue: isReturnValue,
Abbrev: abbrev,
StackOffset: int32(n.FrameOffset()),
Type: base.Ctxt.Lookup(typename),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
InlIndex: int32(inlIndex),
ChildIndex: -1,
})
// Record go type of to insure that it gets emitted by the linker.
fnsym.Func().RecordAutoType(ngotype(n).Linksym())
}
return decls, vars
}
// Given a function that was inlined at some point during the
// compilation, return a sorted list of nodes corresponding to the
// autos/locals in that function prior to inlining. If this is a
// function that is not local to the package being compiled, then the
// names of the variables may have been "versioned" to avoid conflicts
// with local vars; disregard this versioning when sorting.
func preInliningDcls(fnsym *obj.LSym) []*ir.Name {
fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Func)
var rdcl []*ir.Name
for _, n := range fn.Inl.Dcl {
c := n.Sym().Name[0]
// Avoid reporting "_" parameters, since if there are more than
// one, it can result in a collision later on, as in #23179.
if unversion(n.Sym().Name) == "_" || c == '.' || n.Type().IsUntyped() {
continue
}
rdcl = append(rdcl, n)
}
return rdcl
}
// stackOffset returns the stack location of a LocalSlot relative to the
// stack pointer, suitable for use in a DWARF location entry. This has nothing
// to do with its offset in the user variable.
func stackOffset(slot ssa.LocalSlot) int32 {
n := slot.N
var off int64
switch n.Class_ {
case ir.PAUTO:
off = n.FrameOffset()
if base.Ctxt.FixedFrameSize() == 0 {
off -= int64(types.PtrSize)
}
if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
// There is a word space for FP on ARM64 even if the frame pointer is disabled
off -= int64(types.PtrSize)
}
case ir.PPARAM, ir.PPARAMOUT:
off = n.FrameOffset() + base.Ctxt.FixedFrameSize()
}
return int32(off + slot.Off)
}
// createComplexVar builds a single DWARF variable entry and location list.
func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var {
debug := fn.DebugInfo.(*ssa.FuncDebug)
@@ -759,7 +395,7 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var
// variables just give it the first one. It's not used otherwise.
// This won't work well if the first slot hasn't been assigned a stack
// location, but it's not obvious how to do better.
StackOffset: stackOffset(debug.Slots[debug.VarSlots[varID][0]]),
StackOffset: ssagen.StackOffset(debug.Slots[debug.VarSlots[varID][0]]),
DeclFile: declpos.RelFilename(),
DeclLine: declpos.RelLine(),
DeclCol: declpos.Col(),
@@ -774,31 +410,3 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var
}
return dvar
}
// fieldtrack adds R_USEFIELD relocations to fnsym to record any
// struct fields that it used.
func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
if fnsym == nil {
return
}
if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
return
}
trackSyms := make([]*types.Sym, 0, len(tracked))
for sym := range tracked {
trackSyms = append(trackSyms, sym)
}
sort.Sort(symByName(trackSyms))
for _, sym := range trackSyms {
r := obj.Addrel(fnsym)
r.Sym = sym.Linksym()
r.Type = objabi.R_USEFIELD
}
}
type symByName []*types.Sym
func (a symByName) Len() int { return len(a) }
func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func (a symByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }


@@ -17,6 +17,7 @@ import (
"cmd/compile/internal/noder"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/staticdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
@@ -26,12 +27,9 @@ import (
"cmd/internal/src"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"runtime"
"sort"
"strings"
)
func hidePanic() {
@@ -52,14 +50,14 @@ func hidePanic() {
// Main parses flags and Go source files specified in the command-line
// arguments, type-checks the parsed Go package, compiles functions to machine
// code, and finally writes the compiled package definition to disk.
func Main(archInit func(*Arch)) {
func Main(archInit func(*ssagen.ArchInfo)) {
base.Timer.Start("fe", "init")
defer hidePanic()
archInit(&thearch)
archInit(&ssagen.Arch)
base.Ctxt = obj.Linknew(thearch.LinkArch)
base.Ctxt = obj.Linknew(ssagen.Arch.LinkArch)
base.Ctxt.DiagFunc = base.Errorf
base.Ctxt.DiagFlush = base.FlushErrors
base.Ctxt.Bso = bufio.NewWriter(os.Stdout)
@@ -151,7 +149,7 @@ func Main(archInit func(*Arch)) {
types.ParseLangFlag()
if base.Flag.SymABIs != "" {
readSymABIs(base.Flag.SymABIs, base.Ctxt.Pkgpath)
ssagen.ReadSymABIs(base.Flag.SymABIs, base.Ctxt.Pkgpath)
}
if base.Compiling(base.NoInstrumentPkgs) {
@@ -159,7 +157,7 @@ func Main(archInit func(*Arch)) {
base.Flag.MSan = false
}
thearch.LinkArch.Init(base.Ctxt)
ssagen.Arch.LinkArch.Init(base.Ctxt)
startProfile()
if base.Flag.Race {
ir.Pkgs.Race = types.NewPkg("runtime/race", "")
@@ -174,7 +172,7 @@ func Main(archInit func(*Arch)) {
dwarf.EnableLogging(base.Debug.DwarfInl != 0)
}
if base.Debug.SoftFloat != 0 {
thearch.SoftFloat = true
ssagen.Arch.SoftFloat = true
}
if base.Flag.JSON != "" { // parse version,destination from json logging optimization.
@@ -182,14 +180,14 @@ func Main(archInit func(*Arch)) {
}
ir.EscFmt = escape.Fmt
ir.IsIntrinsicCall = isIntrinsicCall
inline.SSADumpInline = ssaDumpInline
initSSAEnv()
initSSATables()
ir.IsIntrinsicCall = ssagen.IsIntrinsicCall
inline.SSADumpInline = ssagen.DumpInline
ssagen.InitEnv()
ssagen.InitTables()
types.PtrSize = thearch.LinkArch.PtrSize
types.RegSize = thearch.LinkArch.RegSize
types.MaxWidth = thearch.MAXWIDTH
types.PtrSize = ssagen.Arch.LinkArch.PtrSize
types.RegSize = ssagen.Arch.LinkArch.RegSize
types.MaxWidth = ssagen.Arch.MAXWIDTH
types.TypeLinkSym = func(t *types.Type) *obj.LSym {
return reflectdata.TypeSym(t).Linksym()
}
@@ -210,7 +208,7 @@ func Main(archInit func(*Arch)) {
// Parse input.
base.Timer.Start("fe", "parse")
lines := noder.ParseFiles(flag.Args())
cgoSymABIs()
ssagen.CgoSymABIs()
base.Timer.Stop()
base.Timer.AddEvent(int64(lines), "lines")
recordPackageName()
@@ -257,7 +255,7 @@ func Main(archInit func(*Arch)) {
// We'll do the final check after write barriers are
// inserted.
if base.Flag.CompilingRuntime {
EnableNoWriteBarrierRecCheck()
ssagen.EnableNoWriteBarrierRecCheck()
}
// Transform closure bodies to properly reference captured variables.
@@ -277,7 +275,7 @@ func Main(archInit func(*Arch)) {
// Prepare for SSA compilation.
// This must be before peekitabs, because peekitabs
// can trigger function compilation.
initssaconfig()
ssagen.InitConfig()
// Just before compilation, compile itabs found on
// the right side of OCONVIFACE so that methods
@ -302,7 +300,7 @@ func Main(archInit func(*Arch)) {
if base.Flag.CompilingRuntime {
// Write barriers are now known. Check the call graph.
NoWriteBarrierRecCheck()
ssagen.NoWriteBarrierRecCheck()
}
// Finalize DWARF inline routine DIEs, then explicitly turn off
@ -323,7 +321,7 @@ func Main(archInit func(*Arch)) {
dumpasmhdr()
}
CheckLargeStacks()
ssagen.CheckLargeStacks()
typecheck.CheckFuncStack()
if len(compilequeue) != 0 {
@ -343,34 +341,6 @@ func Main(archInit func(*Arch)) {
}
}
func CheckLargeStacks() {
// Check whether any of the functions we have compiled have gigantic stack frames.
sort.Slice(largeStackFrames, func(i, j int) bool {
return largeStackFrames[i].pos.Before(largeStackFrames[j].pos)
})
for _, large := range largeStackFrames {
if large.callee != 0 {
base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
} else {
base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
}
}
}
func cgoSymABIs() {
// The linker expects an ABI0 wrapper for all cgo-exported
// functions.
for _, prag := range typecheck.Target.CgoPragmas {
switch prag[0] {
case "cgo_export_static", "cgo_export_dynamic":
if symabiRefs == nil {
symabiRefs = make(map[string]obj.ABI)
}
symabiRefs[prag[1]] = obj.ABI0
}
}
}
func writebench(filename string) error {
f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
@ -394,77 +364,6 @@ func writebench(filename string) error {
return f.Close()
}
// symabiDefs and symabiRefs record the defined and referenced ABIs of
// symbols required by non-Go code. These are keyed by link symbol
// name, where the local package prefix is always `"".`
var symabiDefs, symabiRefs map[string]obj.ABI
// readSymABIs reads a symabis file that specifies definitions and
// references of text symbols by ABI.
//
// The symabis format is a set of lines, where each line is a sequence
// of whitespace-separated fields. The first field is a verb and is
// either "def" for defining a symbol ABI or "ref" for referencing a
// symbol using an ABI. For both "def" and "ref", the second field is
// the symbol name and the third field is the ABI name, as one of the
// named cmd/internal/obj.ABI constants.
func readSymABIs(file, myimportpath string) {
data, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("-symabis: %v", err)
}
symabiDefs = make(map[string]obj.ABI)
symabiRefs = make(map[string]obj.ABI)
localPrefix := ""
if myimportpath != "" {
// Symbols in this package may be written either as
// "".X or with the package's import path already in
// the symbol.
localPrefix = objabi.PathToPrefix(myimportpath) + "."
}
for lineNum, line := range strings.Split(string(data), "\n") {
lineNum++ // 1-based
line = strings.TrimSpace(line)
if line == "" || strings.HasPrefix(line, "#") {
continue
}
parts := strings.Fields(line)
switch parts[0] {
case "def", "ref":
// Parse line.
if len(parts) != 3 {
log.Fatalf(`%s:%d: invalid symabi: syntax is "%s sym abi"`, file, lineNum, parts[0])
}
sym, abistr := parts[1], parts[2]
abi, valid := obj.ParseABI(abistr)
if !valid {
log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abistr)
}
// If the symbol is already prefixed with
// myimportpath, rewrite it to start with ""
// so it matches the compiler's internal
// symbol names.
if localPrefix != "" && strings.HasPrefix(sym, localPrefix) {
sym = `"".` + sym[len(localPrefix):]
}
// Record for later.
if parts[0] == "def" {
symabiDefs[sym] = abi
} else {
symabiRefs[sym] = abi
}
default:
log.Fatalf(`%s:%d: invalid symabi type "%s"`, file, lineNum, parts[0])
}
}
}
// recordFlags records the specified command-line flags to be placed
// in the DWARF info.
func recordFlags(flags ...string) {
@ -532,29 +431,6 @@ func recordPackageName() {
s.P = []byte(types.LocalPkg.Name)
}
// useABIWrapGen reports whether the compiler should generate an
// ABI wrapper for the function f.
func useABIWrapGen(f *ir.Func) bool {
if !base.Flag.ABIWrap {
return false
}
// Support limit option for bisecting.
if base.Flag.ABIWrapLimit == 1 {
return false
}
if base.Flag.ABIWrapLimit < 1 {
return true
}
base.Flag.ABIWrapLimit--
if base.Debug.ABIWrap != 0 && base.Flag.ABIWrapLimit == 1 {
fmt.Fprintf(os.Stderr, "=-= limit reached after new wrapper for %s\n",
f.LSym.Name)
}
return true
}
func makePos(b *src.PosBase, line, col uint) src.XPos {
return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col))
}

View file

@ -7,6 +7,7 @@ package gc
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/src"
"cmd/internal/sys"
@ -25,7 +26,7 @@ func instrument(fn *ir.Func) {
lno := base.Pos
base.Pos = src.NoXPos
if thearch.LinkArch.Arch.Family != sys.AMD64 {
if ssagen.Arch.LinkArch.Arch.Family != sys.AMD64 {
fn.Enter.Prepend(mkcall("racefuncenterfp", nil, nil))
fn.Exit.Append(mkcall("racefuncexit", nil, nil))
} else {

View file

@ -8,6 +8,7 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/sys"
@ -15,7 +16,7 @@ import (
)
func cheapComputableIndex(width int64) bool {
switch thearch.LinkArch.Family {
switch ssagen.Arch.LinkArch.Family {
// MIPS does not have R+R addressing
// Arm64 may lack ability to generate this code in our assembler,
// but the architecture supports it.

View file

@ -8,24 +8,11 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
"sync"
)
// largeStack is info about a function whose stack frame is too large (rare).
type largeStack struct {
locals int64
args int64
callee int64
pos src.XPos
}
var (
largeStackFramesMu sync.Mutex // protects largeStackFrames
largeStackFrames []largeStack
)
// backingArrayPtrLen extracts the pointer and length from a slice or string.
@ -91,25 +78,25 @@ func calcHasCall(n ir.Node) bool {
// so we ensure they are evaluated first.
case ir.OADD, ir.OSUB, ir.OMUL:
n := n.(*ir.BinaryExpr)
if thearch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) {
if ssagen.Arch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) {
return true
}
return n.X.HasCall() || n.Y.HasCall()
case ir.ONEG:
n := n.(*ir.UnaryExpr)
if thearch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) {
if ssagen.Arch.SoftFloat && (types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) {
return true
}
return n.X.HasCall()
case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
n := n.(*ir.BinaryExpr)
if thearch.SoftFloat && (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()]) {
if ssagen.Arch.SoftFloat && (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()]) {
return true
}
return n.X.HasCall() || n.Y.HasCall()
case ir.OCONV:
n := n.(*ir.ConvExpr)
if thearch.SoftFloat && ((types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) || (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()])) {
if ssagen.Arch.SoftFloat && ((types.IsFloat[n.Type().Kind()] || types.IsComplex[n.Type().Kind()]) || (types.IsFloat[n.X.Type().Kind()] || types.IsComplex[n.X.Type().Kind()])) {
return true
}
return n.X.HasCall()

View file

@ -9,6 +9,7 @@ import (
"cmd/compile/internal/escape"
"cmd/compile/internal/ir"
"cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/staticdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
@ -977,7 +978,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
n.X = cheapexpr(n.X, init)
// byteindex widens n.Left so that the multiplication doesn't overflow.
index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n.X), ir.NewInt(3))
if thearch.LinkArch.ByteOrder == binary.BigEndian {
if ssagen.Arch.LinkArch.ByteOrder == binary.BigEndian {
index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, ir.NewInt(7))
}
xe := ir.NewIndexExpr(base.Pos, ir.Names.Staticuint64s, index)
@ -1675,7 +1676,7 @@ func walkexpr1(n ir.Node, init *ir.Nodes) ir.Node {
return mkcall("stringtoslicerune", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TSTRING]))
case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT:
if isStaticCompositeLiteral(n) && !canSSAType(n.Type()) {
if isStaticCompositeLiteral(n) && !ssagen.TypeOK(n.Type()) {
n := n.(*ir.CompLitExpr) // not OPTRLIT
// n can be directly represented in the read-only data section.
// Make direct reference to the static data. See issue 12841.
@ -1739,11 +1740,11 @@ func markUsedIfaceMethod(n *ir.CallExpr) {
//
// If no such function is necessary, it returns (Txxx, Txxx).
func rtconvfn(src, dst *types.Type) (param, result types.Kind) {
if thearch.SoftFloat {
if ssagen.Arch.SoftFloat {
return types.Txxx, types.Txxx
}
switch thearch.LinkArch.Family {
switch ssagen.Arch.LinkArch.Family {
case sys.ARM, sys.MIPS:
if src.IsFloat() {
switch dst.Kind() {
@ -3229,7 +3230,7 @@ func walkcompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
unalignedLoad := canMergeLoads()
if unalignedLoad {
// Keep this low enough to generate less code than a function call.
maxcmpsize = 2 * int64(thearch.LinkArch.RegSize)
maxcmpsize = 2 * int64(ssagen.Arch.LinkArch.RegSize)
}
switch t.Kind() {
@ -3469,8 +3470,8 @@ func walkcompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
combine64bit := false
if canCombineLoads {
// Keep this low enough to generate less code than a function call.
maxRewriteLen = 2 * thearch.LinkArch.RegSize
combine64bit = thearch.LinkArch.RegSize >= 8
maxRewriteLen = 2 * ssagen.Arch.LinkArch.RegSize
combine64bit = ssagen.Arch.LinkArch.RegSize >= 8
}
var and ir.Op
@ -3909,12 +3910,12 @@ func wrapCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
// larger, possibly unaligned, load. Note that currently the
// optimizations must be able to handle little endian byte order.
func canMergeLoads() bool {
switch thearch.LinkArch.Family {
switch ssagen.Arch.LinkArch.Family {
case sys.ARM64, sys.AMD64, sys.I386, sys.S390X:
return true
case sys.PPC64:
// Load combining only supported on ppc64le.
return thearch.LinkArch.ByteOrder == binary.LittleEndian
return ssagen.Arch.LinkArch.ByteOrder == binary.LittleEndian
}
return false
}
@ -4032,3 +4033,7 @@ func appendWalkStmt(init *ir.Nodes, stmt ir.Node) {
}
init.Append(n)
}
// The max number of defers in a function using open-coded defers. We enforce this
// limit because the deferBits bitmask is currently a single byte (to minimize code size).
const maxOpenDefers = 8
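For illustration only (not part of this diff): the limit is 8 because each open-coded defer site occupies one bit of the single deferBits byte. A minimal, runnable sketch of that bookkeeping:
package main

import "fmt"

const maxOpenDefers = 8 // mirrors the constant above

func main() {
	var deferBits uint8 // one bit per open-coded defer site
	for i := 0; i < maxOpenDefers; i++ {
		deferBits |= 1 << i // defer site i reached: mark it pending
	}
	fmt.Printf("%08b\n", deferBits) // 11111111: all 8 bits used; a 9th defer would not fit
}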

View file

@ -5,13 +5,13 @@
package mips
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/mips"
"cmd/internal/objabi"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &mips.Linkmips
if objabi.GOARCH == "mipsle" {
arch.LinkArch = &mips.Linkmipsle
@ -22,7 +22,7 @@ func Init(arch *gc.Arch) {
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}

View file

@ -8,10 +8,10 @@ import (
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/mips"
@ -77,7 +77,7 @@ func storeByType(t *types.Type, r int16) obj.As {
panic("bad store type")
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpMIPSMOVWreg:
t := v.Type
@ -123,7 +123,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
r := v.Reg()
p := s.Prog(loadByType(v.Type, r))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = r
if isHILO(r) {
@ -153,7 +153,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type, r))
p.From.Type = obj.TYPE_REG
p.From.Reg = r
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpMIPSADD,
ssa.OpMIPSSUB,
ssa.OpMIPSAND,
@ -288,10 +288,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case *ir.Name:
wantreg = "SP"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case nil:
// No sym, just MOVW $off(SP), R
wantreg = "SP"
@ -312,7 +312,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpMIPSMOVBstore,
@ -325,7 +325,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpMIPSMOVBstorezero,
ssa.OpMIPSMOVHstorezero,
ssa.OpMIPSMOVWstorezero:
@ -334,7 +334,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpMIPSMOVBreg,
ssa.OpMIPSMOVBUreg,
ssa.OpMIPSMOVHreg,
@ -492,13 +492,13 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(8) // space used in callee args area by assembly stubs
case ssa.OpMIPSLoweredPanicExtendA, ssa.OpMIPSLoweredPanicExtendB, ssa.OpMIPSLoweredPanicExtendC:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
s.UseArgs(12) // space used in callee args area by assembly stubs
case ssa.OpMIPSLoweredAtomicLoad8,
ssa.OpMIPSLoweredAtomicLoad32:
@ -762,7 +762,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(mips.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP
if logopt.Enabled() {
@ -793,7 +793,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpMIPSLoweredGetClosurePtr:
// Closure pointer is R22 (mips.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpMIPSLoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(mips.AMOVW)
@ -826,13 +826,13 @@ var blockJump = map[ssa.BlockKind]struct {
ssa.BlockMIPSFPF: {mips.ABFPF, mips.ABFPT},
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
// defer returns in R1:
@ -843,11 +843,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.From.Reg = mips.REGZERO
p.Reg = mips.REG_R1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:

View file

@ -5,13 +5,13 @@
package mips64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/mips"
"cmd/internal/objabi"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &mips.Linkmips64
if objabi.GOARCH == "mips64le" {
arch.LinkArch = &mips.Linkmips64le
@ -23,7 +23,7 @@ func Init(arch *gc.Arch) {
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = func(s *gc.SSAGenState, b *ssa.Block) {}
arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}

View file

@ -8,10 +8,10 @@ import (
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/mips"
@ -85,7 +85,7 @@ func storeByType(t *types.Type, r int16) obj.As {
panic("bad store type")
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy, ssa.OpMIPS64MOVVreg:
if v.Type.IsMemory() {
@ -126,7 +126,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
r := v.Reg()
p := s.Prog(loadByType(v.Type, r))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = r
if isHILO(r) {
@ -156,7 +156,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type, r))
p.From.Type = obj.TYPE_REG
p.From.Reg = r
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpMIPS64ADDV,
ssa.OpMIPS64SUBV,
ssa.OpMIPS64AND,
@ -262,10 +262,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case *ir.Name:
wantreg = "SP"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case nil:
// No sym, just MOVV $off(SP), R
wantreg = "SP"
@ -288,7 +288,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpMIPS64MOVBstore,
@ -302,7 +302,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpMIPS64MOVBstorezero,
ssa.OpMIPS64MOVHstorezero,
ssa.OpMIPS64MOVWstorezero,
@ -312,7 +312,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = mips.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpMIPS64MOVBreg,
ssa.OpMIPS64MOVBUreg,
ssa.OpMIPS64MOVHreg,
@ -502,7 +502,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpMIPS64LoweredAtomicLoad8, ssa.OpMIPS64LoweredAtomicLoad32, ssa.OpMIPS64LoweredAtomicLoad64:
as := mips.AMOVV
@ -720,7 +720,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(mips.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = mips.REGTMP
if logopt.Enabled() {
@ -754,7 +754,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p2.To.SetTarget(p4)
case ssa.OpMIPS64LoweredGetClosurePtr:
// Closure pointer is R22 (mips.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpMIPS64LoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
p := s.Prog(mips.AMOVV)
@ -787,13 +787,13 @@ var blockJump = map[ssa.BlockKind]struct {
ssa.BlockMIPS64FPF: {mips.ABFPF, mips.ABFPT},
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
// defer returns in R1:
@ -804,11 +804,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.From.Reg = mips.REGZERO
p.Reg = mips.REG_R1
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:

View file

@ -5,12 +5,12 @@
package ppc64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/ppc64"
"cmd/internal/objabi"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &ppc64.Linkppc64
if objabi.GOARCH == "ppc64le" {
arch.LinkArch = &ppc64.Linkppc64le

View file

@ -6,10 +6,10 @@ package ppc64
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
@ -19,7 +19,7 @@ import (
)
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
// flive := b.FlagsLiveAtEnd
// if b.Control != nil && b.Control.Type.IsFlags() {
// flive = true
@ -101,7 +101,7 @@ func storeByType(t *types.Type) obj.As {
panic("bad store type")
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpCopy:
t := v.Type
@ -469,7 +469,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpPPC64LoweredGetClosurePtr:
// Closure pointer is R11 (already)
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpPPC64LoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
@ -491,7 +491,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpLoadReg:
loadOp := loadByType(v.Type)
p := s.Prog(loadOp)
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@ -500,7 +500,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeOp)
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpPPC64DIVD:
// For now,
@ -758,7 +758,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
}
@ -819,7 +819,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
// Load go.string using 0 offset
@ -837,7 +837,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@ -871,7 +871,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = ppc64.REGZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore, ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
p := s.Prog(v.Op.Asm())
@ -879,7 +879,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpPPC64MOVDstoreidx, ssa.OpPPC64MOVWstoreidx, ssa.OpPPC64MOVHstoreidx, ssa.OpPPC64MOVBstoreidx,
ssa.OpPPC64FMOVDstoreidx, ssa.OpPPC64FMOVSstoreidx, ssa.OpPPC64MOVDBRstoreidx, ssa.OpPPC64MOVWBRstoreidx,
@ -1809,7 +1809,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpPPC64LoweredNilCheck:
@ -1847,7 +1847,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(ppc64.AMOVBZ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = ppc64.REGTMP
}
@ -1893,7 +1893,7 @@ var blockJump = [...]struct {
ssa.BlockPPC64FGT: {ppc64.ABGT, ppc64.ABLE, false, false},
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockDefer:
// defer returns in R3:
@ -1907,18 +1907,18 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p = s.Prog(ppc64.ABNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:

View file

@ -5,11 +5,11 @@
package riscv64
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/riscv"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &riscv.LinkRISCV64
arch.REGSP = riscv.REG_SP

View file

@ -6,9 +6,9 @@ package riscv64
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/riscv"
@ -180,9 +180,9 @@ func largestMove(alignment int64) (obj.As, int64) {
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
// RISC-V has no flags, so this is a no-op.
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {}
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
s.SetPos(v.Pos)
switch v.Op {
@ -191,7 +191,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpArg:
// input args need no code
case ssa.OpPhi:
gc.CheckLoweredPhi(v)
ssagen.CheckLoweredPhi(v)
case ssa.OpCopy, ssa.OpRISCV64MOVconvert, ssa.OpRISCV64MOVDreg:
if v.Type.IsMemory() {
return
@ -221,7 +221,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
@ -232,7 +232,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
// nothing to do
case ssa.OpRISCV64MOVBreg, ssa.OpRISCV64MOVHreg, ssa.OpRISCV64MOVWreg,
@ -323,10 +323,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
v.Fatalf("aux is of unknown type %T", v.Aux)
case *obj.LSym:
wantreg = "SB"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case *ir.Name:
wantreg = "SP"
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case nil:
// No sym, just MOVW $off(SP), R
wantreg = "SP"
@ -342,7 +342,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpRISCV64MOVBstore, ssa.OpRISCV64MOVHstore, ssa.OpRISCV64MOVWstore, ssa.OpRISCV64MOVDstore,
@ -352,14 +352,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
p.From.Reg = riscv.REG_ZERO
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
@ -377,7 +377,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpRISCV64LoweredAtomicLoad8:
@ -585,7 +585,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(riscv.AMOVB)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = riscv.REG_ZERO
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers
@ -594,7 +594,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
case ssa.OpRISCV64LoweredGetClosurePtr:
// Closure pointer is S4 (riscv.REG_CTXT).
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpRISCV64LoweredGetCallerSP:
// caller's SP is FixedFrameSize below the address of the first arg
@ -644,7 +644,7 @@ var blockBranch = [...]obj.As{
ssa.BlockRISCV64BNEZ: riscv.ABNEZ,
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
s.SetPos(b.Pos)
switch b.Kind {
@ -657,17 +657,17 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.From.Type = obj.TYPE_REG
p.From.Reg = riscv.REG_ZERO
p.Reg = riscv.REG_A0
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:

View file

@ -5,11 +5,11 @@
package s390x
import (
"cmd/compile/internal/gc"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/s390x"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &s390x.Links390x
arch.REGSP = s390x.REGSP
arch.MAXWIDTH = 1 << 50

View file

@ -8,16 +8,16 @@ import (
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/s390x"
)
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
flive := b.FlagsLiveAtEnd
for _, c := range b.ControlValues() {
flive = c.Type.IsFlags() || flive
@ -135,7 +135,7 @@ func moveByType(t *types.Type) obj.As {
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
@ -148,7 +148,7 @@ func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
// dest := src(From) op off
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregregimm(s *gc.SSAGenState, op obj.As, dest, src int16, off int64) *obj.Prog {
func opregregimm(s *ssagen.State, op obj.As, dest, src int16, off int64) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_CONST
p.From.Offset = off
@ -158,7 +158,7 @@ func opregregimm(s *gc.SSAGenState, op obj.As, dest, src int16, off int64) *obj.
return p
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpS390XSLD, ssa.OpS390XSLW,
ssa.OpS390XSRD, ssa.OpS390XSRW,
@ -395,14 +395,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_ADDR
p.From.Reg = r
p.From.Index = i
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpS390XMOVDaddr:
p := s.Prog(s390x.AMOVD)
p.From.Type = obj.TYPE_ADDR
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU:
@ -448,7 +448,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = r
case ssa.OpS390XMOVDload,
@ -459,7 +459,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx,
@ -476,7 +476,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = r
p.From.Scale = 1
p.From.Index = i
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
@ -487,7 +487,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpS390XMOVBstoreidx, ssa.OpS390XMOVHstoreidx, ssa.OpS390XMOVWstoreidx, ssa.OpS390XMOVDstoreidx,
ssa.OpS390XMOVHBRstoreidx, ssa.OpS390XMOVWBRstoreidx, ssa.OpS390XMOVDBRstoreidx,
ssa.OpS390XFMOVSstoreidx, ssa.OpS390XFMOVDstoreidx:
@ -503,7 +503,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = r
p.To.Scale = 1
p.To.Index = i
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
@ -511,7 +511,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, sc.Off())
ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg,
ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg,
ssa.OpS390XLDGR, ssa.OpS390XLGDR,
@ -530,7 +530,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, sc.Off())
ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.OpCopy:
if v.Type.IsMemory() {
return
@ -546,7 +546,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpStoreReg:
@ -557,10 +557,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.OpS390XLoweredGetClosurePtr:
// Closure pointer is R12 (already)
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.OpS390XLoweredRound32F, ssa.OpS390XLoweredRound64F:
// input is already rounded
case ssa.OpS390XLoweredGetG:
@ -593,7 +593,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
case ssa.OpS390XFLOGR, ssa.OpS390XPOPCNT,
ssa.OpS390XNEG, ssa.OpS390XNEGW,
@ -637,7 +637,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(s390x.AMOVBZ)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = s390x.REGTMP
if logopt.Enabled() {
@ -672,7 +672,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.Reg = v.Args[len(v.Args)-2].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpS390XLoweredMove:
// Inputs must be valid pointers to memory,
// so adjust arg0 and arg1 as part of the expansion.
@ -764,7 +764,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg0()
case ssa.OpS390XMOVBatomicstore, ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore:
@ -773,7 +773,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpS390XLAN, ssa.OpS390XLAO:
// LA(N|O) Ry, TMP, 0(Rx)
op := s.Prog(v.Op.Asm())
@ -808,7 +808,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.OpS390XLoweredAtomicCas32, ssa.OpS390XLoweredAtomicCas64:
// Convert the flags output of CS{,G} into a bool.
// CS{,G} arg1, arg2, arg0
@ -824,7 +824,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
cs.Reg = v.Args[2].Reg() // new
cs.To.Type = obj.TYPE_MEM
cs.To.Reg = v.Args[0].Reg()
gc.AddAux(&cs.To, v)
ssagen.AddAux(&cs.To, v)
// MOVD $0, ret
movd := s.Prog(s390x.AMOVD)
@ -859,7 +859,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
load.From.Reg = v.Args[0].Reg()
load.To.Type = obj.TYPE_REG
load.To.Reg = v.Reg0()
gc.AddAux(&load.From, v)
ssagen.AddAux(&load.From, v)
// CS{,G} ret, arg1, arg0
cs := s.Prog(v.Op.Asm())
@ -868,7 +868,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
cs.Reg = v.Args[1].Reg() // new
cs.To.Type = obj.TYPE_MEM
cs.To.Reg = v.Args[0].Reg()
gc.AddAux(&cs.To, v)
ssagen.AddAux(&cs.To, v)
// BNE cs
bne := s.Prog(s390x.ABNE)
@ -908,14 +908,14 @@ func blockAsm(b *ssa.Block) obj.As {
panic("unreachable")
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
// Handle generic blocks first.
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(s390x.ABR)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
return
case ssa.BlockDefer:

View file

@ -1,36 +1,18 @@
// Derived from Inferno utils/6c/txt.c
// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
//go:generate go run mkbuiltin.go
package ssagen
import (
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"cmd/compile/internal/base"
"cmd/compile/internal/escape"
"cmd/compile/internal/ir"
@ -38,10 +20,211 @@ import (
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"fmt"
"os"
)
// useABIWrapGen reports whether the compiler should generate an
// ABI wrapper for the function f.
func useABIWrapGen(f *ir.Func) bool {
if !base.Flag.ABIWrap {
return false
}
// Support limit option for bisecting.
if base.Flag.ABIWrapLimit == 1 {
return false
}
if base.Flag.ABIWrapLimit < 1 {
return true
}
base.Flag.ABIWrapLimit--
if base.Debug.ABIWrap != 0 && base.Flag.ABIWrapLimit == 1 {
fmt.Fprintf(os.Stderr, "=-= limit reached after new wrapper for %s\n",
f.LSym.Name)
}
return true
}
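A self-contained sketch of the countdown semantics above, with a hypothetical starting limit (this mirrors the flag logic; it is not the compiler's code): with a limit of 3, the first two wrapper requests are allowed, after which generation stops.
package main

import "fmt"

func main() {
	limit := 3 // stands in for base.Flag.ABIWrapLimit
	allow := func() bool {
		if limit == 1 {
			return false // limit exhausted
		}
		if limit < 1 {
			return true // no limit configured
		}
		limit--
		return true
	}
	for i := 0; i < 4; i++ {
		fmt.Println(i, allow()) // 0 true, 1 true, 2 false, 3 false
	}
}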
// symabiDefs and symabiRefs record the defined and referenced ABIs of
// symbols required by non-Go code. These are keyed by link symbol
// name, where the local package prefix is always `"".`
var symabiDefs, symabiRefs map[string]obj.ABI
func CgoSymABIs() {
// The linker expects an ABI0 wrapper for all cgo-exported
// functions.
for _, prag := range typecheck.Target.CgoPragmas {
switch prag[0] {
case "cgo_export_static", "cgo_export_dynamic":
if symabiRefs == nil {
symabiRefs = make(map[string]obj.ABI)
}
symabiRefs[prag[1]] = obj.ABI0
}
}
}
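A hedged illustration (the directive form is standard cgo plumbing; the symbol name is made up): a pragma line such as

	//go:cgo_export_dynamic MyFunc

reaches this loop as the fields ["cgo_export_dynamic", "MyFunc"], so MyFunc is recorded in symabiRefs as referenced at ABI0 and later receives an ABI0 wrapper.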
// ReadSymABIs reads a symabis file that specifies definitions and
// references of text symbols by ABI.
//
// The symabis format is a set of lines, where each line is a sequence
// of whitespace-separated fields. The first field is a verb and is
// either "def" for defining a symbol ABI or "ref" for referencing a
// symbol using an ABI. For both "def" and "ref", the second field is
// the symbol name and the third field is the ABI name, as one of the
// named cmd/internal/obj.ABI constants.
func ReadSymABIs(file, myimportpath string) {
data, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("-symabis: %v", err)
}
symabiDefs = make(map[string]obj.ABI)
symabiRefs = make(map[string]obj.ABI)
localPrefix := ""
if myimportpath != "" {
// Symbols in this package may be written either as
// "".X or with the package's import path already in
// the symbol.
localPrefix = objabi.PathToPrefix(myimportpath) + "."
}
for lineNum, line := range strings.Split(string(data), "\n") {
lineNum++ // 1-based
line = strings.TrimSpace(line)
if line == "" || strings.HasPrefix(line, "#") {
continue
}
parts := strings.Fields(line)
switch parts[0] {
case "def", "ref":
// Parse line.
if len(parts) != 3 {
log.Fatalf(`%s:%d: invalid symabi: syntax is "%s sym abi"`, file, lineNum, parts[0])
}
sym, abistr := parts[1], parts[2]
abi, valid := obj.ParseABI(abistr)
if !valid {
log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abistr)
}
// If the symbol is already prefixed with
// myimportpath, rewrite it to start with ""
// so it matches the compiler's internal
// symbol names.
if localPrefix != "" && strings.HasPrefix(sym, localPrefix) {
sym = `"".` + sym[len(localPrefix):]
}
// Record for later.
if parts[0] == "def" {
symabiDefs[sym] = abi
} else {
symabiRefs[sym] = abi
}
default:
log.Fatalf(`%s:%d: invalid symabi type "%s"`, file, lineNum, parts[0])
}
}
}
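For illustration (hypothetical symbols): a symabis file accepted by ReadSymABIs might read as follows. Blank lines and # comments are skipped, and the ABI names are those obj.ParseABI accepts, such as ABI0 and ABIInternal.

	# assembly definitions and references
	def "".add ABI0
	ref runtime.memmove ABI0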
// InitLSym defines f's obj.LSym and initializes it based on the
// properties of f. This includes setting the symbol flags and ABI and
// creating and initializing related DWARF symbols.
//
// InitLSym must be called exactly once per function and must be
// called for both functions with bodies and functions without bodies.
// For body-less functions, we only create the LSym; for functions
// with bodies we call a helper to set up and populate the LSym.
func InitLSym(f *ir.Func, hasBody bool) {
// FIXME: for new-style ABI wrappers, we set up the lsym at the
// point the wrapper is created.
if f.LSym != nil && base.Flag.ABIWrap {
return
}
selectLSym(f, hasBody)
if hasBody {
setupTextLSym(f, 0)
}
}
// selectLSym sets up the LSym for a given function, and
// makes calls to helpers to create ABI wrappers if needed.
func selectLSym(f *ir.Func, hasBody bool) {
if f.LSym != nil {
base.Fatalf("Func.initLSym called twice")
}
if nam := f.Nname; !ir.IsBlank(nam) {
var wrapperABI obj.ABI
needABIWrapper := false
defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()]
if hasDefABI && defABI == obj.ABI0 {
// Symbol is defined as ABI0. Create an
// Internal -> ABI0 wrapper.
f.LSym = nam.Sym().LinksymABI0()
needABIWrapper, wrapperABI = true, obj.ABIInternal
} else {
f.LSym = nam.Sym().Linksym()
// No ABI override. Check that the symbol is
// using the expected ABI.
want := obj.ABIInternal
if f.LSym.ABI() != want {
base.Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.LSym.Name, f.LSym.ABI(), want)
}
}
if f.Pragma&ir.Systemstack != 0 {
f.LSym.Set(obj.AttrCFunc, true)
}
isLinknameExported := nam.Sym().Linkname != "" && (hasBody || hasDefABI)
if abi, ok := symabiRefs[f.LSym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
// Either 1) this symbol is definitely
// referenced as ABI0 from this package; or 2)
// this symbol is defined in this package but
// given a linkname, indicating that it may be
// referenced from another package. Create an
// ABI0 -> Internal wrapper so it can be
// called as ABI0. In case 2, it's important
// that we know it's defined in this package
// since other packages may "pull" symbols
// using linkname and we don't want to create
// duplicate ABI wrappers.
if f.LSym.ABI() != obj.ABI0 {
needABIWrapper, wrapperABI = true, obj.ABI0
}
}
if needABIWrapper {
if !useABIWrapGen(f) {
// Fallback: use alias instead. FIXME.
// These LSyms have the same name as the
// native function, so we create them directly
// rather than looking them up. The uniqueness
// of f.lsym ensures uniqueness of asym.
asym := &obj.LSym{
Name: f.LSym.Name,
Type: objabi.SABIALIAS,
R: []obj.Reloc{{Sym: f.LSym}}, // 0 size, so "informational"
}
asym.SetABI(wrapperABI)
asym.Set(obj.AttrDuplicateOK, true)
base.Ctxt.ABIAliases = append(base.Ctxt.ABIAliases, asym)
} else {
if base.Debug.ABIWrap != 0 {
fmt.Fprintf(os.Stderr, "=-= %v to %v wrapper for %s.%s\n",
wrapperABI, 1-wrapperABI, types.LocalPkg.Path, f.LSym.Name)
}
makeABIWrapper(f, wrapperABI)
}
}
}
}
// makeABIWrapper creates a new function that wraps a cross-ABI call
// to "f". The wrapper is marked as an ABIWRAPPER.
func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
@ -152,101 +335,6 @@ func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
ir.CurFunc = savedcurfn
}
// initLSym defines f's obj.LSym and initializes it based on the
// properties of f. This includes setting the symbol flags and ABI and
// creating and initializing related DWARF symbols.
//
// initLSym must be called exactly once per function and must be
// called for both functions with bodies and functions without bodies.
// For body-less functions, we only create the LSym; for functions
// with bodies we call a helper to set up and populate the LSym.
func initLSym(f *ir.Func, hasBody bool) {
// FIXME: for new-style ABI wrappers, we set up the lsym at the
// point the wrapper is created.
if f.LSym != nil && base.Flag.ABIWrap {
return
}
selectLSym(f, hasBody)
if hasBody {
setupTextLSym(f, 0)
}
}
// selectLSym sets up the LSym for a given function, and
// makes calls to helpers to create ABI wrappers if needed.
func selectLSym(f *ir.Func, hasBody bool) {
if f.LSym != nil {
base.Fatalf("Func.initLSym called twice")
}
if nam := f.Nname; !ir.IsBlank(nam) {
var wrapperABI obj.ABI
needABIWrapper := false
defABI, hasDefABI := symabiDefs[nam.Sym().LinksymName()]
if hasDefABI && defABI == obj.ABI0 {
// Symbol is defined as ABI0. Create an
// Internal -> ABI0 wrapper.
f.LSym = nam.Sym().LinksymABI0()
needABIWrapper, wrapperABI = true, obj.ABIInternal
} else {
f.LSym = nam.Sym().Linksym()
// No ABI override. Check that the symbol is
// using the expected ABI.
want := obj.ABIInternal
if f.LSym.ABI() != want {
base.Fatalf("function symbol %s has the wrong ABI %v, expected %v", f.LSym.Name, f.LSym.ABI(), want)
}
}
if f.Pragma&ir.Systemstack != 0 {
f.LSym.Set(obj.AttrCFunc, true)
}
isLinknameExported := nam.Sym().Linkname != "" && (hasBody || hasDefABI)
if abi, ok := symabiRefs[f.LSym.Name]; (ok && abi == obj.ABI0) || isLinknameExported {
// Either 1) this symbol is definitely
// referenced as ABI0 from this package; or 2)
// this symbol is defined in this package but
// given a linkname, indicating that it may be
// referenced from another package. Create an
// ABI0 -> Internal wrapper so it can be
// called as ABI0. In case 2, it's important
// that we know it's defined in this package
// since other packages may "pull" symbols
// using linkname and we don't want to create
// duplicate ABI wrappers.
if f.LSym.ABI() != obj.ABI0 {
needABIWrapper, wrapperABI = true, obj.ABI0
}
}
if needABIWrapper {
if !useABIWrapGen(f) {
// Fallback: use alias instead. FIXME.
// These LSyms have the same name as the
// native function, so we create them directly
// rather than looking them up. The uniqueness
// of f.lsym ensures uniqueness of asym.
asym := &obj.LSym{
Name: f.LSym.Name,
Type: objabi.SABIALIAS,
R: []obj.Reloc{{Sym: f.LSym}}, // 0 size, so "informational"
}
asym.SetABI(wrapperABI)
asym.Set(obj.AttrDuplicateOK, true)
base.Ctxt.ABIAliases = append(base.Ctxt.ABIAliases, asym)
} else {
if base.Debug.ABIWrap != 0 {
fmt.Fprintf(os.Stderr, "=-= %v to %v wrapper for %s.%s\n",
wrapperABI, 1-wrapperABI, types.LocalPkg.Path, f.LSym.Name)
}
makeABIWrapper(f, wrapperABI)
}
}
}
}
// setupTextLSym initializes the LSym for a text symbol that has a body.
func setupTextLSym(f *ir.Func, flag int) {
if f.Dupok() {

View file

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package ssagen
import (
"cmd/compile/internal/objw"
@ -10,11 +10,11 @@ import (
"cmd/internal/obj"
)
var pragcgobuf [][]string
var Arch ArchInfo
// interface to back end
type Arch struct {
type ArchInfo struct {
LinkArch *obj.LinkArch
REGSP int
@ -31,22 +31,12 @@ type Arch struct {
Ginsnopdefer func(*objw.Progs) *obj.Prog // special ginsnop for deferreturn
// SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
SSAMarkMoves func(*SSAGenState, *ssa.Block)
SSAMarkMoves func(*State, *ssa.Block)
// SSAGenValue emits Prog(s) for the Value.
SSAGenValue func(*SSAGenState, *ssa.Value)
SSAGenValue func(*State, *ssa.Value)
// SSAGenBlock emits end-of-block Progs. SSAGenValue should be called
// for all values in the block before SSAGenBlock.
SSAGenBlock func(s *SSAGenState, b, next *ssa.Block)
SSAGenBlock func(s *State, b, next *ssa.Block)
}
var thearch Arch
var (
BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
)
// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
var GCWriteBarrierReg map[int16]*obj.LSym

View file

@ -2,17 +2,18 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package ssagen
import (
"bytes"
"fmt"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/src"
"fmt"
)
func EnableNoWriteBarrierRecCheck() {

View file

@ -0,0 +1,279 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssagen
import (
"internal/race"
"math/rand"
"sort"
"sync"
"time"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
"cmd/internal/sys"
)
// cmpstackvarlt reports whether the stack variable a sorts before b.
//
// The ordering puts autos after everything else; within autos,
// unused after used; and within used, variables with pointers first,
// then variables needing zeroing, then decreasing size.
// Because autos are laid out at decreasing addresses on the stack,
// "pointers first, zeroed first, decreasing size" means that, in
// memory, the pointer-containing variables that need zeroing sit at
// the top of the stack, with sizes increasing toward the top.
// Non-autos sort by frame offset.
func cmpstackvarlt(a, b *ir.Name) bool {
if (a.Class_ == ir.PAUTO) != (b.Class_ == ir.PAUTO) {
return b.Class_ == ir.PAUTO
}
if a.Class_ != ir.PAUTO {
return a.FrameOffset() < b.FrameOffset()
}
if a.Used() != b.Used() {
return a.Used()
}
ap := a.Type().HasPointers()
bp := b.Type().HasPointers()
if ap != bp {
return ap
}
ap = a.Needzero()
bp = b.Needzero()
if ap != bp {
return ap
}
if a.Type().Width != b.Type().Width {
return a.Type().Width > b.Type().Width
}
return a.Sym().Name < b.Sym().Name
}
// byStackVar implements sort.Interface for []*ir.Name using cmpstackvarlt.
type byStackVar []*ir.Name
func (s byStackVar) Len() int { return len(s) }
func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
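A minimal, runnable sketch of the same tiered comparison, using a plain struct in place of *ir.Name (all names and values hypothetical):
package main

import (
	"fmt"
	"sort"
)

// fakeVar models only the fields cmpstackvarlt consults.
type fakeVar struct {
	name     string
	auto     bool // PAUTO?
	offset   int64
	used     bool
	hasPtr   bool
	needZero bool
	width    int64
}

func less(a, b fakeVar) bool {
	if a.auto != b.auto {
		return b.auto // non-autos sort before autos
	}
	if !a.auto {
		return a.offset < b.offset // non-autos by frame offset
	}
	if a.used != b.used {
		return a.used // used before unused
	}
	if a.hasPtr != b.hasPtr {
		return a.hasPtr // pointers first
	}
	if a.needZero != b.needZero {
		return a.needZero // needs-zeroing first
	}
	if a.width != b.width {
		return a.width > b.width // larger first
	}
	return a.name < b.name // name as final tiebreaker
}

func main() {
	vars := []fakeVar{
		{name: "x", auto: true, used: true, width: 8},
		{name: "p", auto: true, used: true, hasPtr: true, needZero: true, width: 8},
		{name: "arg", offset: 16},
		{name: "dead", auto: true, width: 4},
	}
	sort.Slice(vars, func(i, j int) bool { return less(vars[i], vars[j]) })
	fmt.Println(vars) // arg first, then p, then x, with unused dead last
}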
func (s *ssafn) AllocFrame(f *ssa.Func) {
s.stksize = 0
s.stkptrsize = 0
fn := s.curfn
// Mark the PAUTO's unused.
for _, ln := range fn.Dcl {
if ln.Class_ == ir.PAUTO {
ln.SetUsed(false)
}
}
for _, l := range f.RegAlloc {
if ls, ok := l.(ssa.LocalSlot); ok {
ls.N.Name().SetUsed(true)
}
}
scratchUsed := false
for _, b := range f.Blocks {
for _, v := range b.Values {
if n, ok := v.Aux.(*ir.Name); ok {
switch n.Class_ {
case ir.PPARAM, ir.PPARAMOUT:
// Don't modify ir.RegFP; it is a global.
if n != ir.RegFP {
n.Name().SetUsed(true)
}
case ir.PAUTO:
n.Name().SetUsed(true)
}
}
if !scratchUsed {
scratchUsed = v.Op.UsesScratch()
}
}
}
if f.Config.NeedsFpScratch && scratchUsed {
s.scratchFpMem = typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT64])
}
sort.Sort(byStackVar(fn.Dcl))
// Reassign stack offsets of the locals that are used.
lastHasPtr := false
for i, n := range fn.Dcl {
if n.Op() != ir.ONAME || n.Class_ != ir.PAUTO {
continue
}
if !n.Used() {
fn.Dcl = fn.Dcl[:i]
break
}
types.CalcSize(n.Type())
w := n.Type().Width
if w >= types.MaxWidth || w < 0 {
base.Fatalf("bad width")
}
if w == 0 && lastHasPtr {
// Pad between a pointer-containing object and a zero-sized object.
// This prevents a pointer to the zero-sized object from being interpreted
// as a pointer to the pointer-containing object (and causing it
// to be scanned when it shouldn't be). See issue 24993.
w = 1
}
s.stksize += w
s.stksize = types.Rnd(s.stksize, int64(n.Type().Align))
if n.Type().HasPointers() {
s.stkptrsize = s.stksize
lastHasPtr = true
} else {
lastHasPtr = false
}
if Arch.LinkArch.InFamily(sys.MIPS, sys.MIPS64, sys.ARM, sys.ARM64, sys.PPC64, sys.S390X) {
s.stksize = types.Rnd(s.stksize, int64(types.PtrSize))
}
n.SetFrameOffset(-s.stksize)
}
s.stksize = types.Rnd(s.stksize, int64(types.RegSize))
s.stkptrsize = types.Rnd(s.stkptrsize, int64(types.RegSize))
}
const maxStackSize = 1 << 30
// Compile builds an SSA backend function,
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
func Compile(fn *ir.Func, worker int) {
f := buildssa(fn, worker)
// Note: check arg size to fix issue 25507.
if f.Frontend().(*ssafn).stksize >= maxStackSize || fn.Type().ArgWidth() >= maxStackSize {
largeStackFramesMu.Lock()
largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: fn.Type().ArgWidth(), pos: fn.Pos()})
largeStackFramesMu.Unlock()
return
}
pp := objw.NewProgs(fn, worker)
defer pp.Free()
genssa(f, pp)
// Check frame size again.
// The check above included only the space needed for local variables.
// After genssa, the space needed includes local variables and the callee arg region.
// We must do this check prior to calling pp.Flush.
// If there are any oversized stack frames,
// the assembler may emit inscrutable complaints about invalid instructions.
if pp.Text.To.Offset >= maxStackSize {
largeStackFramesMu.Lock()
locals := f.Frontend().(*ssafn).stksize
largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: fn.Type().ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()})
largeStackFramesMu.Unlock()
return
}
pp.Flush() // assemble, fill in boilerplate, etc.
// fieldtrack must be called after pp.Flush. See issue 20014.
fieldtrack(pp.Text.From.Sym, fn.FieldTrack)
}
func init() {
if race.Enabled {
// Seed the randomized compilation order used under the race detector
// (see compileFunctions), so different runs exercise different orders.
rand.Seed(time.Now().UnixNano())
}
}
// StackOffset returns the stack location of a LocalSlot relative to the
// stack pointer, suitable for use in a DWARF location entry. This has nothing
// to do with its offset in the user variable.
func StackOffset(slot ssa.LocalSlot) int32 {
n := slot.N
var off int64
switch n.Class_ {
case ir.PAUTO:
off = n.FrameOffset()
if base.Ctxt.FixedFrameSize() == 0 {
off -= int64(types.PtrSize)
}
if objabi.Framepointer_enabled || objabi.GOARCH == "arm64" {
// There is a word of space for FP on ARM64 even if the frame pointer is disabled.
off -= int64(types.PtrSize)
}
case ir.PPARAM, ir.PPARAMOUT:
off = n.FrameOffset() + base.Ctxt.FixedFrameSize()
}
return int32(off + slot.Off)
}
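As a worked example under assumed conditions (64-bit target, FixedFrameSize() == 0, frame pointer enabled), the two PAUTO adjustments stack as follows:

package main

import "fmt"

func main() {
	const ptrSize = 8 // assumed 64-bit target
	off := int64(-16) // hypothetical n.FrameOffset() of a PAUTO
	off -= ptrSize    // the FixedFrameSize() == 0 branch above
	off -= ptrSize    // the frame-pointer / arm64 branch above
	fmt.Println(off)  // -32: SP-relative location for the DWARF entry
}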
// fieldtrack adds R_USEFIELD relocations to fnsym to record any
// struct fields that it used.
func fieldtrack(fnsym *obj.LSym, tracked map[*types.Sym]struct{}) {
if fnsym == nil {
return
}
if objabi.Fieldtrack_enabled == 0 || len(tracked) == 0 {
return
}
trackSyms := make([]*types.Sym, 0, len(tracked))
for sym := range tracked {
trackSyms = append(trackSyms, sym)
}
sort.Sort(symByName(trackSyms))
for _, sym := range trackSyms {
r := obj.Addrel(fnsym)
r.Sym = sym.Linksym()
r.Type = objabi.R_USEFIELD
}
}
type symByName []*types.Sym
func (a symByName) Len() int { return len(a) }
func (a symByName) Less(i, j int) bool { return a[i].Name < a[j].Name }
func (a symByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// largeStack is info about a function whose stack frame is too large (rare).
type largeStack struct {
locals int64
args int64
callee int64
pos src.XPos
}
var (
largeStackFramesMu sync.Mutex // protects largeStackFrames
largeStackFrames []largeStack
)
func CheckLargeStacks() {
// Check whether any of the functions we have compiled have gigantic stack frames.
sort.Slice(largeStackFrames, func(i, j int) bool {
return largeStackFrames[i].pos.Before(largeStackFrames[j].pos)
})
for _, large := range largeStackFrames {
if large.callee != 0 {
base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
} else {
base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
}
}
}
View file
@ -2,16 +2,17 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package ssagen
import (
"reflect"
"sort"
"testing"
"cmd/compile/internal/ir"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/src"
"reflect"
"sort"
"testing"
)
func typeWithoutPointers() *types.Type {
View file
@ -2,15 +2,16 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package ssagen
import (
"container/heap"
"fmt"
"cmd/compile/internal/ir"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/src"
"container/heap"
"fmt"
)
// This file contains the algorithm to place phi nodes in a function.
@ -23,13 +24,13 @@ const smallBlocks = 500
const debugPhi = false
// FwdRefAux wraps an arbitrary ir.Node as an ssa.Aux for use with OpFwdref.
type FwdRefAux struct {
// fwdRefAux wraps an arbitrary ir.Node as an ssa.Aux for use with OpFwdref.
type fwdRefAux struct {
_ [0]func() // ensure ir.Node isn't compared for equality
N ir.Node
}
func (FwdRefAux) CanBeAnSSAAux() {}
func (fwdRefAux) CanBeAnSSAAux() {}
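The _ [0]func() field is a zero-width trick: arrays of func are not comparable, so the struct cannot be compared with ==, yet the field occupies no space. A minimal sketch of the trick (names hypothetical):

package main

import "fmt"

// incomparable demonstrates the zero-width non-comparability trick.
type incomparable struct {
	_ [0]func() // zero-sized, but func types are not comparable
	n int
}

func main() {
	a := incomparable{n: 1}
	b := incomparable{n: 1}
	fmt.Println(a.n == b.n) // comparing fields is fine: true
	// fmt.Println(a == b)  // compile error: struct containing [0]func() cannot be compared
}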
// insertPhis finds all the places in the function where a phi is
// necessary and inserts them.
@ -87,7 +88,7 @@ func (s *phiState) insertPhis() {
if v.Op != ssa.OpFwdRef {
continue
}
var_ := v.Aux.(FwdRefAux).N
var_ := v.Aux.(fwdRefAux).N
// Optimization: look back 1 block for the definition.
if len(b.Preds) == 1 {
@ -334,7 +335,7 @@ func (s *phiState) resolveFwdRefs() {
if v.Op != ssa.OpFwdRef {
continue
}
n := s.varnum[v.Aux.(FwdRefAux).N]
n := s.varnum[v.Aux.(fwdRefAux).N]
v.Op = ssa.OpCopy
v.Aux = nil
v.AddArg(values[n])
@ -465,7 +466,7 @@ func (s *simplePhiState) insertPhis() {
continue
}
s.fwdrefs = append(s.fwdrefs, v)
var_ := v.Aux.(FwdRefAux).N
var_ := v.Aux.(fwdRefAux).N
if _, ok := s.defvars[b.ID][var_]; !ok {
s.defvars[b.ID][var_] = v // treat FwdDefs as definitions.
}
@ -479,7 +480,7 @@ loop:
v := s.fwdrefs[len(s.fwdrefs)-1]
s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1]
b := v.Block
var_ := v.Aux.(FwdRefAux).N
var_ := v.Aux.(fwdRefAux).N
if b == s.f.Entry {
// No variable should be live at entry.
s.s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, var_, v)
@ -546,7 +547,7 @@ func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir.
}
}
// Generate a FwdRef for the variable and return that.
v := b.NewValue0A(line, ssa.OpFwdRef, t, FwdRefAux{N: var_})
v := b.NewValue0A(line, ssa.OpFwdRef, t, fwdRefAux{N: var_})
s.defvars[b.ID][var_] = v
if var_.Op() == ir.ONAME {
s.s.addNamedValue(var_.(*ir.Name), v)
View file
@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gc
package ssagen
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"go/constant"
@ -14,8 +16,6 @@ import (
"sort"
"strings"
"bufio"
"bytes"
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/liveness"
@ -41,20 +41,16 @@ var ssaDumpStdout bool // whether to dump to stdout
var ssaDumpCFG string // generate CFGs for these phases
const ssaDumpFile = "ssa.html"
// The max number of defers in a function using open-coded defers. We enforce this
// limit because the deferBits bitmask is currently a single byte (to minimize code size).
const maxOpenDefers = 8
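A toy sketch of the idea behind that one-byte deferBits mask, which is where the limit of 8 comes from (a model of the scheme, not the code the compiler emits):

package main

import "fmt"

func main() {
	// deferBits is a single byte: bit i is set when the i'th defer
	// statement in the function is actually reached.
	var deferBits uint8
	for i := 0; i < 3; i++ {
		deferBits |= 1 << i
	}
	// At function exit, the recorded defers run in reverse order.
	for i := 7; i >= 0; i-- {
		if deferBits&(1<<i) != 0 {
			fmt.Println("run defer", i)
		}
	}
}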
// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
var ssaDumpInlined []*ir.Func
func ssaDumpInline(fn *ir.Func) {
func DumpInline(fn *ir.Func) {
if ssaDump != "" && ssaDump == ir.FuncName(fn) {
ssaDumpInlined = append(ssaDumpInlined, fn)
}
}
func initSSAEnv() {
func InitEnv() {
ssaDump = os.Getenv("GOSSAFUNC")
ssaDir = os.Getenv("GOSSADIR")
if ssaDump != "" {
@ -70,10 +66,10 @@ func initSSAEnv() {
}
}
func initssaconfig() {
func InitConfig() {
types_ := ssa.NewTypes()
if thearch.SoftFloat {
if Arch.SoftFloat {
softfloatInit()
}
@ -91,7 +87,7 @@ func initssaconfig() {
_ = types.NewPtr(types.ErrorType) // *error
types.NewPtrCacheEnabled = false
ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0)
ssaConfig.SoftFloat = thearch.SoftFloat
ssaConfig.SoftFloat = Arch.SoftFloat
ssaConfig.Race = base.Flag.Race
ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
@ -148,7 +144,7 @@ func initssaconfig() {
}
}
if thearch.LinkArch.Family == sys.Wasm {
if Arch.LinkArch.Family == sys.Wasm {
BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("goPanicIndex")
BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("goPanicIndexU")
BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("goPanicSliceAlen")
@ -183,7 +179,7 @@ func initssaconfig() {
BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C")
BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU")
}
if thearch.LinkArch.PtrSize == 4 {
if Arch.LinkArch.PtrSize == 4 {
ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex")
ExtendCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeVar("panicExtendIndexU")
ExtendCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeVar("panicExtendSliceAlen")
@ -1215,7 +1211,7 @@ func (s *state) stmt(n ir.Node) {
n := n.(*ir.AssignListStmt)
res, resok := s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true)
deref := false
if !canSSAType(n.Rhs[0].Type()) {
if !TypeOK(n.Rhs[0].Type()) {
if res.Op != ssa.OpLoad {
s.Fatalf("dottype of non-load")
}
@ -1351,7 +1347,7 @@ func (s *state) stmt(n ir.Node) {
}
var r *ssa.Value
deref := !canSSAType(t)
deref := !TypeOK(t)
if deref {
if rhs == nil {
r = nil // Signal assign to use OpZero.
@ -2133,7 +2129,7 @@ func (s *state) expr(n ir.Node) *ssa.Value {
return s.load(n.Type(), addr)
case ir.ONAMEOFFSET:
n := n.(*ir.NameOffsetExpr)
if s.canSSAName(n.Name_) && canSSAType(n.Type()) {
if s.canSSAName(n.Name_) && TypeOK(n.Type()) {
return s.variable(n, n.Type())
}
addr := s.addr(n)
@ -2352,18 +2348,18 @@ func (s *state) expr(n ir.Node) *ssa.Value {
if ft.IsFloat() || tt.IsFloat() {
conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS && !s.softFloat {
if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat {
if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
if thearch.LinkArch.Family == sys.ARM64 || thearch.LinkArch.Family == sys.Wasm || thearch.LinkArch.Family == sys.S390X || s.softFloat {
if Arch.LinkArch.Family == sys.ARM64 || Arch.LinkArch.Family == sys.Wasm || Arch.LinkArch.Family == sys.S390X || s.softFloat {
if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
conv = conv1
}
}
if thearch.LinkArch.Family == sys.MIPS && !s.softFloat {
if Arch.LinkArch.Family == sys.MIPS && !s.softFloat {
if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
// tt is float32 or float64, and ft is also unsigned
if tt.Size() == 4 {
@ -2713,7 +2709,7 @@ func (s *state) expr(n ir.Node) *ssa.Value {
addr := s.constOffPtrSP(types.NewPtr(n.Type()), n.Offset)
return s.rawLoad(n.Type(), addr)
}
if canSSAType(n.Type()) {
if TypeOK(n.Type()) {
return s.newValue1I(ssa.OpSelectN, n.Type(), which, s.prevCall)
} else {
addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(n.Type()), which, s.prevCall)
@ -2779,7 +2775,7 @@ func (s *state) expr(n ir.Node) *ssa.Value {
p := s.addr(n)
return s.load(n.X.Type().Elem(), p)
case n.X.Type().IsArray():
if canSSAType(n.X.Type()) {
if TypeOK(n.X.Type()) {
// SSA can handle arrays of length at most 1.
bound := n.X.Type().NumElem()
a := s.expr(n.X)
@ -3055,7 +3051,7 @@ func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value {
}
args := make([]argRec, 0, nargs)
for _, n := range n.Args[1:] {
if canSSAType(n.Type()) {
if TypeOK(n.Type()) {
args = append(args, argRec{v: s.expr(n), store: true})
} else {
v := s.addr(n)
@ -3418,7 +3414,7 @@ type intrinsicKey struct {
fn string
}
func initSSATables() {
func InitTables() {
intrinsics = map[intrinsicKey]intrinsicBuilder{}
var all []*sys.Arch
@ -4297,7 +4293,7 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder {
}
// Skip intrinsifying math functions (which may contain hard-float
// instructions) when soft-float
if thearch.SoftFloat && pkg == "math" {
if Arch.SoftFloat && pkg == "math" {
return nil
}
@ -4309,10 +4305,10 @@ func findIntrinsic(sym *types.Sym) intrinsicBuilder {
return nil
}
}
return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}]
return intrinsics[intrinsicKey{Arch.LinkArch.Arch, pkg, fn}]
}
func isIntrinsicCall(n *ir.CallExpr) bool {
func IsIntrinsicCall(n *ir.CallExpr) bool {
if n == nil {
return false
}
@ -4427,7 +4423,7 @@ func (s *state) openDeferRecord(n *ir.CallExpr) {
}
for _, argn := range n.Rargs {
var v *ssa.Value
if canSSAType(argn.Type()) {
if TypeOK(argn.Type()) {
v = s.openDeferSave(nil, argn.Type(), s.expr(argn))
} else {
v = s.openDeferSave(argn, argn.Type(), nil)
@ -4456,7 +4452,7 @@ func (s *state) openDeferRecord(n *ir.CallExpr) {
// evaluated (via s.addr() below) to get the value that is to be stored. The
// function returns an SSA value representing a pointer to the autotmp location.
func (s *state) openDeferSave(n ir.Node, t *types.Type, val *ssa.Value) *ssa.Value {
canSSA := canSSAType(t)
canSSA := TypeOK(t)
var pos src.XPos
if canSSA {
pos = val.Pos
@ -4570,7 +4566,7 @@ func (s *state) openDeferExit() {
ACArgs = append(ACArgs, ssa.Param{Type: f.Type, Offset: int32(argStart + f.Offset)})
if testLateExpansion {
var a *ssa.Value
if !canSSAType(f.Type) {
if !TypeOK(f.Type) {
a = s.newValue2(ssa.OpDereference, f.Type, argAddrVal, s.mem())
} else {
a = s.load(f.Type, argAddrVal)
@ -4578,7 +4574,7 @@ func (s *state) openDeferExit() {
callArgs = append(callArgs, a)
} else {
addr := s.constOffPtrSP(pt, argStart+f.Offset)
if !canSSAType(f.Type) {
if !TypeOK(f.Type) {
s.move(f.Type, addr, argAddrVal)
} else {
argVal := s.load(f.Type, argAddrVal)
@ -4946,7 +4942,7 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
// maybeNilCheckClosure checks if a nil check of a closure is needed in some
// architecture-dependent situations and, if so, emits the nil check.
func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
if thearch.LinkArch.Family == sys.Wasm || objabi.GOOS == "aix" && k != callGo {
if Arch.LinkArch.Family == sys.Wasm || objabi.GOOS == "aix" && k != callGo {
// On AIX, the closure must be nil-checked because fn can be nil, except for go statements: there the nil check is left to the runtime, which reports the "go of nil func value" error.
// TODO(neelance): On other architectures this should be eliminated by the optimization steps
s.nilCheck(closure)
@ -5139,7 +5135,7 @@ func (s *state) canSSA(n ir.Node) bool {
if n.Op() != ir.ONAME {
return false
}
return s.canSSAName(n.(*ir.Name)) && canSSAType(n.Type())
return s.canSSAName(n.(*ir.Name)) && TypeOK(n.Type())
}
func (s *state) canSSAName(name *ir.Name) bool {
@ -5181,7 +5177,7 @@ func (s *state) canSSAName(name *ir.Name) bool {
}
// TypeOK reports whether variables of type t are SSA-able.
func canSSAType(t *types.Type) bool {
func TypeOK(t *types.Type) bool {
types.CalcSize(t)
if t.Width > int64(4*types.PtrSize) {
// 4*Widthptr is an arbitrary constant. We want it
@ -5195,7 +5191,7 @@ func canSSAType(t *types.Type) bool {
// not supported on SSA variables.
// TODO: allow if all indexes are constant.
if t.NumElem() <= 1 {
return canSSAType(t.Elem())
return TypeOK(t.Elem())
}
return false
case types.TSTRUCT:
@ -5203,7 +5199,7 @@ func canSSAType(t *types.Type) bool {
return false
}
for _, t1 := range t.Fields().Slice() {
if !canSSAType(t1.Type) {
if !TypeOK(t1.Type) {
return false
}
}
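A toy model of this recursion, with a simplified type that tracks only a width and element types (the real rules above also special-case arrays longer than one element and structs with many fields):

package main

import "fmt"

// typ is a hypothetical stand-in for *types.Type.
type typ struct {
	width int64
	elems []typ // struct fields or array elements
}

// typeOK mirrors the shape of TypeOK: small enough, and every
// element recursively SSA-able.
func typeOK(t typ) bool {
	if t.width > 4*8 { // 4 * pointer size, as in the code above
		return false
	}
	for _, e := range t.elems {
		if !typeOK(e) {
			return false
		}
	}
	return true
}

func main() {
	small := typ{width: 16, elems: []typ{{width: 8}, {width: 8}}}
	big := typ{width: 64}
	fmt.Println(typeOK(small), typeOK(big)) // true false
}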
@ -5307,7 +5303,7 @@ func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bo
b.AddEdgeTo(bPanic)
s.startBlock(bPanic)
if thearch.LinkArch.Family == sys.Wasm {
if Arch.LinkArch.Family == sys.Wasm {
// TODO(khr): figure out how to do "register" based calling convention for bounds checks.
// Should be similar to gcWriteBarrier, but I can't make it work.
s.rtcall(BoundsCheckFunc[kind], false, nil, idx, len)
@ -5435,7 +5431,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
if testLateExpansion {
for i, t := range results {
off = types.Rnd(off, t.Alignment())
if canSSAType(t) {
if TypeOK(t) {
res[i] = s.newValue1I(ssa.OpSelectN, t, int64(i), call)
} else {
addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), int64(i), call)
@ -5575,7 +5571,7 @@ func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
func (s *state) putArg(n ir.Node, t *types.Type, off int64, forLateExpandedCall bool) (ssa.Param, *ssa.Value) {
var a *ssa.Value
if forLateExpandedCall {
if !canSSAType(t) {
if !TypeOK(t) {
a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
} else {
a = s.expr(n)
@ -5596,7 +5592,7 @@ func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off
addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
}
if !canSSAType(t) {
if !TypeOK(t) {
a := s.addr(n)
s.move(t, addr, a)
return
@ -6146,7 +6142,7 @@ func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Val
var tmp ir.Node // temporary for use with large types
var addr *ssa.Value // address of tmp
if commaok && !canSSAType(n.Type()) {
if commaok && !TypeOK(n.Type()) {
// unSSAable type, use temporary.
// TODO: get rid of some of these temporaries.
tmp = typecheck.TempAt(n.Pos(), s.curfn, n.Type())
@ -6250,7 +6246,7 @@ func (s *state) variable(n ir.Node, t *types.Type) *ssa.Value {
}
// Make a FwdRef, which records a value that's live on block input.
// We'll find the matching definition as part of insertPhis.
v = s.newValue0A(ssa.OpFwdRef, t, FwdRefAux{N: n})
v = s.newValue0A(ssa.OpFwdRef, t, fwdRefAux{N: n})
s.fwdVars[n] = v
if n.Op() == ir.ONAME {
s.addNamedValue(n.(*ir.Name), v)
@ -6300,8 +6296,8 @@ type Branch struct {
B *ssa.Block // target
}
// SSAGenState contains state needed during Prog generation.
type SSAGenState struct {
// State contains state needed during Prog generation.
type State struct {
pp *objw.Progs
// Branches remembers all the branch instructions we've seen
@ -6330,7 +6326,7 @@ type SSAGenState struct {
}
// Prog appends a new Prog.
func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
func (s *State) Prog(as obj.As) *obj.Prog {
p := s.pp.Prog(as)
if ssa.LosesStmtMark(as) {
return p
@ -6347,19 +6343,19 @@ func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
}
// Pc returns the current Prog.
func (s *SSAGenState) Pc() *obj.Prog {
func (s *State) Pc() *obj.Prog {
return s.pp.Next
}
// SetPos sets the current source position.
func (s *SSAGenState) SetPos(pos src.XPos) {
func (s *State) SetPos(pos src.XPos) {
s.pp.Pos = pos
}
// Br emits a single branch instruction and returns the instruction.
// Not all architectures need the returned instruction, but otherwise
// the boilerplate is common to all.
func (s *SSAGenState) Br(op obj.As, target *ssa.Block) *obj.Prog {
func (s *State) Br(op obj.As, target *ssa.Block) *obj.Prog {
p := s.Prog(op)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, Branch{P: p, B: target})
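Branches enables the usual two-pass branch-patching pattern: emit jumps with symbolic block targets, record them, and patch the real destinations once block layout is known (genssa keeps the block-start table in s.bstart). A standalone sketch of the pattern with toy types:

package main

import "fmt"

// inst is a toy instruction whose branch target is filled in later.
type inst struct {
	op     string
	target int
}

// branch pairs an emitted jump with the block it must reach, loosely
// mirroring ssagen.Branch's {P *obj.Prog; B *ssa.Block}.
type branch struct {
	p *inst
	b int // destination block ID
}

func main() {
	code := []*inst{{op: "CMP"}, {op: "JNE"}, {op: "RET"}}
	branches := []branch{{p: code[1], b: 2}}

	// Once every block is emitted, its first-instruction index is
	// known, so every recorded branch can be patched.
	bstart := map[int]int{2: 2}
	for _, br := range branches {
		br.p.target = bstart[br.b]
	}
	fmt.Println(*code[1]) // {JNE 2}
}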
@ -6371,7 +6367,7 @@ func (s *SSAGenState) Br(op obj.As, target *ssa.Block) *obj.Prog {
// Spill/fill/copy instructions from the register allocator,
// phi functions, and instructions with a no-pos position
// are examples of instructions that can cause churn.
func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) {
func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) {
switch v.Op {
case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
// These are not statements
@ -6447,7 +6443,7 @@ func emitStackObjects(e *ssafn, pp *objw.Progs) {
// genssa appends entries to pp for each instruction in f.
func genssa(f *ssa.Func, pp *objw.Progs) {
var s SSAGenState
var s State
e := f.Frontend().(*ssafn)
@ -6525,7 +6521,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: liveness.IsUnsafe(f)}
// Emit values in block
thearch.SSAMarkMoves(&s, b)
Arch.SSAMarkMoves(&s, b)
for _, v := range b.Values {
x := s.pp.Next
s.DebugFriendlySetPosFrom(v)
@ -6552,7 +6548,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString())
}
case ssa.OpInlMark:
p := thearch.Ginsnop(s.pp)
p := Arch.Ginsnop(s.pp)
if inlMarks == nil {
inlMarks = map[*obj.Prog]int32{}
inlMarksByPos = map[src.XPos][]*obj.Prog{}
@ -6573,7 +6569,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
firstPos = src.NoXPos
}
// let the backend handle it
thearch.SSAGenValue(&s, v)
Arch.SSAGenValue(&s, v)
}
if base.Ctxt.Flag_locationlists {
@ -6588,7 +6584,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
}
// If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused.
if s.bstart[b.ID] == s.pp.Next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
p := thearch.Ginsnop(s.pp)
p := Arch.Ginsnop(s.pp)
p.Pos = p.Pos.WithIsStmt()
if b.Pos == src.NoXPos {
b.Pos = p.Pos // It needs a file, otherwise a no-file non-zero line causes confusion. See #35652.
@ -6609,7 +6605,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
}
x := s.pp.Next
s.SetPos(b.Pos)
thearch.SSAGenBlock(&s, b, next)
Arch.SSAGenBlock(&s, b, next)
if f.PrintOrHtmlSSA {
for ; x != s.pp.Next; x = x.Link {
progToBlock[x] = b
@ -6621,7 +6617,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
// still be inside the function in question. So if
// it ends in a call which doesn't return, add a
// nop (which will never execute) after the call.
thearch.Ginsnop(pp)
Arch.Ginsnop(pp)
}
if openDeferInfo != nil {
// When doing open-coded defers, generate a disconnected call to
@ -6636,7 +6632,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
// going to emit anyway, and use those instructions instead of the
// inline marks.
for p := pp.Text; p != nil; p = p.Link {
if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || thearch.LinkArch.Family == sys.Wasm {
if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || Arch.LinkArch.Family == sys.Wasm {
// Don't use 0-sized instructions as inline marks, because we need
// to identify inline mark instructions by pc offset.
// (Some of these instructions are sometimes zero-sized, sometimes not.
@ -6677,7 +6673,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
}
if base.Ctxt.Flag_locationlists {
debugInfo := ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, stackOffset)
debugInfo := ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset)
e.curfn.DebugInfo = debugInfo
bstart := s.bstart
// Note that at this moment, Prog.Pc is a sequence number; it's
@ -6766,12 +6762,12 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
f.HTMLWriter = nil
}
func defframe(s *SSAGenState, e *ssafn) {
func defframe(s *State, e *ssafn) {
pp := s.pp
frame := types.Rnd(s.maxarg+e.stksize, int64(types.RegSize))
if thearch.PadFrame != nil {
frame = thearch.PadFrame(frame)
if Arch.PadFrame != nil {
frame = Arch.PadFrame(frame)
}
// Fill in argument and frame size.
@ -6808,7 +6804,7 @@ func defframe(s *SSAGenState, e *ssafn) {
}
// Zero old range
p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
p = Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
// Set new range.
lo = n.FrameOffset()
@ -6816,7 +6812,7 @@ func defframe(s *SSAGenState, e *ssafn) {
}
// Zero final range.
thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
}
// For generating consecutive jump instructions to model a specific branching
@ -6825,14 +6821,14 @@ type IndexJump struct {
Index int
}
func (s *SSAGenState) oneJump(b *ssa.Block, jump *IndexJump) {
func (s *State) oneJump(b *ssa.Block, jump *IndexJump) {
p := s.Br(jump.Jump, b.Succs[jump.Index].Block())
p.Pos = b.Pos
}
// CombJump generates combinational instructions (currently two) for a block jump,
// so that the behavior of non-standard condition codes can be simulated.
func (s *SSAGenState) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) {
func (s *State) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) {
switch next {
case b.Succs[0].Block():
s.oneJump(b, &jumps[0][0])
@ -7019,7 +7015,7 @@ func AddrAuto(a *obj.Addr, v *ssa.Value) {
n, off := ssa.AutoVar(v)
a.Type = obj.TYPE_MEM
a.Sym = n.Sym().Linksym()
a.Reg = int16(thearch.REGSP)
a.Reg = int16(Arch.REGSP)
a.Offset = n.FrameOffset() + off
if n.Class_ == ir.PPARAM || n.Class_ == ir.PPARAMOUT {
a.Name = obj.NAME_PARAM
@ -7028,20 +7024,20 @@ func AddrAuto(a *obj.Addr, v *ssa.Value) {
}
}
func (s *SSAGenState) AddrScratch(a *obj.Addr) {
func (s *State) AddrScratch(a *obj.Addr) {
if s.ScratchFpMem == nil {
panic("no scratch memory available; forgot to declare usesScratch for Op?")
}
a.Type = obj.TYPE_MEM
a.Name = obj.NAME_AUTO
a.Sym = s.ScratchFpMem.Sym().Linksym()
a.Reg = int16(thearch.REGSP)
a.Reg = int16(Arch.REGSP)
a.Offset = s.ScratchFpMem.Offset_
}
// Call returns a new CALL instruction for the SSA value v.
// It uses PrepareCall to prepare the call.
func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
func (s *State) Call(v *ssa.Value) *obj.Prog {
pPosIsStmt := s.pp.Pos.IsStmt() // The statement-ness of the call comes from the State
s.PrepareCall(v)
@ -7057,7 +7053,7 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
p.To.Sym = sym.Fn
} else {
// TODO(mdempsky): Can these differences be eliminated?
switch thearch.LinkArch.Family {
switch Arch.LinkArch.Family {
case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm:
p.To.Type = obj.TYPE_REG
case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
@ -7073,7 +7069,7 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
// PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping.
// It must be called immediately before emitting the actual CALL instruction,
// since it emits PCDATA for the stack map at the call (calls are safe points).
func (s *SSAGenState) PrepareCall(v *ssa.Value) {
func (s *State) PrepareCall(v *ssa.Value) {
idx := s.livenessMap.Get(v)
if !idx.StackMapValid() {
// See Liveness.hasStackMap.
@ -7093,7 +7089,7 @@ func (s *SSAGenState) PrepareCall(v *ssa.Value) {
// insert an actual hardware NOP that will have the right line number.
// This is different from obj.ANOP, which is a virtual no-op
// that doesn't make it into the instruction stream.
thearch.Ginsnopdefer(s.pp)
Arch.Ginsnopdefer(s.pp)
}
if ok {
@ -7111,7 +7107,7 @@ func (s *SSAGenState) PrepareCall(v *ssa.Value) {
// UseArgs records the fact that an instruction needs a certain amount of
// callee args space for its use.
func (s *SSAGenState) UseArgs(n int64) {
func (s *State) UseArgs(n int64) {
if s.maxarg < n {
s.maxarg = n
}
@ -7223,7 +7219,7 @@ func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
} else {
t = types.Types[types.TUINT32]
}
if thearch.LinkArch.ByteOrder == binary.BigEndian {
if Arch.LinkArch.ByteOrder == binary.BigEndian {
return e.SplitSlot(&name, ".hi", 0, t), e.SplitSlot(&name, ".lo", t.Size(), types.Types[types.TUINT32])
}
return e.SplitSlot(&name, ".hi", t.Size(), t), e.SplitSlot(&name, ".lo", 0, types.Types[types.TUINT32])
@ -7274,7 +7270,7 @@ func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t
}
func (e *ssafn) CanSSA(t *types.Type) bool {
return canSSAType(t)
return TypeOK(t)
}
func (e *ssafn) Line(pos src.XPos) string {
@ -7453,3 +7449,11 @@ func deferstruct(stksize int64) *types.Type {
types.CalcStructSize(s)
return s
}
var (
BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
)
// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
var GCWriteBarrierReg map[int16]*obj.LSym
View file
@ -6,18 +6,18 @@ package wasm
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/objw"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/wasm"
"cmd/internal/objabi"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &wasm.Linkwasm
arch.REGSP = wasm.REG_SP
arch.MAXWIDTH = 1 << 50
@ -52,10 +52,10 @@ func ginsnop(pp *objw.Progs) *obj.Prog {
return pp.Prog(wasm.ANop)
}
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
}
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if next != b.Succs[0].Block() {
@ -121,7 +121,7 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
}
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall:
s.PrepareCall(v)
@ -188,7 +188,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
getReg(s, wasm.REG_SP)
getValue64(s, v.Args[0])
p := s.Prog(storeOp(v.Type))
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
default:
if v.Type.IsMemory() {
@ -208,7 +208,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
}
func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) {
func ssaGenValueOnStack(s *ssagen.State, v *ssa.Value, extend bool) {
switch v.Op {
case ssa.OpWasmLoweredGetClosurePtr:
getReg(s, wasm.REG_CTXT)
@ -243,10 +243,10 @@ func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) {
p.From.Type = obj.TYPE_ADDR
switch v.Aux.(type) {
case *obj.LSym:
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
case *ir.Name:
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
default:
panic("wasm: bad LoweredAddr")
}
@ -363,7 +363,7 @@ func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) {
case ssa.OpLoadReg:
p := s.Prog(loadOp(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
case ssa.OpCopy:
getValue64(s, v.Args[0])
@ -385,7 +385,7 @@ func isCmp(v *ssa.Value) bool {
}
}
func getValue32(s *gc.SSAGenState, v *ssa.Value) {
func getValue32(s *ssagen.State, v *ssa.Value) {
if v.OnWasmStack {
s.OnWasmStackSkipped--
ssaGenValueOnStack(s, v, false)
@ -402,7 +402,7 @@ func getValue32(s *gc.SSAGenState, v *ssa.Value) {
}
}
func getValue64(s *gc.SSAGenState, v *ssa.Value) {
func getValue64(s *ssagen.State, v *ssa.Value) {
if v.OnWasmStack {
s.OnWasmStackSkipped--
ssaGenValueOnStack(s, v, true)
@ -416,32 +416,32 @@ func getValue64(s *gc.SSAGenState, v *ssa.Value) {
}
}
func i32Const(s *gc.SSAGenState, val int32) {
func i32Const(s *ssagen.State, val int32) {
p := s.Prog(wasm.AI32Const)
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(val)}
}
func i64Const(s *gc.SSAGenState, val int64) {
func i64Const(s *ssagen.State, val int64) {
p := s.Prog(wasm.AI64Const)
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: val}
}
func f32Const(s *gc.SSAGenState, val float64) {
func f32Const(s *ssagen.State, val float64) {
p := s.Prog(wasm.AF32Const)
p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
}
func f64Const(s *gc.SSAGenState, val float64) {
func f64Const(s *ssagen.State, val float64) {
p := s.Prog(wasm.AF64Const)
p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
}
func getReg(s *gc.SSAGenState, reg int16) {
func getReg(s *ssagen.State, reg int16) {
p := s.Prog(wasm.AGet)
p.From = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
}
func setReg(s *gc.SSAGenState, reg int16) {
func setReg(s *ssagen.State, reg int16) {
p := s.Prog(wasm.ASet)
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
}
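These helpers follow wasm's stack-machine discipline: push operands, apply an op that consumes them, then pop the result into a register. A toy model of the emission pattern (strings stand in for wasm.A* instructions; this is not the real obj.Prog API):

package main

import "fmt"

// prog is a toy instruction buffer.
type prog struct{ ins []string }

func (p *prog) emit(s string) { p.ins = append(p.ins, s) }

func main() {
	var p prog
	p.emit("Get SP")     // getReg: push a register
	p.emit("I64Const 8") // i64Const: push a constant
	p.emit("I64Add")     // the op pops both operands, pushes the sum
	p.emit("Set R0")     // setReg: pop the result into a register
	fmt.Println(p.ins)   // [Get SP I64Const 8 I64Add Set R0]
}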
View file
@ -6,14 +6,14 @@ package x86
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ssagen"
"cmd/internal/obj/x86"
"cmd/internal/objabi"
"fmt"
"os"
)
func Init(arch *gc.Arch) {
func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &x86.Link386
arch.REGSP = x86.REGSP
arch.SSAGenValue = ssaGenValue
View file
@ -9,17 +9,17 @@ import (
"math"
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/x86"
)
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
flive := b.FlagsLiveAtEnd
for _, c := range b.ControlValues() {
flive = c.Type.IsFlags() || flive
@ -109,7 +109,7 @@ func moveByType(t *types.Type) obj.As {
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
p := s.Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
@ -118,7 +118,7 @@ func opregreg(s *gc.SSAGenState, op obj.As, dest, src int16) *obj.Prog {
return p
}
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
func ssaGenValue(s *ssagen.State, v *ssa.Value) {
switch v.Op {
case ssa.Op386ADDL:
r := v.Reg()
@ -406,14 +406,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Type = obj.TYPE_MEM
p.From.Reg = r
p.From.Index = i
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386LEAL:
p := s.Prog(x86.ALEAL)
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386CMPL, ssa.Op386CMPW, ssa.Op386CMPB,
@ -439,7 +439,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Args[1].Reg()
case ssa.Op386CMPLconstload, ssa.Op386CMPWconstload, ssa.Op386CMPBconstload:
@ -447,7 +447,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux2(&p.From, v, sc.Off())
ssagen.AddAux2(&p.From, v, sc.Off())
p.To.Type = obj.TYPE_CONST
p.To.Offset = sc.Val()
case ssa.Op386MOVLconst:
@ -499,7 +499,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1,
@ -523,7 +523,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
p.From.Reg = r
p.From.Index = i
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.Op386ADDLloadidx4, ssa.Op386SUBLloadidx4, ssa.Op386MULLloadidx4,
@ -533,7 +533,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.From.Index = v.Args[2].Reg()
p.From.Scale = 4
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if v.Reg() != v.Args[0].Reg() {
@ -546,7 +546,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[1].Reg()
gc.AddAux(&p.From, v)
ssagen.AddAux(&p.From, v)
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
if v.Reg() != v.Args[0].Reg() {
@ -559,7 +559,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = v.Args[1].Reg()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.Op386ADDLconstmodify:
sc := v.AuxValAndOff()
val := sc.Val()
@ -573,7 +573,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
off := sc.Off()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, off)
ssagen.AddAux2(&p.To, v, off)
break
}
fallthrough
@ -586,7 +586,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = val
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, off)
ssagen.AddAux2(&p.To, v, off)
case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1,
ssa.Op386MOVSDstoreidx8, ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4, ssa.Op386MOVWstoreidx2,
ssa.Op386ADDLmodifyidx4, ssa.Op386SUBLmodifyidx4, ssa.Op386ANDLmodifyidx4, ssa.Op386ORLmodifyidx4, ssa.Op386XORLmodifyidx4:
@ -612,7 +612,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
}
p.To.Reg = r
p.To.Index = i
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_CONST
@ -620,7 +620,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = sc.Val()
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux2(&p.To, v, sc.Off())
ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.Op386ADDLconstmodifyidx4:
sc := v.AuxValAndOff()
val := sc.Val()
@ -636,7 +636,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Reg = v.Args[0].Reg()
p.To.Scale = 4
p.To.Index = v.Args[1].Reg()
gc.AddAux2(&p.To, v, off)
ssagen.AddAux2(&p.To, v, off)
break
}
fallthrough
@ -663,7 +663,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.To.Type = obj.TYPE_MEM
p.To.Reg = r
p.To.Index = i
gc.AddAux2(&p.To, v, sc.Off())
ssagen.AddAux2(&p.To, v, sc.Off())
case ssa.Op386MOVWLSX, ssa.Op386MOVBLSX, ssa.Op386MOVWLZX, ssa.Op386MOVBLZX,
ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD,
ssa.Op386CVTTSS2SL, ssa.Op386CVTTSD2SL,
@ -695,7 +695,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
return
}
p := s.Prog(loadByType(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
ssagen.AddrAuto(&p.From, v.Args[0])
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
@ -707,10 +707,10 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(storeByType(v.Type))
p.From.Type = obj.TYPE_REG
p.From.Reg = v.Args[0].Reg()
gc.AddrAuto(&p.To, v)
ssagen.AddrAuto(&p.To, v)
case ssa.Op386LoweredGetClosurePtr:
// Closure pointer is DX.
gc.CheckLoweredGetClosurePtr(v)
ssagen.CheckLoweredGetClosurePtr(v)
case ssa.Op386LoweredGetG:
r := v.Reg()
// See the comments in cmd/internal/obj/x86/obj6.go
@ -766,14 +766,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
s.UseArgs(8) // space used in callee args area by assembly stubs
case ssa.Op386LoweredPanicExtendA, ssa.Op386LoweredPanicExtendB, ssa.Op386LoweredPanicExtendC:
p := s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.ExtendCheckFunc[v.AuxInt]
p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
s.UseArgs(12) // space used in callee args area by assembly stubs
case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLinter:
@ -848,7 +848,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Reg = x86.REG_AX
p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
@ -861,7 +861,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
p.From.Offset = 0xdeaddead
p.To.Type = obj.TYPE_MEM
p.To.Reg = x86.REG_SP
gc.AddAux(&p.To, v)
ssagen.AddAux(&p.To, v)
default:
v.Fatalf("genValue not implemented: %s", v.LongString())
}
@ -886,22 +886,22 @@ var blockJump = [...]struct {
ssa.Block386NAN: {x86.AJPS, x86.AJPC},
}
var eqfJumps = [2][2]gc.IndexJump{
var eqfJumps = [2][2]ssagen.IndexJump{
{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
{{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
}
var nefJumps = [2][2]gc.IndexJump{
var nefJumps = [2][2]ssagen.IndexJump{
{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
{{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
}
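These tables encode the x86 rule that, after a floating-point compare, ZF is set on equality and PF is set only when the operands were unordered (a NaN was involved); the two-jump sequences fall through to the "equal" successor exactly when ZF && !PF. A small sketch of that condition (a model, not emitted code):

package main

import "fmt"

// eqf models the condition the eqfJumps table tests for.
func eqf(zf, pf bool) bool { return zf && !pf }

func main() {
	// The pair {JNE -> other, JPS -> other} falls through to the
	// "equal" successor exactly when eqf holds.
	cases := []struct{ zf, pf bool }{{true, false}, {false, false}, {true, true}}
	for _, c := range cases {
		fmt.Printf("ZF=%v PF=%v -> equal=%v\n", c.zf, c.pf, eqf(c.zf, c.pf))
	}
}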
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockDefer:
// defer returns in rax:
@ -914,11 +914,11 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
p.To.Reg = x86.REG_AX
p = s.Prog(x86.AJNE)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
if b.Succs[0].Block() != next {
p := s.Prog(obj.AJMP)
p.To.Type = obj.TYPE_BRANCH
s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
}
case ssa.BlockExit:
case ssa.BlockRet:
View file
@ -15,6 +15,7 @@ import (
"cmd/compile/internal/ppc64"
"cmd/compile/internal/riscv64"
"cmd/compile/internal/s390x"
"cmd/compile/internal/ssagen"
"cmd/compile/internal/wasm"
"cmd/compile/internal/x86"
"cmd/internal/objabi"
@ -23,7 +24,7 @@ import (
"os"
)
var archInits = map[string]func(*gc.Arch){
var archInits = map[string]func(*ssagen.ArchInfo){
"386": x86.Init,
"amd64": amd64.Init,
"arm": arm.Init,