go/src/cmd/compile/internal/wasm/ssa.go
commit 65c4c6dfb2 (Russ Cox): [dev.regabi] cmd/compile: group known symbols, packages, names [generated]
There are a handful of pre-computed magic symbols known by
package gc, and we need a place to store them.

If we keep them together, the need for type *ir.Name means that
package ir is the lowest package in the import hierarchy that they
can go in. And package ir needs gopkg for methodSymSuffix
(in a later CL), so they can't go any higher either, at least not all together.
So package ir it is.

Rather than dump them all into the top-level package ir
namespace, however, we introduce global structs, Syms, Pkgs, and Names,
and make the known symbols, packages, and names fields of those.

[git-generate]
cd src/cmd/compile/internal/gc

rf '
	add go.go:$ \
		// Names holds known names. \
		var Names struct{} \
		\
		// Syms holds known symbols. \
		var Syms struct {} \
		\
		// Pkgs holds known packages. \
		var Pkgs struct {} \

	mv staticuint64s Names.Staticuint64s
	mv zerobase Names.Zerobase

	mv assertE2I Syms.AssertE2I
	mv assertE2I2 Syms.AssertE2I2
	mv assertI2I Syms.AssertI2I
	mv assertI2I2 Syms.AssertI2I2
	mv deferproc Syms.Deferproc
	mv deferprocStack Syms.DeferprocStack
	mv Deferreturn Syms.Deferreturn
	mv Duffcopy Syms.Duffcopy
	mv Duffzero Syms.Duffzero
	mv gcWriteBarrier Syms.GCWriteBarrier
	mv goschedguarded Syms.Goschedguarded
	mv growslice Syms.Growslice
	mv msanread Syms.Msanread
	mv msanwrite Syms.Msanwrite
	mv msanmove Syms.Msanmove
	mv newobject Syms.Newobject
	mv newproc Syms.Newproc
	mv panicdivide Syms.Panicdivide
	mv panicshift Syms.Panicshift
	mv panicdottypeE Syms.PanicdottypeE
	mv panicdottypeI Syms.PanicdottypeI
	mv panicnildottype Syms.Panicnildottype
	mv panicoverflow Syms.Panicoverflow
	mv raceread Syms.Raceread
	mv racereadrange Syms.Racereadrange
	mv racewrite Syms.Racewrite
	mv racewriterange Syms.Racewriterange
	mv SigPanic Syms.SigPanic
	mv typedmemclr Syms.Typedmemclr
	mv typedmemmove Syms.Typedmemmove
	mv Udiv Syms.Udiv
	mv writeBarrier Syms.WriteBarrier
	mv zerobaseSym Syms.Zerobase
	mv arm64HasATOMICS Syms.ARM64HasATOMICS
	mv armHasVFPv4 Syms.ARMHasVFPv4
	mv x86HasFMA Syms.X86HasFMA
	mv x86HasPOPCNT Syms.X86HasPOPCNT
	mv x86HasSSE41 Syms.X86HasSSE41
	mv WasmDiv Syms.WasmDiv
	mv WasmMove Syms.WasmMove
	mv WasmZero Syms.WasmZero
	mv WasmTruncS Syms.WasmTruncS
	mv WasmTruncU Syms.WasmTruncU

	mv gopkg Pkgs.Go
	mv itabpkg Pkgs.Itab
	mv itablinkpkg Pkgs.Itablink
	mv mappkg Pkgs.Map
	mv msanpkg Pkgs.Msan
	mv racepkg Pkgs.Race
	mv Runtimepkg Pkgs.Runtime
	mv trackpkg Pkgs.Track
	mv unsafepkg Pkgs.Unsafe

	mv Names Syms Pkgs symtab.go
	mv symtab.go cmd/compile/internal/ir
'

Change-Id: Ic143862148569a3bcde8e70b26d75421aa2d00f3
Reviewed-on: https://go-review.googlesource.com/c/go/+/279235
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
2020-12-23 06:38:07 +00:00
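
For orientation before the file itself: the refactoring above roughly produces declarations like the following in package ir. This is a sketch only, not a verbatim copy of symtab.go; the field types are assumptions inferred from how ir.Syms and ir.Name are used in the code below, and only a few representative fields are shown (*Name is package ir's own Name type).

	package ir

	import (
		"cmd/compile/internal/types"
		"cmd/internal/obj"
	)

	// Syms holds known runtime symbols, referenced by the SSA backends
	// when emitting calls (e.g. ir.Syms.WasmMove, ir.Syms.Deferreturn below).
	var Syms struct {
		Deferreturn *obj.LSym
		SigPanic    *obj.LSym
		WasmDiv     *obj.LSym
		WasmMove    *obj.LSym
		WasmTruncS  *obj.LSym
		WasmTruncU  *obj.LSym
		WasmZero    *obj.LSym
		// ... remaining symbols listed in the rf script above
	}

	// Pkgs holds known packages.
	var Pkgs struct {
		Go      *types.Pkg
		Runtime *types.Pkg
		Unsafe  *types.Pkg
		// ...
	}

	// Names holds known names.
	var Names struct {
		Staticuint64s *Name
		Zerobase      *Name
	}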


// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package wasm
import (
"cmd/compile/internal/base"
"cmd/compile/internal/gc"
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/ssa"
"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/wasm"
"cmd/internal/objabi"
)
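// Init registers the wasm-specific backend hooks with the shared gc
// backend: the linker architecture, the stack pointer register, the
// maximum data width, and the zeroRange, nop, and SSA code generation
// callbacks defined below.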
func Init(arch *gc.Arch) {
arch.LinkArch = &wasm.Linkwasm
arch.REGSP = wasm.REG_SP
arch.MAXWIDTH = 1 << 50
arch.ZeroRange = zeroRange
arch.Ginsnop = ginsnop
arch.Ginsnopdefer = ginsnop
arch.SSAMarkMoves = ssaMarkMoves
arch.SSAGenValue = ssaGenValue
arch.SSAGenBlock = ssaGenBlock
}
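// zeroRange zeroes cnt bytes of stack at offset off from SP by appending
// one I64Store of the constant 0 per 8 bytes; cnt must be a multiple of
// the pointer width.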
func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
if cnt == 0 {
return p
}
if cnt%8 != 0 {
base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
}
for i := int64(0); i < cnt; i += 8 {
p = pp.Appendpp(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0)
p = pp.Appendpp(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0)
p = pp.Appendpp(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i)
}
return p
}
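// ginsnop emits a single wasm Nop instruction.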
func ginsnop(pp *gc.Progs) *obj.Prog {
return pp.Prog(wasm.ANop)
}
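// ssaMarkMoves is a no-op on wasm: there is no flags register, so no
// constant-materializing moves need to be marked as clobbering flags.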
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
}
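// ssaGenBlock emits the terminating control flow for block b. next is the
// block laid out immediately after b, so falling through to it needs no
// explicit jump.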
func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
switch b.Kind {
case ssa.BlockPlain:
if next != b.Succs[0].Block() {
s.Br(obj.AJMP, b.Succs[0].Block())
}
case ssa.BlockIf:
switch next {
case b.Succs[0].Block():
// if false, jump to b.Succs[1]
getValue32(s, b.Controls[0])
s.Prog(wasm.AI32Eqz)
s.Prog(wasm.AIf)
s.Br(obj.AJMP, b.Succs[1].Block())
s.Prog(wasm.AEnd)
case b.Succs[1].Block():
// if true, jump to b.Succs[0]
getValue32(s, b.Controls[0])
s.Prog(wasm.AIf)
s.Br(obj.AJMP, b.Succs[0].Block())
s.Prog(wasm.AEnd)
default:
// if true, jump to b.Succs[0], else jump to b.Succs[1]
getValue32(s, b.Controls[0])
s.Prog(wasm.AIf)
s.Br(obj.AJMP, b.Succs[0].Block())
s.Prog(wasm.AEnd)
s.Br(obj.AJMP, b.Succs[1].Block())
}
case ssa.BlockRet:
s.Prog(obj.ARET)
case ssa.BlockRetJmp:
p := s.Prog(obj.ARET)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
p.To.Sym = b.Aux.(*obj.LSym)
case ssa.BlockExit:
case ssa.BlockDefer:
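// deferproc reports in RET0 whether execution should continue normally
// (zero, Succs[0]) or jump to the deferreturn call (nonzero, Succs[1]);
// the I64Eqz/I32Eqz pair below turns that into the i32 condition RET0 != 0.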
p := s.Prog(wasm.AGet)
p.From = obj.Addr{Type: obj.TYPE_REG, Reg: wasm.REG_RET0}
s.Prog(wasm.AI64Eqz)
s.Prog(wasm.AI32Eqz)
s.Prog(wasm.AIf)
s.Br(obj.AJMP, b.Succs[1].Block())
s.Prog(wasm.AEnd)
if next != b.Succs[0].Block() {
s.Br(obj.AJMP, b.Succs[0].Block())
}
default:
panic("unexpected block")
}
// Entry point for the next block. Used by the JMP in goToBlock.
s.Prog(wasm.ARESUMEPOINT)
if s.OnWasmStackSkipped != 0 {
panic("wasm: bad stack")
}
}
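// ssaGenValue emits code for v. Calls, stores, and register spills are
// handled directly here; values marked OnWasmStack are skipped and emitted
// later, directly on the WebAssembly operand stack, at their point of use
// (see ssaGenValueOnStack); all other values are generated and then stored
// to their assigned register.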
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
switch v.Op {
case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall:
s.PrepareCall(v)
if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == ir.Syms.Deferreturn {
// add a resume point before call to deferreturn so it can be called again via jmpdefer
s.Prog(wasm.ARESUMEPOINT)
}
if v.Op == ssa.OpWasmLoweredClosureCall {
getValue64(s, v.Args[1])
setReg(s, wasm.REG_CTXT)
}
if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn != nil {
sym := call.Fn
p := s.Prog(obj.ACALL)
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: sym}
p.Pos = v.Pos
} else {
getValue64(s, v.Args[0])
p := s.Prog(obj.ACALL)
p.To = obj.Addr{Type: obj.TYPE_NONE}
p.Pos = v.Pos
}
case ssa.OpWasmLoweredMove:
getValue32(s, v.Args[0])
getValue32(s, v.Args[1])
i32Const(s, int32(v.AuxInt))
p := s.Prog(wasm.ACall)
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmMove}
case ssa.OpWasmLoweredZero:
getValue32(s, v.Args[0])
i32Const(s, int32(v.AuxInt))
p := s.Prog(wasm.ACall)
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmZero}
case ssa.OpWasmLoweredNilCheck:
getValue64(s, v.Args[0])
s.Prog(wasm.AI64Eqz)
s.Prog(wasm.AIf)
p := s.Prog(wasm.ACALLNORESUME)
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.SigPanic}
s.Prog(wasm.AEnd)
if logopt.Enabled() {
logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
}
if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
base.WarnfAt(v.Pos, "generated nil check")
}
case ssa.OpWasmLoweredWB:
getValue64(s, v.Args[0])
getValue64(s, v.Args[1])
p := s.Prog(wasm.ACALLNORESUME) // TODO(neelance): If possible, turn this into a simple wasm.ACall.
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: v.Aux.(*obj.LSym)}
case ssa.OpWasmI64Store8, ssa.OpWasmI64Store16, ssa.OpWasmI64Store32, ssa.OpWasmI64Store, ssa.OpWasmF32Store, ssa.OpWasmF64Store:
getValue32(s, v.Args[0])
getValue64(s, v.Args[1])
p := s.Prog(v.Op.Asm())
p.To = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}
case ssa.OpStoreReg:
getReg(s, wasm.REG_SP)
getValue64(s, v.Args[0])
p := s.Prog(storeOp(v.Type))
gc.AddrAuto(&p.To, v)
default:
if v.Type.IsMemory() {
return
}
if v.OnWasmStack {
s.OnWasmStackSkipped++
// If a Value is marked OnWasmStack, we don't generate the value and store it to a register now.
// Instead, we delay the generation to when the value is used and then directly generate it on the WebAssembly stack.
return
}
ssaGenValueOnStack(s, v, true)
if s.OnWasmStackSkipped != 0 {
panic("wasm: bad stack")
}
setReg(s, v.Reg())
}
}
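// ssaGenValueOnStack emits code for v that leaves its result on the
// WebAssembly operand stack. If extend is true, 32-bit comparison results
// are zero-extended to 64 bits to match the usual 64-bit convention.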
func ssaGenValueOnStack(s *gc.SSAGenState, v *ssa.Value, extend bool) {
switch v.Op {
case ssa.OpWasmLoweredGetClosurePtr:
getReg(s, wasm.REG_CTXT)
case ssa.OpWasmLoweredGetCallerPC:
p := s.Prog(wasm.AI64Load)
// Caller PC is stored 8 bytes below first parameter.
p.From = obj.Addr{
Type: obj.TYPE_MEM,
Name: obj.NAME_PARAM,
Offset: -8,
}
case ssa.OpWasmLoweredGetCallerSP:
p := s.Prog(wasm.AGet)
// Caller SP is the address of the first parameter.
p.From = obj.Addr{
Type: obj.TYPE_ADDR,
Name: obj.NAME_PARAM,
Reg: wasm.REG_SP,
Offset: 0,
}
case ssa.OpWasmLoweredAddr:
if v.Aux == nil { // address of off(SP), no symbol
getValue64(s, v.Args[0])
i64Const(s, v.AuxInt)
s.Prog(wasm.AI64Add)
break
}
p := s.Prog(wasm.AGet)
p.From.Type = obj.TYPE_ADDR
switch v.Aux.(type) {
case *obj.LSym:
gc.AddAux(&p.From, v)
case *ir.Name:
p.From.Reg = v.Args[0].Reg()
gc.AddAux(&p.From, v)
default:
panic("wasm: bad LoweredAddr")
}
case ssa.OpWasmLoweredConvert:
getValue64(s, v.Args[0])
case ssa.OpWasmSelect:
getValue64(s, v.Args[0])
getValue64(s, v.Args[1])
getValue32(s, v.Args[2])
s.Prog(v.Op.Asm())
case ssa.OpWasmI64AddConst:
getValue64(s, v.Args[0])
i64Const(s, v.AuxInt)
s.Prog(v.Op.Asm())
case ssa.OpWasmI64Const:
i64Const(s, v.AuxInt)
case ssa.OpWasmF32Const:
f32Const(s, v.AuxFloat())
case ssa.OpWasmF64Const:
f64Const(s, v.AuxFloat())
case ssa.OpWasmI64Load8U, ssa.OpWasmI64Load8S, ssa.OpWasmI64Load16U, ssa.OpWasmI64Load16S, ssa.OpWasmI64Load32U, ssa.OpWasmI64Load32S, ssa.OpWasmI64Load, ssa.OpWasmF32Load, ssa.OpWasmF64Load:
getValue32(s, v.Args[0])
p := s.Prog(v.Op.Asm())
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}
case ssa.OpWasmI64Eqz:
getValue64(s, v.Args[0])
s.Prog(v.Op.Asm())
if extend {
s.Prog(wasm.AI64ExtendI32U)
}
case ssa.OpWasmI64Eq, ssa.OpWasmI64Ne, ssa.OpWasmI64LtS, ssa.OpWasmI64LtU, ssa.OpWasmI64GtS, ssa.OpWasmI64GtU, ssa.OpWasmI64LeS, ssa.OpWasmI64LeU, ssa.OpWasmI64GeS, ssa.OpWasmI64GeU,
ssa.OpWasmF32Eq, ssa.OpWasmF32Ne, ssa.OpWasmF32Lt, ssa.OpWasmF32Gt, ssa.OpWasmF32Le, ssa.OpWasmF32Ge,
ssa.OpWasmF64Eq, ssa.OpWasmF64Ne, ssa.OpWasmF64Lt, ssa.OpWasmF64Gt, ssa.OpWasmF64Le, ssa.OpWasmF64Ge:
getValue64(s, v.Args[0])
getValue64(s, v.Args[1])
s.Prog(v.Op.Asm())
if extend {
s.Prog(wasm.AI64ExtendI32U)
}
case ssa.OpWasmI64Add, ssa.OpWasmI64Sub, ssa.OpWasmI64Mul, ssa.OpWasmI64DivU, ssa.OpWasmI64RemS, ssa.OpWasmI64RemU, ssa.OpWasmI64And, ssa.OpWasmI64Or, ssa.OpWasmI64Xor, ssa.OpWasmI64Shl, ssa.OpWasmI64ShrS, ssa.OpWasmI64ShrU, ssa.OpWasmI64Rotl,
ssa.OpWasmF32Add, ssa.OpWasmF32Sub, ssa.OpWasmF32Mul, ssa.OpWasmF32Div, ssa.OpWasmF32Copysign,
ssa.OpWasmF64Add, ssa.OpWasmF64Sub, ssa.OpWasmF64Mul, ssa.OpWasmF64Div, ssa.OpWasmF64Copysign:
getValue64(s, v.Args[0])
getValue64(s, v.Args[1])
s.Prog(v.Op.Asm())
case ssa.OpWasmI32Rotl:
getValue32(s, v.Args[0])
getValue32(s, v.Args[1])
s.Prog(wasm.AI32Rotl)
s.Prog(wasm.AI64ExtendI32U)
case ssa.OpWasmI64DivS:
getValue64(s, v.Args[0])
getValue64(s, v.Args[1])
if v.Type.Size() == 8 {
// Division of int64 needs helper function wasmDiv to handle the MinInt64 / -1 case.
p := s.Prog(wasm.ACall)
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmDiv}
break
}
s.Prog(wasm.AI64DivS)
case ssa.OpWasmI64TruncSatF32S, ssa.OpWasmI64TruncSatF64S:
getValue64(s, v.Args[0])
if objabi.GOWASM.SatConv {
s.Prog(v.Op.Asm())
} else {
if v.Op == ssa.OpWasmI64TruncSatF32S {
s.Prog(wasm.AF64PromoteF32)
}
p := s.Prog(wasm.ACall)
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncS}
}
case ssa.OpWasmI64TruncSatF32U, ssa.OpWasmI64TruncSatF64U:
getValue64(s, v.Args[0])
if objabi.GOWASM.SatConv {
s.Prog(v.Op.Asm())
} else {
if v.Op == ssa.OpWasmI64TruncSatF32U {
s.Prog(wasm.AF64PromoteF32)
}
p := s.Prog(wasm.ACall)
p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncU}
}
case ssa.OpWasmF32DemoteF64:
getValue64(s, v.Args[0])
s.Prog(v.Op.Asm())
case ssa.OpWasmF64PromoteF32:
getValue64(s, v.Args[0])
s.Prog(v.Op.Asm())
case ssa.OpWasmF32ConvertI64S, ssa.OpWasmF32ConvertI64U,
ssa.OpWasmF64ConvertI64S, ssa.OpWasmF64ConvertI64U,
ssa.OpWasmI64Extend8S, ssa.OpWasmI64Extend16S, ssa.OpWasmI64Extend32S,
ssa.OpWasmF32Neg, ssa.OpWasmF32Sqrt, ssa.OpWasmF32Trunc, ssa.OpWasmF32Ceil, ssa.OpWasmF32Floor, ssa.OpWasmF32Nearest, ssa.OpWasmF32Abs,
ssa.OpWasmF64Neg, ssa.OpWasmF64Sqrt, ssa.OpWasmF64Trunc, ssa.OpWasmF64Ceil, ssa.OpWasmF64Floor, ssa.OpWasmF64Nearest, ssa.OpWasmF64Abs,
ssa.OpWasmI64Ctz, ssa.OpWasmI64Clz, ssa.OpWasmI64Popcnt:
getValue64(s, v.Args[0])
s.Prog(v.Op.Asm())
case ssa.OpLoadReg:
p := s.Prog(loadOp(v.Type))
gc.AddrAuto(&p.From, v.Args[0])
case ssa.OpCopy:
getValue64(s, v.Args[0])
default:
v.Fatalf("unexpected op: %s", v.Op)
}
}
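// isCmp reports whether v is a comparison (or Eqz), i.e. an op whose raw
// wasm result is a 32-bit boolean rather than a 64-bit value.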
func isCmp(v *ssa.Value) bool {
switch v.Op {
case ssa.OpWasmI64Eqz, ssa.OpWasmI64Eq, ssa.OpWasmI64Ne, ssa.OpWasmI64LtS, ssa.OpWasmI64LtU, ssa.OpWasmI64GtS, ssa.OpWasmI64GtU, ssa.OpWasmI64LeS, ssa.OpWasmI64LeU, ssa.OpWasmI64GeS, ssa.OpWasmI64GeU,
ssa.OpWasmF32Eq, ssa.OpWasmF32Ne, ssa.OpWasmF32Lt, ssa.OpWasmF32Gt, ssa.OpWasmF32Le, ssa.OpWasmF32Ge,
ssa.OpWasmF64Eq, ssa.OpWasmF64Ne, ssa.OpWasmF64Lt, ssa.OpWasmF64Gt, ssa.OpWasmF64Le, ssa.OpWasmF64Ge:
return true
default:
return false
}
}
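// getValue32 pushes v onto the operand stack as a 32-bit value, wrapping
// 64-bit register contents where needed; SP and comparison results are
// already 32-bit and need no wrap.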
func getValue32(s *gc.SSAGenState, v *ssa.Value) {
if v.OnWasmStack {
s.OnWasmStackSkipped--
ssaGenValueOnStack(s, v, false)
if !isCmp(v) {
s.Prog(wasm.AI32WrapI64)
}
return
}
reg := v.Reg()
getReg(s, reg)
if reg != wasm.REG_SP {
s.Prog(wasm.AI32WrapI64)
}
}
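// getValue64 pushes v onto the operand stack as a 64-bit value,
// zero-extending the 32-bit SP register when it is the source.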
func getValue64(s *gc.SSAGenState, v *ssa.Value) {
if v.OnWasmStack {
s.OnWasmStackSkipped--
ssaGenValueOnStack(s, v, true)
return
}
reg := v.Reg()
getReg(s, reg)
if reg == wasm.REG_SP {
s.Prog(wasm.AI64ExtendI32U)
}
}
func i32Const(s *gc.SSAGenState, val int32) {
p := s.Prog(wasm.AI32Const)
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(val)}
}
func i64Const(s *gc.SSAGenState, val int64) {
p := s.Prog(wasm.AI64Const)
p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: val}
}
func f32Const(s *gc.SSAGenState, val float64) {
p := s.Prog(wasm.AF32Const)
p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
}
func f64Const(s *gc.SSAGenState, val float64) {
p := s.Prog(wasm.AF64Const)
p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
}
func getReg(s *gc.SSAGenState, reg int16) {
p := s.Prog(wasm.AGet)
p.From = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
}
func setReg(s *gc.SSAGenState, reg int16) {
p := s.Prog(wasm.ASet)
p.To = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
}
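// loadOp returns the wasm load instruction appropriate for a value of type t.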
func loadOp(t *types.Type) obj.As {
if t.IsFloat() {
switch t.Size() {
case 4:
return wasm.AF32Load
case 8:
return wasm.AF64Load
default:
panic("bad load type")
}
}
switch t.Size() {
case 1:
if t.IsSigned() {
return wasm.AI64Load8S
}
return wasm.AI64Load8U
case 2:
if t.IsSigned() {
return wasm.AI64Load16S
}
return wasm.AI64Load16U
case 4:
if t.IsSigned() {
return wasm.AI64Load32S
}
return wasm.AI64Load32U
case 8:
return wasm.AI64Load
default:
panic("bad load type")
}
}
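// storeOp returns the wasm store instruction appropriate for a value of type t.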
func storeOp(t *types.Type) obj.As {
if t.IsFloat() {
switch t.Size() {
case 4:
return wasm.AF32Store
case 8:
return wasm.AF64Store
default:
panic("bad store type")
}
}
switch t.Size() {
case 1:
return wasm.AI64Store8
case 2:
return wasm.AI64Store16
case 4:
return wasm.AI64Store32
case 8:
return wasm.AI64Store
default:
panic("bad store type")
}
}