2020-07-09 15:47:26 -04:00
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssa
2020-07-27 16:46:35 -04:00
import (
2021-02-01 13:26:47 -05:00
"cmd/compile/internal/abi"
"cmd/compile/internal/ir"
2020-07-27 16:46:35 -04:00
"cmd/compile/internal/types"
"cmd/internal/src"
"fmt"
"sort"
)
2020-07-09 15:47:26 -04:00
2020-08-17 16:57:22 -04:00
// selKey identifies one "selection" (a piece of a larger value) so that
// identical selections can be de-duplicated via expandState.common.
type selKey struct {
	from   *Value      // the value the piece is selected from
	offset int64       // byte offset of the piece within from
	size   int64       // size in bytes of the piece
	typ    *types.Type // type of the piece
}
// offsetKey identifies a pointer-plus-offset value (an OpOffPtr of type pt)
// for memoization in expandState.offsets (see offsetFrom).
type offsetKey struct {
	from   *Value      // base pointer
	offset int64       // byte offset added to from
	pt     *types.Type // pointer type of the result
}
2021-02-01 13:26:47 -05:00
// Abi1RO is an offset within a parameter's slice of register indices, for abi1.
type Abi1RO uint8
2020-12-29 22:44:30 -05:00
// isBlockMultiValueExit reports whether b is a return (or return-jump) block
// whose control value is an OpMakeResult, i.e. a function exit carrying
// multiple results that this pass must expand.
func isBlockMultiValueExit(b *Block) bool {
	if b.Kind != BlockRet && b.Kind != BlockRetJmp {
		return false
	}
	return len(b.Controls) > 0 && b.Controls[0].Op == OpMakeResult
}
2021-01-26 19:33:34 -05:00
// removeTrivialWrapperTypes unwraps layers of
// struct { singleField SomeType } and [1]SomeType
// until a non-wrapper type is reached. This is useful
// for working with assignments to/from interface data
// fields (either second operand to OpIMake or OpIData)
// where the wrapping or type conversion can be elided
// because of type conversions/assertions in source code
// that do not appear in SSA.
func removeTrivialWrapperTypes ( t * types . Type ) * types . Type {
for {
if t . IsStruct ( ) && t . NumFields ( ) == 1 {
t = t . Field ( 0 ) . Type
continue
}
if t . IsArray ( ) && t . NumElem ( ) == 1 {
t = t . Elem ( )
continue
}
break
2020-10-13 19:24:04 -04:00
}
2021-01-26 19:33:34 -05:00
return t
}
2020-10-13 19:24:04 -04:00
2021-02-01 13:26:47 -05:00
// A registerCursor tracks which register is used for an Arg or regValues, or a piece of such.
type registerCursor struct {
	// TODO(register args) convert this to a generalized target cursor.
	regsLen   int            // the number of registers available for this Arg/result (which is all in registers or not at all)
	nextSlice Abi1RO         // the next register/register-slice offset
	config    *abi.ABIConfig // ABI configuration; used to count registers per type when advancing (see next)
	regValues *[]*Value      // values assigned to registers accumulate here
}
// next effectively post-increments the register cursor; the receiver is advanced,
// the old value is returned.
func (c *registerCursor) next(t *types.Type) registerCursor {
	old := *c
	if int(c.nextSlice) < c.regsLen {
		// Advance by the number of registers t occupies.
		c.nextSlice += Abi1RO(c.config.NumParamRegs(t))
	}
	return old
}
// plus returns a register cursor offset from the original, without modifying the original.
func (c *registerCursor) plus(regWidth Abi1RO) registerCursor {
	advanced := *c
	advanced.nextSlice += regWidth
	return advanced
}
const (
	// Register offsets for fields of built-in aggregate types; the ones not listed are zero.
	RO_complex_imag = 1 // imaginary part follows the real part
	RO_string_len   = 1 // length follows the data pointer
	RO_slice_len    = 1 // length follows the data pointer
	RO_slice_cap    = 2 // capacity follows pointer and length
	RO_iface_data   = 1 // data word follows the itab/type word
)
// regWidth returns the number of registers that t occupies under abi1,
// expressed as an Abi1RO register offset.
func (x *expandState) regWidth(t *types.Type) Abi1RO {
	n := x.abi1.NumParamRegs(t)
	return Abi1RO(n)
}
// regOffset returns the register offset of the i'th element of type t
func (x *expandState) regOffset(t *types.Type, i int) Abi1RO {
	// TODO maybe cache this in a map if profiling recommends.
	switch {
	case i == 0:
		return 0
	case t.IsArray():
		// All array elements have the same register width.
		return Abi1RO(i) * x.regWidth(t.Elem())
	case t.IsStruct():
		// Sum the register widths of the fields preceding field i.
		var off Abi1RO
		for j := 0; j < i; j++ {
			off += x.regWidth(t.FieldType(j))
		}
		return off
	}
	panic("Haven't implemented this case yet, do I need to?")
}
// at returns the register cursor for component i of t, where the first
// component is numbered 0.
func (c *registerCursor) at(t *types.Type, i int) registerCursor {
	rc := *c
	if i == 0 || c.regsLen == 0 {
		return rc
	}
	switch {
	case t.IsArray():
		// Skip i elements' worth of registers.
		rc.nextSlice += Abi1RO(i * c.config.NumParamRegs(t.Elem()))
	case t.IsStruct():
		// Advance the copy past the first i fields.
		for j := 0; j < i; j++ {
			rc.next(t.FieldType(j))
		}
	default:
		panic("Haven't implemented this case yet, do I need to?")
	}
	return rc
}
// init initializes a register cursor for the registers in regs; values
// assigned to registers will accumulate into *result.  A cursor with no
// registers leaves config and regValues unset.
func (c *registerCursor) init(regs []abi.RegIndex, info *abi.ABIParamResultInfo, result *[]*Value) {
	c.regsLen = len(regs)
	c.nextSlice = 0
	if len(regs) > 0 {
		c.config = info.Config()
		c.regValues = result
	}
}
// addArg records v as one of the values assigned to registers.
func (c *registerCursor) addArg(v *Value) {
	vals := *c.regValues
	*c.regValues = append(vals, v)
}
// hasRegs reports whether this cursor has any registers assigned to it.
func (c *registerCursor) hasRegs() bool {
	return c.regsLen != 0
}
2021-01-26 19:33:34 -05:00
// expandState carries the per-function state of the expand_calls pass.
type expandState struct {
	f            *Func                   // function being rewritten
	abi1         *abi.ABIConfig          // ABI configuration used to measure types in registers (see regWidth)
	debug        bool                    // when true, print debugging traces
	canSSAType   func(*types.Type) bool  // front-end predicate: can values of this type be SSA'd?
	regSize      int64                   // size in bytes of an integer register
	sp           *Value                  // stack pointer, base for offsets into the args area (see offsetFrom)
	typs         *Types                  // cache of commonly used types
	ptrSize      int64                   // size in bytes of a pointer
	hiOffset     int64                   // byte offset of the high half of a split 64-bit integer
	lowOffset    int64                   // byte offset of the low half of a split 64-bit integer
	hiRo         Abi1RO                  // register offset of the high half of a split 64-bit integer
	loRo         Abi1RO                  // register offset of the low half of a split 64-bit integer
	namedSelects map[*Value][]namedVal   // selector values that carry names, for maintaining f.Names
	sdom         SparseTree              // dominance relation for f (use not visible in this chunk — see rest of pass)
	common       map[selKey]*Value       // memoized selections (see storeOneArg)
	offsets      map[offsetKey]*Value    // memoized OpOffPtr values (see offsetFrom)
}
2020-07-27 16:46:35 -04:00
2021-01-26 19:33:34 -05:00
// intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target
// that has no 64-bit integer registers.
func ( x * expandState ) intPairTypes ( et types . Kind ) ( tHi , tLo * types . Type ) {
tHi = x . typs . UInt32
if et == types . TINT64 {
tHi = x . typs . Int32
2020-07-27 16:46:35 -04:00
}
2021-01-26 19:33:34 -05:00
tLo = x . typs . UInt32
return
}
2020-08-07 22:46:43 -04:00
2021-01-26 19:33:34 -05:00
// isAlreadyExpandedAggregateType returns whether a type is an SSA-able "aggregate" (multiple register) type
// that was expanded in an earlier phase (currently, expand_calls is intended to run after decomposeBuiltin,
// so this is all aggregate types -- small struct and array, complex, interface, string, slice, and 64-bit
// integer on 32-bit).
func ( x * expandState ) isAlreadyExpandedAggregateType ( t * types . Type ) bool {
if ! x . canSSAType ( t ) {
return false
2020-07-27 16:46:35 -04:00
}
2021-01-26 19:33:34 -05:00
return t . IsStruct ( ) || t . IsArray ( ) || t . IsComplex ( ) || t . IsInterface ( ) || t . IsString ( ) || t . IsSlice ( ) ||
t . Size ( ) > x . regSize && t . IsInteger ( )
}
2020-07-27 16:46:35 -04:00
2021-01-26 19:33:34 -05:00
// offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP
// TODO should also optimize offsets from SB?
func ( x * expandState ) offsetFrom ( from * Value , offset int64 , pt * types . Type ) * Value {
if offset == 0 && from . Type == pt { // this is not actually likely
return from
2020-08-17 16:57:22 -04:00
}
2021-01-26 19:33:34 -05:00
// Simplify, canonicalize
for from . Op == OpOffPtr {
offset += from . AuxInt
from = from . Args [ 0 ]
}
if from == x . sp {
return x . f . ConstOffPtrSP ( pt , offset , x . sp )
}
key := offsetKey { from , offset , pt }
v := x . offsets [ key ]
if v != nil {
2020-08-17 16:57:22 -04:00
return v
}
2021-01-26 19:33:34 -05:00
v = from . Block . NewValue1I ( from . Pos . WithNotStmt ( ) , OpOffPtr , pt , offset , from )
x . offsets [ key ] = v
return v
}
2020-08-17 16:57:22 -04:00
2021-01-26 19:33:34 -05:00
// splitSlots splits one "field" (specified by sfx, offset, and ty) out of the LocalSlots in ls and returns the new LocalSlots this generates.
func ( x * expandState ) splitSlots ( ls [ ] LocalSlot , sfx string , offset int64 , ty * types . Type ) [ ] LocalSlot {
var locs [ ] LocalSlot
for i := range ls {
locs = append ( locs , x . f . fe . SplitSlot ( & ls [ i ] , sfx , offset , ty ) )
2020-07-27 16:46:35 -04:00
}
2021-01-26 19:33:34 -05:00
return locs
}
2020-07-27 16:46:35 -04:00
2021-02-01 13:26:47 -05:00
// prAssignForArg returns the ABIParamAssignment for v, assumed to be an OpArg.
func (x *expandState) prAssignForArg(v *Value) abi.ABIParamAssignment {
	name := v.Aux.(*ir.Name)
	params := x.f.OwnAux.abiInfo.InParams()
	for _, p := range params {
		if p.Name == name {
			return p
		}
	}
	panic(fmt.Errorf("Did not match param %v in prInfo %+v", name, params))
}
2021-01-26 19:33:34 -05:00
// Calls that need lowering have some number of inputs, including a memory input,
// and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
// With the current ABI those inputs need to be converted into stores to memory,
// rethreading the call's memory input to the first, and the new call now receiving the last.
// With the current ABI, the outputs need to be converted to loads, which will all use the call's
// memory output as their input.

// rewriteSelect recursively walks from leaf selector to a root (OpSelectN, OpLoad, OpArg)
// through a chain of Struct/Array/builtin Select operations. If the chain of selectors does not
// end in an expected root, it does nothing (this can happen depending on compiler phase ordering).
// The "leaf" provides the type, the root supplies the container, and the leaf-to-root path
// accumulates the offset.
// It emits the code necessary to implement the leaf select operation that leads to the root.
//
// TODO when registers really arrive, must also decompose anything split across two registers or registers and memory.
func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64, regOffset Abi1RO) []LocalSlot {
	if x.debug {
		fmt.Printf("rewriteSelect(%s, %s, %d)\n", leaf.LongString(), selector.LongString(), offset)
	}
	var locs []LocalSlot
	leafType := leaf.Type
	if len(selector.Args) > 0 {
		// Strip any chain of OpCopy from the selector's first argument and
		// canonicalize the argument to the copied-from value.
		w := selector.Args[0]
		if w.Op == OpCopy {
			for w.Op == OpCopy {
				w = w.Args[0]
			}
			selector.SetArg(0, w)
		}
	}
	switch selector.Op {
	case OpArg:
		paramAssignment := x.prAssignForArg(selector)
		_ = paramAssignment
		// TODO(register args)
		if !x.isAlreadyExpandedAggregateType(selector.Type) {
			if leafType == selector.Type { // OpIData leads us here, sometimes.
				leaf.copyOf(selector)
			} else {
				x.f.Fatalf("Unexpected OpArg type, selector=%s, leaf=%s\n", selector.LongString(), leaf.LongString())
			}
			if x.debug {
				fmt.Printf("\tOpArg, break\n")
			}
			break
		}
		switch leaf.Op {
		case OpIData, OpStructSelect, OpArraySelect:
			leafType = removeTrivialWrapperTypes(leaf.Type)
		}
		// Rewrite the leaf as an OpArg of the selected piece: aux carries the
		// name, auxInt the accumulated byte offset within the argument.
		aux := selector.Aux
		auxInt := selector.AuxInt + offset
		if leaf.Block == selector.Block {
			// Same block: mutate the leaf in place.
			leaf.reset(OpArg)
			leaf.Aux = aux
			leaf.AuxInt = auxInt
			leaf.Type = leafType
		} else {
			// Different block: make the new Arg in the selector's block and
			// make the leaf a copy of it.
			w := selector.Block.NewValue0IA(leaf.Pos, OpArg, leafType, auxInt, aux)
			leaf.copyOf(w)
			if x.debug {
				fmt.Printf("\tnew %s\n", w.LongString())
			}
		}
		for _, s := range x.namedSelects[selector] {
			locs = append(locs, x.f.Names[s.locIndex])
		}

	case OpLoad: // We end up here because of IData of immediate structures.
		// Failure case:
		// (note the failure case is very rare; w/o this case, make.bash and run.bash both pass, as well as
		// the hard cases of building {syscall,math,math/cmplx,math/bits,go/constant} on ppc64le and mips-softfloat).
		//
		// GOSSAFUNC='(*dumper).dump' go build -gcflags=-l -tags=math_big_pure_go cmd/compile/internal/gc
		// cmd/compile/internal/gc/dump.go:136:14: internal compiler error: '(*dumper).dump': not lowered: v827, StructSelect PTR PTR
		// b2: ← b1
		// v20 (+142) = StaticLECall <interface {},mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v8 v1
		// v21 (142) = SelectN <mem> [1] v20
		// v22 (142) = SelectN <interface {}> [0] v20
		// b15: ← b8
		// v71 (+143) = IData <Nodes> v22 (v[Nodes])
		// v73 (+146) = StaticLECall <[]*Node,mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v71 v21
		//
		// translates (w/o the "case OpLoad:" above) to:
		//
		// b2: ← b1
		// v20 (+142) = StaticCall <mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v715
		// v23 (142) = Load <*uintptr> v19 v20
		// v823 (142) = IsNonNil <bool> v23
		// v67 (+143) = Load <*[]*Node> v880 v20
		// b15: ← b8
		// v827 (146) = StructSelect <*[]*Node> [0] v67
		// v846 (146) = Store <mem> {*[]*Node} v769 v827 v20
		// v73 (+146) = StaticCall <mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v846
		// i.e., the struct select is generated and remains in because it is not applied to an actual structure.
		// The OpLoad was created to load the single field of the IData
		// This case removes that StructSelect.
		if leafType != selector.Type {
			x.f.Fatalf("Unexpected Load as selector, leaf=%s, selector=%s\n", leaf.LongString(), selector.LongString())
		}
		leaf.copyOf(selector)
		for _, s := range x.namedSelects[selector] {
			locs = append(locs, x.f.Names[s.locIndex])
		}

	case OpSelectN:
		// TODO(register args) result case
		// if applied to Op-mumble-call, the Aux tells us which result, regOffset specifies offset within result. If a register, should rewrite to OpSelectN for new call.
		// TODO these may be duplicated. Should memoize. Intermediate selectors will go dead, no worries there.
		call := selector.Args[0]
		aux := call.Aux.(*AuxCall)
		which := selector.AuxInt
		if which == aux.NResults() { // mem is after the results.
			// rewrite v as a Copy of call -- the replacement call will produce a mem.
			leaf.copyOf(call)
		} else {
			leafType := removeTrivialWrapperTypes(leaf.Type)
			if x.canSSAType(leafType) {
				// Replace the selection with a load from the result's slot in
				// the args area (offset within the result accumulated so far
				// plus the result's own offset).
				pt := types.NewPtr(leafType)
				off := x.offsetFrom(x.sp, offset+aux.OffsetOfResult(which), pt)
				// Any selection right out of the arg area/registers has to be same Block as call, use call as mem input.
				if leaf.Block == call.Block {
					leaf.reset(OpLoad)
					leaf.SetArgs2(off, call)
					leaf.Type = leafType
				} else {
					w := call.Block.NewValue2(leaf.Pos, OpLoad, leafType, off, call)
					leaf.copyOf(w)
					if x.debug {
						fmt.Printf("\tnew %s\n", w.LongString())
					}
				}
				for _, s := range x.namedSelects[selector] {
					locs = append(locs, x.f.Names[s.locIndex])
				}
			} else {
				x.f.Fatalf("Should not have non-SSA-able OpSelectN, selector=%s", selector.LongString())
			}
		}

	case OpStructSelect:
		w := selector.Args[0]
		var ls []LocalSlot
		if w.Type.Kind() != types.TSTRUCT { // IData artifact
			ls = x.rewriteSelect(leaf, w, offset, regOffset)
		} else {
			// Recurse into the struct, adding the field's byte and register offsets.
			fldi := int(selector.AuxInt)
			ls = x.rewriteSelect(leaf, w, offset+w.Type.FieldOff(fldi), regOffset+x.regOffset(w.Type, fldi))
			if w.Op != OpIData {
				for _, l := range ls {
					locs = append(locs, x.f.fe.SplitStruct(l, int(selector.AuxInt)))
				}
			}
		}

	case OpArraySelect:
		w := selector.Args[0]
		index := selector.AuxInt
		x.rewriteSelect(leaf, w, offset+selector.Type.Size()*index, regOffset+x.regOffset(w.Type, int(index)))

	case OpInt64Hi:
		w := selector.Args[0]
		ls := x.rewriteSelect(leaf, w, offset+x.hiOffset, regOffset+x.hiRo)
		locs = x.splitSlots(ls, ".hi", x.hiOffset, leafType)

	case OpInt64Lo:
		w := selector.Args[0]
		ls := x.rewriteSelect(leaf, w, offset+x.lowOffset, regOffset+x.loRo)
		locs = x.splitSlots(ls, ".lo", x.lowOffset, leafType)

	case OpStringPtr:
		ls := x.rewriteSelect(leaf, selector.Args[0], offset, regOffset)
		locs = x.splitSlots(ls, ".ptr", 0, x.typs.BytePtr)

	case OpSlicePtr:
		w := selector.Args[0]
		ls := x.rewriteSelect(leaf, w, offset, regOffset)
		locs = x.splitSlots(ls, ".ptr", 0, types.NewPtr(w.Type.Elem()))

	case OpITab:
		w := selector.Args[0]
		ls := x.rewriteSelect(leaf, w, offset, regOffset)
		// Empty interfaces store a type word, non-empty ones an itab.
		sfx := ".itab"
		if w.Type.IsEmptyInterface() {
			sfx = ".type"
		}
		locs = x.splitSlots(ls, sfx, 0, x.typs.Uintptr)

	case OpComplexReal:
		ls := x.rewriteSelect(leaf, selector.Args[0], offset, regOffset)
		locs = x.splitSlots(ls, ".real", 0, leafType)

	case OpComplexImag:
		ls := x.rewriteSelect(leaf, selector.Args[0], offset+leafType.Width, regOffset+RO_complex_imag) // result is FloatNN, width of result is offset of imaginary part.
		locs = x.splitSlots(ls, ".imag", leafType.Width, leafType)

	case OpStringLen, OpSliceLen:
		ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize, regOffset+RO_slice_len)
		locs = x.splitSlots(ls, ".len", x.ptrSize, leafType)

	case OpIData:
		ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize, regOffset+RO_iface_data)
		locs = x.splitSlots(ls, ".data", x.ptrSize, leafType)

	case OpSliceCap:
		ls := x.rewriteSelect(leaf, selector.Args[0], offset+2*x.ptrSize, regOffset+RO_slice_cap)
		locs = x.splitSlots(ls, ".cap", 2*x.ptrSize, leafType)

	case OpCopy: // If it's an intermediate result, recurse
		locs = x.rewriteSelect(leaf, selector.Args[0], offset, regOffset)
		for _, s := range x.namedSelects[selector] {
			// this copy may have had its own name, preserve that, too.
			locs = append(locs, x.f.Names[s.locIndex])
		}

	default:
		// Ignore dead ends. These can occur if this phase is run before decompose builtin (which is not intended, but allowed).
	}

	return locs
}
2020-10-13 19:24:04 -04:00
2021-01-26 19:33:34 -05:00
// rewriteDereference turns the OpDereference a into an OpMove of size bytes
// (of type typ) from a's source pointer to base+offset, threading mem through.
// When a is used only here and lives in block b it is rewritten in place;
// otherwise a fresh Move is created in b.  Returns the new memory value.
func (x *expandState) rewriteDereference(b *Block, base, a, mem *Value, offset, size int64, typ *types.Type, pos src.XPos) *Value {
	from := a.Args[0]
	dst := x.offsetFrom(base, offset, from.Type)
	if a.Uses == 1 && a.Block == b {
		// Safe to recycle the dereference itself as the Move.
		a.reset(OpMove)
		a.Pos = pos
		a.Type = types.TypeMem
		a.Aux = typ
		a.AuxInt = size
		a.SetArgs3(dst, from, mem)
		return a
	}
	move := b.NewValue3A(pos, OpMove, types.TypeMem, typ, dst, from, mem)
	move.AuxInt = size
	return move
}
// decomposeArgOrLoad is a helper for storeArgOrLoad.
// It decomposes a Load or an Arg into smaller parts, parameterized by the decomposeOne and decomposeTwo functions
// passed to it, and returns the new mem. If the type does not match one of the expected aggregate types, it returns nil instead.
func (x *expandState) decomposeArgOrLoad(pos src.XPos, b *Block, base, source, mem *Value, t *types.Type, offset int64, loadRegOffset Abi1RO, storeRc registerCursor,
	decomposeOne func(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1 *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value,
	decomposeTwo func(x *expandState, pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value) *Value {
	u := source.Type
	switch u.Kind() {
	case types.TARRAY:
		// One decomposition per element, advancing the element offset and
		// register offset each iteration.
		elem := u.Elem()
		elemRO := x.regWidth(elem)
		for i := int64(0); i < u.NumElem(); i++ {
			elemOff := i * elem.Size()
			mem = decomposeOne(x, pos, b, base, source, mem, elem, source.AuxInt+elemOff, offset+elemOff, loadRegOffset, storeRc.next(elem))
			loadRegOffset += elemRO
			pos = pos.WithNotStmt() // only the first piece carries the statement mark
		}
		return mem
	case types.TSTRUCT:
		// One decomposition per field, at each field's own offset.
		for i := 0; i < u.NumFields(); i++ {
			fld := u.Field(i)
			mem = decomposeOne(x, pos, b, base, source, mem, fld.Type, source.AuxInt+fld.Offset, offset+fld.Offset, loadRegOffset, storeRc.next(fld.Type))
			loadRegOffset += x.regWidth(fld.Type)
			pos = pos.WithNotStmt()
		}
		return mem
	case types.TINT64, types.TUINT64:
		// Only split a 64-bit integer when it does not fit in one register.
		if t.Width == x.regSize {
			break
		}
		tHi, tLo := x.intPairTypes(t.Kind())
		mem = decomposeOne(x, pos, b, base, source, mem, tHi, source.AuxInt+x.hiOffset, offset+x.hiOffset, loadRegOffset+x.hiRo, storeRc.plus(x.hiRo))
		pos = pos.WithNotStmt()
		return decomposeOne(x, pos, b, base, source, mem, tLo, source.AuxInt+x.lowOffset, offset+x.lowOffset, loadRegOffset+x.loRo, storeRc.plus(x.loRo))
	case types.TINTER:
		// Interface: itab/type word, then data pointer.
		return decomposeTwo(x, pos, b, base, source, mem, x.typs.Uintptr, x.typs.BytePtr, source.AuxInt, offset, loadRegOffset, storeRc)
	case types.TSTRING:
		// String: data pointer, then length.
		return decomposeTwo(x, pos, b, base, source, mem, x.typs.BytePtr, x.typs.Int, source.AuxInt, offset, loadRegOffset, storeRc)
	case types.TCOMPLEX64:
		return decomposeTwo(x, pos, b, base, source, mem, x.typs.Float32, x.typs.Float32, source.AuxInt, offset, loadRegOffset, storeRc)
	case types.TCOMPLEX128:
		return decomposeTwo(x, pos, b, base, source, mem, x.typs.Float64, x.typs.Float64, source.AuxInt, offset, loadRegOffset, storeRc)
	case types.TSLICE:
		// Slice: data pointer, then length and capacity as a pair.
		mem = decomposeOne(x, pos, b, base, source, mem, x.typs.BytePtr, source.AuxInt, offset, loadRegOffset, storeRc.next(x.typs.BytePtr))
		return decomposeTwo(x, pos, b, base, source, mem, x.typs.Int, x.typs.Int, source.AuxInt+x.ptrSize, offset+x.ptrSize, loadRegOffset+RO_slice_len, storeRc)
	}
	// Not an expected aggregate type; caller falls through to other handling.
	return nil
}
2020-10-13 19:24:04 -04:00
2021-01-26 19:33:34 -05:00
// storeOneArg creates a decomposed (one step) arg that is then stored.
// pos and b locate the store instruction, base is the base of the store target, source is the "base" of the value input,
// mem is the input mem, t is the type in question, and offArg and offStore are the offsets from the respective bases.
2021-02-01 13:26:47 -05:00
func storeOneArg ( x * expandState , pos src . XPos , b * Block , base , source , mem * Value , t * types . Type , offArg , offStore int64 , loadRegOffset Abi1RO , storeRc registerCursor ) * Value {
paramAssignment := x . prAssignForArg ( source )
_ = paramAssignment
// TODO(register args)
2021-01-26 19:33:34 -05:00
w := x . common [ selKey { source , offArg , t . Width , t } ]
if w == nil {
w = source . Block . NewValue0IA ( source . Pos , OpArg , t , offArg , source . Aux )
x . common [ selKey { source , offArg , t . Width , t } ] = w
2020-10-13 19:24:04 -04:00
}
2021-02-01 13:26:47 -05:00
return x . storeArgOrLoad ( pos , b , base , w , mem , t , offStore , loadRegOffset , storeRc )
2021-01-26 19:33:34 -05:00
}
2020-10-13 19:24:04 -04:00
2021-01-26 19:33:34 -05:00
// storeOneLoad creates a decomposed (one step) load that is then stored.
2021-02-01 13:26:47 -05:00
func storeOneLoad ( x * expandState , pos src . XPos , b * Block , base , source , mem * Value , t * types . Type , offArg , offStore int64 , loadRegOffset Abi1RO , storeRc registerCursor ) * Value {
2021-01-26 19:33:34 -05:00
from := x . offsetFrom ( source . Args [ 0 ] , offArg , types . NewPtr ( t ) )
w := source . Block . NewValue2 ( source . Pos , OpLoad , t , from , mem )
2021-02-01 13:26:47 -05:00
return x . storeArgOrLoad ( pos , b , base , w , mem , t , offStore , loadRegOffset , storeRc )
2021-01-26 19:33:34 -05:00
}
2020-08-17 16:57:22 -04:00
2021-02-01 13:26:47 -05:00
func storeTwoArg ( x * expandState , pos src . XPos , b * Block , base , source , mem * Value , t1 , t2 * types . Type , offArg , offStore int64 , loadRegOffset Abi1RO , storeRc registerCursor ) * Value {
mem = storeOneArg ( x , pos , b , base , source , mem , t1 , offArg , offStore , loadRegOffset , storeRc . next ( t1 ) )
2021-01-26 19:33:34 -05:00
pos = pos . WithNotStmt ( )
t1Size := t1 . Size ( )
2021-02-01 13:26:47 -05:00
return storeOneArg ( x , pos , b , base , source , mem , t2 , offArg + t1Size , offStore + t1Size , loadRegOffset + 1 , storeRc )
2021-01-26 19:33:34 -05:00
}
2020-10-13 19:24:04 -04:00
2021-02-01 13:26:47 -05:00
// storeTwoLoad creates a pair of decomposed (one step) loads that are then stored.
// the elements of the pair must not require any additional alignment.
func storeTwoLoad ( x * expandState , pos src . XPos , b * Block , base , source , mem * Value , t1 , t2 * types . Type , offArg , offStore int64 , loadRegOffset Abi1RO , storeRc registerCursor ) * Value {
mem = storeOneLoad ( x , pos , b , base , source , mem , t1 , offArg , offStore , loadRegOffset , storeRc . next ( t1 ) )
2021-01-26 19:33:34 -05:00
pos = pos . WithNotStmt ( )
t1Size := t1 . Size ( )
2021-02-01 13:26:47 -05:00
return storeOneLoad ( x , pos , b , base , source , mem , t2 , offArg + t1Size , offStore + t1Size , loadRegOffset + 1 , storeRc )
2021-01-26 19:33:34 -05:00
}
2020-10-13 19:24:04 -04:00
2021-02-01 13:26:47 -05:00
// storeArgOrLoad converts stores of SSA-able potentially aggregatable arguments (passed to a call) into a series of primitive-typed
2021-01-26 19:33:34 -05:00
// stores of non-aggregate types. It recursively walks up a chain of selectors until it reaches a Load or an Arg.
// If it does not reach a Load or an Arg, nothing happens; this allows a little freedom in phase ordering.
2021-02-01 13:26:47 -05:00
func ( x * expandState ) storeArgOrLoad ( pos src . XPos , b * Block , base , source , mem * Value , t * types . Type , offset int64 , loadRegOffset Abi1RO , storeRc registerCursor ) * Value {
2021-01-26 19:33:34 -05:00
if x . debug {
fmt . Printf ( "\tstoreArgOrLoad(%s; %s; %s; %s; %d)\n" , base . LongString ( ) , source . LongString ( ) , mem . String ( ) , t . String ( ) , offset )
}
2020-10-13 19:24:04 -04:00
2021-01-26 19:33:34 -05:00
switch source . Op {
case OpCopy :
2021-02-01 13:26:47 -05:00
return x . storeArgOrLoad ( pos , b , base , source . Args [ 0 ] , mem , t , offset , loadRegOffset , storeRc )
2020-08-17 16:57:22 -04:00
2021-01-26 19:33:34 -05:00
case OpLoad :
2021-02-01 13:26:47 -05:00
ret := x . decomposeArgOrLoad ( pos , b , base , source , mem , t , offset , loadRegOffset , storeRc , storeOneLoad , storeTwoLoad )
2021-01-26 19:33:34 -05:00
if ret != nil {
return ret
}
2020-08-17 16:57:22 -04:00
2021-01-26 19:33:34 -05:00
case OpArg :
2021-02-01 13:26:47 -05:00
ret := x . decomposeArgOrLoad ( pos , b , base , source , mem , t , offset , loadRegOffset , storeRc , storeOneArg , storeTwoArg )
2021-01-26 19:33:34 -05:00
if ret != nil {
return ret
}
2020-07-27 16:46:35 -04:00
2021-01-26 19:33:34 -05:00
case OpArrayMake0 , OpStructMake0 :
2021-02-01 13:26:47 -05:00
// TODO(register args) is this correct for registers?
2021-01-26 19:33:34 -05:00
return mem
2020-08-17 16:57:22 -04:00
2021-01-26 19:33:34 -05:00
case OpStructMake1 , OpStructMake2 , OpStructMake3 , OpStructMake4 :
for i := 0 ; i < t . NumFields ( ) ; i ++ {
fld := t . Field ( i )
2021-02-01 13:26:47 -05:00
mem = x . storeArgOrLoad ( pos , b , base , source . Args [ i ] , mem , fld . Type , offset + fld . Offset , 0 , storeRc . next ( fld . Type ) )
2020-10-13 19:24:04 -04:00
pos = pos . WithNotStmt ( )
2021-01-26 19:33:34 -05:00
}
return mem
2020-08-17 16:57:22 -04:00
2021-01-26 19:33:34 -05:00
case OpArrayMake1 :
2021-02-01 13:26:47 -05:00
return x . storeArgOrLoad ( pos , b , base , source . Args [ 0 ] , mem , t . Elem ( ) , offset , 0 , storeRc . at ( t , 0 ) )
2020-08-17 16:57:22 -04:00
2021-01-26 19:33:34 -05:00
case OpInt64Make :
tHi , tLo := x . intPairTypes ( t . Kind ( ) )
2021-02-01 13:26:47 -05:00
mem = x . storeArgOrLoad ( pos , b , base , source . Args [ 0 ] , mem , tHi , offset + x . hiOffset , 0 , storeRc . next ( tHi ) )
2021-01-26 19:33:34 -05:00
pos = pos . WithNotStmt ( )
2021-02-01 13:26:47 -05:00
return x . storeArgOrLoad ( pos , b , base , source . Args [ 1 ] , mem , tLo , offset + x . lowOffset , 0 , storeRc )
2020-08-17 16:57:22 -04:00
2021-01-26 19:33:34 -05:00
case OpComplexMake :
tPart := x . typs . Float32
wPart := t . Width / 2
if wPart == 8 {
tPart = x . typs . Float64
}
2021-02-01 13:26:47 -05:00
mem = x . storeArgOrLoad ( pos , b , base , source . Args [ 0 ] , mem , tPart , offset , 0 , storeRc . next ( tPart ) )
2021-01-26 19:33:34 -05:00
pos = pos . WithNotStmt ( )
2021-02-01 13:26:47 -05:00
return x . storeArgOrLoad ( pos , b , base , source . Args [ 1 ] , mem , tPart , offset + wPart , 0 , storeRc )
2020-08-17 16:57:22 -04:00
2021-01-26 19:33:34 -05:00
case OpIMake :
2021-02-01 13:26:47 -05:00
mem = x . storeArgOrLoad ( pos , b , base , source . Args [ 0 ] , mem , x . typs . Uintptr , offset , 0 , storeRc . next ( x . typs . Uintptr ) )
2021-01-26 19:33:34 -05:00
pos = pos . WithNotStmt ( )
2021-02-01 13:26:47 -05:00
return x . storeArgOrLoad ( pos , b , base , source . Args [ 1 ] , mem , x . typs . BytePtr , offset + x . ptrSize , 0 , storeRc )
2020-08-17 16:57:22 -04:00
2021-01-26 19:33:34 -05:00
case OpStringMake :
2021-02-01 13:26:47 -05:00
mem = x . storeArgOrLoad ( pos , b , base , source . Args [ 0 ] , mem , x . typs . BytePtr , offset , 0 , storeRc . next ( x . typs . BytePtr ) )
2021-01-26 19:33:34 -05:00
pos = pos . WithNotStmt ( )
2021-02-01 13:26:47 -05:00
return x . storeArgOrLoad ( pos , b , base , source . Args [ 1 ] , mem , x . typs . Int , offset + x . ptrSize , 0 , storeRc )
2020-08-17 16:57:22 -04:00
2021-01-26 19:33:34 -05:00
case OpSliceMake :
2021-02-01 13:26:47 -05:00
mem = x . storeArgOrLoad ( pos , b , base , source . Args [ 0 ] , mem , x . typs . BytePtr , offset , 0 , storeRc . next ( x . typs . BytePtr ) )
2021-01-26 19:33:34 -05:00
pos = pos . WithNotStmt ( )
2021-02-01 13:26:47 -05:00
mem = x . storeArgOrLoad ( pos , b , base , source . Args [ 1 ] , mem , x . typs . Int , offset + x . ptrSize , 0 , storeRc . next ( x . typs . Int ) )
return x . storeArgOrLoad ( pos , b , base , source . Args [ 2 ] , mem , x . typs . Int , offset + 2 * x . ptrSize , 0 , storeRc )
2021-01-26 19:33:34 -05:00
}
2020-08-17 16:57:22 -04:00
2021-01-26 19:33:34 -05:00
// For nodes that cannot be taken apart -- OpSelectN, other structure selectors.
switch t . Kind ( ) {
case types . TARRAY :
elt := t . Elem ( )
if source . Type != t && t . NumElem ( ) == 1 && elt . Width == t . Width && t . Width == x . regSize {
t = removeTrivialWrapperTypes ( t )
// it could be a leaf type, but the "leaf" could be complex64 (for example)
2021-02-01 13:26:47 -05:00
return x . storeArgOrLoad ( pos , b , base , source , mem , t , offset , loadRegOffset , storeRc )
2021-01-26 19:33:34 -05:00
}
2021-02-01 13:26:47 -05:00
eltRO := x . regWidth ( elt )
2021-01-26 19:33:34 -05:00
for i := int64 ( 0 ) ; i < t . NumElem ( ) ; i ++ {
sel := source . Block . NewValue1I ( pos , OpArraySelect , elt , i , source )
2021-02-01 13:26:47 -05:00
mem = x . storeArgOrLoad ( pos , b , base , sel , mem , elt , offset + i * elt . Width , loadRegOffset , storeRc . at ( t , 0 ) )
loadRegOffset += eltRO
2020-10-13 19:24:04 -04:00
pos = pos . WithNotStmt ( )
2021-01-26 19:33:34 -05:00
}
return mem
2020-08-17 16:57:22 -04:00
2021-01-26 19:33:34 -05:00
case types . TSTRUCT :
if source . Type != t && t . NumFields ( ) == 1 && t . Field ( 0 ) . Type . Width == t . Width && t . Width == x . regSize {
// This peculiar test deals with accesses to immediate interface data.
// It works okay because everything is the same size.
// Example code that triggers this can be found in go/constant/value.go, function ToComplex
// v119 (+881) = IData <intVal> v6
// v121 (+882) = StaticLECall <floatVal,mem> {AuxCall{"".itof([intVal,0])[floatVal,8]}} [16] v119 v1
// This corresponds to the generic rewrite rule "(StructSelect [0] (IData x)) => (IData x)"
// Guard against "struct{struct{*foo}}"
// Other rewriting phases create minor glitches when they transform IData, for instance the
// interface-typed Arg "x" of ToFloat in go/constant/value.go
// v6 (858) = Arg <Value> {x} (x[Value], x[Value])
// is rewritten by decomposeArgs into
// v141 (858) = Arg <uintptr> {x}
// v139 (858) = Arg <*uint8> {x} [8]
// because of a type case clause on line 862 of go/constant/value.go
// case intVal:
// return itof(x)
// v139 is later stored as an intVal == struct{val *big.Int} which naively requires the fields of
// of a *uint8, which does not succeed.
t = removeTrivialWrapperTypes ( t )
// it could be a leaf type, but the "leaf" could be complex64 (for example)
2021-02-01 13:26:47 -05:00
return x . storeArgOrLoad ( pos , b , base , source , mem , t , offset , loadRegOffset , storeRc )
2021-01-26 19:33:34 -05:00
}
2020-08-17 16:57:22 -04:00
2021-01-26 19:33:34 -05:00
for i := 0 ; i < t . NumFields ( ) ; i ++ {
fld := t . Field ( i )
sel := source . Block . NewValue1I ( pos , OpStructSelect , fld . Type , int64 ( i ) , source )
2021-02-01 13:26:47 -05:00
mem = x . storeArgOrLoad ( pos , b , base , sel , mem , fld . Type , offset + fld . Offset , loadRegOffset , storeRc . next ( fld . Type ) )
loadRegOffset += x . regWidth ( fld . Type )
2020-10-13 19:24:04 -04:00
pos = pos . WithNotStmt ( )
2020-07-09 15:47:26 -04:00
}
2021-01-26 19:33:34 -05:00
return mem
2020-10-13 19:24:04 -04:00
2021-01-26 19:33:34 -05:00
case types . TINT64 , types . TUINT64 :
if t . Width == x . regSize {
break
2020-07-27 16:46:35 -04:00
}
2021-01-26 19:33:34 -05:00
tHi , tLo := x . intPairTypes ( t . Kind ( ) )
sel := source . Block . NewValue1 ( pos , OpInt64Hi , tHi , source )
2021-02-01 13:26:47 -05:00
mem = x . storeArgOrLoad ( pos , b , base , sel , mem , tHi , offset + x . hiOffset , loadRegOffset + x . hiRo , storeRc . plus ( x . hiRo ) )
2021-01-26 19:33:34 -05:00
pos = pos . WithNotStmt ( )
sel = source . Block . NewValue1 ( pos , OpInt64Lo , tLo , source )
2021-02-01 13:26:47 -05:00
return x . storeArgOrLoad ( pos , b , base , sel , mem , tLo , offset + x . lowOffset , loadRegOffset + x . loRo , storeRc . plus ( x . hiRo ) )
2021-01-26 19:33:34 -05:00
case types . TINTER :
sel := source . Block . NewValue1 ( pos , OpITab , x . typs . BytePtr , source )
2021-02-01 13:26:47 -05:00
mem = x . storeArgOrLoad ( pos , b , base , sel , mem , x . typs . BytePtr , offset , loadRegOffset , storeRc . next ( x . typs . BytePtr ) )
2021-01-26 19:33:34 -05:00
pos = pos . WithNotStmt ( )
sel = source . Block . NewValue1 ( pos , OpIData , x . typs . BytePtr , source )
2021-02-01 13:26:47 -05:00
return x . storeArgOrLoad ( pos , b , base , sel , mem , x . typs . BytePtr , offset + x . ptrSize , loadRegOffset + RO_iface_data , storeRc )
2021-01-26 19:33:34 -05:00
case types . TSTRING :
sel := source . Block . NewValue1 ( pos , OpStringPtr , x . typs . BytePtr , source )
2021-02-01 13:26:47 -05:00
mem = x . storeArgOrLoad ( pos , b , base , sel , mem , x . typs . BytePtr , offset , loadRegOffset , storeRc . next ( x . typs . BytePtr ) )
2021-01-26 19:33:34 -05:00
pos = pos . WithNotStmt ( )
sel = source . Block . NewValue1 ( pos , OpStringLen , x . typs . Int , source )
2021-02-01 13:26:47 -05:00
return x . storeArgOrLoad ( pos , b , base , sel , mem , x . typs . Int , offset + x . ptrSize , loadRegOffset + RO_string_len , storeRc )
2021-01-26 19:33:34 -05:00
case types . TSLICE :
et := types . NewPtr ( t . Elem ( ) )
sel := source . Block . NewValue1 ( pos , OpSlicePtr , et , source )
2021-02-01 13:26:47 -05:00
mem = x . storeArgOrLoad ( pos , b , base , sel , mem , et , offset , loadRegOffset , storeRc . next ( et ) )
2021-01-26 19:33:34 -05:00
pos = pos . WithNotStmt ( )
sel = source . Block . NewValue1 ( pos , OpSliceLen , x . typs . Int , source )
2021-02-01 13:26:47 -05:00
mem = x . storeArgOrLoad ( pos , b , base , sel , mem , x . typs . Int , offset + x . ptrSize , loadRegOffset + RO_slice_len , storeRc . next ( x . typs . Int ) )
2021-01-26 19:33:34 -05:00
sel = source . Block . NewValue1 ( pos , OpSliceCap , x . typs . Int , source )
2021-02-01 13:26:47 -05:00
return x . storeArgOrLoad ( pos , b , base , sel , mem , x . typs . Int , offset + 2 * x . ptrSize , loadRegOffset + RO_slice_cap , storeRc )
2021-01-26 19:33:34 -05:00
case types . TCOMPLEX64 :
sel := source . Block . NewValue1 ( pos , OpComplexReal , x . typs . Float32 , source )
2021-02-01 13:26:47 -05:00
mem = x . storeArgOrLoad ( pos , b , base , sel , mem , x . typs . Float32 , offset , loadRegOffset , storeRc . next ( x . typs . Float32 ) )
2021-01-26 19:33:34 -05:00
pos = pos . WithNotStmt ( )
sel = source . Block . NewValue1 ( pos , OpComplexImag , x . typs . Float32 , source )
2021-02-01 13:26:47 -05:00
return x . storeArgOrLoad ( pos , b , base , sel , mem , x . typs . Float32 , offset + 4 , loadRegOffset + RO_complex_imag , storeRc )
2021-01-26 19:33:34 -05:00
case types . TCOMPLEX128 :
sel := source . Block . NewValue1 ( pos , OpComplexReal , x . typs . Float64 , source )
2021-02-01 13:26:47 -05:00
mem = x . storeArgOrLoad ( pos , b , base , sel , mem , x . typs . Float64 , offset , loadRegOffset , storeRc . next ( x . typs . Float64 ) )
2021-01-26 19:33:34 -05:00
pos = pos . WithNotStmt ( )
sel = source . Block . NewValue1 ( pos , OpComplexImag , x . typs . Float64 , source )
2021-02-01 13:26:47 -05:00
return x . storeArgOrLoad ( pos , b , base , sel , mem , x . typs . Float64 , offset + 8 , loadRegOffset + RO_complex_imag , storeRc )
2020-07-09 15:47:26 -04:00
}
2021-02-01 13:26:47 -05:00
s := mem
if storeRc . hasRegs ( ) {
// TODO(register args)
storeRc . addArg ( source )
} else {
dst := x . offsetFrom ( base , offset , types . NewPtr ( t ) )
s = b . NewValue3A ( pos , OpStore , types . TypeMem , t , dst , source , mem )
}
2021-01-26 19:33:34 -05:00
if x . debug {
fmt . Printf ( "\t\tstoreArg returns %s\n" , s . LongString ( ) )
2020-12-29 22:44:30 -05:00
}
2021-01-26 19:33:34 -05:00
return s
}
2020-12-29 22:44:30 -05:00
2021-01-26 19:33:34 -05:00
// rewriteArgs removes all the Args from a call and converts the call args into appropriate
// stores (or later, register movement). Extra args for interface and closure calls are ignored,
// but removed.
func ( x * expandState ) rewriteArgs ( v * Value , firstArg int ) * Value {
// Thread the stores on the memory arg
aux := v . Aux . ( * AuxCall )
pos := v . Pos . WithNotStmt ( )
m0 := v . MemoryArg ( )
mem := m0
2021-02-01 13:26:47 -05:00
allResults := [ ] * Value { }
2021-01-26 19:33:34 -05:00
for i , a := range v . Args {
if i < firstArg {
continue
}
if a == m0 { // mem is last.
break
}
auxI := int64 ( i - firstArg )
2021-02-01 13:26:47 -05:00
aRegs := aux . RegsOfArg ( auxI )
aOffset := aux . OffsetOfArg ( auxI )
aType := aux . TypeOfArg ( auxI )
2021-01-26 19:33:34 -05:00
if a . Op == OpDereference {
if a . MemoryArg ( ) != m0 {
x . f . Fatalf ( "Op...LECall and OpDereference have mismatched mem, %s and %s" , v . LongString ( ) , a . LongString ( ) )
2020-08-07 22:46:43 -04:00
}
2021-02-01 13:26:47 -05:00
if len ( aRegs ) > 0 {
x . f . Fatalf ( "Not implemented yet, not-SSA-type %v passed in registers" , aType )
}
2021-01-26 19:33:34 -05:00
// "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
2021-02-01 13:26:47 -05:00
// TODO(register args) this will be more complicated with registers in the picture.
mem = x . rewriteDereference ( v . Block , x . sp , a , mem , aOffset , aux . SizeOfArg ( auxI ) , aType , pos )
2021-01-26 19:33:34 -05:00
} else {
if x . debug {
2021-02-01 13:26:47 -05:00
fmt . Printf ( "storeArg %s, %v, %d\n" , a . LongString ( ) , aType , aOffset )
}
var rc registerCursor
var result * [ ] * Value
if len ( aRegs ) > 0 {
result = & allResults
2020-08-07 22:46:43 -04:00
}
2021-02-01 13:26:47 -05:00
rc . init ( aRegs , aux . abiInfo , result )
mem = x . storeArgOrLoad ( pos , v . Block , x . sp , a , mem , aType , aOffset , 0 , rc )
// TODO append mem to Result, update type
2020-08-07 22:46:43 -04:00
}
2021-01-26 19:33:34 -05:00
}
v . resetArgs ( )
return mem
}
// expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form
// that is more oriented to a platform's ABI. The SelectN operations that extract results are rewritten into
// more appropriate forms, and any StructMake or ArrayMake inputs are decomposed until non-struct values are
// reached. On the callee side, OpArg nodes are not decomposed until this phase is run.
// TODO results should not be lowered until this phase.
func expandCalls(f *Func) {
	// Calls that need lowering have some number of inputs, including a memory input,
	// and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
	// With the current ABI those inputs need to be converted into stores to memory,
	// rethreading the call's memory input to the first, and the new call now receiving the last.
	// With the current ABI, the outputs need to be converted to loads, which will all use the call's
	// memory output as their input.
	sp, _ := f.spSb()
	// x bundles the per-function state shared by all the helper methods of this pass.
	x := &expandState{
		f:            f,
		abi1:         f.ABI1,
		debug:        f.pass.debug > 0,
		canSSAType:   f.fe.CanSSA,
		regSize:      f.Config.RegSize,
		sp:           sp,
		typs:         &f.Config.Types,
		ptrSize:      f.Config.PtrSize,
		namedSelects: make(map[*Value][]namedVal),
		sdom:         f.Sdom(),
		common:       make(map[selKey]*Value),
		offsets:      make(map[offsetKey]*Value),
	}

	// For 32-bit, need to deal with decomposition of 64-bit integers, which depends on endianness.
	if f.Config.BigEndian {
		x.lowOffset, x.hiOffset = 4, 0
		x.loRo, x.hiRo = 1, 0
	} else {
		x.lowOffset, x.hiOffset = 0, 4
		x.loRo, x.hiRo = 0, 1
	}

	if x.debug {
		// NOTE(review): "expandsCalls" looks like a typo for "expandCalls" in this
		// debug string; left as-is since changing output is out of scope for docs.
		fmt.Printf("\nexpandsCalls(%s)\n", f.Name)
	}

	// TODO if too slow, whole program iteration can be replaced w/ slices of appropriate values, accumulated in first loop here.

	// Step 0: rewrite the calls to convert args to calls into stores/register movement.
	// After rewriteArgs, each LE call keeps only its non-value operands (code, context)
	// plus the rethreaded memory.
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			switch v.Op {
			case OpStaticLECall:
				mem := x.rewriteArgs(v, 0)
				v.SetArgs1(mem)
			case OpClosureLECall:
				code := v.Args[0]
				context := v.Args[1]
				mem := x.rewriteArgs(v, 2)
				v.SetArgs3(code, context, mem)
			case OpInterLECall:
				code := v.Args[0]
				mem := x.rewriteArgs(v, 1)
				v.SetArgs2(code, mem)
			}
		}
		if isBlockMultiValueExit(b) {
			// Very similar to code in rewriteArgs, but results instead of args.
			v := b.Controls[0] // the OpMakeResult controlling this Ret/RetJmp block.
			m0 := v.MemoryArg()
			mem := m0
			aux := f.OwnAux
			pos := v.Pos.WithNotStmt()
			allResults := []*Value{} // register-destined results (register-args WIP).
			for j, a := range v.Args {
				i := int64(j)
				if a == m0 { // memory is the last arg of the MakeResult.
					break
				}
				auxType := aux.TypeOfResult(i)
				// Address of the named result slot this value is stored into.
				auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.results[i].Name, x.sp, mem)
				auxOffset := int64(0)
				auxSize := aux.SizeOfResult(i)
				aRegs := aux.RegsOfResult(int64(j))
				if a.Op == OpDereference {
					if len(aRegs) > 0 {
						x.f.Fatalf("Not implemented yet, not-SSA-type %v returned in register", auxType)
					}
					// Avoid a self-move, and if one is detected try to remove the already-inserted VarDef for the assignment that won't happen.
					if dAddr, dMem := a.Args[0], a.Args[1]; dAddr.Op == OpLocalAddr && dAddr.Args[0].Op == OpSP &&
						dAddr.Args[1] == dMem && dAddr.Aux == aux.results[i].Name {
						if dMem.Op == OpVarDef && dMem.Aux == dAddr.Aux {
							dMem.copyOf(dMem.MemoryArg()) // elide the VarDef
						}
						continue
					}
					mem = x.rewriteDereference(v.Block, auxBase, a, mem, auxOffset, auxSize, auxType, pos)
				} else {
					if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr {
						addr := a.Args[0] // This is a self-move. // TODO(register args) do what here for registers?
						if addr.MemoryArg() == a.MemoryArg() && addr.Aux == aux.results[i].Name {
							continue
						}
					}
					var rc registerCursor
					var result *[]*Value
					if len(aRegs) > 0 {
						result = &allResults
					}
					rc.init(aRegs, aux.abiInfo, result)
					// TODO(register args)
					mem = x.storeArgOrLoad(v.Pos, b, auxBase, a, mem, aux.TypeOfResult(i), auxOffset, 0, rc)
					// TODO append mem to Result, update type
				}
			}
			b.SetControl(mem)
			v.reset(OpInvalid) // otherwise it can have a mem operand which will fail check(), even though it is dead.
		}
	}

	// Record, for each SelectN/aggregate Arg, which f.Names entries refer to it,
	// so their debug names can be deleted later if the value is decomposed.
	for i, name := range f.Names {
		t := name.Type
		if x.isAlreadyExpandedAggregateType(t) {
			for j, v := range f.NamedValues[name] {
				if v.Op == OpSelectN || v.Op == OpArg && x.isAlreadyExpandedAggregateType(v.Type) {
					ns := x.namedSelects[v]
					x.namedSelects[v] = append(ns, namedVal{locIndex: i, valIndex: j})
				}
			}
		}
	}

	// Step 1: any stores of aggregates remaining are believed to be sourced from call results or args.
	// Decompose those stores into a series of smaller stores, adding selection ops as necessary.
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			if v.Op == OpStore {
				t := v.Aux.(*types.Type)
				source := v.Args[1]
				tSrc := source.Type
				iAEATt := x.isAlreadyExpandedAggregateType(t)

				if !iAEATt {
					// guarding against store immediate struct into interface data field -- store type is *uint8
					// TODO can this happen recursively?
					iAEATt = x.isAlreadyExpandedAggregateType(tSrc)
					if iAEATt {
						t = tSrc
					}
				}
				if iAEATt {
					if x.debug {
						fmt.Printf("Splitting store %s\n", v.LongString())
					}
					dst, mem := v.Args[0], v.Args[2]
					mem = x.storeArgOrLoad(v.Pos, b, dst, source, mem, t, 0, 0, registerCursor{})
					// The original Store becomes a copy of the last store in the decomposed chain.
					v.copyOf(mem)
				}
			}
		}
	}

	val2Preds := make(map[*Value]int32) // Used to accumulate dependency graph of selection operations for topological ordering.

	// Step 2: transform or accumulate selection operations for rewrite in topological order.
	//
	// Aggregate types that have already (in earlier phases) been transformed must be lowered comprehensively to finish
	// the transformation (user-defined structs and arrays, slices, strings, interfaces, complex, 64-bit on 32-bit architectures),
	//
	// Any select-for-addressing applied to call results can be transformed directly.
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			// Accumulate chains of selectors for processing in topological order
			switch v.Op {
			case OpStructSelect, OpArraySelect,
				OpIData, OpITab,
				OpStringPtr, OpStringLen,
				OpSlicePtr, OpSliceLen, OpSliceCap,
				OpComplexReal, OpComplexImag,
				OpInt64Hi, OpInt64Lo:
				// A selector: count it as a predecessor of what it selects from,
				// then (via fallthrough) make sure the selector itself is in the map.
				w := v.Args[0]
				switch w.Op {
				case OpStructSelect, OpArraySelect, OpSelectN, OpArg:
					val2Preds[w] += 1
					if x.debug {
						fmt.Printf("v2p[%s] = %d\n", w.LongString(), val2Preds[w])
					}
				}
				fallthrough

			case OpSelectN:
				// Seed with zero predecessors unless already counted above.
				if _, ok := val2Preds[v]; !ok {
					val2Preds[v] = 0
					if x.debug {
						fmt.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v])
					}
				}

			case OpArg:
				// Only aggregate-typed Args participate in the selector graph.
				if !x.isAlreadyExpandedAggregateType(v.Type) {
					continue
				}
				if _, ok := val2Preds[v]; !ok {
					val2Preds[v] = 0
					if x.debug {
						fmt.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v])
					}
				}

			case OpSelectNAddr:
				// Do these directly, there are no chains of selectors.
				call := v.Args[0]
				which := v.AuxInt
				aux := call.Aux.(*AuxCall)
				pt := v.Type
				off := x.offsetFrom(x.sp, aux.OffsetOfResult(which), pt)
				v.copyOf(off)
			}
		}
	}

	// Step 3: Compute topological order of selectors,
	// then process it in reverse to eliminate duplicates,
	// then forwards to rewrite selectors.
	//
	// All chains of selectors end up in same block as the call.
	// Compilation must be deterministic, so sort after extracting first zeroes from map.
	// Sorting allows dominators-last order within each batch,
	// so that the backwards scan for duplicates will most often find copies from dominating blocks (it is best-effort).
	var toProcess []*Value
	less := func(i, j int) bool {
		vi, vj := toProcess[i], toProcess[j]
		bi, bj := vi.Block, vj.Block
		if bi == bj {
			return vi.ID < vj.ID
		}
		return x.sdom.domorder(bi) > x.sdom.domorder(bj) // reverse the order to put dominators last.
	}

	// Accumulate order in allOrdered
	var allOrdered []*Value
	for v, n := range val2Preds {
		if n == 0 {
			allOrdered = append(allOrdered, v)
		}
	}
	last := 0 // allOrdered[0:last] has been top-sorted and processed
	// Kahn-style draining: each batch is the values whose predecessor counts
	// have reached zero; decrementing may release values for the next batch.
	for len(val2Preds) > 0 {
		toProcess = allOrdered[last:]
		last = len(allOrdered)
		sort.SliceStable(toProcess, less)
		for _, v := range toProcess {
			delete(val2Preds, v)
			if v.Op == OpArg {
				continue // no Args[0], hence done.
			}
			w := v.Args[0]
			n, ok := val2Preds[w]
			if !ok {
				continue
			}
			if n == 1 {
				allOrdered = append(allOrdered, w)
				delete(val2Preds, w)
				continue
			}
			val2Preds[w] = n - 1
		}
	}

	x.common = make(map[selKey]*Value)
	// Rewrite duplicate selectors as copies where possible.
	// allOrdered is walked in reverse (dominators first, per the sort above).
	for i := len(allOrdered) - 1; i >= 0; i-- {
		v := allOrdered[i]
		if v.Op == OpArg {
			continue
		}
		w := v.Args[0]
		// Skip through any copies introduced by earlier dedup so selKey.from is canonical.
		if w.Op == OpCopy {
			for w.Op == OpCopy {
				w = w.Args[0]
			}
			v.SetArg(0, w)
		}
		typ := v.Type
		if typ.IsMemory() {
			continue // handled elsewhere, not an indexable result
		}
		size := typ.Width
		// Compute the byte offset this selector denotes within its operand, per op kind.
		offset := int64(0)
		switch v.Op {
		case OpStructSelect:
			if w.Type.Kind() == types.TSTRUCT {
				offset = w.Type.FieldOff(int(v.AuxInt))
			} else { // Immediate interface data artifact, offset is zero.
				f.Fatalf("Expand calls interface data problem, func %s, v=%s, w=%s\n", f.Name, v.LongString(), w.LongString())
			}
		case OpArraySelect:
			offset = size * v.AuxInt
		case OpSelectN:
			offset = w.Aux.(*AuxCall).OffsetOfResult(v.AuxInt)
		case OpInt64Hi:
			offset = x.hiOffset
		case OpInt64Lo:
			offset = x.lowOffset
		case OpStringLen, OpSliceLen, OpIData:
			offset = x.ptrSize
		case OpSliceCap:
			offset = 2 * x.ptrSize
		case OpComplexImag:
			offset = size
		}
		sk := selKey{from: w, size: size, offset: offset, typ: typ}
		dupe := x.common[sk]
		if dupe == nil {
			x.common[sk] = v
		} else if x.sdom.IsAncestorEq(dupe.Block, v.Block) {
			// The previously seen equivalent selector dominates this one; reuse it.
			v.copyOf(dupe)
		} else {
			// Because values are processed in dominator order, the old common[s] will never dominate after a miss is seen.
			// Installing the new value might match some future values.
			x.common[sk] = v
		}
	}

	// Indices of entries in f.Names that need to be deleted.
	var toDelete []namedVal
	// Rewrite selectors.
	for i, v := range allOrdered {
		if x.debug {
			b := v.Block
			fmt.Printf("allOrdered[%d] = b%d, %s, uses=%d\n", i, b.ID, v.LongString(), v.Uses)
		}
		if v.Uses == 0 {
			v.reset(OpInvalid)
			continue
		}
		if v.Op == OpCopy { // deduplicated above; nothing left to rewrite.
			continue
		}
		locs := x.rewriteSelect(v, v, 0, 0)
		// Install new names.
		if v.Type.IsMemory() {
			continue
		}
		// Leaf types may have debug locations
		if !x.isAlreadyExpandedAggregateType(v.Type) {
			for _, l := range locs {
				f.NamedValues[l] = append(f.NamedValues[l], v)
			}
			f.Names = append(f.Names, locs...)
			continue
		}
		// Not-leaf types that had debug locations need to lose them.
		if ns, ok := x.namedSelects[v]; ok {
			toDelete = append(toDelete, ns...)
		}
	}

	deleteNamedVals(f, toDelete)

	// Step 4: rewrite the calls themselves, correcting the type
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			switch v.Op {
			case OpStaticLECall:
				v.Op = OpStaticCall
				v.Type = types.TypeMem
			case OpClosureLECall:
				v.Op = OpClosureCall
				v.Type = types.TypeMem
			case OpInterLECall:
				v.Op = OpInterCall
				v.Type = types.TypeMem
			}
		}
	}

	// Step 5: elide any copies introduced.
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			for i, a := range v.Args {
				if a.Op != OpCopy {
					continue
				}
				aa := copySource(a)
				v.SetArg(i, aa)
				// Invalidate now-unused links in the copy chain, one hop at a time.
				for a.Uses == 0 {
					b := a.Args[0]
					a.reset(OpInvalid)
					a = b
				}
			}
		}
	}
}