cmd/compile: change StaticCall to return a "Results"

StaticLECall (multiple value in +mem, multiple value result +mem) ->
StaticCall (multiple register value in +mem, multiple register-sized-value result +mem) ->
ARCH CallStatic (multiple register value in +mem, multiple register-sized-value result +mem)

But the architecture-dependent stuff is indifferent to whether it is
mem->mem or (mem)->(mem) until Prog generation.

Deal with OpSelectN -> Prog in ssagen/ssa.go, others, as they appear.

For #40724.

Change-Id: I1d0436f6371054f1881862641d8e7e418e4a6a16
Reviewed-on: https://go-review.googlesource.com/c/go/+/293391
Trust: David Chase <drchase@google.com>
Run-TryBot: David Chase <drchase@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>

This commit is contained in: parent 9a555fc24c, commit e25040d162
18 changed files with 387 additions and 382 deletions
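The shape of the change can be pictured with a small, self-contained Go sketch (illustrative only; the types and names below are invented for the example and are not compiler code): a late-expanded call now produces an aggregate of register-sized results plus a memory token, and an OpSelectN-style consumer picks an element by index, with the mem addressed as the last element.

    package main

    import "fmt"

    // results models what a call now yields: N register-sized values
    // followed by a memory token. selectN(i) picks element i; the mem
    // is addressed as the last element (index == number of results).
    type results struct {
    	vals []int64 // stand-ins for register-sized results
    	mem  string  // stand-in for the memory state
    }

    func selectN(r results, i int) interface{} {
    	if i == len(r.vals) {
    		return r.mem // mem is after the results
    	}
    	return r.vals[i]
    }

    func main() {
    	r := results{vals: []int64{7, 42}, mem: "mem"}
    	fmt.Println(selectN(r, 0), selectN(r, 1), selectN(r, 2)) // 7 42 mem
    }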
@ -27,6 +27,8 @@ type ABIParamResultInfo struct {
 	outparams         []ABIParamAssignment
 	offsetToSpillArea int64
 	spillAreaSize     int64
+	inRegistersUsed   int
+	outRegistersUsed  int
 	config            *ABIConfig // to enable String() method
 }

@ -42,6 +44,14 @@ func (a *ABIParamResultInfo) OutParams() []ABIParamAssignment {
 	return a.outparams
 }

+func (a *ABIParamResultInfo) InRegistersUsed() int {
+	return a.inRegistersUsed
+}
+
+func (a *ABIParamResultInfo) OutRegistersUsed() int {
+	return a.outRegistersUsed
+}
+
 func (a *ABIParamResultInfo) InParam(i int) ABIParamAssignment {
 	return a.inparams[i]
 }

@ -164,6 +174,55 @@ func (a *ABIConfig) NumParamRegs(t *types.Type) int {
 	return n
 }

+// preAllocateParams gets the slice sizes right for inputs and outputs.
+func (a *ABIParamResultInfo) preAllocateParams(hasRcvr bool, nIns, nOuts int) {
+	if hasRcvr {
+		nIns++
+	}
+	a.inparams = make([]ABIParamAssignment, 0, nIns)
+	a.outparams = make([]ABIParamAssignment, 0, nOuts)
+}
+
+// ABIAnalyzeTypes takes an optional receiver type, arrays of ins and outs, and returns an ABIParamResultInfo,
+// based on the given configuration. This is the same result computed by config.ABIAnalyze applied to the
+// corresponding method/function type, except that all the embedded parameter names are nil.
+// This is intended for use by ssagen/ssa.go:(*state).rtcall, for runtime functions that lack a parsed function type.
+func (config *ABIConfig) ABIAnalyzeTypes(rcvr *types.Type, ins, outs []*types.Type) *ABIParamResultInfo {
+	setup()
+	s := assignState{
+		rTotal: config.regAmounts,
+	}
+	result := &ABIParamResultInfo{config: config}
+	result.preAllocateParams(rcvr != nil, len(ins), len(outs))
+
+	// Receiver
+	if rcvr != nil {
+		result.inparams = append(result.inparams,
+			s.assignParamOrReturn(rcvr, nil, false))
+	}
+
+	// Inputs
+	for _, t := range ins {
+		result.inparams = append(result.inparams,
+			s.assignParamOrReturn(t, nil, false))
+	}
+	s.stackOffset = types.Rnd(s.stackOffset, int64(types.RegSize))
+	result.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+
+	// Outputs
+	s.rUsed = RegAmounts{}
+	for _, t := range outs {
+		result.outparams = append(result.outparams, s.assignParamOrReturn(t, nil, true))
+	}
+	// The spill area is at a register-aligned offset and its size is rounded up to a register alignment.
+	// TODO in theory could align offset only to minimum required by spilled data types.
+	result.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
+	result.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
+	result.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+
+	return result
+}
+
 // ABIAnalyze takes a function type 't' and an ABI rules description
 // 'config' and analyzes the function to determine how its parameters
 // and results will be passed (in registers or on the stack), returning
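The spill-area bookkeeping above rounds both the offset and the size up to register alignment. A stand-alone sketch of that round-up arithmetic, assuming alignTo rounds up to the next multiple (this is not the compiler's alignTo, just an illustration):

    package main

    import "fmt"

    // alignUp rounds n up to the next multiple of align, the operation the
    // spill-area offset and size computation relies on.
    func alignUp(n, align int64) int64 {
    	return (n + align - 1) / align * align
    }

    func main() {
    	const regSize = 8
    	fmt.Println(alignUp(0, regSize), alignUp(5, regSize), alignUp(24, regSize)) // 0 8 24
    }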
@ -174,33 +233,37 @@ func (config *ABIConfig) ABIAnalyze(t *types.Type) *ABIParamResultInfo {
 		rTotal: config.regAmounts,
 	}
 	result := &ABIParamResultInfo{config: config}
+	ft := t.FuncType()
+	result.preAllocateParams(t.NumRecvs() != 0, ft.Params.NumFields(), ft.Results.NumFields())
+
 	// Receiver
-	ft := t.FuncType()
+	// TODO(register args) ? seems like "struct" and "fields" is not right anymore for describing function parameters
 	if t.NumRecvs() != 0 {
-		rfsl := ft.Receiver.FieldSlice()
+		r := ft.Receiver.FieldSlice()[0]
 		result.inparams = append(result.inparams,
-			s.assignParamOrReturn(rfsl[0], false))
+			s.assignParamOrReturn(r.Type, r.Nname, false))
 	}

 	// Inputs
 	ifsl := ft.Params.FieldSlice()
 	for _, f := range ifsl {
 		result.inparams = append(result.inparams,
-			s.assignParamOrReturn(f, false))
+			s.assignParamOrReturn(f.Type, f.Nname, false))
 	}
 	s.stackOffset = types.Rnd(s.stackOffset, int64(types.RegSize))
+	result.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs

 	// Outputs
 	s.rUsed = RegAmounts{}
 	ofsl := ft.Results.FieldSlice()
 	for _, f := range ofsl {
-		result.outparams = append(result.outparams, s.assignParamOrReturn(f, true))
+		result.outparams = append(result.outparams, s.assignParamOrReturn(f.Type, f.Nname, true))
 	}
 	// The spill area is at a register-aligned offset and its size is rounded up to a register alignment.
 	// TODO in theory could align offset only to minimum required by spilled data types.
 	result.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
 	result.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
+	result.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs

 	return result
 }

@ -460,17 +523,15 @@ func (state *assignState) regassign(pt *types.Type) bool {
 // of field f to determine whether it can be register assigned.
 // The result of the analysis is recorded in the result
 // ABIParamResultInfo held in 'state'.
-func (state *assignState) assignParamOrReturn(f *types.Field, isReturn bool) ABIParamAssignment {
-	// TODO(register args) ? seems like "struct" and "fields" is not right anymore for describing function parameters
-	pt := f.Type
+func (state *assignState) assignParamOrReturn(pt *types.Type, n types.Object, isReturn bool) ABIParamAssignment {
 	state.pUsed = RegAmounts{}
 	if pt.Width == types.BADWIDTH {
 		panic("should never happen")
 	} else if pt.Width == 0 {
-		return state.stackAllocate(pt, f.Nname)
+		return state.stackAllocate(pt, n)
 	} else if state.regassign(pt) {
-		return state.regAllocate(pt, f.Nname, isReturn)
+		return state.regAllocate(pt, n, isReturn)
 	} else {
-		return state.stackAllocate(pt, f.Nname)
+		return state.stackAllocate(pt, n)
 	}
 }
@ -299,7 +299,7 @@ func cmpVal(v, w *Value, auxIDs auxmap) types.Cmp {
 	// OpSelect is a pseudo-op. We need to be more aggressive
 	// regarding CSE to keep multiple OpSelect's of the same
 	// argument from existing.
-	if v.Op != OpSelect0 && v.Op != OpSelect1 {
+	if v.Op != OpSelect0 && v.Op != OpSelect1 && v.Op != OpSelectN {
 		if tc := v.Type.Compare(w.Type); tc != types.CMPeq {
 			return tc
 		}
@ -170,6 +170,7 @@ type expandState struct {
 	sdom       SparseTree
 	common     map[selKey]*Value
 	offsets    map[offsetKey]*Value
+	memForCall map[ID]*Value // For a call, need to know the unique selector that gets the mem.
 }

 // intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target

@ -280,7 +281,6 @@ func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64,
 		if !x.isAlreadyExpandedAggregateType(selector.Type) {
 			if leafType == selector.Type { // OpIData leads us here, sometimes.
 				leaf.copyOf(selector)
-
 			} else {
 				x.f.Fatalf("Unexpected OpArg type, selector=%s, leaf=%s\n", selector.LongString(), leaf.LongString())
 			}

@ -357,13 +357,43 @@ func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64,
 		which := selector.AuxInt
 		if which == aux.NResults() { // mem is after the results.
 			// rewrite v as a Copy of call -- the replacement call will produce a mem.
-			leaf.copyOf(call)
+			if call.Op == OpStaticLECall {
+				if leaf != selector {
+					panic("Unexpected selector of memory")
+				}
+				// StaticCall selector will address last element of Result.
+				// TODO do this for all the other call types eventually.
+				if aux.abiInfo == nil {
+					panic(fmt.Errorf("aux.abiInfo nil for call %s", call.LongString()))
+				}
+				if existing := x.memForCall[call.ID]; existing == nil {
+					selector.AuxInt = int64(aux.abiInfo.OutRegistersUsed())
+					x.memForCall[call.ID] = selector
+				} else {
+					selector.copyOf(existing)
+				}
+			} else {
+				leaf.copyOf(call)
+			}
 		} else {
 			leafType := removeTrivialWrapperTypes(leaf.Type)
 			if x.canSSAType(leafType) {
 				pt := types.NewPtr(leafType)
 				off := x.offsetFrom(x.sp, offset+aux.OffsetOfResult(which), pt)
 				// Any selection right out of the arg area/registers has to be same Block as call, use call as mem input.
+				if call.Op == OpStaticLECall { // TODO this is temporary until all calls are register-able
+					// Create a "mem" for any loads that need to occur.
+					if mem := x.memForCall[call.ID]; mem != nil {
+						if mem.Block != call.Block {
+							panic(fmt.Errorf("selector and call need to be in same block, selector=%s; call=%s", selector.LongString(), call.LongString()))
+						}
+						call = mem
+					} else {
+						mem = call.Block.NewValue1I(call.Pos.WithNotStmt(), OpSelectN, types.TypeMem, int64(aux.abiInfo.OutRegistersUsed()), call)
+						x.memForCall[call.ID] = mem
+						call = mem
+					}
+				}
 				if leaf.Block == call.Block {
 					leaf.reset(OpLoad)
 					leaf.SetArgs2(off, call)
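The memForCall map added above guarantees one unique mem-producing selector per call; later loads in the same block hang off it. A stand-alone sketch of that get-or-create pattern (the types, IDs, and names here are invented for the example, not compiler code):

    package main

    import "fmt"

    type value struct{ name string }

    var memForCall = map[int]*value{}

    // getOrMakeMem mirrors the pattern above: the first request for a
    // call's memory selector creates and caches it; later requests reuse
    // the same one, so every load uses a single mem value.
    func getOrMakeMem(callID int) *value {
    	if m := memForCall[callID]; m != nil {
    		return m
    	}
    	m := &value{name: fmt.Sprintf("SelectN(mem) of call %d", callID)}
    	memForCall[callID] = m
    	return m
    }

    func main() {
    	a, b := getOrMakeMem(1), getOrMakeMem(1)
    	fmt.Println(a == b) // true: one unique mem selector per call
    }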
@ -835,6 +865,7 @@ func expandCalls(f *Func) {
 		sdom:       f.Sdom(),
 		common:     make(map[selKey]*Value),
 		offsets:    make(map[offsetKey]*Value),
+		memForCall: make(map[ID]*Value),
 	}

 	// For 32-bit, need to deal with decomposition of 64-bit integers, which depends on endianness.

@ -1173,7 +1204,8 @@ func expandCalls(f *Func) {
 		switch v.Op {
 		case OpStaticLECall:
 			v.Op = OpStaticCall
-			v.Type = types.TypeMem
+			// TODO need to insert all the register types.
+			v.Type = types.NewResults([]*types.Type{types.TypeMem})
 		case OpClosureLECall:
 			v.Op = OpClosureCall
 			v.Type = types.TypeMem
@ -1970,37 +1970,6 @@

 (Sqrt (Const64F [c])) && !math.IsNaN(math.Sqrt(c)) => (Const64F [math.Sqrt(c)])

-// recognize runtime.newobject and don't Zero/Nilcheck it
-(Zero (Load (OffPtr [c] (SP)) mem) mem)
-	&& mem.Op == OpStaticCall
-	&& isSameCall(mem.Aux, "runtime.newobject")
-	&& c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
-	=> mem
-(Store (Load (OffPtr [c] (SP)) mem) x mem)
-	&& isConstZero(x)
-	&& mem.Op == OpStaticCall
-	&& isSameCall(mem.Aux, "runtime.newobject")
-	&& c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
-	=> mem
-(Store (OffPtr (Load (OffPtr [c] (SP)) mem)) x mem)
-	&& isConstZero(x)
-	&& mem.Op == OpStaticCall
-	&& isSameCall(mem.Aux, "runtime.newobject")
-	&& c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
-	=> mem
-// nil checks just need to rewrite to something useless.
-// they will be deadcode eliminated soon afterwards.
-(NilCheck (Load (OffPtr [c] (SP)) (StaticCall {sym} _)) _)
-	&& isSameCall(sym, "runtime.newobject")
-	&& c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
-	&& warnRule(fe.Debug_checknil(), v, "removed nil check")
-	=> (Invalid)
-(NilCheck (OffPtr (Load (OffPtr [c] (SP)) (StaticCall {sym} _))) _)
-	&& isSameCall(sym, "runtime.newobject")
-	&& c == config.ctxt.FixedFrameSize() + config.RegSize // offset of return value
-	&& warnRule(fe.Debug_checknil(), v, "removed nil check")
-	=> (Invalid)
-
 // for rewriting results of some late-expanded rewrites (below)
 (SelectN [0] (MakeResult a ___)) => a
 (SelectN [1] (MakeResult a b ___)) => b

@ -2021,12 +1990,12 @@
 	&& isSameCall(call.Aux, "runtime.newobject")
 	=> mem

-(NilCheck (SelectN [0] call:(StaticLECall _ _)) (SelectN [1] call))
+(NilCheck (SelectN [0] call:(StaticLECall _ _)) _)
 	&& isSameCall(call.Aux, "runtime.newobject")
 	&& warnRule(fe.Debug_checknil(), v, "removed nil check")
 	=> (Invalid)

-(NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) (SelectN [1] call))
+(NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
 	&& isSameCall(call.Aux, "runtime.newobject")
 	&& warnRule(fe.Debug_checknil(), v, "removed nil check")
 	=> (Invalid)

@ -2083,15 +2052,19 @@
 (IsNonNil (Addr _)) => (ConstBool [true])
 (IsNonNil (LocalAddr _ _)) => (ConstBool [true])

+// TODO REGISTER ARGS this will need revision.
+// Because expand calls runs after prove, constants useful to this pattern may not appear
+// In the future both versions need to exist; the memory and register variants.
+
 // Inline small or disjoint runtime.memmove calls with constant length.
 // See the comment in op Move in genericOps.go for discussion of the type.
-(StaticCall {sym} s1:(Store _ (Const(64|32) [sz]) s2:(Store _ src s3:(Store {t} _ dst mem))))
+(SelectN [0] call:(StaticCall {sym} s1:(Store _ (Const(64|32) [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))))
 	&& sz >= 0
 	&& isSameCall(sym, "runtime.memmove")
 	&& t.IsPtr() // avoids TUINTPTR, see issue 30061
 	&& s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
 	&& isInlinableMemmove(dst, src, int64(sz), config)
-	&& clobber(s1, s2, s3)
+	&& clobber(s1, s2, s3, call)
 	=> (Move {t.Elem()} [int64(sz)] dst src mem)

 // Inline small or disjoint runtime.memmove calls with constant length.

@ -2105,13 +2078,6 @@
 	&& clobber(call)
 	=> (Move {dst.Type.Elem()} [int64(sz)] dst src mem)

-// De-virtualize interface calls into static calls.
-// Note that (ITab (IMake)) doesn't get
-// rewritten until after the first opt pass,
-// so this rule should trigger reliably.
-(InterCall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) mem) && devirt(v, auxCall, itab, off) != nil =>
-	(StaticCall [int32(argsize)] {devirt(v, auxCall, itab, off)} mem)
-
 // De-virtualize late-expanded interface calls into late-expanded static calls.
 // Note that (ITab (IMake)) doesn't get rewritten until after the first opt pass,
 // so this rule should trigger reliably.

@ -2499,8 +2465,8 @@
 	(Store {t5} (OffPtr <tt5> [o5] dst) d4
 		(Zero {t1} [n] dst mem)))))

-// TODO this does not fire before call expansion; is that acceptable?
-(StaticCall {sym} x) && needRaceCleanup(sym, v) => x
+(SelectN [0] call:(StaticLECall {sym} a x)) && needRaceCleanup(sym, call) && clobber(call) => x
+(SelectN [0] call:(StaticLECall {sym} x)) && needRaceCleanup(sym, call) && clobber(call) => x

 // Collapse moving A -> B -> C into just A -> C.
 // Later passes (deadstore, elim unread auto) will remove the A -> B move, if possible.
@ -396,7 +396,7 @@ var genericOps = []opData{
 	// to match StaticCall's 32 bit arg size limit.
 	// TODO(drchase,josharian): could the arg size limit be bundled into the rules for CallOff?
 	{name: "ClosureCall", argLength: 3, aux: "CallOff", call: true},  // arg0=code pointer, arg1=context ptr, arg2=memory. auxint=arg size. Returns memory.
-	{name: "StaticCall", argLength: 1, aux: "CallOff", call: true},   // call function aux.(*obj.LSym), arg0=memory. auxint=arg size. Returns memory.
+	{name: "StaticCall", argLength: -1, aux: "CallOff", call: true},  // call function aux.(*obj.LSym), arg0..argN-1 are register inputs, argN=memory. auxint=arg size. Returns Result of register results, plus memory.
 	{name: "InterCall", argLength: 2, aux: "CallOff", call: true},    // interface call. arg0=code pointer, arg1=memory, auxint=arg size. Returns memory.
 	{name: "ClosureLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded closure call. arg0=code pointer, arg1=context ptr, arg2..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
 	{name: "StaticLECall", argLength: -1, aux: "CallOff", call: true},  // late-expanded static call function aux.(*ssa.AuxCall.Fn). arg0..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
@ -88,6 +88,20 @@ func (t LocPair) String() string {
 	return fmt.Sprintf("<%s,%s>", n0, n1)
 }

+type LocResults []Location
+
+func (t LocResults) String() string {
+	s := "<"
+	a := ""
+	for _, r := range t {
+		a += s
+		s = ","
+		a += r.String()
+	}
+	a += ">"
+	return a
+}
+
 type ArgPair struct {
 	reg *Register
 	mem LocalSlot
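LocResults.String above renders a results location list as a comma-separated list in angle brackets. The same joining logic as a runnable sketch (the register names are invented for the demo):

    package main

    import "fmt"

    // join mimics LocResults.String: a leading "<", comma-separated
    // elements, and a closing ">".
    func join(locs []string) string {
    	s := "<"
    	a := ""
    	for _, r := range locs {
    		a += s
    		s = ","
    		a += r
    	}
    	a += ">"
    	return a
    }

    func main() {
    	fmt.Println(join([]string{"AX", "X0", "mem"})) // <AX,X0,mem>
    }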
@ -21,7 +21,7 @@ func checkLower(f *Func) {
 			continue // lowered
 		}
 		switch v.Op {
-		case OpSP, OpSB, OpInitMem, OpArg, OpPhi, OpVarDef, OpVarKill, OpVarLive, OpKeepAlive, OpSelect0, OpSelect1, OpConvert, OpInlMark:
+		case OpSP, OpSB, OpInitMem, OpArg, OpPhi, OpVarDef, OpVarKill, OpVarLive, OpKeepAlive, OpSelect0, OpSelect1, OpSelectN, OpConvert, OpInlMark:
 			continue // ok not to lower
 		case OpGetG:
 			if f.Config.hasGReg {
@ -203,9 +203,19 @@ func (a *AuxCall) String() string {
 	return fn + "}"
 }

+func ACParamsToTypes(ps []Param) (ts []*types.Type) {
+	for _, p := range ps {
+		ts = append(ts, p.Type)
+	}
+	return
+}
+
 // StaticAuxCall returns an AuxCall for a static call.
 func StaticAuxCall(sym *obj.LSym, args []Param, results []Param, paramResultInfo *abi.ABIParamResultInfo) *AuxCall {
 	// TODO Create regInfo for AuxCall
+	if paramResultInfo == nil {
+		panic(fmt.Errorf("Nil paramResultInfo, sym=%v", sym))
+	}
 	return &AuxCall{Fn: sym, args: args, results: results, abiInfo: paramResultInfo}
 }
@ -35435,7 +35435,7 @@ var opcodeTable = [...]opInfo{
 	{
 		name:    "StaticCall",
 		auxType: auxCallOff,
-		argLen:  1,
+		argLen:  -1,
 		call:    true,
 		generic: true,
 	},
@ -761,6 +761,9 @@ func (s *regAllocState) advanceUses(v *Value) {
 // current instruction.
 func (s *regAllocState) liveAfterCurrentInstruction(v *Value) bool {
 	u := s.values[v.ID].uses
+	if u == nil {
+		panic(fmt.Errorf("u is nil, v = %s, s.values[v.ID] = %v", v.LongString(), s.values[v.ID]))
+	}
 	d := u.dist
 	for u != nil && u.dist == d {
 		u = u.next

@ -1208,13 +1211,17 @@ func (s *regAllocState) regalloc(f *Func) {
 				s.sb = v.ID
 				continue
 			}
-			if v.Op == OpSelect0 || v.Op == OpSelect1 {
+			if v.Op == OpSelect0 || v.Op == OpSelect1 || v.Op == OpSelectN {
 				if s.values[v.ID].needReg {
-					var i = 0
-					if v.Op == OpSelect1 {
-						i = 1
+					if v.Op == OpSelectN {
+						s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocResults)[int(v.AuxInt)].(*Register).num), v, v)
+					} else {
+						var i = 0
+						if v.Op == OpSelect1 {
+							i = 1
+						}
+						s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocPair)[i].(*Register).num), v, v)
 					}
-					s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocPair)[i].(*Register).num), v, v)
 				}
 				b.Values = append(b.Values, v)
 				s.advanceUses(v)

@ -1767,6 +1774,9 @@ func (s *regAllocState) placeSpills() {
 		// put the spill of v. At the start "best" is the best place
 		// we have found so far.
 		// TODO: find a way to make this O(1) without arbitrary cutoffs.
+		if v == nil {
+			panic(fmt.Errorf("nil v, s.orig[%d], vi = %v, spill = %s", i, vi, spill.LongString()))
+		}
 		best := v.Block
 		bestArg := v
 		var bestDepth int16
@ -793,7 +793,10 @@ func devirtLESym(v *Value, aux Aux, sym Sym, offset int64) *obj.LSym {

 func devirtLECall(v *Value, sym *obj.LSym) *Value {
 	v.Op = OpStaticLECall
-	v.Aux.(*AuxCall).Fn = sym
+	auxcall := v.Aux.(*AuxCall)
+	auxcall.Fn = sym
+	// TODO(register args) this should not be necessary when fully transition to the new register ABI.
+	auxcall.abiInfo = v.Block.Func.ABIDefault.ABIAnalyzeTypes(nil, ACParamsToTypes(auxcall.args), ACParamsToTypes(auxcall.results))
 	v.RemoveArg(0)
 	return v
 }

@ -1617,7 +1620,7 @@ func needRaceCleanup(sym *AuxCall, v *Value) bool {
 	for _, b := range f.Blocks {
 		for _, v := range b.Values {
 			switch v.Op {
-			case OpStaticCall:
+			case OpStaticCall, OpStaticLECall:
 				// Check for racefuncenter/racefuncenterfp will encounter racefuncexit and vice versa.
 				// Allow calls to panic*
 				s := v.Aux.(*AuxCall).Fn.String()

@ -1632,15 +1635,20 @@ func needRaceCleanup(sym *AuxCall, v *Value) bool {
 					return false
 				}
 			case OpPanicBounds, OpPanicExtend:
 				// Note: these are panic generators that are ok (like the static calls above).
-			case OpClosureCall, OpInterCall:
+			case OpClosureCall, OpInterCall, OpClosureLECall, OpInterLECall:
 				// We must keep the race functions if there are any other call types.
 				return false
 			}
 		}
 	}
 	if isSameCall(sym, "runtime.racefuncenter") {
+		// TODO REGISTER ABI this needs to be cleaned up.
 		// If we're removing racefuncenter, remove its argument as well.
 		if v.Args[0].Op != OpStore {
+			if v.Op == OpStaticLECall {
+				// there is no store, yet.
+				return true
+			}
 			return false
 		}
 		mem := v.Args[0].Args[2]
@ -122,8 +122,6 @@ func rewriteValuegeneric(v *Value) bool {
 		return rewriteValuegeneric_OpEqSlice(v)
 	case OpIMake:
 		return rewriteValuegeneric_OpIMake(v)
-	case OpInterCall:
-		return rewriteValuegeneric_OpInterCall(v)
 	case OpInterLECall:
 		return rewriteValuegeneric_OpInterLECall(v)
 	case OpIsInBounds:

@ -392,8 +390,6 @@ func rewriteValuegeneric(v *Value) bool {
 		return rewriteValuegeneric_OpSlicemask(v)
 	case OpSqrt:
 		return rewriteValuegeneric_OpSqrt(v)
-	case OpStaticCall:
-		return rewriteValuegeneric_OpStaticCall(v)
 	case OpStaticLECall:
 		return rewriteValuegeneric_OpStaticLECall(v)
 	case OpStore:
@ -8542,52 +8538,6 @@ func rewriteValuegeneric_OpIMake(v *Value) bool {
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
func rewriteValuegeneric_OpInterCall(v *Value) bool {
|
|
||||||
v_1 := v.Args[1]
|
|
||||||
v_0 := v.Args[0]
|
|
||||||
// match: (InterCall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) mem)
|
|
||||||
// cond: devirt(v, auxCall, itab, off) != nil
|
|
||||||
// result: (StaticCall [int32(argsize)] {devirt(v, auxCall, itab, off)} mem)
|
|
||||||
for {
|
|
||||||
argsize := auxIntToInt32(v.AuxInt)
|
|
||||||
auxCall := auxToCall(v.Aux)
|
|
||||||
if v_0.Op != OpLoad {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v_0_0 := v_0.Args[0]
|
|
||||||
if v_0_0.Op != OpOffPtr {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
off := auxIntToInt64(v_0_0.AuxInt)
|
|
||||||
v_0_0_0 := v_0_0.Args[0]
|
|
||||||
if v_0_0_0.Op != OpITab {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v_0_0_0_0 := v_0_0_0.Args[0]
|
|
||||||
if v_0_0_0_0.Op != OpIMake {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v_0_0_0_0_0 := v_0_0_0_0.Args[0]
|
|
||||||
if v_0_0_0_0_0.Op != OpAddr {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
itab := auxToSym(v_0_0_0_0_0.Aux)
|
|
||||||
v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
|
|
||||||
if v_0_0_0_0_0_0.Op != OpSB {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
mem := v_1
|
|
||||||
if !(devirt(v, auxCall, itab, off) != nil) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v.reset(OpStaticCall)
|
|
||||||
v.AuxInt = int32ToAuxInt(int32(argsize))
|
|
||||||
v.Aux = callToAux(devirt(v, auxCall, itab, off))
|
|
||||||
v.AddArg(mem)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
func rewriteValuegeneric_OpInterLECall(v *Value) bool {
|
func rewriteValuegeneric_OpInterLECall(v *Value) bool {
|
||||||
// match: (InterLECall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) ___)
|
// match: (InterLECall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) ___)
|
||||||
// cond: devirtLESym(v, auxCall, itab, off) != nil
|
// cond: devirtLESym(v, auxCall, itab, off) != nil
|
||||||
|
|
@ -16113,7 +16063,6 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool {
|
||||||
v_1 := v.Args[1]
|
v_1 := v.Args[1]
|
||||||
v_0 := v.Args[0]
|
v_0 := v.Args[0]
|
||||||
b := v.Block
|
b := v.Block
|
||||||
config := b.Func.Config
|
|
||||||
fe := b.Func.fe
|
fe := b.Func.fe
|
||||||
// match: (NilCheck (GetG mem) mem)
|
// match: (NilCheck (GetG mem) mem)
|
||||||
// result: mem
|
// result: mem
|
||||||
|
|
@ -16128,67 +16077,7 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool {
|
||||||
v.copyOf(mem)
|
v.copyOf(mem)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (NilCheck (Load (OffPtr [c] (SP)) (StaticCall {sym} _)) _)
|
// match: (NilCheck (SelectN [0] call:(StaticLECall _ _)) _)
|
||||||
// cond: isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")
|
|
||||||
// result: (Invalid)
|
|
||||||
for {
|
|
||||||
if v_0.Op != OpLoad {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
_ = v_0.Args[1]
|
|
||||||
v_0_0 := v_0.Args[0]
|
|
||||||
if v_0_0.Op != OpOffPtr {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
c := auxIntToInt64(v_0_0.AuxInt)
|
|
||||||
v_0_0_0 := v_0_0.Args[0]
|
|
||||||
if v_0_0_0.Op != OpSP {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v_0_1 := v_0.Args[1]
|
|
||||||
if v_0_1.Op != OpStaticCall {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
sym := auxToCall(v_0_1.Aux)
|
|
||||||
if !(isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v.reset(OpInvalid)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// match: (NilCheck (OffPtr (Load (OffPtr [c] (SP)) (StaticCall {sym} _))) _)
|
|
||||||
// cond: isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")
|
|
||||||
// result: (Invalid)
|
|
||||||
for {
|
|
||||||
if v_0.Op != OpOffPtr {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v_0_0 := v_0.Args[0]
|
|
||||||
if v_0_0.Op != OpLoad {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
_ = v_0_0.Args[1]
|
|
||||||
v_0_0_0 := v_0_0.Args[0]
|
|
||||||
if v_0_0_0.Op != OpOffPtr {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
c := auxIntToInt64(v_0_0_0.AuxInt)
|
|
||||||
v_0_0_0_0 := v_0_0_0.Args[0]
|
|
||||||
if v_0_0_0_0.Op != OpSP {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v_0_0_1 := v_0_0.Args[1]
|
|
||||||
if v_0_0_1.Op != OpStaticCall {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
sym := auxToCall(v_0_0_1.Aux)
|
|
||||||
if !(isSameCall(sym, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v.reset(OpInvalid)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// match: (NilCheck (SelectN [0] call:(StaticLECall _ _)) (SelectN [1] call))
|
|
||||||
// cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
|
// cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
|
||||||
// result: (Invalid)
|
// result: (Invalid)
|
||||||
for {
|
for {
|
||||||
|
|
@ -16196,13 +16085,13 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
call := v_0.Args[0]
|
call := v_0.Args[0]
|
||||||
if call.Op != OpStaticLECall || len(call.Args) != 2 || v_1.Op != OpSelectN || auxIntToInt64(v_1.AuxInt) != 1 || call != v_1.Args[0] || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
|
if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
v.reset(OpInvalid)
|
v.reset(OpInvalid)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) (SelectN [1] call))
|
// match: (NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
|
||||||
// cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
|
// cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
|
||||||
// result: (Invalid)
|
// result: (Invalid)
|
||||||
for {
|
for {
|
||||||
|
|
@ -16214,7 +16103,7 @@ func rewriteValuegeneric_OpNilCheck(v *Value) bool {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
call := v_0_0.Args[0]
|
call := v_0_0.Args[0]
|
||||||
if call.Op != OpStaticLECall || len(call.Args) != 2 || v_1.Op != OpSelectN || auxIntToInt64(v_1.AuxInt) != 1 || call != v_1.Args[0] || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
|
if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
v.reset(OpInvalid)
|
v.reset(OpInvalid)
|
||||||
|
|
@ -20799,6 +20688,94 @@ func rewriteValuegeneric_OpSelectN(v *Value) bool {
|
||||||
v.copyOf(c)
|
v.copyOf(c)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
// match: (SelectN [0] call:(StaticCall {sym} s1:(Store _ (Const64 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))))
|
||||||
|
// cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call)
|
||||||
|
// result: (Move {t.Elem()} [int64(sz)] dst src mem)
|
||||||
|
for {
|
||||||
|
if auxIntToInt64(v.AuxInt) != 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
call := v_0
|
||||||
|
if call.Op != OpStaticCall || len(call.Args) != 1 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
sym := auxToCall(call.Aux)
|
||||||
|
s1 := call.Args[0]
|
||||||
|
if s1.Op != OpStore {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
_ = s1.Args[2]
|
||||||
|
s1_1 := s1.Args[1]
|
||||||
|
if s1_1.Op != OpConst64 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
sz := auxIntToInt64(s1_1.AuxInt)
|
||||||
|
s2 := s1.Args[2]
|
||||||
|
if s2.Op != OpStore {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
_ = s2.Args[2]
|
||||||
|
src := s2.Args[1]
|
||||||
|
s3 := s2.Args[2]
|
||||||
|
if s3.Op != OpStore {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
t := auxToType(s3.Aux)
|
||||||
|
mem := s3.Args[2]
|
||||||
|
dst := s3.Args[1]
|
||||||
|
if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call)) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
v.reset(OpMove)
|
||||||
|
v.AuxInt = int64ToAuxInt(int64(sz))
|
||||||
|
v.Aux = typeToAux(t.Elem())
|
||||||
|
v.AddArg3(dst, src, mem)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// match: (SelectN [0] call:(StaticCall {sym} s1:(Store _ (Const32 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))))
|
||||||
|
// cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call)
|
||||||
|
// result: (Move {t.Elem()} [int64(sz)] dst src mem)
|
||||||
|
for {
|
||||||
|
if auxIntToInt64(v.AuxInt) != 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
call := v_0
|
||||||
|
if call.Op != OpStaticCall || len(call.Args) != 1 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
sym := auxToCall(call.Aux)
|
||||||
|
s1 := call.Args[0]
|
||||||
|
if s1.Op != OpStore {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
_ = s1.Args[2]
|
||||||
|
s1_1 := s1.Args[1]
|
||||||
|
if s1_1.Op != OpConst32 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
sz := auxIntToInt32(s1_1.AuxInt)
|
||||||
|
s2 := s1.Args[2]
|
||||||
|
if s2.Op != OpStore {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
_ = s2.Args[2]
|
||||||
|
src := s2.Args[1]
|
||||||
|
s3 := s2.Args[2]
|
||||||
|
if s3.Op != OpStore {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
t := auxToType(s3.Aux)
|
||||||
|
mem := s3.Args[2]
|
||||||
|
dst := s3.Args[1]
|
||||||
|
if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call)) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
v.reset(OpMove)
|
||||||
|
v.AuxInt = int64ToAuxInt(int64(sz))
|
||||||
|
v.Aux = typeToAux(t.Elem())
|
||||||
|
v.AddArg3(dst, src, mem)
|
||||||
|
return true
|
||||||
|
}
|
||||||
// match: (SelectN [0] call:(StaticLECall {sym} dst src (Const64 [sz]) mem))
|
// match: (SelectN [0] call:(StaticLECall {sym} dst src (Const64 [sz]) mem))
|
||||||
// cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && dst.Type.IsPtr() && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)
|
// cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && dst.Type.IsPtr() && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)
|
||||||
// result: (Move {dst.Type.Elem()} [int64(sz)] dst src mem)
|
// result: (Move {dst.Type.Elem()} [int64(sz)] dst src mem)
|
||||||
|
|
@ -20857,6 +20834,44 @@ func rewriteValuegeneric_OpSelectN(v *Value) bool {
|
||||||
v.AddArg3(dst, src, mem)
|
v.AddArg3(dst, src, mem)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
// match: (SelectN [0] call:(StaticLECall {sym} a x))
|
||||||
|
// cond: needRaceCleanup(sym, call) && clobber(call)
|
||||||
|
// result: x
|
||||||
|
for {
|
||||||
|
if auxIntToInt64(v.AuxInt) != 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
call := v_0
|
||||||
|
if call.Op != OpStaticLECall || len(call.Args) != 2 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
sym := auxToCall(call.Aux)
|
||||||
|
x := call.Args[1]
|
||||||
|
if !(needRaceCleanup(sym, call) && clobber(call)) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
v.copyOf(x)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// match: (SelectN [0] call:(StaticLECall {sym} x))
|
||||||
|
// cond: needRaceCleanup(sym, call) && clobber(call)
|
||||||
|
// result: x
|
||||||
|
for {
|
||||||
|
if auxIntToInt64(v.AuxInt) != 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
call := v_0
|
||||||
|
if call.Op != OpStaticLECall || len(call.Args) != 1 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
sym := auxToCall(call.Aux)
|
||||||
|
x := call.Args[0]
|
||||||
|
if !(needRaceCleanup(sym, call) && clobber(call)) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
v.copyOf(x)
|
||||||
|
return true
|
||||||
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
func rewriteValuegeneric_OpSignExt16to32(v *Value) bool {
|
func rewriteValuegeneric_OpSignExt16to32(v *Value) bool {
|
||||||
|
|
@ -21307,98 +21322,6 @@ func rewriteValuegeneric_OpSqrt(v *Value) bool {
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
func rewriteValuegeneric_OpStaticCall(v *Value) bool {
|
|
||||||
v_0 := v.Args[0]
|
|
||||||
b := v.Block
|
|
||||||
config := b.Func.Config
|
|
||||||
// match: (StaticCall {sym} s1:(Store _ (Const64 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem))))
|
|
||||||
// cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3)
|
|
||||||
// result: (Move {t.Elem()} [int64(sz)] dst src mem)
|
|
||||||
for {
|
|
||||||
sym := auxToCall(v.Aux)
|
|
||||||
s1 := v_0
|
|
||||||
if s1.Op != OpStore {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
_ = s1.Args[2]
|
|
||||||
s1_1 := s1.Args[1]
|
|
||||||
if s1_1.Op != OpConst64 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
sz := auxIntToInt64(s1_1.AuxInt)
|
|
||||||
s2 := s1.Args[2]
|
|
||||||
if s2.Op != OpStore {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
_ = s2.Args[2]
|
|
||||||
src := s2.Args[1]
|
|
||||||
s3 := s2.Args[2]
|
|
||||||
if s3.Op != OpStore {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
t := auxToType(s3.Aux)
|
|
||||||
mem := s3.Args[2]
|
|
||||||
dst := s3.Args[1]
|
|
||||||
if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3)) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v.reset(OpMove)
|
|
||||||
v.AuxInt = int64ToAuxInt(int64(sz))
|
|
||||||
v.Aux = typeToAux(t.Elem())
|
|
||||||
v.AddArg3(dst, src, mem)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// match: (StaticCall {sym} s1:(Store _ (Const32 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem))))
|
|
||||||
// cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3)
|
|
||||||
// result: (Move {t.Elem()} [int64(sz)] dst src mem)
|
|
||||||
for {
|
|
||||||
sym := auxToCall(v.Aux)
|
|
||||||
s1 := v_0
|
|
||||||
if s1.Op != OpStore {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
_ = s1.Args[2]
|
|
||||||
s1_1 := s1.Args[1]
|
|
||||||
if s1_1.Op != OpConst32 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
sz := auxIntToInt32(s1_1.AuxInt)
|
|
||||||
s2 := s1.Args[2]
|
|
||||||
if s2.Op != OpStore {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
_ = s2.Args[2]
|
|
||||||
src := s2.Args[1]
|
|
||||||
s3 := s2.Args[2]
|
|
||||||
if s3.Op != OpStore {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
t := auxToType(s3.Aux)
|
|
||||||
mem := s3.Args[2]
|
|
||||||
dst := s3.Args[1]
|
|
||||||
if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && t.IsPtr() && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3)) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v.reset(OpMove)
|
|
||||||
v.AuxInt = int64ToAuxInt(int64(sz))
|
|
||||||
v.Aux = typeToAux(t.Elem())
|
|
||||||
v.AddArg3(dst, src, mem)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// match: (StaticCall {sym} x)
|
|
||||||
// cond: needRaceCleanup(sym, v)
|
|
||||||
// result: x
|
|
||||||
for {
|
|
||||||
sym := auxToCall(v.Aux)
|
|
||||||
x := v_0
|
|
||||||
if !(needRaceCleanup(sym, v)) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v.copyOf(x)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
func rewriteValuegeneric_OpStaticLECall(v *Value) bool {
|
func rewriteValuegeneric_OpStaticLECall(v *Value) bool {
|
||||||
b := v.Block
|
b := v.Block
|
||||||
typ := &b.Func.Config.Types
|
typ := &b.Func.Config.Types
|
||||||
|
|
@ -21442,7 +21365,6 @@ func rewriteValuegeneric_OpStore(v *Value) bool {
|
||||||
v_1 := v.Args[1]
|
v_1 := v.Args[1]
|
||||||
v_0 := v.Args[0]
|
v_0 := v.Args[0]
|
||||||
b := v.Block
|
b := v.Block
|
||||||
config := b.Func.Config
|
|
||||||
fe := b.Func.fe
|
fe := b.Func.fe
|
||||||
// match: (Store {t1} p1 (Load <t2> p2 mem) mem)
|
// match: (Store {t1} p1 (Load <t2> p2 mem) mem)
|
||||||
// cond: isSamePtr(p1, p2) && t2.Size() == t1.Size()
|
// cond: isSamePtr(p1, p2) && t2.Size() == t1.Size()
|
||||||
|
|
@ -21890,58 +21812,6 @@ func rewriteValuegeneric_OpStore(v *Value) bool {
|
||||||
v.AddArg3(dst, e, mem)
|
v.AddArg3(dst, e, mem)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
// match: (Store (Load (OffPtr [c] (SP)) mem) x mem)
|
|
||||||
// cond: isConstZero(x) && mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize
|
|
||||||
// result: mem
|
|
||||||
for {
|
|
||||||
if v_0.Op != OpLoad {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
mem := v_0.Args[1]
|
|
||||||
v_0_0 := v_0.Args[0]
|
|
||||||
if v_0_0.Op != OpOffPtr {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
c := auxIntToInt64(v_0_0.AuxInt)
|
|
||||||
v_0_0_0 := v_0_0.Args[0]
|
|
||||||
if v_0_0_0.Op != OpSP {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
x := v_1
|
|
||||||
if mem != v_2 || !(isConstZero(x) && mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v.copyOf(mem)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// match: (Store (OffPtr (Load (OffPtr [c] (SP)) mem)) x mem)
|
|
||||||
// cond: isConstZero(x) && mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize
|
|
||||||
// result: mem
|
|
||||||
for {
|
|
||||||
if v_0.Op != OpOffPtr {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v_0_0 := v_0.Args[0]
|
|
||||||
if v_0_0.Op != OpLoad {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
mem := v_0_0.Args[1]
|
|
||||||
v_0_0_0 := v_0_0.Args[0]
|
|
||||||
if v_0_0_0.Op != OpOffPtr {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
c := auxIntToInt64(v_0_0_0.AuxInt)
|
|
||||||
v_0_0_0_0 := v_0_0_0.Args[0]
|
|
||||||
if v_0_0_0_0.Op != OpSP {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
x := v_1
|
|
||||||
if mem != v_2 || !(isConstZero(x) && mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v.copyOf(mem)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// match: (Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call))
|
// match: (Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call))
|
||||||
// cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")
|
// cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")
|
||||||
// result: mem
|
// result: mem
|
||||||
|
|
@ -24660,27 +24530,6 @@ func rewriteValuegeneric_OpZero(v *Value) bool {
|
||||||
v_1 := v.Args[1]
|
v_1 := v.Args[1]
|
||||||
v_0 := v.Args[0]
|
v_0 := v.Args[0]
|
||||||
b := v.Block
|
b := v.Block
|
||||||
config := b.Func.Config
|
|
||||||
// match: (Zero (Load (OffPtr [c] (SP)) mem) mem)
|
|
||||||
// cond: mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize() + config.RegSize
|
|
||||||
// result: mem
|
|
||||||
for {
|
|
||||||
if v_0.Op != OpLoad {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
mem := v_0.Args[1]
|
|
||||||
v_0_0 := v_0.Args[0]
|
|
||||||
if v_0_0.Op != OpOffPtr {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
c := auxIntToInt64(v_0_0.AuxInt)
|
|
||||||
v_0_0_0 := v_0_0.Args[0]
|
|
||||||
if v_0_0_0.Op != OpSP || mem != v_1 || !(mem.Op == OpStaticCall && isSameCall(mem.Aux, "runtime.newobject") && c == config.ctxt.FixedFrameSize()+config.RegSize) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
v.copyOf(mem)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// match: (Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call))
|
// match: (Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call))
|
||||||
// cond: isSameCall(call.Aux, "runtime.newobject")
|
// cond: isSameCall(call.Aux, "runtime.newobject")
|
||||||
// result: mem
|
// result: mem
|
||||||
|
|
|
||||||
|
|
@ -145,7 +145,7 @@ func schedule(f *Func) {
 			// reduce register pressure. It also helps make sure
 			// VARDEF ops are scheduled before the corresponding LEA.
 			score[v.ID] = ScoreMemory
-		case v.Op == OpSelect0 || v.Op == OpSelect1:
+		case v.Op == OpSelect0 || v.Op == OpSelect1 || v.Op == OpSelectN:
 			// Schedule the pseudo-op of reading part of a tuple
 			// immediately after the tuple-generating op, since
 			// this value is already live. This also removes its

@ -270,6 +270,20 @@ func schedule(f *Func) {
 				tuples[v.Args[0].ID] = make([]*Value, 2)
 			}
 			tuples[v.Args[0].ID][1] = v
+		case v.Op == OpSelectN:
+			if tuples[v.Args[0].ID] == nil {
+				tuples[v.Args[0].ID] = make([]*Value, v.Args[0].Type.NumFields())
+			}
+			tuples[v.Args[0].ID][v.AuxInt] = v
+		case v.Type.IsResults() && tuples[v.ID] != nil:
+			tup := tuples[v.ID]
+			for i := len(tup) - 1; i >= 0; i-- {
+				if tup[i] != nil {
+					order = append(order, tup[i])
+				}
+			}
+			delete(tuples, v.ID)
+			order = append(order, v)
 		case v.Type.IsTuple() && tuples[v.ID] != nil:
 			if tuples[v.ID][1] != nil {
 				order = append(order, tuples[v.ID][1])
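The scheduler change above records OpSelectN users in a per-tuple slice sized by the number of results and indexed by AuxInt, so all selectors of one call can be handled together. A stand-alone sketch of that bookkeeping (the IDs, counts, and names are invented for the demo):

    package main

    import "fmt"

    func main() {
    	type sel struct {
    		tupleID int // which call/tuple the selector reads
    		idx     int // which result it picks (AuxInt)
    		name    string
    	}
    	numFields := map[int]int{7: 3} // call #7 produces 3 results
    	sels := []sel{{7, 2, "s2"}, {7, 0, "s0"}}

    	// Group selectors by the value they select from, indexed by result.
    	tuples := map[int][]string{}
    	for _, s := range sels {
    		if tuples[s.tupleID] == nil {
    			tuples[s.tupleID] = make([]string, numFields[s.tupleID])
    		}
    		tuples[s.tupleID][s.idx] = s.name
    	}
    	fmt.Println(tuples[7]) // [s0  s2]
    }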
@ -18,10 +18,11 @@ func tighten(f *Func) {
 			continue
 		}
 		switch v.Op {
-		case OpPhi, OpArg, OpSelect0, OpSelect1:
+		case OpPhi, OpArg, OpSelect0, OpSelect1, OpSelectN:
 			// Phis need to stay in their block.
 			// Arg must stay in the entry block.
 			// Tuple selectors must stay with the tuple generator.
+			// SelectN is typically, ultimately, a register.
 			continue
 		}
 		if v.MemoryArg() != nil {
@ -4,8 +4,8 @@
|
||||||
|
|
||||||
package ssa
|
package ssa
|
||||||
|
|
||||||
// tightenTupleSelectors ensures that tuple selectors (Select0 and
|
// tightenTupleSelectors ensures that tuple selectors (Select0, Select1,
|
||||||
// Select1 ops) are in the same block as their tuple generator. The
|
// and SelectN ops) are in the same block as their tuple generator. The
|
||||||
// function also ensures that there are no duplicate tuple selectors.
|
// function also ensures that there are no duplicate tuple selectors.
|
||||||
// These properties are expected by the scheduler but may not have
|
// These properties are expected by the scheduler but may not have
|
||||||
// been maintained by the optimization pipeline up to this point.
|
// been maintained by the optimization pipeline up to this point.
|
||||||
|
|
@ -13,28 +13,40 @@ package ssa
|
||||||
// See issues 16741 and 39472.
|
// See issues 16741 and 39472.
|
||||||
func tightenTupleSelectors(f *Func) {
|
func tightenTupleSelectors(f *Func) {
|
||||||
selectors := make(map[struct {
|
selectors := make(map[struct {
|
||||||
id ID
|
id ID
|
||||||
op Op
|
which int
|
||||||
}]*Value)
|
}]*Value)
|
||||||
for _, b := range f.Blocks {
|
for _, b := range f.Blocks {
|
||||||
for _, selector := range b.Values {
|
for _, selector := range b.Values {
|
||||||
if selector.Op != OpSelect0 && selector.Op != OpSelect1 {
|
// Key fields for de-duplication
|
||||||
|
var tuple *Value
|
||||||
|
idx := 0
|
||||||
|
switch selector.Op {
|
||||||
|
default:
|
||||||
continue
|
continue
|
||||||
}
|
case OpSelect1:
|
||||||
|
idx = 1
|
||||||
// Get the tuple generator to use as a key for de-duplication.
|
fallthrough
|
||||||
tuple := selector.Args[0]
|
case OpSelect0:
|
||||||
if !tuple.Type.IsTuple() {
|
tuple = selector.Args[0]
|
||||||
f.Fatalf("arg of tuple selector %s is not a tuple: %s", selector.String(), tuple.LongString())
|
if !tuple.Type.IsTuple() {
|
||||||
|
f.Fatalf("arg of tuple selector %s is not a tuple: %s", selector.String(), tuple.LongString())
|
||||||
|
}
|
||||||
|
case OpSelectN:
|
||||||
|
tuple = selector.Args[0]
|
||||||
|
idx = int(selector.AuxInt)
|
||||||
|
if !tuple.Type.IsResults() {
|
||||||
|
f.Fatalf("arg of result selector %s is not a results: %s", selector.String(), tuple.LongString())
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// If there is a pre-existing selector in the target block then
|
// If there is a pre-existing selector in the target block then
|
||||||
// use that. Do this even if the selector is already in the
|
// use that. Do this even if the selector is already in the
|
||||||
// target block to avoid duplicate tuple selectors.
|
// target block to avoid duplicate tuple selectors.
|
||||||
key := struct {
|
key := struct {
|
||||||
id ID
|
id ID
|
||||||
op Op
|
which int
|
||||||
}{tuple.ID, selector.Op}
|
}{tuple.ID, idx}
|
||||||
if t := selectors[key]; t != nil {
|
if t := selectors[key]; t != nil {
|
||||||
if selector != t {
|
if selector != t {
|
||||||
selector.copyOf(t)
|
selector.copyOf(t)
|
||||||
|
|
|
||||||
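The de-duplication key now carries the index of the selected result rather than the selector's opcode, so Select0, Select1, and every SelectN [i] of the same generator share one key space. A standalone illustration (invented names, not compiler code):

    package main

    import "fmt"

    // selKey mirrors the anonymous map key above: the generating value's ID
    // plus the index of the selected result.
    type selKey struct {
        id    int
        which int
    }

    func main() {
        selectors := map[selKey]string{}
        record := func(id, which int, name string) {
            k := selKey{id: id, which: which}
            if prev, ok := selectors[k]; ok {
                fmt.Printf("%s duplicates %s; reuse it\n", name, prev)
                return
            }
            selectors[k] = name
        }
        record(7, 0, "v8 = SelectN [0] v7")
        record(7, 1, "v9 = SelectN [1] v7")
        record(7, 1, "v10 = SelectN [1] v7") // same (id, which) key -> duplicate
    }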
@@ -487,12 +487,14 @@ func wbcall(pos src.XPos, b *Block, fn, typ *obj.LSym, ptr, val, mem, sp, sb *Va
     off := config.ctxt.FixedFrameSize()

     var ACArgs []Param
+    var argTypes []*types.Type
     if typ != nil { // for typedmemmove
         taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
         off = round(off, taddr.Type.Alignment())
         arg := b.NewValue1I(pos, OpOffPtr, taddr.Type.PtrTo(), off, sp)
         mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, taddr, mem)
         ACArgs = append(ACArgs, Param{Type: b.Func.Config.Types.Uintptr, Offset: int32(off)})
+        argTypes = append(argTypes, b.Func.Config.Types.Uintptr)
         off += taddr.Type.Size()
     }

@@ -500,6 +502,7 @@ func wbcall(pos src.XPos, b *Block, fn, typ *obj.LSym, ptr, val, mem, sp, sb *Va
     arg := b.NewValue1I(pos, OpOffPtr, ptr.Type.PtrTo(), off, sp)
     mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, ptr, mem)
     ACArgs = append(ACArgs, Param{Type: ptr.Type, Offset: int32(off)})
+    argTypes = append(argTypes, ptr.Type)
     off += ptr.Type.Size()

     if val != nil {

@@ -507,15 +510,15 @@ func wbcall(pos src.XPos, b *Block, fn, typ *obj.LSym, ptr, val, mem, sp, sb *Va
         arg = b.NewValue1I(pos, OpOffPtr, val.Type.PtrTo(), off, sp)
         mem = b.NewValue3A(pos, OpStore, types.TypeMem, val.Type, arg, val, mem)
         ACArgs = append(ACArgs, Param{Type: val.Type, Offset: int32(off)})
+        argTypes = append(argTypes, val.Type)
         off += val.Type.Size()
     }
     off = round(off, config.PtrSize)

     // issue call
-    // TODO(register args) -- will need more details
-    mem = b.NewValue1A(pos, OpStaticCall, types.TypeMem, StaticAuxCall(fn, ACArgs, nil, nil), mem)
+    mem = b.NewValue1A(pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(fn, ACArgs, nil, b.Func.ABIDefault.ABIAnalyzeTypes(nil, argTypes, nil)), mem)
     mem.AuxInt = off - config.ctxt.FixedFrameSize()
-    return mem
+    return b.NewValue1I(pos, OpSelectN, types.TypeMem, 0, mem)
 }

 // round to a multiple of r, r is a power of 2
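The write-barrier call now carries a results type (types.TypeResultMem, defined later in this change) instead of plain memory, and its memory is recovered with an explicit SelectN projection. The SelectN indices used in this diff suggest the convention that ordinary results occupy the leading fields and memory is the final one: runtime.newobject above has SelectN [0] for the pointer and SelectN [1] for memory, while wbcall's call has no other results, so its memory is SelectN [0]. A hypothetical helper (not part of the commit) stating that convention:

    // memIndex gives the index of the memory field of a results-typed call,
    // assuming memory is always the last field (an assumption drawn from the
    // SelectN indices used in this diff).
    func memIndex(numFields int) int64 { return int64(numFields - 1) }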
@@ -563,12 +566,20 @@ func IsReadOnlyGlobalAddr(v *Value) bool {

 // IsNewObject reports whether v is a pointer to a freshly allocated & zeroed object at memory state mem.
 func IsNewObject(v *Value, mem *Value) bool {
+    // TODO this will need updating for register args; the OpLoad is wrong.
     if v.Op != OpLoad {
         return false
     }
     if v.MemoryArg() != mem {
         return false
     }
+    if mem.Op != OpSelectN {
+        return false
+    }
+    if mem.Type != types.TypeMem {
+        return false
+    } // assume it is the right selection if true
+    mem = mem.Args[0]
     if mem.Op != OpStaticCall {
         return false
     }
@@ -4734,7 +4734,8 @@ func (s *state) openDeferExit() {
             aux := ssa.ClosureAuxCall(ACArgs, ACResults)
             call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
         } else {
-            aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), ACArgs, ACResults, nil) // TODO will need types for this.
+            aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), ACArgs, ACResults,
+                s.f.ABIDefault.ABIAnalyzeTypes(nil, ssa.ACParamsToTypes(ACArgs), ssa.ACParamsToTypes(ACResults)))
             call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
         }
         callArgs = append(callArgs, s.mem())

@@ -4896,7 +4897,8 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val

         // Call runtime.deferprocStack with pointer to _defer record.
         ACArgs = append(ACArgs, ssa.Param{Type: types.Types[types.TUINTPTR], Offset: int32(base.Ctxt.FixedFrameSize())})
-        aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, ACArgs, ACResults, nil)
+        aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, ACArgs, ACResults,
+            s.f.ABIDefault.ABIAnalyzeTypes(nil, ssa.ACParamsToTypes(ACArgs), ssa.ACParamsToTypes(ACResults)))
         callArgs = append(callArgs, addr, s.mem())
         call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
         call.AddArgs(callArgs...)

@@ -4956,10 +4958,12 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
         // call target
         switch {
         case k == callDefer:
-            aux := ssa.StaticAuxCall(ir.Syms.Deferproc, ACArgs, ACResults, nil) // TODO paramResultInfo for DeferProc
+            aux := ssa.StaticAuxCall(ir.Syms.Deferproc, ACArgs, ACResults,
+                s.f.ABIDefault.ABIAnalyzeTypes(nil, ssa.ACParamsToTypes(ACArgs), ssa.ACParamsToTypes(ACResults))) // TODO paramResultInfo for DeferProc
             call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
         case k == callGo:
-            aux := ssa.StaticAuxCall(ir.Syms.Newproc, ACArgs, ACResults, nil)
+            aux := ssa.StaticAuxCall(ir.Syms.Newproc, ACArgs, ACResults,
+                s.f.ABIDefault.ABIAnalyzeTypes(nil, ssa.ACParamsToTypes(ACArgs), ssa.ACParamsToTypes(ACResults)))
             call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) // TODO paramResultInfo for NewProc
         case closure != nil:
             // rawLoad because loading the code pointer from a
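ssa.ACParamsToTypes is added elsewhere in this change; from its uses here it projects the Type field out of each Param so the existing Param-based call sites can feed ABIAnalyzeTypes. A sketch of what that amounts to (the real definition may differ in detail):

    // ACParamsToTypes converts a []Param into the []*types.Type slice that
    // ABIAnalyzeTypes expects (sketch; assumes the ssa package context).
    func ACParamsToTypes(ps []Param) []*types.Type {
        ts := make([]*types.Type, 0, len(ps))
        for _, p := range ps {
            ts = append(ts, p.Type)
        }
        return ts
    }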
@@ -5434,6 +5438,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
     var ACArgs []ssa.Param
     var ACResults []ssa.Param
     var callArgs []*ssa.Value
+    var callArgTypes []*types.Type

     for _, arg := range args {
         t := arg.Type

@@ -5441,6 +5446,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .
         size := t.Size()
         ACArgs = append(ACArgs, ssa.Param{Type: t, Offset: int32(off)})
         callArgs = append(callArgs, arg)
+        callArgTypes = append(callArgTypes, t)
         off += size
     }
     off = types.Rnd(off, int64(types.RegSize))

@@ -5455,7 +5461,7 @@ func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args .

     // Issue call
     var call *ssa.Value
-    aux := ssa.StaticAuxCall(fn, ACArgs, ACResults, nil) // WILL NEED A TYPE FOR THIS.)
+    aux := ssa.StaticAuxCall(fn, ACArgs, ACResults, s.f.ABIDefault.ABIAnalyzeTypes(nil, callArgTypes, results))
     callArgs = append(callArgs, s.mem())
     call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
     call.AddArgs(callArgs...)

@@ -6520,7 +6526,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
         // input args need no code
     case ssa.OpSP, ssa.OpSB:
         // nothing to do
-    case ssa.OpSelect0, ssa.OpSelect1:
+    case ssa.OpSelect0, ssa.OpSelect1, ssa.OpSelectN:
         // nothing to do
     case ssa.OpGetG:
         // nothing to do when there's a g register,
@@ -581,12 +581,19 @@ func NewTuple(t1, t2 *Type) *Type {
     return t
 }

-func NewResults(types []*Type) *Type {
+func newResults(types []*Type) *Type {
     t := New(TRESULTS)
     t.Extra.(*Results).Types = types
     return t
 }

+func NewResults(types []*Type) *Type {
+    if len(types) == 1 && types[0] == TypeMem {
+        return TypeResultMem
+    }
+    return newResults(types)
+}
+
 func newSSA(name string) *Type {
     t := New(TSSA)
     t.Extra = name

@@ -1407,6 +1414,9 @@ func (t *Type) PtrTo() *Type {
 }

 func (t *Type) NumFields() int {
+    if t.kind == TRESULTS {
+        return len(t.Extra.(*Results).Types)
+    }
     return t.Fields().Len()
 }
 func (t *Type) FieldType(i int) *Type {

@@ -1597,11 +1607,12 @@ func FakeRecvType() *Type {

 var (
     // TSSA types. HasPointers assumes these are pointer-free.
     TypeInvalid = newSSA("invalid")
     TypeMem     = newSSA("mem")
     TypeFlags   = newSSA("flags")
     TypeVoid    = newSSA("void")
     TypeInt128  = newSSA("int128")
+    TypeResultMem = newResults([]*Type{TypeMem})
 )

 // NewNamed returns a new named type for the given type name.
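With the split into newResults/NewResults, the common memory-only results type is interned as TypeResultMem, so it can be recognized by pointer identity, and NumFields now works uniformly on TRESULTS types. A hypothetical example test (not part of the commit, assumed to live in a _test.go file of this package) exercising the new behavior:

    package types

    import "fmt"

    // ExampleNewResults is illustrative only.
    func ExampleNewResults() {
        a := NewResults([]*Type{TypeMem})
        b := NewResults([]*Type{TypeMem, TypeInt128})
        fmt.Println(a == TypeResultMem) // the memory-only case is interned
        fmt.Println(b == TypeResultMem) // anything else is a fresh TRESULTS type
        fmt.Println(a.NumFields(), b.NumFields())
        // Output:
        // true
        // false
        // 1 2
    }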