// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

import (
	"cmd/internal/obj"
	"cmd/internal/src"
)

// needwb returns whether we need a write barrier for store op v.
// v must be Store/Move/Zero.
func needwb(v *Value) bool {
	t, ok := v.Aux.(Type)
	if !ok {
		v.Fatalf("store aux is not a type: %s", v.LongString())
	}
	if !t.HasPointer() {
		return false
	}
	if IsStackAddr(v.Args[0]) {
		return false // write on stack doesn't need write barrier
	}
	return true
}

// writebarrier pass inserts write barriers for store ops (Store, Move, Zero)
// when necessary (the condition above). It rewrites store ops to branches
// and runtime calls, like
//
// if writeBarrier.enabled {
//   writebarrierptr(ptr, val)
// } else {
//   *ptr = val
// }
//
// A sequence of WB stores for many pointer fields of a single type will
// be emitted together, with a single branch.
func writebarrier(f *Func) {
	if !f.fe.UseWriteBarrier() {
		return
	}

	var sb, sp, wbaddr, const0 *Value
	var writebarrierptr, typedmemmove, typedmemclr *obj.LSym
	var stores, after []*Value
	var sset *sparseSet
	var storeNumber []int32

	for _, b := range f.Blocks { // range loop is safe since the blocks we added contain no stores to expand
		// first, identify all the stores that need a write barrier.
		// mark them with WB ops temporarily. record presence of WB ops.
		hasStore := false
		for _, v := range b.Values {
			switch v.Op {
			case OpStore, OpMove, OpZero:
				if needwb(v) {
					switch v.Op {
					case OpStore:
						v.Op = OpStoreWB
					case OpMove:
						v.Op = OpMoveWB
					case OpZero:
						v.Op = OpZeroWB
					}
					hasStore = true
				}
			}
		}
		if !hasStore {
			continue
		}

		if wbaddr == nil {
			// lazily initialize global values for write barrier test and calls
			// find SB and SP values in entry block
			initpos := f.Entry.Pos
			for _, v := range f.Entry.Values {
				if v.Op == OpSB {
					sb = v
				}
				if v.Op == OpSP {
					sp = v
				}
				if sb != nil && sp != nil {
					break
				}
			}
			if sb == nil {
				sb = f.Entry.NewValue0(initpos, OpSB, f.fe.TypeUintptr())
			}
			if sp == nil {
				sp = f.Entry.NewValue0(initpos, OpSP, f.fe.TypeUintptr())
			}
			wbsym := &ExternSymbol{Typ: f.fe.TypeBool(), Sym: f.fe.Syslook("writeBarrier")}
			wbaddr = f.Entry.NewValue1A(initpos, OpAddr, f.fe.TypeUInt32().PtrTo(), wbsym, sb)
			writebarrierptr = f.fe.Syslook("writebarrierptr")
			typedmemmove = f.fe.Syslook("typedmemmove")
			typedmemclr = f.fe.Syslook("typedmemclr")
			const0 = f.ConstInt32(initpos, f.fe.TypeUInt32(), 0)

			// allocate auxiliary data structures for computing store order
			sset = f.newSparseSet(f.NumValues())
			defer f.retSparseSet(sset)
			storeNumber = make([]int32, f.NumValues())
		}

		// order values in store order
		b.Values = storeOrder(b.Values, sset, storeNumber)

	again:
		// find the start and end of the last contiguous WB store sequence.
		// a branch will be inserted there. values after it will be moved
		// to a new block.
		var last *Value
		var start, end int
		values := b.Values
		for i := len(values) - 1; i >= 0; i-- {
			w := values[i]
			if w.Op == OpStoreWB || w.Op == OpMoveWB || w.Op == OpZeroWB {
				if last == nil {
					last = w
					end = i + 1
				}
			} else {
				if last != nil {
					start = i + 1
					break
				}
			}
		}
		stores = append(stores[:0], b.Values[start:end]...) // copy to avoid aliasing
		after = append(after[:0], b.Values[end:]...)
		b.Values = b.Values[:start]

		// find the memory before the WB stores
		mem := stores[0].MemoryArg()
		pos := stores[0].Pos
		bThen := f.NewBlock(BlockPlain)
		bElse := f.NewBlock(BlockPlain)
		bEnd := f.NewBlock(b.Kind)
		bThen.Pos = pos
		bElse.Pos = pos
		bEnd.Pos = b.Pos
		b.Pos = pos

		// set up control flow for end block
		bEnd.SetControl(b.Control)
		bEnd.Likely = b.Likely
		for _, e := range b.Succs {
			bEnd.Succs = append(bEnd.Succs, e)
			e.b.Preds[e.i].b = bEnd
		}

		// set up control flow for write barrier test
		// load word, test word, avoiding partial register write from load byte.
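		// wbaddr points at runtime.writeBarrier (declared as a bool); reading it
		// as a 32-bit word and comparing against const0 keeps the test a full
		// register-width operation.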
		flag := b.NewValue2(pos, OpLoad, f.fe.TypeUInt32(), wbaddr, mem)
		flag = b.NewValue2(pos, OpNeq32, f.fe.TypeBool(), flag, const0)
		b.Kind = BlockIf
		b.SetControl(flag)
		b.Likely = BranchUnlikely
		b.Succs = b.Succs[:0]
		b.AddEdgeTo(bThen)
		b.AddEdgeTo(bElse)
		bThen.AddEdgeTo(bEnd)
		bElse.AddEdgeTo(bEnd)

		// for each write barrier store, append write barrier version to bThen
		// and simple store version to bElse
		memThen := mem
		memElse := mem
		for _, w := range stores {
			var val *Value
			ptr := w.Args[0]
			var typ interface{}
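			// typedmemmove and typedmemclr take the type descriptor as their first
			// argument; writebarrierptr does not, so typ stays nil for OpStoreWB.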
			if w.Op != OpStoreWB {
				typ = &ExternSymbol{Typ: f.fe.TypeUintptr(), Sym: w.Aux.(Type).Symbol()}
			}
			pos = w.Pos

			var fn *obj.LSym
			switch w.Op {
			case OpStoreWB:
				fn = writebarrierptr
				val = w.Args[1]
			case OpMoveWB:
				fn = typedmemmove
				val = w.Args[1]
			case OpZeroWB:
				fn = typedmemclr
			}

			// then block: emit write barrier call
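			// If val points into the outgoing argument area (see isVolatile), the
			// call itself would clobber it, so wbcall copies it to a temporary first.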
			volatile := w.Op == OpMoveWB && isVolatile(val)
			memThen = wbcall(pos, bThen, fn, typ, ptr, val, memThen, sp, sb, volatile)

			// else block: normal store
			switch w.Op {
			case OpStoreWB:
				memElse = bElse.NewValue3A(pos, OpStore, TypeMem, w.Aux, ptr, val, memElse)
			case OpMoveWB:
				memElse = bElse.NewValue3I(pos, OpMove, TypeMem, w.AuxInt, ptr, val, memElse)
				memElse.Aux = w.Aux
			case OpZeroWB:
				memElse = bElse.NewValue2I(pos, OpZero, TypeMem, w.AuxInt, ptr, memElse)
				memElse.Aux = w.Aux
			}

			if f.NoWB {
				f.fe.Error(pos, "write barrier prohibited")
			}
			if !f.WBPos.IsKnown() {
				f.WBPos = pos
			}
			if f.fe.Debug_wb() {
				f.Warnl(pos, "write barrier")
			}
		}

		// merge memory
		// Splice memory Phi into the last memory of the original sequence,
		// which may be used in subsequent blocks. Other memories in the
		// sequence must be dead after this block since there can be only
		// one live memory.
		bEnd.Values = append(bEnd.Values, last)
		last.Block = bEnd
		last.reset(OpPhi)
		last.Type = TypeMem
		last.AddArg(memThen)
		last.AddArg(memElse)
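		// Reset the arguments of all the dead stores before freeing any of them:
		// each store is still the memory argument of the next one in the chain,
		// so its use count reaches zero only after later stores drop their args.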
		for _, w := range stores {
			if w != last {
				w.resetArgs()
			}
		}
		for _, w := range stores {
			if w != last {
				f.freeValue(w)
			}
		}

		// put values after the store sequence into the end block
		bEnd.Values = append(bEnd.Values, after...)
		for _, w := range after {
			w.Block = bEnd
		}

		// if we have more stores in this block, do this block again
		for _, w := range b.Values {
			if w.Op == OpStoreWB || w.Op == OpMoveWB || w.Op == OpZeroWB {
				goto again
			}
		}
	}
}

// wbcall emits a write barrier runtime call in b and returns the new memory.
// If valIsVolatile, it moves val into temp space before making the call.
func wbcall(pos src.XPos, b *Block, fn *obj.LSym, typ interface{}, ptr, val, mem, sp, sb *Value, valIsVolatile bool) *Value {
	config := b.Func.Config

	var tmp GCNode
	if valIsVolatile {
		// Copy to temp location if the source is volatile (will be clobbered by
		// a function call). Marshaling the args to typedmemmove might clobber the
		// value we're trying to move.
		t := val.Type.ElemType()
		tmp = b.Func.fe.Auto(t)
		aux := &AutoSymbol{Typ: t, Node: tmp}
		mem = b.NewValue1A(pos, OpVarDef, TypeMem, tmp, mem)
		tmpaddr := b.NewValue1A(pos, OpAddr, t.PtrTo(), aux, sp)
		siz := t.Size()
		mem = b.NewValue3I(pos, OpMove, TypeMem, siz, tmpaddr, val, mem)
		mem.Aux = t
		val = tmpaddr
	}

	// put arguments on stack
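	// Outgoing arguments are laid out above the frame's fixed prologue area
	// (FixedFrameSize; zero on x86, nonzero on link-register machines).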
	off := config.ctxt.FixedFrameSize()

	if typ != nil { // for typedmemmove
		taddr := b.NewValue1A(pos, OpAddr, b.Func.fe.TypeUintptr(), typ, sb)
		off = round(off, taddr.Type.Alignment())
		arg := b.NewValue1I(pos, OpOffPtr, taddr.Type.PtrTo(), off, sp)
		mem = b.NewValue3A(pos, OpStore, TypeMem, ptr.Type, arg, taddr, mem)
		off += taddr.Type.Size()
	}

	off = round(off, ptr.Type.Alignment())
	arg := b.NewValue1I(pos, OpOffPtr, ptr.Type.PtrTo(), off, sp)
	mem = b.NewValue3A(pos, OpStore, TypeMem, ptr.Type, arg, ptr, mem)
	off += ptr.Type.Size()

	if val != nil {
		off = round(off, val.Type.Alignment())
		arg = b.NewValue1I(pos, OpOffPtr, val.Type.PtrTo(), off, sp)
		mem = b.NewValue3A(pos, OpStore, TypeMem, val.Type, arg, val, mem)
		off += val.Type.Size()
	}
	off = round(off, config.PtrSize)

	// issue call
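	// The call's AuxInt records the size of the argument area laid out above.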
	mem = b.NewValue1A(pos, OpStaticCall, TypeMem, fn, mem)
	mem.AuxInt = off - config.ctxt.FixedFrameSize()

	if valIsVolatile {
		mem = b.NewValue1A(pos, OpVarKill, TypeMem, tmp, mem) // mark temp dead
	}

	return mem
}

// round rounds o up to a multiple of r; r must be a power of 2.
func round(o int64, r int64) int64 {
	return (o + r - 1) &^ (r - 1)
}

// IsStackAddr returns whether v is known to be an address of a stack slot.
func IsStackAddr(v *Value) bool {
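	// Look through pointer arithmetic and copies to find the base address.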
	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
		v = v.Args[0]
	}
	switch v.Op {
	case OpSP:
		return true
	case OpAddr:
		return v.Args[0].Op == OpSP
	}
	return false
}

// isVolatile returns whether v is a pointer to the argument region on the stack,
// which will be clobbered by a function call.
func isVolatile(v *Value) bool {
	for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
		v = v.Args[0]
	}
	return v.Op == OpSP
}