cmd/compile: separate ssa.Frontend and ssa.TypeSource
Prior to this CL, the ssa.Frontend field was responsible for providing
types to the backend during compilation. However, the types needed by
the backend are few and static. It makes more sense to use a struct
for them and to hang that struct off the ssa.Config, which is the
correct home for readonly data. Now that Types is a struct, we can
clean up the names a bit as well.

This has the added benefit of allowing early construction of all
types needed by the backend. This will be useful for concurrent
backend compilation.

Passes toolstash-check -all. No compiler performance change.

Updates #15756

Change-Id: I021658c8cf2836d6a22bbc20cc828ac38c7da08a
Reviewed-on: https://go-review.googlesource.com/38336
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
This commit is contained in:
  parent 2c397c7a75
  commit aea3aff669

31 changed files with 4663 additions and 4623 deletions
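The shape of the change, in miniature: type lookups that used to be method calls on the per-function frontend become plain field reads on a struct built once and stored in the read-only config. The toy program below is only a sketch of that pattern — Type, Types, Config, and NewConfig here are stand-ins, not the compiler's real definitions. The actual diff follows; throughout it, fe.TypeX() call sites become types.X reads.

package main

import "fmt"

// Type stands in for ssa.Type (the real one is an interface
// carrying size, alignment, and kind information).
type Type struct{ name string }

// Types mirrors the idea of the new ssa.Types: a plain, read-only
// bundle of the handful of types the backend needs.
type Types struct {
	Bool, Int32, UInt32, Float64, BytePtr Type
}

// Config mirrors the idea of ssa.Config: read-only data shared by
// all compilations, which now carries the Types bundle.
type Config struct {
	arch  string
	Types Types
}

// NewConfig receives the fully built Types up front, so every type
// the backend needs exists before any function is compiled -- the
// property that later enables concurrent backend compilation.
func NewConfig(arch string, types Types) *Config {
	return &Config{arch: arch, Types: types}
}

func main() {
	// Built once by the frontend, cf. initssaconfig in the first hunk.
	types := Types{
		Bool:    Type{"bool"},
		Int32:   Type{"int32"},
		UInt32:  Type{"uint32"},
		Float64: Type{"float64"},
		BytePtr: Type{"*byte"},
	}
	c := NewConfig("amd64", types)

	// Call sites change from a Frontend method call, fe.TypeUInt32(),
	// to a field read off the shared config:
	fmt.Println(c.Types.UInt32.name)
}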
@@ -22,7 +22,24 @@ var ssaConfig *ssa.Config
 var ssaCache *ssa.Cache
 
 func initssaconfig() {
-	ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, Ctxt, Debug['N'] == 0)
+	types := ssa.Types{
+		Bool:    Types[TBOOL],
+		Int8:    Types[TINT8],
+		Int16:   Types[TINT16],
+		Int32:   Types[TINT32],
+		Int64:   Types[TINT64],
+		UInt8:   Types[TUINT8],
+		UInt16:  Types[TUINT16],
+		UInt32:  Types[TUINT32],
+		UInt64:  Types[TUINT64],
+		Float32: Types[TFLOAT32],
+		Float64: Types[TFLOAT64],
+		Int:     Types[TINT],
+		Uintptr: Types[TUINTPTR],
+		String:  Types[TSTRING],
+		BytePtr: ptrto(Types[TUINT8]),
+	}
+	ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, types, Ctxt, Debug['N'] == 0)
 	if thearch.LinkArch.Name == "386" {
 		ssaConfig.Set387(thearch.Use387)
 	}
@@ -4673,22 +4690,6 @@ type ssafn struct {
 	log bool
 }
 
-func (s *ssafn) TypeBool() ssa.Type    { return Types[TBOOL] }
-func (s *ssafn) TypeInt8() ssa.Type    { return Types[TINT8] }
-func (s *ssafn) TypeInt16() ssa.Type   { return Types[TINT16] }
-func (s *ssafn) TypeInt32() ssa.Type   { return Types[TINT32] }
-func (s *ssafn) TypeInt64() ssa.Type   { return Types[TINT64] }
-func (s *ssafn) TypeUInt8() ssa.Type   { return Types[TUINT8] }
-func (s *ssafn) TypeUInt16() ssa.Type  { return Types[TUINT16] }
-func (s *ssafn) TypeUInt32() ssa.Type  { return Types[TUINT32] }
-func (s *ssafn) TypeUInt64() ssa.Type  { return Types[TUINT64] }
-func (s *ssafn) TypeFloat32() ssa.Type { return Types[TFLOAT32] }
-func (s *ssafn) TypeFloat64() ssa.Type { return Types[TFLOAT64] }
-func (s *ssafn) TypeInt() ssa.Type     { return Types[TINT] }
-func (s *ssafn) TypeUintptr() ssa.Type { return Types[TUINTPTR] }
-func (s *ssafn) TypeString() ssa.Type  { return Types[TSTRING] }
-func (s *ssafn) TypeBytePtr() ssa.Type { return ptrto(Types[TUINT8]) }
-
 // StringData returns a symbol (a *Sym wrapped in an interface) which
 // is the data component of a global string constant containing s.
 func (*ssafn) StringData(s string) interface{} {
@@ -19,6 +19,7 @@ type Config struct {
 	IntSize    int64 // 4 or 8
 	PtrSize    int64 // 4 or 8
 	RegSize    int64 // 4 or 8
+	Types      Types
 	lowerBlock blockRewriter // lowering function
 	lowerValue valueRewriter // lowering function
 	registers  []Register    // machine registers
@@ -44,24 +45,22 @@ type (
 	valueRewriter func(*Value) bool
 )
 
-type TypeSource interface {
-	TypeBool() Type
-	TypeInt8() Type
-	TypeInt16() Type
-	TypeInt32() Type
-	TypeInt64() Type
-	TypeUInt8() Type
-	TypeUInt16() Type
-	TypeUInt32() Type
-	TypeUInt64() Type
-	TypeInt() Type
-	TypeFloat32() Type
-	TypeFloat64() Type
-	TypeUintptr() Type
-	TypeString() Type
-	TypeBytePtr() Type // TODO: use unsafe.Pointer instead?
-
-	CanSSA(t Type) bool
+type Types struct {
+	Bool    Type
+	Int8    Type
+	Int16   Type
+	Int32   Type
+	Int64   Type
+	UInt8   Type
+	UInt16  Type
+	UInt32  Type
+	UInt64  Type
+	Int     Type
+	Float32 Type
+	Float64 Type
+	Uintptr Type
+	String  Type
+	BytePtr Type // TODO: use unsafe.Pointer instead?
 }
 
 type Logger interface {
@@ -87,7 +86,8 @@ type Logger interface {
 }
 
 type Frontend interface {
-	TypeSource
+	CanSSA(t Type) bool
+
 	Logger
 
 	// StringData returns a symbol pointing to the given string's contents.
@@ -135,8 +135,8 @@ type GCNode interface {
 }
 
 // NewConfig returns a new configuration object for the given architecture.
-func NewConfig(arch string, ctxt *obj.Link, optimize bool) *Config {
-	c := &Config{arch: arch}
+func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config {
+	c := &Config{arch: arch, Types: types}
 	switch arch {
 	case "amd64":
 		c.IntSize = 8
@@ -28,15 +28,15 @@ func decomposeBuiltIn(f *Func) {
 		case t.IsInteger() && t.Size() == 8 && f.Config.IntSize == 4:
 			var elemType Type
 			if t.IsSigned() {
-				elemType = f.fe.TypeInt32()
+				elemType = f.Config.Types.Int32
 			} else {
-				elemType = f.fe.TypeUInt32()
+				elemType = f.Config.Types.UInt32
 			}
 			hiName, loName := f.fe.SplitInt64(name)
 			newNames = append(newNames, hiName, loName)
 			for _, v := range f.NamedValues[name] {
 				hi := v.Block.NewValue1(v.Pos, OpInt64Hi, elemType, v)
-				lo := v.Block.NewValue1(v.Pos, OpInt64Lo, f.fe.TypeUInt32(), v)
+				lo := v.Block.NewValue1(v.Pos, OpInt64Lo, f.Config.Types.UInt32, v)
 				f.NamedValues[hiName] = append(f.NamedValues[hiName], hi)
 				f.NamedValues[loName] = append(f.NamedValues[loName], lo)
 			}
@@ -44,9 +44,9 @@ func decomposeBuiltIn(f *Func) {
 		case t.IsComplex():
 			var elemType Type
 			if t.Size() == 16 {
-				elemType = f.fe.TypeFloat64()
+				elemType = f.Config.Types.Float64
 			} else {
-				elemType = f.fe.TypeFloat32()
+				elemType = f.Config.Types.Float32
 			}
 			rName, iName := f.fe.SplitComplex(name)
 			newNames = append(newNames, rName, iName)
@@ -58,8 +58,8 @@ func decomposeBuiltIn(f *Func) {
 			}
 			delete(f.NamedValues, name)
 		case t.IsString():
-			ptrType := f.fe.TypeBytePtr()
-			lenType := f.fe.TypeInt()
+			ptrType := f.Config.Types.BytePtr
+			lenType := f.Config.Types.Int
 			ptrName, lenName := f.fe.SplitString(name)
 			newNames = append(newNames, ptrName, lenName)
 			for _, v := range f.NamedValues[name] {
@@ -70,8 +70,8 @@ func decomposeBuiltIn(f *Func) {
 			}
 			delete(f.NamedValues, name)
 		case t.IsSlice():
-			ptrType := f.fe.TypeBytePtr()
-			lenType := f.fe.TypeInt()
+			ptrType := f.Config.Types.BytePtr
+			lenType := f.Config.Types.Int
 			ptrName, lenName, capName := f.fe.SplitSlice(name)
 			newNames = append(newNames, ptrName, lenName, capName)
 			for _, v := range f.NamedValues[name] {
@@ -84,7 +84,7 @@ func decomposeBuiltIn(f *Func) {
 			}
 			delete(f.NamedValues, name)
 		case t.IsInterface():
-			ptrType := f.fe.TypeBytePtr()
+			ptrType := f.Config.Types.BytePtr
 			typeName, dataName := f.fe.SplitInterface(name)
 			newNames = append(newNames, typeName, dataName)
 			for _, v := range f.NamedValues[name] {
@@ -129,9 +129,9 @@ func decomposeBuiltInPhi(v *Value) {
 }
 
 func decomposeStringPhi(v *Value) {
-	fe := v.Block.Func.fe
-	ptrType := fe.TypeBytePtr()
-	lenType := fe.TypeInt()
+	types := &v.Block.Func.Config.Types
+	ptrType := types.BytePtr
+	lenType := types.Int
 
 	ptr := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
 	len := v.Block.NewValue0(v.Pos, OpPhi, lenType)
@@ -145,9 +145,9 @@
 }
 
 func decomposeSlicePhi(v *Value) {
-	fe := v.Block.Func.fe
-	ptrType := fe.TypeBytePtr()
-	lenType := fe.TypeInt()
+	types := &v.Block.Func.Config.Types
+	ptrType := types.BytePtr
+	lenType := types.Int
 
 	ptr := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
 	len := v.Block.NewValue0(v.Pos, OpPhi, lenType)
@@ -164,19 +164,19 @@
 }
 
 func decomposeInt64Phi(v *Value) {
-	fe := v.Block.Func.fe
+	types := &v.Block.Func.Config.Types
 	var partType Type
 	if v.Type.IsSigned() {
-		partType = fe.TypeInt32()
+		partType = types.Int32
 	} else {
-		partType = fe.TypeUInt32()
+		partType = types.UInt32
 	}
 
 	hi := v.Block.NewValue0(v.Pos, OpPhi, partType)
-	lo := v.Block.NewValue0(v.Pos, OpPhi, fe.TypeUInt32())
+	lo := v.Block.NewValue0(v.Pos, OpPhi, types.UInt32)
 	for _, a := range v.Args {
 		hi.AddArg(a.Block.NewValue1(v.Pos, OpInt64Hi, partType, a))
-		lo.AddArg(a.Block.NewValue1(v.Pos, OpInt64Lo, fe.TypeUInt32(), a))
+		lo.AddArg(a.Block.NewValue1(v.Pos, OpInt64Lo, types.UInt32, a))
 	}
 	v.reset(OpInt64Make)
 	v.AddArg(hi)
@@ -184,13 +184,13 @@
 }
 
 func decomposeComplexPhi(v *Value) {
-	fe := v.Block.Func.fe
+	types := &v.Block.Func.Config.Types
 	var partType Type
 	switch z := v.Type.Size(); z {
 	case 8:
-		partType = fe.TypeFloat32()
+		partType = types.Float32
 	case 16:
-		partType = fe.TypeFloat64()
+		partType = types.Float64
 	default:
 		v.Fatalf("decomposeComplexPhi: bad complex size %d", z)
 	}
@@ -207,7 +207,7 @@
 }
 
 func decomposeInterfacePhi(v *Value) {
-	ptrType := v.Block.Func.fe.TypeBytePtr()
+	ptrType := v.Block.Func.Config.Types.BytePtr
 
 	itab := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
 	data := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
@@ -19,11 +19,11 @@ var Copyelim = copyelim
 var TestCtxt = obj.Linknew(&x86.Linkamd64)
 
 func testConfig(t testing.TB) *Config {
-	return NewConfig("amd64", TestCtxt, true)
+	return NewConfig("amd64", dummyTypes, TestCtxt, true)
 }
 
 func testConfigS390X(t testing.TB) *Config {
-	return NewConfig("s390x", obj.Linknew(&s390x.Links390x), true)
+	return NewConfig("s390x", dummyTypes, obj.Linknew(&s390x.Links390x), true)
 }
 
 // DummyFrontend is a test-only frontend.
@@ -52,27 +52,27 @@ func (DummyFrontend) Auto(t Type) GCNode {
 	return &DummyAuto{t: t, s: "aDummyAuto"}
 }
 func (d DummyFrontend) SplitString(s LocalSlot) (LocalSlot, LocalSlot) {
-	return LocalSlot{s.N, d.TypeBytePtr(), s.Off}, LocalSlot{s.N, d.TypeInt(), s.Off + 8}
+	return LocalSlot{s.N, dummyTypes.BytePtr, s.Off}, LocalSlot{s.N, dummyTypes.Int, s.Off + 8}
 }
 func (d DummyFrontend) SplitInterface(s LocalSlot) (LocalSlot, LocalSlot) {
-	return LocalSlot{s.N, d.TypeBytePtr(), s.Off}, LocalSlot{s.N, d.TypeBytePtr(), s.Off + 8}
+	return LocalSlot{s.N, dummyTypes.BytePtr, s.Off}, LocalSlot{s.N, dummyTypes.BytePtr, s.Off + 8}
 }
 func (d DummyFrontend) SplitSlice(s LocalSlot) (LocalSlot, LocalSlot, LocalSlot) {
 	return LocalSlot{s.N, s.Type.ElemType().PtrTo(), s.Off},
-		LocalSlot{s.N, d.TypeInt(), s.Off + 8},
-		LocalSlot{s.N, d.TypeInt(), s.Off + 16}
+		LocalSlot{s.N, dummyTypes.Int, s.Off + 8},
+		LocalSlot{s.N, dummyTypes.Int, s.Off + 16}
 }
 func (d DummyFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) {
 	if s.Type.Size() == 16 {
-		return LocalSlot{s.N, d.TypeFloat64(), s.Off}, LocalSlot{s.N, d.TypeFloat64(), s.Off + 8}
+		return LocalSlot{s.N, dummyTypes.Float64, s.Off}, LocalSlot{s.N, dummyTypes.Float64, s.Off + 8}
 	}
-	return LocalSlot{s.N, d.TypeFloat32(), s.Off}, LocalSlot{s.N, d.TypeFloat32(), s.Off + 4}
+	return LocalSlot{s.N, dummyTypes.Float32, s.Off}, LocalSlot{s.N, dummyTypes.Float32, s.Off + 4}
 }
 func (d DummyFrontend) SplitInt64(s LocalSlot) (LocalSlot, LocalSlot) {
 	if s.Type.IsSigned() {
-		return LocalSlot{s.N, d.TypeInt32(), s.Off + 4}, LocalSlot{s.N, d.TypeUInt32(), s.Off}
+		return LocalSlot{s.N, dummyTypes.Int32, s.Off + 4}, LocalSlot{s.N, dummyTypes.UInt32, s.Off}
 	}
-	return LocalSlot{s.N, d.TypeUInt32(), s.Off + 4}, LocalSlot{s.N, d.TypeUInt32(), s.Off}
+	return LocalSlot{s.N, dummyTypes.UInt32, s.Off + 4}, LocalSlot{s.N, dummyTypes.UInt32, s.Off}
 }
 func (d DummyFrontend) SplitStruct(s LocalSlot, i int) LocalSlot {
 	return LocalSlot{s.N, s.Type.FieldType(i), s.Off + s.Type.FieldOff(i)}
@@ -101,21 +101,24 @@ func (d DummyFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t
 func (d DummyFrontend) Debug_checknil() bool { return false }
 func (d DummyFrontend) Debug_wb() bool       { return false }
 
-func (d DummyFrontend) TypeBool() Type    { return TypeBool }
-func (d DummyFrontend) TypeInt8() Type    { return TypeInt8 }
-func (d DummyFrontend) TypeInt16() Type   { return TypeInt16 }
-func (d DummyFrontend) TypeInt32() Type   { return TypeInt32 }
-func (d DummyFrontend) TypeInt64() Type   { return TypeInt64 }
-func (d DummyFrontend) TypeUInt8() Type   { return TypeUInt8 }
-func (d DummyFrontend) TypeUInt16() Type  { return TypeUInt16 }
-func (d DummyFrontend) TypeUInt32() Type  { return TypeUInt32 }
-func (d DummyFrontend) TypeUInt64() Type  { return TypeUInt64 }
-func (d DummyFrontend) TypeFloat32() Type { return TypeFloat32 }
-func (d DummyFrontend) TypeFloat64() Type { return TypeFloat64 }
-func (d DummyFrontend) TypeInt() Type     { return TypeInt64 }
-func (d DummyFrontend) TypeUintptr() Type { return TypeUInt64 }
-func (d DummyFrontend) TypeString() Type  { panic("unimplemented") }
-func (d DummyFrontend) TypeBytePtr() Type { return TypeBytePtr }
+var dummyTypes = Types{
+	Bool:    TypeBool,
+	Int8:    TypeInt8,
+	Int16:   TypeInt16,
+	Int32:   TypeInt32,
+	Int64:   TypeInt64,
+	UInt8:   TypeUInt8,
+	UInt16:  TypeUInt16,
+	UInt32:  TypeUInt32,
+	UInt64:  TypeUInt64,
+	Float32: TypeFloat32,
+	Float64: TypeFloat64,
+	Int:     TypeInt64,
+	Uintptr: TypeUInt64,
+	String:  nil,
+	BytePtr: TypeBytePtr,
+}
+
 func (d DummyFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil }
 
 func (d DummyFrontend) CanSSA(t Type) bool {
@@ -68,8 +68,8 @@
 (Neg32 x) -> (NEGL x)
 (Neg16 x) -> (NEGL x)
 (Neg8 x) -> (NEGL x)
-(Neg32F x) && !config.use387 -> (PXOR x (MOVSSconst <fe.TypeFloat32()> [f2i(math.Copysign(0, -1))]))
-(Neg64F x) && !config.use387 -> (PXOR x (MOVSDconst <fe.TypeFloat64()> [f2i(math.Copysign(0, -1))]))
+(Neg32F x) && !config.use387 -> (PXOR x (MOVSSconst <types.Float32> [f2i(math.Copysign(0, -1))]))
+(Neg64F x) && !config.use387 -> (PXOR x (MOVSDconst <types.Float64> [f2i(math.Copysign(0, -1))]))
 (Neg32F x) && config.use387 -> (FCHS x)
 (Neg64F x) && config.use387 -> (FCHS x)
 
@@ -78,8 +78,8 @@
 (Neg32 x) -> (NEGL x)
 (Neg16 x) -> (NEGL x)
 (Neg8 x) -> (NEGL x)
-(Neg32F x) -> (PXOR x (MOVSSconst <fe.TypeFloat32()> [f2i(math.Copysign(0, -1))]))
-(Neg64F x) -> (PXOR x (MOVSDconst <fe.TypeFloat64()> [f2i(math.Copysign(0, -1))]))
+(Neg32F x) -> (PXOR x (MOVSSconst <types.Float32> [f2i(math.Copysign(0, -1))]))
+(Neg64F x) -> (PXOR x (MOVSDconst <types.Float64> [f2i(math.Copysign(0, -1))]))
 
 (Com64 x) -> (NOTQ x)
 (Com32 x) -> (NOTL x)
@@ -98,10 +98,10 @@
 
 // Lowering other arithmetic
 (Ctz64 <t> x) -> (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x)))
-(Ctz32 x) -> (Select0 (BSFQ (ORQ <fe.TypeUInt64()> (MOVQconst [1<<32]) x)))
+(Ctz32 x) -> (Select0 (BSFQ (ORQ <types.UInt64> (MOVQconst [1<<32]) x)))
 
 (BitLen64 <t> x) -> (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <TypeFlags> (BSRQ x))))
-(BitLen32 x) -> (BitLen64 (MOVLQZX <fe.TypeUInt64()> x))
+(BitLen32 x) -> (BitLen64 (MOVLQZX <types.UInt64> x))
 
 (Bswap64 x) -> (BSWAPQ x)
 (Bswap32 x) -> (BSWAPL x)
@@ -472,10 +472,10 @@
 
 // Atomic stores. We use XCHG to prevent the hardware reordering a subsequent load.
 // TODO: most runtime uses of atomic stores don't need that property. Use normal stores for those?
-(AtomicStore32 ptr val mem) -> (Select1 (XCHGL <MakeTuple(fe.TypeUInt32(),TypeMem)> val ptr mem))
-(AtomicStore64 ptr val mem) -> (Select1 (XCHGQ <MakeTuple(fe.TypeUInt64(),TypeMem)> val ptr mem))
-(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 8 -> (Select1 (XCHGQ <MakeTuple(fe.TypeBytePtr(),TypeMem)> val ptr mem))
-(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 4 -> (Select1 (XCHGL <MakeTuple(fe.TypeBytePtr(),TypeMem)> val ptr mem))
+(AtomicStore32 ptr val mem) -> (Select1 (XCHGL <MakeTuple(types.UInt32,TypeMem)> val ptr mem))
+(AtomicStore64 ptr val mem) -> (Select1 (XCHGQ <MakeTuple(types.UInt64,TypeMem)> val ptr mem))
+(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 8 -> (Select1 (XCHGQ <MakeTuple(types.BytePtr,TypeMem)> val ptr mem))
+(AtomicStorePtrNoWB ptr val mem) && config.PtrSize == 4 -> (Select1 (XCHGL <MakeTuple(types.BytePtr,TypeMem)> val ptr mem))
 
 // Atomic exchanges.
 (AtomicExchange32 ptr val mem) -> (XCHGL val ptr mem)
@@ -553,8 +553,8 @@
 (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) -> (NEF cmp yes no)
 
 // Disabled because it interferes with the pattern match above and makes worse code.
-// (SETNEF x) -> (ORQ (SETNE <fe.TypeInt8()> x) (SETNAN <fe.TypeInt8()> x))
-// (SETEQF x) -> (ANDQ (SETEQ <fe.TypeInt8()> x) (SETORD <fe.TypeInt8()> x))
+// (SETNEF x) -> (ORQ (SETNE <types.Int8> x) (SETNAN <types.Int8> x))
+// (SETEQF x) -> (ANDQ (SETEQ <types.Int8> x) (SETORD <types.Int8> x))
 
 // fold constants into instructions
 (ADDQ x (MOVQconst [c])) && is32Bit(c) -> (ADDQconst [c] x)
@@ -34,12 +34,12 @@
 (Mul32uhilo x y) -> (MULLU x y)
 
 (Div32 x y) ->
-	(SUB (XOR <fe.TypeUInt32()> // negate the result if one operand is negative
-		(Select0 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)}
-			(SUB <fe.TypeUInt32()> (XOR x <fe.TypeUInt32()> (Signmask x)) (Signmask x)) // negate x if negative
-			(SUB <fe.TypeUInt32()> (XOR y <fe.TypeUInt32()> (Signmask y)) (Signmask y)))) // negate y if negative
-		(Signmask (XOR <fe.TypeUInt32()> x y))) (Signmask (XOR <fe.TypeUInt32()> x y)))
-(Div32u x y) -> (Select0 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
+	(SUB (XOR <types.UInt32> // negate the result if one operand is negative
+		(Select0 <types.UInt32> (CALLudiv {config.ctxt.Lookup("udiv", 0)}
+			(SUB <types.UInt32> (XOR x <types.UInt32> (Signmask x)) (Signmask x)) // negate x if negative
+			(SUB <types.UInt32> (XOR y <types.UInt32> (Signmask y)) (Signmask y)))) // negate y if negative
+		(Signmask (XOR <types.UInt32> x y))) (Signmask (XOR <types.UInt32> x y)))
+(Div32u x y) -> (Select0 <types.UInt32> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
 (Div16 x y) -> (Div32 (SignExt16to32 x) (SignExt16to32 y))
 (Div16u x y) -> (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
 (Div8 x y) -> (Div32 (SignExt8to32 x) (SignExt8to32 y))
@@ -48,12 +48,12 @@
 (Div64F x y) -> (DIVD x y)
 
 (Mod32 x y) ->
-	(SUB (XOR <fe.TypeUInt32()> // negate the result if x is negative
-		(Select1 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)}
-			(SUB <fe.TypeUInt32()> (XOR <fe.TypeUInt32()> x (Signmask x)) (Signmask x)) // negate x if negative
-			(SUB <fe.TypeUInt32()> (XOR <fe.TypeUInt32()> y (Signmask y)) (Signmask y)))) // negate y if negative
+	(SUB (XOR <types.UInt32> // negate the result if x is negative
+		(Select1 <types.UInt32> (CALLudiv {config.ctxt.Lookup("udiv", 0)}
+			(SUB <types.UInt32> (XOR <types.UInt32> x (Signmask x)) (Signmask x)) // negate x if negative
+			(SUB <types.UInt32> (XOR <types.UInt32> y (Signmask y)) (Signmask y)))) // negate y if negative
 		(Signmask x)) (Signmask x))
-(Mod32u x y) -> (Select1 <fe.TypeUInt32()> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
+(Mod32u x y) -> (Select1 <types.UInt32> (CALLudiv {config.ctxt.Lookup("udiv", 0)} x y))
 (Mod16 x y) -> (Mod32 (SignExt16to32 x) (SignExt16to32 y))
 (Mod16u x y) -> (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
 (Mod8 x y) -> (Mod32 (SignExt8to32 x) (SignExt8to32 y))
@@ -111,7 +111,7 @@
 // boolean ops -- booleans are represented with 0=false, 1=true
 (AndB x y) -> (AND x y)
 (OrB x y) -> (OR x y)
-(EqB x y) -> (XORconst [1] (XOR <fe.TypeBool()> x y))
+(EqB x y) -> (XORconst [1] (XOR <types.Bool> x y))
 (NeqB x y) -> (XOR x y)
 (Not x) -> (XORconst [1] x)
 
@@ -160,11 +160,11 @@
 (Rsh32x64 x (Const64 [c])) && uint64(c) < 32 -> (SRAconst x [c])
 (Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 -> (SRLconst x [c])
 (Lsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SLLconst x [c])
-(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
-(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRLconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
+(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 -> (SRAconst (SLLconst <types.UInt32> x [16]) [c+16])
+(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 -> (SRLconst (SLLconst <types.UInt32> x [16]) [c+16])
 (Lsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SLLconst x [c])
-(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
-(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRLconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
+(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 -> (SRAconst (SLLconst <types.UInt32> x [24]) [c+24])
+(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 -> (SRLconst (SLLconst <types.UInt32> x [24]) [c+24])
 
 // large constant shifts
 (Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 -> (Const32 [0])
@@ -176,8 +176,8 @@
 
 // large constant signed right shift, we leave the sign bit
 (Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 -> (SRAconst x [31])
-(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [31])
-(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [31])
+(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 -> (SRAconst (SLLconst <types.UInt32> x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 -> (SRAconst (SLLconst <types.UInt32> x [24]) [31])
 
 // constants
 (Const8 [val]) -> (MOVWconst [val])
@@ -204,7 +204,7 @@
 (SignExt16to32 x) -> (MOVHreg x)
 
 (Signmask x) -> (SRAconst x [31])
-(Zeromask x) -> (SRAconst (RSBshiftRL <fe.TypeInt32()> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
+(Zeromask x) -> (SRAconst (RSBshiftRL <types.Int32> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
 (Slicemask <t> x) -> (SRAconst (RSBconst <t> [0] x) [31])
 
 // float <-> int conversion
@@ -27,8 +27,8 @@
 
 (Hmul64 x y) -> (MULH x y)
 (Hmul64u x y) -> (UMULH x y)
-(Hmul32 x y) -> (SRAconst (MULL <fe.TypeInt64()> x y) [32])
-(Hmul32u x y) -> (SRAconst (UMULL <fe.TypeUInt64()> x y) [32])
+(Hmul32 x y) -> (SRAconst (MULL <types.Int64> x y) [32])
+(Hmul32u x y) -> (SRAconst (UMULL <types.UInt64> x y) [32])
 
 (Div64 x y) -> (DIV x y)
 (Div64u x y) -> (UDIV x y)
@@ -86,20 +86,20 @@
 (Ctz64 <t> x) -> (CLZ (RBIT <t> x))
 (Ctz32 <t> x) -> (CLZW (RBITW <t> x))
 
-(BitLen64 x) -> (SUB (MOVDconst [64]) (CLZ <fe.TypeInt()> x))
+(BitLen64 x) -> (SUB (MOVDconst [64]) (CLZ <types.Int> x))
 
 (Bswap64 x) -> (REV x)
 (Bswap32 x) -> (REVW x)
 
 (BitRev64 x) -> (RBIT x)
 (BitRev32 x) -> (RBITW x)
-(BitRev16 x) -> (SRLconst [48] (RBIT <fe.TypeUInt64()> x))
-(BitRev8 x) -> (SRLconst [56] (RBIT <fe.TypeUInt64()> x))
+(BitRev16 x) -> (SRLconst [48] (RBIT <types.UInt64> x))
+(BitRev8 x) -> (SRLconst [56] (RBIT <types.UInt64> x))
 
 // boolean ops -- booleans are represented with 0=false, 1=true
 (AndB x y) -> (AND x y)
 (OrB x y) -> (OR x y)
-(EqB x y) -> (XOR (MOVDconst [1]) (XOR <fe.TypeBool()> x y))
+(EqB x y) -> (XOR (MOVDconst [1]) (XOR <types.Bool> x y))
 (NeqB x y) -> (XOR x y)
 (Not x) -> (XOR (MOVDconst [1]) x)
 
@@ -10,7 +10,7 @@
 (Add64F x y) -> (ADDD x y)
 
 (Select0 (Add32carry <t> x y)) -> (ADD <t.FieldType(0)> x y)
-(Select1 (Add32carry <t> x y)) -> (SGTU <fe.TypeBool()> x (ADD <t.FieldType(0)> x y))
+(Select1 (Add32carry <t> x y)) -> (SGTU <types.Bool> x (ADD <t.FieldType(0)> x y))
 (Add32withcarry <t> x y c) -> (ADD c (ADD <t> x y))
 
 (SubPtr x y) -> (SUB x y)
@@ -21,7 +21,7 @@
 (Sub64F x y) -> (SUBD x y)
 
 (Select0 (Sub32carry <t> x y)) -> (SUB <t.FieldType(0)> x y)
-(Select1 (Sub32carry <t> x y)) -> (SGTU <fe.TypeBool()> (SUB <t.FieldType(0)> x y) x)
+(Select1 (Sub32carry <t> x y)) -> (SGTU <types.Bool> (SUB <t.FieldType(0)> x y) x)
 (Sub32withcarry <t> x y c) -> (SUB (SUB <t> x y) c)
 
 (Mul32 x y) -> (MUL x y)
@@ -72,11 +72,11 @@
 (Rsh32x64 x (Const64 [c])) && uint32(c) < 32 -> (SRAconst x [c])
 (Rsh32Ux64 x (Const64 [c])) && uint32(c) < 32 -> (SRLconst x [c])
 (Lsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SLLconst x [c])
-(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
-(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 -> (SRLconst (SLLconst <fe.TypeUInt32()> x [16]) [c+16])
+(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 -> (SRAconst (SLLconst <types.UInt32> x [16]) [c+16])
+(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 -> (SRLconst (SLLconst <types.UInt32> x [16]) [c+16])
 (Lsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SLLconst x [c])
-(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
-(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 -> (SRLconst (SLLconst <fe.TypeUInt32()> x [24]) [c+24])
+(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 -> (SRAconst (SLLconst <types.UInt32> x [24]) [c+24])
+(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 -> (SRLconst (SLLconst <types.UInt32> x [24]) [c+24])
 
 // large constant shifts
 (Lsh32x64 _ (Const64 [c])) && uint32(c) >= 32 -> (MOVWconst [0])
@@ -88,8 +88,8 @@
 
 // large constant signed right shift, we leave the sign bit
 (Rsh32x64 x (Const64 [c])) && uint32(c) >= 32 -> (SRAconst x [31])
-(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [16]) [31])
-(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 -> (SRAconst (SLLconst <fe.TypeUInt32()> x [24]) [31])
+(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 -> (SRAconst (SLLconst <types.UInt32> x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 -> (SRAconst (SLLconst <types.UInt32> x [24]) [31])
 
 // shifts
 // hardware instruction uses only the low 5 bits of the shift
@@ -118,17 +118,17 @@
 (Rsh8Ux16 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
 (Rsh8Ux8 <t> x y) -> (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
 
-(Rsh32x32 x y) -> (SRA x ( CMOVZ <fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh32x16 x y) -> (SRA x ( CMOVZ <fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh32x8 x y) -> (SRA x ( CMOVZ <fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh32x32 x y) -> (SRA x ( CMOVZ <types.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh32x16 x y) -> (SRA x ( CMOVZ <types.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh32x8 x y) -> (SRA x ( CMOVZ <types.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
 
-(Rsh16x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh16x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh16x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh16x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh16x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh16x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
 
-(Rsh8x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> y (MOVWconst [-1]) (SGTUconst [32] y)))
-(Rsh8x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
-(Rsh8x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <fe.TypeUInt32()> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
+(Rsh8x32 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> y (MOVWconst [-1]) (SGTUconst [32] y)))
+(Rsh8x16 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt16to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh8x8 x y) -> (SRA (SignExt16to32 x) ( CMOVZ <types.UInt32> (ZeroExt8to32 y) (MOVWconst [-1]) (SGTUconst [32] (ZeroExt8to32 y))))
 
 // unary ops
 (Neg32 x) -> (NEG x)
@@ -153,7 +153,7 @@
 // boolean ops -- booleans are represented with 0=false, 1=true
 (AndB x y) -> (AND x y)
 (OrB x y) -> (OR x y)
-(EqB x y) -> (XORconst [1] (XOR <fe.TypeBool()> x y))
+(EqB x y) -> (XORconst [1] (XOR <types.Bool> x y))
 (NeqB x y) -> (XOR x y)
 (Not x) -> (XORconst [1] x)
 
@@ -393,41 +393,41 @@
 
 // AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << ((ptr & 3) * 8))
 (AtomicOr8 ptr val mem) && !config.BigEndian ->
-	(LoweredAtomicOr (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
-		(SLL <fe.TypeUInt32()> (ZeroExt8to32 val)
-			(SLLconst <fe.TypeUInt32()> [3]
-				(ANDconst <fe.TypeUInt32()> [3] ptr))) mem)
+	(LoweredAtomicOr (AND <types.UInt32.PtrTo()> (MOVWconst [^3]) ptr)
+		(SLL <types.UInt32> (ZeroExt8to32 val)
+			(SLLconst <types.UInt32> [3]
+				(ANDconst <types.UInt32> [3] ptr))) mem)
 
 // AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8))))
 (AtomicAnd8 ptr val mem) && !config.BigEndian ->
-	(LoweredAtomicAnd (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
-		(OR <fe.TypeUInt32()> (SLL <fe.TypeUInt32()> (ZeroExt8to32 val)
-			(SLLconst <fe.TypeUInt32()> [3]
-				(ANDconst <fe.TypeUInt32()> [3] ptr)))
-		(NORconst [0] <fe.TypeUInt32()> (SLL <fe.TypeUInt32()>
-			(MOVWconst [0xff]) (SLLconst <fe.TypeUInt32()> [3]
-				(ANDconst <fe.TypeUInt32()> [3]
-					(XORconst <fe.TypeUInt32()> [3] ptr)))))) mem)
+	(LoweredAtomicAnd (AND <types.UInt32.PtrTo()> (MOVWconst [^3]) ptr)
+		(OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val)
+			(SLLconst <types.UInt32> [3]
+				(ANDconst <types.UInt32> [3] ptr)))
+		(NORconst [0] <types.UInt32> (SLL <types.UInt32>
+			(MOVWconst [0xff]) (SLLconst <types.UInt32> [3]
+				(ANDconst <types.UInt32> [3]
+					(XORconst <types.UInt32> [3] ptr)))))) mem)
 
 // AtomicOr8(ptr,val) -> LoweredAtomicOr(ptr&^3,uint32(val) << (((ptr^3) & 3) * 8))
 (AtomicOr8 ptr val mem) && config.BigEndian ->
-	(LoweredAtomicOr (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
-		(SLL <fe.TypeUInt32()> (ZeroExt8to32 val)
-			(SLLconst <fe.TypeUInt32()> [3]
-				(ANDconst <fe.TypeUInt32()> [3]
-					(XORconst <fe.TypeUInt32()> [3] ptr)))) mem)
+	(LoweredAtomicOr (AND <types.UInt32.PtrTo()> (MOVWconst [^3]) ptr)
+		(SLL <types.UInt32> (ZeroExt8to32 val)
+			(SLLconst <types.UInt32> [3]
+				(ANDconst <types.UInt32> [3]
+					(XORconst <types.UInt32> [3] ptr)))) mem)
 
 // AtomicAnd8(ptr,val) -> LoweredAtomicAnd(ptr&^3,(uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8))))
 (AtomicAnd8 ptr val mem) && config.BigEndian ->
-	(LoweredAtomicAnd (AND <fe.TypeUInt32().PtrTo()> (MOVWconst [^3]) ptr)
-		(OR <fe.TypeUInt32()> (SLL <fe.TypeUInt32()> (ZeroExt8to32 val)
-			(SLLconst <fe.TypeUInt32()> [3]
-				(ANDconst <fe.TypeUInt32()> [3]
-					(XORconst <fe.TypeUInt32()> [3] ptr))))
-		(NORconst [0] <fe.TypeUInt32()> (SLL <fe.TypeUInt32()>
-			(MOVWconst [0xff]) (SLLconst <fe.TypeUInt32()> [3]
-				(ANDconst <fe.TypeUInt32()> [3]
-					(XORconst <fe.TypeUInt32()> [3] ptr)))))) mem)
+	(LoweredAtomicAnd (AND <types.UInt32.PtrTo()> (MOVWconst [^3]) ptr)
+		(OR <types.UInt32> (SLL <types.UInt32> (ZeroExt8to32 val)
+			(SLLconst <types.UInt32> [3]
+				(ANDconst <types.UInt32> [3]
+					(XORconst <types.UInt32> [3] ptr))))
+		(NORconst [0] <types.UInt32> (SLL <types.UInt32>
+			(MOVWconst [0xff]) (SLLconst <types.UInt32> [3]
+				(ANDconst <types.UInt32> [3]
+					(XORconst <types.UInt32> [3] ptr)))))) mem)
 
 
 // checks
@@ -27,8 +27,8 @@
 
 (Hmul64 x y) -> (Select0 (MULV x y))
 (Hmul64u x y) -> (Select0 (MULVU x y))
-(Hmul32 x y) -> (SRAVconst (Select1 <fe.TypeInt64()> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
-(Hmul32u x y) -> (SRLVconst (Select1 <fe.TypeUInt64()> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
+(Hmul32 x y) -> (SRAVconst (Select1 <types.Int64> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
+(Hmul32u x y) -> (SRLVconst (Select1 <types.UInt64> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
 
 (Div64 x y) -> (Select1 (DIVV x y))
 (Div64u x y) -> (Select1 (DIVVU x y))
@@ -71,65 +71,65 @@
 // shifts
 // hardware instruction uses only the low 6 bits of the shift
 // we compare to 64 to ensure Go semantics for large shifts
-(Lsh64x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
-(Lsh64x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-(Lsh64x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-(Lsh64x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+(Lsh64x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh64x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh64x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh64x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
 
-(Lsh32x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
-(Lsh32x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-(Lsh32x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-(Lsh32x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+(Lsh32x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh32x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh32x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh32x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
 
-(Lsh16x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
-(Lsh16x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-(Lsh16x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-(Lsh16x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+(Lsh16x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh16x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh16x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh16x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
 
-(Lsh8x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SLLV <t> x y))
-(Lsh8x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
-(Lsh8x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
-(Lsh8x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+(Lsh8x64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh8x32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh8x16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh8x8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
 
-(Rsh64Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SRLV <t> x y))
-(Rsh64Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
-(Rsh64Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
-(Rsh64Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
+(Rsh64Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SRLV <t> x y))
+(Rsh64Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
+(Rsh64Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
+(Rsh64Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
 
-(Rsh32Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
-(Rsh32Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
-(Rsh32Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
-(Rsh32Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
+(Rsh32Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
+(Rsh32Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Rsh32Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
+(Rsh32Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
 
-(Rsh16Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
-(Rsh16Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
-(Rsh16Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
-(Rsh16Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
+(Rsh16Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
+(Rsh16Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
+(Rsh16Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Rsh16Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
 
-(Rsh8Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
-(Rsh8Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
-(Rsh8Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
-(Rsh8Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <fe.TypeUInt64()> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Rsh8Ux64 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
+(Rsh8Ux32 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
+(Rsh8Ux16 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
+(Rsh8Ux8 <t> x y) -> (AND (NEGV <t> (SGTU (Const64 <types.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
 
-(Rsh64x64 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU y (Const64 <fe.TypeUInt64()> [63]))) y))
-(Rsh64x32 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
-(Rsh64x16 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
-(Rsh64x8 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
+(Rsh64x64 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU y (Const64 <types.UInt64> [63]))) y))
+(Rsh64x32 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh64x16 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh64x8 <t> x y) -> (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt8to64 y)))
 
-(Rsh32x64 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <fe.TypeUInt64()> [63]))) y))
-(Rsh32x32 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
-(Rsh32x16 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
-(Rsh32x8 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
+(Rsh32x64 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <types.UInt64> [63]))) y))
+(Rsh32x32 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh32x16 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh32x8 <t> x y) -> (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt8to64 y)))
 
-(Rsh16x64 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <fe.TypeUInt64()> [63]))) y))
-(Rsh16x32 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
-(Rsh16x16 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
-(Rsh16x8 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
+(Rsh16x64 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <types.UInt64> [63]))) y))
+(Rsh16x32 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh16x16 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh16x8 <t> x y) -> (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt8to64 y)))
 
-(Rsh8x64 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <fe.TypeUInt64()> [63]))) y))
-(Rsh8x32 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt32to64 y)))
-(Rsh8x16 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt16to64 y)))
-(Rsh8x8 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <fe.TypeUInt64()> [63]))) (ZeroExt8to64 y)))
+(Rsh8x64 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (Const64 <types.UInt64> [63]))) y))
+(Rsh8x32 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh8x16 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh8x8 <t> x y) -> (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (Const64 <types.UInt64> [63]))) (ZeroExt8to64 y)))
 
 // unary ops
 (Neg64 x) -> (NEGV x)
@@ -147,7 +147,7 @@
 // boolean ops -- booleans are represented with 0=false, 1=true
 (AndB x y) -> (AND x y)
 (OrB x y) -> (OR x y)
-(EqB x y) -> (XOR (MOVVconst [1]) (XOR <fe.TypeBool()> x y))
+(EqB x y) -> (XOR (MOVVconst [1]) (XOR <types.Bool> x y))
 (NeqB x y) -> (XOR x y)
 (Not x) -> (XORconst [1] x)
 
@@ -154,72 +154,72 @@
 (Rsh8x32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRAWconst (SignExt8to32 x) [c])
 (Rsh8Ux32 x (MOVDconst [c])) && uint32(c) < 8 -> (SRWconst (ZeroExt8to32 x) [c])
 
-(Rsh64x64 x y) -> (SRAD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
-(Rsh64Ux64 x y) -> (SRD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
-(Lsh64x64 x y) -> (SLD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+(Rsh64x64 x y) -> (SRAD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+(Rsh64Ux64 x y) -> (SRD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
+(Lsh64x64 x y) -> (SLD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] y))))
 
-(Rsh32x64 x y) -> (SRAW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
-(Rsh32Ux64 x y) -> (SRW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
-(Lsh32x64 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+(Rsh32x64 x y) -> (SRAW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+(Rsh32Ux64 x y) -> (SRW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
+(Lsh32x64 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] y))))
 
-(Rsh16x64 x y) -> (SRAW (SignExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
-(Rsh16Ux64 x y) -> (SRW (ZeroExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
-(Lsh16x64 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+(Rsh16x64 x y) -> (SRAW (SignExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+(Rsh16Ux64 x y) -> (SRW (ZeroExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
+(Lsh16x64 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] y))))
 
-(Rsh8x64 x y) -> (SRAW (SignExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
-(Rsh8Ux64 x y) -> (SRW (ZeroExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
-(Lsh8x64 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+(Rsh8x64 x y) -> (SRAW (SignExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+(Rsh8Ux64 x y) -> (SRW (ZeroExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
+(Lsh8x64 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] y))))
 
 
-(Rsh64x32 x y) -> (SRAD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
-(Rsh64Ux32 x y) -> (SRD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
-(Lsh64x32 x y) -> (SLD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+(Rsh64x32 x y) -> (SRAD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+(Rsh64Ux32 x y) -> (SRD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
+(Lsh64x32 x y) -> (SLD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt32to64 y)))))
 
-(Rsh32x32 x y) -> (SRAW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
-(Rsh32Ux32 x y) -> (SRW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
-(Lsh32x32 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+(Rsh32x32 x y) -> (SRAW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+(Rsh32Ux32 x y) -> (SRW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
+(Lsh32x32 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt32to64 y)))))
 
-(Rsh16x32 x y) -> (SRAW (SignExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
-(Rsh16Ux32 x y) -> (SRW (ZeroExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
-(Lsh16x32 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+(Rsh16x32 x y) -> (SRAW (SignExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+(Rsh16Ux32 x y) -> (SRW (ZeroExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
+(Lsh16x32 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt32to64 y)))))
 
-(Rsh8x32 x y) -> (SRAW (SignExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
-(Rsh8Ux32 x y) -> (SRW (ZeroExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
-(Lsh8x32 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+(Rsh8x32 x y) -> (SRAW (SignExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+(Rsh8Ux32 x y) -> (SRW (ZeroExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
+(Lsh8x32 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt32to64 y)))))
 
 
-(Rsh64x16 x y) -> (SRAD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
-(Rsh64Ux16 x y) -> (SRD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
-(Lsh64x16 x y) -> (SLD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+(Rsh64x16 x y) -> (SRAD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+(Rsh64Ux16 x y) -> (SRD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
+(Lsh64x16 x y) -> (SLD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt16to64 y)))))
 
-(Rsh32x16 x y) -> (SRAW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
-(Rsh32Ux16 x y) -> (SRW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
-(Lsh32x16 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+(Rsh32x16 x y) -> (SRAW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+(Rsh32Ux16 x y) -> (SRW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
+(Lsh32x16 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt16to64 y)))))
 
-(Rsh16x16 x y) -> (SRAW (SignExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
-(Rsh16Ux16 x y) -> (SRW (ZeroExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
-(Lsh16x16 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+(Rsh16x16 x y) -> (SRAW (SignExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+(Rsh16Ux16 x y) -> (SRW (ZeroExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
+(Lsh16x16 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt16to64 y)))))
 
-(Rsh8x16 x y) -> (SRAW (SignExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
-(Rsh8Ux16 x y) -> (SRW (ZeroExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
-(Lsh8x16 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+(Rsh8x16 x y) -> (SRAW (SignExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+(Rsh8Ux16 x y) -> (SRW (ZeroExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
+(Lsh8x16 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt16to64 y)))))
 
 
-(Rsh64x8 x y) -> (SRAD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
-(Rsh64Ux8 x y) -> (SRD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
-(Lsh64x8 x y) -> (SLD x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+(Rsh64x8 x y) -> (SRAD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+(Rsh64Ux8 x y) -> (SRD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
+(Lsh64x8 x y) -> (SLD x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-64] (ZeroExt8to64 y)))))
 
-(Rsh32x8 x y) -> (SRAW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
-(Rsh32Ux8 x y) -> (SRW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
-(Lsh32x8 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+(Rsh32x8 x y) -> (SRAW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+(Rsh32Ux8 x y) -> (SRW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
+(Lsh32x8 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-32] (ZeroExt8to64 y)))))
 
-(Rsh16x8 x y) -> (SRAW (SignExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
-(Rsh16Ux8 x y) -> (SRW (ZeroExt16to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
-(Lsh16x8 x y) -> (SLW x (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+(Rsh16x8 x y) -> (SRAW (SignExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+(Rsh16Ux8 x y) -> (SRW (ZeroExt16to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
+(Lsh16x8 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-16] (ZeroExt8to64 y)))))
 
-(Rsh8x8 x y) -> (SRAW (SignExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
-(Rsh8Ux8 x y) -> (SRW (ZeroExt8to32 x) (ORN y <fe.TypeInt64()> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+(Rsh8x8 x y) -> (SRAW (SignExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
+(Rsh8Ux8 x y) -> (SRW (ZeroExt8to32 x) (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
|
||||
(Lsh8x8 x y) -> (SLW x (ORN y <types.Int64> (MaskIfNotCarry (ADDconstForCarry [-8] (ZeroExt8to64 y)))))
|
||||
|
||||
// Cleaning up shift ops when input is masked
|
||||
(MaskIfNotCarry (ADDconstForCarry [c] (ANDconst [d] _))) && c < 0 && d > 0 && c + d < 0 -> (MOVDconst [-1])
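The ORN/MaskIfNotCarry idiom above saturates an out-of-range shift amount, and this fold recognizes that an already-masked amount can never carry. A rough plain-Go model of the idiom (the function and names are mine, not the actual SSA ops):

// saturateShiftCount plays the role of ORN y (MaskIfNotCarry (ADDconstForCarry [-width] y)).
// An in-range count passes through; an out-of-range one becomes all ones, so a
// single hardware shift then produces the zero (or sign) result Go requires.
// If y was pre-masked by ANDconst [d] with c < 0, d > 0, and c+d < 0, the carry
// can never fire, the mask is constantly -1, and ORN y -1 == y -- the fold above.
func saturateShiftCount(y, width uint64) uint64 {
	if y < width { // ADDconstForCarry [-width] does not carry
		return y // mask = all ones; ORN y mask == y
	}
	return ^uint64(0) // mask = 0; ORN y mask == all ones
}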
@@ -233,7 +233,7 @@
 (Addr {sym} base) -> (MOVDaddr {sym} base)
 // (Addr {sym} base) -> (ADDconst {sym} base)
-(OffPtr [off] ptr) -> (ADD (MOVDconst <fe.TypeInt64()> [off]) ptr)
+(OffPtr [off] ptr) -> (ADD (MOVDconst <types.Int64> [off]) ptr)

 (And64 x y) -> (AND x y)
 (And32 x y) -> (AND x y)
@@ -437,7 +437,7 @@
 (If (MOVDGTnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GTF cmp yes no)
 (If (MOVDGEnoinv (MOVDconst [0]) (MOVDconst [1]) cmp) yes no) -> (GEF cmp yes no)

-(If cond yes no) -> (NE (CMPWconst [0] (MOVBZreg <fe.TypeBool()> cond)) yes no)
+(If cond yes no) -> (NE (CMPWconst [0] (MOVBZreg <types.Bool> cond)) yes no)

 // ***************************
 // Above: lowering rules
@@ -13,28 +13,28 @@
 (Load <t> ptr mem) && t.IsComplex() && t.Size() == 8 ->
   (ComplexMake
-    (Load <fe.TypeFloat32()> ptr mem)
-    (Load <fe.TypeFloat32()>
-      (OffPtr <fe.TypeFloat32().PtrTo()> [4] ptr)
+    (Load <types.Float32> ptr mem)
+    (Load <types.Float32>
+      (OffPtr <types.Float32.PtrTo()> [4] ptr)
       mem)
     )
 (Store {t} dst (ComplexMake real imag) mem) && t.(Type).Size() == 8 ->
-  (Store {fe.TypeFloat32()}
-    (OffPtr <fe.TypeFloat32().PtrTo()> [4] dst)
+  (Store {types.Float32}
+    (OffPtr <types.Float32.PtrTo()> [4] dst)
     imag
-    (Store {fe.TypeFloat32()} dst real mem))
+    (Store {types.Float32} dst real mem))
 (Load <t> ptr mem) && t.IsComplex() && t.Size() == 16 ->
   (ComplexMake
-    (Load <fe.TypeFloat64()> ptr mem)
-    (Load <fe.TypeFloat64()>
-      (OffPtr <fe.TypeFloat64().PtrTo()> [8] ptr)
+    (Load <types.Float64> ptr mem)
+    (Load <types.Float64>
+      (OffPtr <types.Float64.PtrTo()> [8] ptr)
       mem)
     )
 (Store {t} dst (ComplexMake real imag) mem) && t.(Type).Size() == 16 ->
-  (Store {fe.TypeFloat64()}
-    (OffPtr <fe.TypeFloat64().PtrTo()> [8] dst)
+  (Store {types.Float64}
+    (OffPtr <types.Float64.PtrTo()> [8] dst)
     imag
-    (Store {fe.TypeFloat64()} dst real mem))
+    (Store {types.Float64} dst real mem))

 // string ops
 (StringPtr (StringMake ptr _)) -> ptr
@@ -42,15 +42,15 @@
 (Load <t> ptr mem) && t.IsString() ->
   (StringMake
-    (Load <fe.TypeBytePtr()> ptr mem)
-    (Load <fe.TypeInt()>
-      (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] ptr)
+    (Load <types.BytePtr> ptr mem)
+    (Load <types.Int>
+      (OffPtr <types.Int.PtrTo()> [config.PtrSize] ptr)
       mem))
 (Store dst (StringMake ptr len) mem) ->
-  (Store {fe.TypeInt()}
-    (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] dst)
+  (Store {types.Int}
+    (OffPtr <types.Int.PtrTo()> [config.PtrSize] dst)
     len
-    (Store {fe.TypeBytePtr()} dst ptr mem))
+    (Store {types.BytePtr} dst ptr mem))

 // slice ops
 (SlicePtr (SliceMake ptr _ _ )) -> ptr
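In plain Go terms, the header that the string rules above decompose looks like this (a sketch mirroring reflect.StringHeader, not an SSA type):

// A string value is split into a data pointer at offset 0 and an integer
// length at offset config.PtrSize -- the two fields the Load/Store rules touch.
type stringHeader struct {
	Data *byte // types.BytePtr in the rules
	Len  int   // types.Int, at offset [config.PtrSize]
}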
@@ -60,20 +60,20 @@
 (Load <t> ptr mem) && t.IsSlice() ->
   (SliceMake
     (Load <t.ElemType().PtrTo()> ptr mem)
-    (Load <fe.TypeInt()>
-      (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] ptr)
+    (Load <types.Int>
+      (OffPtr <types.Int.PtrTo()> [config.PtrSize] ptr)
       mem)
-    (Load <fe.TypeInt()>
-      (OffPtr <fe.TypeInt().PtrTo()> [2*config.PtrSize] ptr)
+    (Load <types.Int>
+      (OffPtr <types.Int.PtrTo()> [2*config.PtrSize] ptr)
       mem))
 (Store dst (SliceMake ptr len cap) mem) ->
-  (Store {fe.TypeInt()}
-    (OffPtr <fe.TypeInt().PtrTo()> [2*config.PtrSize] dst)
+  (Store {types.Int}
+    (OffPtr <types.Int.PtrTo()> [2*config.PtrSize] dst)
     cap
-    (Store {fe.TypeInt()}
-      (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] dst)
+    (Store {types.Int}
+      (OffPtr <types.Int.PtrTo()> [config.PtrSize] dst)
       len
-      (Store {fe.TypeBytePtr()} dst ptr mem)))
+      (Store {types.BytePtr} dst ptr mem)))

 // interface ops
 (ITab (IMake itab _)) -> itab
@@ -81,12 +81,12 @@
 (Load <t> ptr mem) && t.IsInterface() ->
   (IMake
-    (Load <fe.TypeBytePtr()> ptr mem)
-    (Load <fe.TypeBytePtr()>
-      (OffPtr <fe.TypeBytePtr().PtrTo()> [config.PtrSize] ptr)
+    (Load <types.BytePtr> ptr mem)
+    (Load <types.BytePtr>
+      (OffPtr <types.BytePtr.PtrTo()> [config.PtrSize] ptr)
       mem))
 (Store dst (IMake itab data) mem) ->
-  (Store {fe.TypeBytePtr()}
-    (OffPtr <fe.TypeBytePtr().PtrTo()> [config.PtrSize] dst)
+  (Store {types.BytePtr}
+    (OffPtr <types.BytePtr.PtrTo()> [config.PtrSize] dst)
     data
-    (Store {fe.TypeUintptr()} dst itab mem))
+    (Store {types.Uintptr} dst itab mem))
@@ -12,23 +12,23 @@
 (Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && t.IsSigned() ->
   (Int64Make
-    (Load <fe.TypeInt32()> (OffPtr <fe.TypeInt32().PtrTo()> [4] ptr) mem)
-    (Load <fe.TypeUInt32()> ptr mem))
+    (Load <types.Int32> (OffPtr <types.Int32.PtrTo()> [4] ptr) mem)
+    (Load <types.UInt32> ptr mem))

 (Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && !t.IsSigned() ->
   (Int64Make
-    (Load <fe.TypeUInt32()> (OffPtr <fe.TypeUInt32().PtrTo()> [4] ptr) mem)
-    (Load <fe.TypeUInt32()> ptr mem))
+    (Load <types.UInt32> (OffPtr <types.UInt32.PtrTo()> [4] ptr) mem)
+    (Load <types.UInt32> ptr mem))

 (Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && t.IsSigned() ->
   (Int64Make
-    (Load <fe.TypeInt32()> ptr mem)
-    (Load <fe.TypeUInt32()> (OffPtr <fe.TypeUInt32().PtrTo()> [4] ptr) mem))
+    (Load <types.Int32> ptr mem)
+    (Load <types.UInt32> (OffPtr <types.UInt32.PtrTo()> [4] ptr) mem))

 (Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && !t.IsSigned() ->
   (Int64Make
-    (Load <fe.TypeUInt32()> ptr mem)
-    (Load <fe.TypeUInt32()> (OffPtr <fe.TypeUInt32().PtrTo()> [4] ptr) mem))
+    (Load <types.UInt32> ptr mem)
+    (Load <types.UInt32> (OffPtr <types.UInt32.PtrTo()> [4] ptr) mem))

 (Store {t} dst (Int64Make hi lo) mem) && t.(Type).Size() == 8 && !config.BigEndian ->
   (Store {hi.Type}
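The four load variants above differ only in which half-word holds the high bits. A plain-Go sketch of the little-endian case, with encoding/binary standing in for the OffPtr arithmetic:

import "encoding/binary"

// loadInt64LE mirrors the !config.BigEndian rule: the low word lives at
// offset 0 and the sign-carrying high word at offset 4.
func loadInt64LE(p []byte) (hi int32, lo uint32) {
	lo = binary.LittleEndian.Uint32(p[0:4])
	hi = int32(binary.LittleEndian.Uint32(p[4:8]))
	return hi, lo
}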
@@ -44,94 +44,94 @@
 (Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() ->
   (Int64Make
-    (Arg <fe.TypeInt32()> {n} [off+4])
-    (Arg <fe.TypeUInt32()> {n} [off]))
+    (Arg <types.Int32> {n} [off+4])
+    (Arg <types.UInt32> {n} [off]))
 (Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() ->
   (Int64Make
-    (Arg <fe.TypeUInt32()> {n} [off+4])
-    (Arg <fe.TypeUInt32()> {n} [off]))
+    (Arg <types.UInt32> {n} [off+4])
+    (Arg <types.UInt32> {n} [off]))

 (Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() ->
   (Int64Make
-    (Arg <fe.TypeInt32()> {n} [off])
-    (Arg <fe.TypeUInt32()> {n} [off+4]))
+    (Arg <types.Int32> {n} [off])
+    (Arg <types.UInt32> {n} [off+4]))
 (Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() ->
   (Int64Make
-    (Arg <fe.TypeUInt32()> {n} [off])
-    (Arg <fe.TypeUInt32()> {n} [off+4]))
+    (Arg <types.UInt32> {n} [off])
+    (Arg <types.UInt32> {n} [off+4]))

 (Add64 x y) ->
   (Int64Make
-    (Add32withcarry <fe.TypeInt32()>
+    (Add32withcarry <types.Int32>
       (Int64Hi x)
       (Int64Hi y)
       (Select1 <TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y))))
-    (Select0 <fe.TypeUInt32()> (Add32carry (Int64Lo x) (Int64Lo y))))
+    (Select0 <types.UInt32> (Add32carry (Int64Lo x) (Int64Lo y))))

 (Sub64 x y) ->
   (Int64Make
-    (Sub32withcarry <fe.TypeInt32()>
+    (Sub32withcarry <types.Int32>
       (Int64Hi x)
       (Int64Hi y)
       (Select1 <TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y))))
-    (Select0 <fe.TypeUInt32()> (Sub32carry (Int64Lo x) (Int64Lo y))))
+    (Select0 <types.UInt32> (Sub32carry (Int64Lo x) (Int64Lo y))))

 (Mul64 x y) ->
   (Int64Make
-    (Add32 <fe.TypeUInt32()>
-      (Mul32 <fe.TypeUInt32()> (Int64Lo x) (Int64Hi y))
-      (Add32 <fe.TypeUInt32()>
-        (Mul32 <fe.TypeUInt32()> (Int64Hi x) (Int64Lo y))
-        (Select0 <fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y)))))
-    (Select1 <fe.TypeUInt32()> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
+    (Add32 <types.UInt32>
+      (Mul32 <types.UInt32> (Int64Lo x) (Int64Hi y))
+      (Add32 <types.UInt32>
+        (Mul32 <types.UInt32> (Int64Hi x) (Int64Lo y))
+        (Select0 <types.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y)))))
+    (Select1 <types.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
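The Add64 and Mul64 decompositions above can be spot-checked against math/bits, with bits.Add32 standing in for Add32carry/Add32withcarry and bits.Mul32 for Mul32uhilo (a sketch, not the compiler's code):

import "math/bits"

// add64via32: add the low words, then fold the carry into the high words.
func add64via32(xhi, xlo, yhi, ylo uint32) (hi, lo uint32) {
	lo, carry := bits.Add32(xlo, ylo, 0) // Add32carry
	hi, _ = bits.Add32(xhi, yhi, carry)  // Add32withcarry
	return hi, lo
}

// mul64via32: the high half of the low-word product plus both cross
// products forms the high word; only the low half of the low-word
// product survives as the low word.
func mul64via32(xhi, xlo, yhi, ylo uint32) (hi, lo uint32) {
	phi, plo := bits.Mul32(xlo, ylo) // Mul32uhilo
	return xlo*yhi + (xhi*ylo + phi), plo
}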
 (And64 x y) ->
   (Int64Make
-    (And32 <fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
-    (And32 <fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
+    (And32 <types.UInt32> (Int64Hi x) (Int64Hi y))
+    (And32 <types.UInt32> (Int64Lo x) (Int64Lo y)))

 (Or64 x y) ->
   (Int64Make
-    (Or32 <fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
-    (Or32 <fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
+    (Or32 <types.UInt32> (Int64Hi x) (Int64Hi y))
+    (Or32 <types.UInt32> (Int64Lo x) (Int64Lo y)))

 (Xor64 x y) ->
   (Int64Make
-    (Xor32 <fe.TypeUInt32()> (Int64Hi x) (Int64Hi y))
-    (Xor32 <fe.TypeUInt32()> (Int64Lo x) (Int64Lo y)))
+    (Xor32 <types.UInt32> (Int64Hi x) (Int64Hi y))
+    (Xor32 <types.UInt32> (Int64Lo x) (Int64Lo y)))

 (Neg64 <t> x) -> (Sub64 (Const64 <t> [0]) x)

 (Com64 x) ->
   (Int64Make
-    (Com32 <fe.TypeUInt32()> (Int64Hi x))
-    (Com32 <fe.TypeUInt32()> (Int64Lo x)))
+    (Com32 <types.UInt32> (Int64Hi x))
+    (Com32 <types.UInt32> (Int64Lo x)))

 (Ctz64 x) ->
-  (Add32 <fe.TypeUInt32()>
-    (Ctz32 <fe.TypeUInt32()> (Int64Lo x))
-    (And32 <fe.TypeUInt32()>
-      (Com32 <fe.TypeUInt32()> (Zeromask (Int64Lo x)))
-      (Ctz32 <fe.TypeUInt32()> (Int64Hi x))))
+  (Add32 <types.UInt32>
+    (Ctz32 <types.UInt32> (Int64Lo x))
+    (And32 <types.UInt32>
+      (Com32 <types.UInt32> (Zeromask (Int64Lo x)))
+      (Ctz32 <types.UInt32> (Int64Hi x))))

 (BitLen64 x) ->
-  (Add32 <fe.TypeInt()>
-    (BitLen32 <fe.TypeInt()> (Int64Hi x))
-    (BitLen32 <fe.TypeInt()>
-      (Or32 <fe.TypeUInt32()>
+  (Add32 <types.Int>
+    (BitLen32 <types.Int> (Int64Hi x))
+    (BitLen32 <types.Int>
+      (Or32 <types.UInt32>
         (Int64Lo x)
         (Zeromask (Int64Hi x)))))

 (Bswap64 x) ->
   (Int64Make
-    (Bswap32 <fe.TypeUInt32()> (Int64Lo x))
-    (Bswap32 <fe.TypeUInt32()> (Int64Hi x)))
+    (Bswap32 <types.UInt32> (Int64Lo x))
+    (Bswap32 <types.UInt32> (Int64Hi x)))

 (SignExt32to64 x) -> (Int64Make (Signmask x) x)
 (SignExt16to64 x) -> (SignExt32to64 (SignExt16to32 x))
 (SignExt8to64 x) -> (SignExt32to64 (SignExt8to32 x))

-(ZeroExt32to64 x) -> (Int64Make (Const32 <fe.TypeUInt32()> [0]) x)
+(ZeroExt32to64 x) -> (Int64Make (Const32 <types.UInt32> [0]) x)
 (ZeroExt16to64 x) -> (ZeroExt32to64 (ZeroExt16to32 x))
 (ZeroExt8to64 x) -> (ZeroExt32to64 (ZeroExt8to32 x))
@@ -170,160 +170,160 @@
 // turn x64 non-constant shifts to x32 shifts
 // if high 32-bit of the shift is nonzero, make a huge shift
 (Lsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
-  (Lsh64x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
+  (Lsh64x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
 (Rsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
-  (Rsh64x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
+  (Rsh64x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
 (Rsh64Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
-  (Rsh64Ux32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
+  (Rsh64Ux32 x (Or32 <types.UInt32> (Zeromask hi) lo))
 (Lsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
-  (Lsh32x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
+  (Lsh32x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
 (Rsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
-  (Rsh32x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
+  (Rsh32x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
 (Rsh32Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
-  (Rsh32Ux32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
+  (Rsh32Ux32 x (Or32 <types.UInt32> (Zeromask hi) lo))
 (Lsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
-  (Lsh16x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
+  (Lsh16x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
 (Rsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
-  (Rsh16x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
+  (Rsh16x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
 (Rsh16Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
-  (Rsh16Ux32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
+  (Rsh16Ux32 x (Or32 <types.UInt32> (Zeromask hi) lo))
 (Lsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
-  (Lsh8x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
+  (Lsh8x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
 (Rsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
-  (Rsh8x32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
+  (Rsh8x32 x (Or32 <types.UInt32> (Zeromask hi) lo))
 (Rsh8Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 ->
-  (Rsh8Ux32 x (Or32 <fe.TypeUInt32()> (Zeromask hi) lo))
+  (Rsh8Ux32 x (Or32 <types.UInt32> (Zeromask hi) lo))
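A rough Go model of the Zeromask trick used above (the function is mine, not an SSA op): if the high word of the 64-bit count is nonzero, OR-ing in all ones makes the 32-bit count huge, and the shift rules below then resolve a huge count to 0 (or the sign fill):

// shiftCount32 collapses a 64-bit shift count into a 32-bit one that
// saturates whenever the original count was >= 2^32.
func shiftCount32(hi, lo uint32) uint32 {
	var zeromask uint32
	if hi != 0 {
		zeromask = 0xFFFFFFFF // Zeromask hi
	}
	return zeromask | lo
}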
 // 64x left shift
 // result.hi = hi<<s | lo>>(32-s) | lo<<(s-32) // >> is unsigned, large shifts result 0
 // result.lo = lo<<s
 (Lsh64x32 (Int64Make hi lo) s) ->
   (Int64Make
-    (Or32 <fe.TypeUInt32()>
-      (Or32 <fe.TypeUInt32()>
-        (Lsh32x32 <fe.TypeUInt32()> hi s)
-        (Rsh32Ux32 <fe.TypeUInt32()>
+    (Or32 <types.UInt32>
+      (Or32 <types.UInt32>
+        (Lsh32x32 <types.UInt32> hi s)
+        (Rsh32Ux32 <types.UInt32>
           lo
-          (Sub32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [32]) s)))
-      (Lsh32x32 <fe.TypeUInt32()>
+          (Sub32 <types.UInt32> (Const32 <types.UInt32> [32]) s)))
+      (Lsh32x32 <types.UInt32>
         lo
-        (Sub32 <fe.TypeUInt32()> s (Const32 <fe.TypeUInt32()> [32]))))
-    (Lsh32x32 <fe.TypeUInt32()> lo s))
+        (Sub32 <types.UInt32> s (Const32 <types.UInt32> [32]))))
+    (Lsh32x32 <types.UInt32> lo s))
 (Lsh64x16 (Int64Make hi lo) s) ->
   (Int64Make
-    (Or32 <fe.TypeUInt32()>
-      (Or32 <fe.TypeUInt32()>
-        (Lsh32x16 <fe.TypeUInt32()> hi s)
-        (Rsh32Ux16 <fe.TypeUInt32()>
+    (Or32 <types.UInt32>
+      (Or32 <types.UInt32>
+        (Lsh32x16 <types.UInt32> hi s)
+        (Rsh32Ux16 <types.UInt32>
           lo
-          (Sub16 <fe.TypeUInt16()> (Const16 <fe.TypeUInt16()> [32]) s)))
-      (Lsh32x16 <fe.TypeUInt32()>
+          (Sub16 <types.UInt16> (Const16 <types.UInt16> [32]) s)))
+      (Lsh32x16 <types.UInt32>
         lo
-        (Sub16 <fe.TypeUInt16()> s (Const16 <fe.TypeUInt16()> [32]))))
-    (Lsh32x16 <fe.TypeUInt32()> lo s))
+        (Sub16 <types.UInt16> s (Const16 <types.UInt16> [32]))))
+    (Lsh32x16 <types.UInt32> lo s))
 (Lsh64x8 (Int64Make hi lo) s) ->
   (Int64Make
-    (Or32 <fe.TypeUInt32()>
-      (Or32 <fe.TypeUInt32()>
-        (Lsh32x8 <fe.TypeUInt32()> hi s)
-        (Rsh32Ux8 <fe.TypeUInt32()>
+    (Or32 <types.UInt32>
+      (Or32 <types.UInt32>
+        (Lsh32x8 <types.UInt32> hi s)
+        (Rsh32Ux8 <types.UInt32>
           lo
-          (Sub8 <fe.TypeUInt8()> (Const8 <fe.TypeUInt8()> [32]) s)))
-      (Lsh32x8 <fe.TypeUInt32()>
+          (Sub8 <types.UInt8> (Const8 <types.UInt8> [32]) s)))
+      (Lsh32x8 <types.UInt32>
         lo
-        (Sub8 <fe.TypeUInt8()> s (Const8 <fe.TypeUInt8()> [32]))))
-    (Lsh32x8 <fe.TypeUInt32()> lo s))
+        (Sub8 <types.UInt8> s (Const8 <types.UInt8> [32]))))
+    (Lsh32x8 <types.UInt32> lo s))
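The left-shift formula above can be checked in ordinary Go, since Go's shifts already yield 0 for counts at or beyond the operand width -- the exact property the two large-count terms rely on (a sketch; s must be unsigned so that s-32 wraps to a huge, zero-producing count):

func lsh64via32(hi, lo, s uint32) (rhi, rlo uint32) {
	rhi = hi<<s | lo>>(32-s) | lo<<(s-32) // the three terms of result.hi
	rlo = lo << s
	return rhi, rlo
}

For example, lsh64via32(0, 1, 33) yields (2, 0), matching uint64(1)<<33.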
 // 64x unsigned right shift
 // result.hi = hi>>s
 // result.lo = lo>>s | hi<<(32-s) | hi>>(s-32) // >> is unsigned, large shifts result 0
 (Rsh64Ux32 (Int64Make hi lo) s) ->
   (Int64Make
-    (Rsh32Ux32 <fe.TypeUInt32()> hi s)
-    (Or32 <fe.TypeUInt32()>
-      (Or32 <fe.TypeUInt32()>
-        (Rsh32Ux32 <fe.TypeUInt32()> lo s)
-        (Lsh32x32 <fe.TypeUInt32()>
+    (Rsh32Ux32 <types.UInt32> hi s)
+    (Or32 <types.UInt32>
+      (Or32 <types.UInt32>
+        (Rsh32Ux32 <types.UInt32> lo s)
+        (Lsh32x32 <types.UInt32>
           hi
-          (Sub32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [32]) s)))
-      (Rsh32Ux32 <fe.TypeUInt32()>
+          (Sub32 <types.UInt32> (Const32 <types.UInt32> [32]) s)))
+      (Rsh32Ux32 <types.UInt32>
        hi
-        (Sub32 <fe.TypeUInt32()> s (Const32 <fe.TypeUInt32()> [32])))))
+        (Sub32 <types.UInt32> s (Const32 <types.UInt32> [32])))))
 (Rsh64Ux16 (Int64Make hi lo) s) ->
   (Int64Make
-    (Rsh32Ux16 <fe.TypeUInt32()> hi s)
-    (Or32 <fe.TypeUInt32()>
-      (Or32 <fe.TypeUInt32()>
-        (Rsh32Ux16 <fe.TypeUInt32()> lo s)
-        (Lsh32x16 <fe.TypeUInt32()>
+    (Rsh32Ux16 <types.UInt32> hi s)
+    (Or32 <types.UInt32>
+      (Or32 <types.UInt32>
+        (Rsh32Ux16 <types.UInt32> lo s)
+        (Lsh32x16 <types.UInt32>
           hi
-          (Sub16 <fe.TypeUInt16()> (Const16 <fe.TypeUInt16()> [32]) s)))
-      (Rsh32Ux16 <fe.TypeUInt32()>
+          (Sub16 <types.UInt16> (Const16 <types.UInt16> [32]) s)))
+      (Rsh32Ux16 <types.UInt32>
        hi
-        (Sub16 <fe.TypeUInt16()> s (Const16 <fe.TypeUInt16()> [32])))))
+        (Sub16 <types.UInt16> s (Const16 <types.UInt16> [32])))))
 (Rsh64Ux8 (Int64Make hi lo) s) ->
   (Int64Make
-    (Rsh32Ux8 <fe.TypeUInt32()> hi s)
-    (Or32 <fe.TypeUInt32()>
-      (Or32 <fe.TypeUInt32()>
-        (Rsh32Ux8 <fe.TypeUInt32()> lo s)
-        (Lsh32x8 <fe.TypeUInt32()>
+    (Rsh32Ux8 <types.UInt32> hi s)
+    (Or32 <types.UInt32>
+      (Or32 <types.UInt32>
+        (Rsh32Ux8 <types.UInt32> lo s)
+        (Lsh32x8 <types.UInt32>
           hi
-          (Sub8 <fe.TypeUInt8()> (Const8 <fe.TypeUInt8()> [32]) s)))
-      (Rsh32Ux8 <fe.TypeUInt32()>
+          (Sub8 <types.UInt8> (Const8 <types.UInt8> [32]) s)))
+      (Rsh32Ux8 <types.UInt32>
        hi
-        (Sub8 <fe.TypeUInt8()> s (Const8 <fe.TypeUInt8()> [32])))))
+        (Sub8 <types.UInt8> s (Const8 <types.UInt8> [32])))))

 // 64x signed right shift
 // result.hi = hi>>s
 // result.lo = lo>>s | hi<<(32-s) | (hi>>(s-32))&zeromask(s>>5) // hi>>(s-32) is signed, large shifts result 0/-1
 (Rsh64x32 (Int64Make hi lo) s) ->
   (Int64Make
-    (Rsh32x32 <fe.TypeUInt32()> hi s)
-    (Or32 <fe.TypeUInt32()>
-      (Or32 <fe.TypeUInt32()>
-        (Rsh32Ux32 <fe.TypeUInt32()> lo s)
-        (Lsh32x32 <fe.TypeUInt32()>
+    (Rsh32x32 <types.UInt32> hi s)
+    (Or32 <types.UInt32>
+      (Or32 <types.UInt32>
+        (Rsh32Ux32 <types.UInt32> lo s)
+        (Lsh32x32 <types.UInt32>
           hi
-          (Sub32 <fe.TypeUInt32()> (Const32 <fe.TypeUInt32()> [32]) s)))
-      (And32 <fe.TypeUInt32()>
-        (Rsh32x32 <fe.TypeUInt32()>
+          (Sub32 <types.UInt32> (Const32 <types.UInt32> [32]) s)))
+      (And32 <types.UInt32>
+        (Rsh32x32 <types.UInt32>
           hi
-          (Sub32 <fe.TypeUInt32()> s (Const32 <fe.TypeUInt32()> [32])))
+          (Sub32 <types.UInt32> s (Const32 <types.UInt32> [32])))
         (Zeromask
-          (Rsh32Ux32 <fe.TypeUInt32()> s (Const32 <fe.TypeUInt32()> [5]))))))
+          (Rsh32Ux32 <types.UInt32> s (Const32 <types.UInt32> [5]))))))
 (Rsh64x16 (Int64Make hi lo) s) ->
   (Int64Make
-    (Rsh32x16 <fe.TypeUInt32()> hi s)
-    (Or32 <fe.TypeUInt32()>
-      (Or32 <fe.TypeUInt32()>
-        (Rsh32Ux16 <fe.TypeUInt32()> lo s)
-        (Lsh32x16 <fe.TypeUInt32()>
+    (Rsh32x16 <types.UInt32> hi s)
+    (Or32 <types.UInt32>
+      (Or32 <types.UInt32>
+        (Rsh32Ux16 <types.UInt32> lo s)
+        (Lsh32x16 <types.UInt32>
           hi
-          (Sub16 <fe.TypeUInt16()> (Const16 <fe.TypeUInt16()> [32]) s)))
-      (And32 <fe.TypeUInt32()>
-        (Rsh32x16 <fe.TypeUInt32()>
+          (Sub16 <types.UInt16> (Const16 <types.UInt16> [32]) s)))
+      (And32 <types.UInt32>
+        (Rsh32x16 <types.UInt32>
           hi
-          (Sub16 <fe.TypeUInt16()> s (Const16 <fe.TypeUInt16()> [32])))
+          (Sub16 <types.UInt16> s (Const16 <types.UInt16> [32])))
         (Zeromask
           (ZeroExt16to32
-            (Rsh16Ux32 <fe.TypeUInt16()> s (Const32 <fe.TypeUInt32()> [5])))))))
+            (Rsh16Ux32 <types.UInt16> s (Const32 <types.UInt32> [5])))))))
 (Rsh64x8 (Int64Make hi lo) s) ->
   (Int64Make
-    (Rsh32x8 <fe.TypeUInt32()> hi s)
-    (Or32 <fe.TypeUInt32()>
-      (Or32 <fe.TypeUInt32()>
-        (Rsh32Ux8 <fe.TypeUInt32()> lo s)
-        (Lsh32x8 <fe.TypeUInt32()>
+    (Rsh32x8 <types.UInt32> hi s)
+    (Or32 <types.UInt32>
+      (Or32 <types.UInt32>
+        (Rsh32Ux8 <types.UInt32> lo s)
+        (Lsh32x8 <types.UInt32>
           hi
-          (Sub8 <fe.TypeUInt8()> (Const8 <fe.TypeUInt8()> [32]) s)))
-      (And32 <fe.TypeUInt32()>
-        (Rsh32x8 <fe.TypeUInt32()>
+          (Sub8 <types.UInt8> (Const8 <types.UInt8> [32]) s)))
+      (And32 <types.UInt32>
+        (Rsh32x8 <types.UInt32>
           hi
-          (Sub8 <fe.TypeUInt8()> s (Const8 <fe.TypeUInt8()> [32])))
+          (Sub8 <types.UInt8> s (Const8 <types.UInt8> [32])))
         (Zeromask
           (ZeroExt8to32
-            (Rsh8Ux32 <fe.TypeUInt8()> s (Const32 <fe.TypeUInt32()> [5])))))))
+            (Rsh8Ux32 <types.UInt8> s (Const32 <types.UInt32> [5])))))))
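The signed variant's extra And32/Zeromask term only contributes once s reaches 32, where it selects the sign-extended high word shifted by s-32. A plain-Go transcription of result.lo (again leaning on Go's defined behavior for large and sign-filling shifts):

func rsh64x32Lo(hi int32, lo uint32, s uint32) uint32 {
	var mask uint32
	if s>>5 != 0 { // zeromask(s>>5): true exactly when s >= 32
		mask = 0xFFFFFFFF
	}
	return lo>>s | uint32(hi)<<(32-s) | uint32(hi>>(s-32))&mask
}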
 // 64xConst32 shifts
 // we probably do not need them -- lateopt may take care of them just fine
@@ -333,48 +333,48 @@
 //
 //(Lsh64x32 x (Const32 [c])) && c < 64 && c > 32 ->
 //  (Int64Make
-//    (Lsh32x32 <fe.TypeUInt32()> (Int64Lo x) (Const32 <fe.TypeUInt32()> [c-32]))
-//    (Const32 <fe.TypeUInt32()> [0]))
+//    (Lsh32x32 <types.UInt32> (Int64Lo x) (Const32 <types.UInt32> [c-32]))
+//    (Const32 <types.UInt32> [0]))
 //(Rsh64x32 x (Const32 [c])) && c < 64 && c > 32 ->
 //  (Int64Make
 //    (Signmask (Int64Hi x))
-//    (Rsh32x32 <fe.TypeInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [c-32])))
+//    (Rsh32x32 <types.Int32> (Int64Hi x) (Const32 <types.UInt32> [c-32])))
 //(Rsh64Ux32 x (Const32 [c])) && c < 64 && c > 32 ->
 //  (Int64Make
-//    (Const32 <fe.TypeUInt32()> [0])
-//    (Rsh32Ux32 <fe.TypeUInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [c-32])))
+//    (Const32 <types.UInt32> [0])
+//    (Rsh32Ux32 <types.UInt32> (Int64Hi x) (Const32 <types.UInt32> [c-32])))
 //
-//(Lsh64x32 x (Const32 [32])) -> (Int64Make (Int64Lo x) (Const32 <fe.TypeUInt32()> [0]))
+//(Lsh64x32 x (Const32 [32])) -> (Int64Make (Int64Lo x) (Const32 <types.UInt32> [0]))
 //(Rsh64x32 x (Const32 [32])) -> (Int64Make (Signmask (Int64Hi x)) (Int64Hi x))
-//(Rsh64Ux32 x (Const32 [32])) -> (Int64Make (Const32 <fe.TypeUInt32()> [0]) (Int64Hi x))
+//(Rsh64Ux32 x (Const32 [32])) -> (Int64Make (Const32 <types.UInt32> [0]) (Int64Hi x))
 //
 //(Lsh64x32 x (Const32 [c])) && c < 32 && c > 0 ->
 //  (Int64Make
-//    (Or32 <fe.TypeUInt32()>
-//      (Lsh32x32 <fe.TypeUInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [c]))
-//      (Rsh32Ux32 <fe.TypeUInt32()> (Int64Lo x) (Const32 <fe.TypeUInt32()> [32-c])))
-//    (Lsh32x32 <fe.TypeUInt32()> (Int64Lo x) (Const32 <fe.TypeUInt32()> [c])))
+//    (Or32 <types.UInt32>
+//      (Lsh32x32 <types.UInt32> (Int64Hi x) (Const32 <types.UInt32> [c]))
+//      (Rsh32Ux32 <types.UInt32> (Int64Lo x) (Const32 <types.UInt32> [32-c])))
+//    (Lsh32x32 <types.UInt32> (Int64Lo x) (Const32 <types.UInt32> [c])))
 //(Rsh64x32 x (Const32 [c])) && c < 32 && c > 0 ->
 //  (Int64Make
-//    (Rsh32x32 <fe.TypeInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [c]))
-//    (Or32 <fe.TypeUInt32()>
-//      (Rsh32Ux32 <fe.TypeUInt32()> (Int64Lo x) (Const32 <fe.TypeUInt32()> [c]))
-//      (Lsh32x32 <fe.TypeUInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [32-c]))))
+//    (Rsh32x32 <types.Int32> (Int64Hi x) (Const32 <types.UInt32> [c]))
+//    (Or32 <types.UInt32>
+//      (Rsh32Ux32 <types.UInt32> (Int64Lo x) (Const32 <types.UInt32> [c]))
+//      (Lsh32x32 <types.UInt32> (Int64Hi x) (Const32 <types.UInt32> [32-c]))))
 //(Rsh64Ux32 x (Const32 [c])) && c < 32 && c > 0 ->
 //  (Int64Make
-//    (Rsh32Ux32 <fe.TypeUInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [c]))
-//    (Or32 <fe.TypeUInt32()>
-//      (Rsh32Ux32 <fe.TypeUInt32()> (Int64Lo x) (Const32 <fe.TypeUInt32()> [c]))
-//      (Lsh32x32 <fe.TypeUInt32()> (Int64Hi x) (Const32 <fe.TypeUInt32()> [32-c]))))
+//    (Rsh32Ux32 <types.UInt32> (Int64Hi x) (Const32 <types.UInt32> [c]))
+//    (Or32 <types.UInt32>
+//      (Rsh32Ux32 <types.UInt32> (Int64Lo x) (Const32 <types.UInt32> [c]))
+//      (Lsh32x32 <types.UInt32> (Int64Hi x) (Const32 <types.UInt32> [32-c]))))
 //
 //(Lsh64x32 x (Const32 [0])) -> x
 //(Rsh64x32 x (Const32 [0])) -> x
 //(Rsh64Ux32 x (Const32 [0])) -> x

 (Const64 <t> [c]) && t.IsSigned() ->
-  (Int64Make (Const32 <fe.TypeInt32()> [c>>32]) (Const32 <fe.TypeUInt32()> [int64(int32(c))]))
+  (Int64Make (Const32 <types.Int32> [c>>32]) (Const32 <types.UInt32> [int64(int32(c))]))
 (Const64 <t> [c]) && !t.IsSigned() ->
-  (Int64Make (Const32 <fe.TypeUInt32()> [c>>32]) (Const32 <fe.TypeUInt32()> [int64(int32(c))]))
+  (Int64Make (Const32 <types.UInt32> [c>>32]) (Const32 <types.UInt32> [int64(int32(c))]))

 (Eq64 x y) ->
   (AndB
@@ -155,14 +155,14 @@
 (Mul64 (Const64 [-1]) x) -> (Neg64 x)

 // Convert multiplication by a power of two to a shift.
-(Mul8 <t> n (Const8 [c])) && isPowerOfTwo(c) -> (Lsh8x64 <t> n (Const64 <fe.TypeUInt64()> [log2(c)]))
-(Mul16 <t> n (Const16 [c])) && isPowerOfTwo(c) -> (Lsh16x64 <t> n (Const64 <fe.TypeUInt64()> [log2(c)]))
-(Mul32 <t> n (Const32 [c])) && isPowerOfTwo(c) -> (Lsh32x64 <t> n (Const64 <fe.TypeUInt64()> [log2(c)]))
-(Mul64 <t> n (Const64 [c])) && isPowerOfTwo(c) -> (Lsh64x64 <t> n (Const64 <fe.TypeUInt64()> [log2(c)]))
-(Mul8 <t> n (Const8 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg8 (Lsh8x64 <t> n (Const64 <fe.TypeUInt64()> [log2(-c)])))
-(Mul16 <t> n (Const16 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg16 (Lsh16x64 <t> n (Const64 <fe.TypeUInt64()> [log2(-c)])))
-(Mul32 <t> n (Const32 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg32 (Lsh32x64 <t> n (Const64 <fe.TypeUInt64()> [log2(-c)])))
-(Mul64 <t> n (Const64 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg64 (Lsh64x64 <t> n (Const64 <fe.TypeUInt64()> [log2(-c)])))
+(Mul8 <t> n (Const8 [c])) && isPowerOfTwo(c) -> (Lsh8x64 <t> n (Const64 <types.UInt64> [log2(c)]))
+(Mul16 <t> n (Const16 [c])) && isPowerOfTwo(c) -> (Lsh16x64 <t> n (Const64 <types.UInt64> [log2(c)]))
+(Mul32 <t> n (Const32 [c])) && isPowerOfTwo(c) -> (Lsh32x64 <t> n (Const64 <types.UInt64> [log2(c)]))
+(Mul64 <t> n (Const64 [c])) && isPowerOfTwo(c) -> (Lsh64x64 <t> n (Const64 <types.UInt64> [log2(c)]))
+(Mul8 <t> n (Const8 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg8 (Lsh8x64 <t> n (Const64 <types.UInt64> [log2(-c)])))
+(Mul16 <t> n (Const16 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg16 (Lsh16x64 <t> n (Const64 <types.UInt64> [log2(-c)])))
+(Mul32 <t> n (Const32 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg32 (Lsh32x64 <t> n (Const64 <types.UInt64> [log2(-c)])))
+(Mul64 <t> n (Const64 [c])) && t.IsSigned() && isPowerOfTwo(-c) -> (Neg64 (Lsh64x64 <t> n (Const64 <types.UInt64> [log2(-c)])))
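A quick sanity check of the strength reduction above, instantiated for one positive and one negative power of two:

func mulBy8(n int64) int64    { return n << 3 }    // (Mul64 n 8)  -> Lsh64x64
func mulByNeg8(n int64) int64 { return -(n << 3) } // (Mul64 n -8) -> Neg64 (Lsh64x64 ...)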

 (Mod8 (Const8 [c]) (Const8 [d])) && d != 0 -> (Const8 [int64(int8(c % d))])
 (Mod16 (Const16 [c]) (Const16 [d])) && d != 0 -> (Const16 [int64(int16(c % d))])
@@ -481,46 +481,46 @@
 // ((x >> c1) << c2) >> c3
 (Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
   && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-  -> (Rsh64Ux64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
+  -> (Rsh64Ux64 x (Const64 <types.UInt64> [c1-c2+c3]))
 (Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
   && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-  -> (Rsh32Ux64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
+  -> (Rsh32Ux64 x (Const64 <types.UInt64> [c1-c2+c3]))
 (Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
   && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-  -> (Rsh16Ux64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
+  -> (Rsh16Ux64 x (Const64 <types.UInt64> [c1-c2+c3]))
 (Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
   && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-  -> (Rsh8Ux64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
+  -> (Rsh8Ux64 x (Const64 <types.UInt64> [c1-c2+c3]))

 // ((x << c1) >> c2) << c3
 (Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
   && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-  -> (Lsh64x64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
+  -> (Lsh64x64 x (Const64 <types.UInt64> [c1-c2+c3]))
 (Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
   && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-  -> (Lsh32x64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
+  -> (Lsh32x64 x (Const64 <types.UInt64> [c1-c2+c3]))
 (Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
   && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-  -> (Lsh16x64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
+  -> (Lsh16x64 x (Const64 <types.UInt64> [c1-c2+c3]))
 (Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
   && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
-  -> (Lsh8x64 x (Const64 <fe.TypeUInt64()> [c1-c2+c3]))
+  -> (Lsh8x64 x (Const64 <types.UInt64> [c1-c2+c3]))
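Under the guard conditions the three shifts collapse into one: shifting right by c1 and left by c2 (c1 >= c2) nets a right shift of c1-c2 with the low c2 bits cleared, and the final right shift by c3 >= c2 discards exactly those cleared bits. A one-line identity check in Go, under those same assumptions:

func combinedShiftHolds(x, c1, c2, c3 uint64) bool {
	return ((x>>c1)<<c2)>>c3 == x>>(c1-c2+c3)
}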
 // replace shifts with zero extensions
-(Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) -> (ZeroExt8to16 (Trunc16to8 <fe.TypeUInt8()> x))
-(Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) -> (ZeroExt8to32 (Trunc32to8 <fe.TypeUInt8()> x))
-(Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) -> (ZeroExt8to64 (Trunc64to8 <fe.TypeUInt8()> x))
-(Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) -> (ZeroExt16to32 (Trunc32to16 <fe.TypeUInt16()> x))
-(Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) -> (ZeroExt16to64 (Trunc64to16 <fe.TypeUInt16()> x))
-(Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) -> (ZeroExt32to64 (Trunc64to32 <fe.TypeUInt32()> x))
+(Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) -> (ZeroExt8to16 (Trunc16to8 <types.UInt8> x))
+(Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) -> (ZeroExt8to32 (Trunc32to8 <types.UInt8> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) -> (ZeroExt8to64 (Trunc64to8 <types.UInt8> x))
+(Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) -> (ZeroExt16to32 (Trunc32to16 <types.UInt16> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) -> (ZeroExt16to64 (Trunc64to16 <types.UInt16> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) -> (ZeroExt32to64 (Trunc64to32 <types.UInt32> x))

 // replace shifts with sign extensions
-(Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) -> (SignExt8to16 (Trunc16to8 <fe.TypeInt8()> x))
-(Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) -> (SignExt8to32 (Trunc32to8 <fe.TypeInt8()> x))
-(Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) -> (SignExt8to64 (Trunc64to8 <fe.TypeInt8()> x))
-(Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) -> (SignExt16to32 (Trunc32to16 <fe.TypeInt16()> x))
-(Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) -> (SignExt16to64 (Trunc64to16 <fe.TypeInt16()> x))
-(Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) -> (SignExt32to64 (Trunc64to32 <fe.TypeInt32()> x))
+(Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) -> (SignExt8to16 (Trunc16to8 <types.Int8> x))
+(Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) -> (SignExt8to32 (Trunc32to8 <types.Int8> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) -> (SignExt8to64 (Trunc64to8 <types.Int8> x))
+(Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) -> (SignExt16to32 (Trunc32to16 <types.Int16> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) -> (SignExt16to64 (Trunc64to16 <types.Int16> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) -> (SignExt32to64 (Trunc64to32 <types.Int32> x))
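Both families above are Go's truncating conversions in disguise; for the 16-bit cases:

func zeroExtLowByte(x uint16) uint16 { return uint16(uint8(x)) } // (x<<8)>>8, unsigned shift pair
func signExtLowByte(x int16) int16   { return int16(int8(x)) }   // (x<<8)>>8, signed shift pair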

 // constant comparisons
 (Eq64 (Const64 [c]) (Const64 [d])) -> (ConstBool [b2i(c == d)])
@@ -754,8 +754,8 @@
 // indexing operations
 // Note: bounds check has already been done
-(PtrIndex <t> ptr idx) && config.PtrSize == 4 -> (AddPtr ptr (Mul32 <fe.TypeInt()> idx (Const32 <fe.TypeInt()> [t.ElemType().Size()])))
-(PtrIndex <t> ptr idx) && config.PtrSize == 8 -> (AddPtr ptr (Mul64 <fe.TypeInt()> idx (Const64 <fe.TypeInt()> [t.ElemType().Size()])))
+(PtrIndex <t> ptr idx) && config.PtrSize == 4 -> (AddPtr ptr (Mul32 <types.Int> idx (Const32 <types.Int> [t.ElemType().Size()])))
+(PtrIndex <t> ptr idx) && config.PtrSize == 8 -> (AddPtr ptr (Mul64 <types.Int> idx (Const64 <types.Int> [t.ElemType().Size()])))

 // struct operations
 (StructSelect (StructMake1 x)) -> x
@@ -862,19 +862,19 @@
 (StringPtr (StringMake (Const64 <t> [c]) _)) -> (Const64 <t> [c])
 (StringLen (StringMake _ (Const64 <t> [c]))) -> (Const64 <t> [c])
 (ConstString {s}) && config.PtrSize == 4 && s.(string) == "" ->
-  (StringMake (ConstNil) (Const32 <fe.TypeInt()> [0]))
+  (StringMake (ConstNil) (Const32 <types.Int> [0]))
 (ConstString {s}) && config.PtrSize == 8 && s.(string) == "" ->
-  (StringMake (ConstNil) (Const64 <fe.TypeInt()> [0]))
+  (StringMake (ConstNil) (Const64 <types.Int> [0]))
 (ConstString {s}) && config.PtrSize == 4 && s.(string) != "" ->
   (StringMake
-    (Addr <fe.TypeBytePtr()> {fe.StringData(s.(string))}
+    (Addr <types.BytePtr> {fe.StringData(s.(string))}
       (SB))
-    (Const32 <fe.TypeInt()> [int64(len(s.(string)))]))
+    (Const32 <types.Int> [int64(len(s.(string)))]))
 (ConstString {s}) && config.PtrSize == 8 && s.(string) != "" ->
   (StringMake
-    (Addr <fe.TypeBytePtr()> {fe.StringData(s.(string))}
+    (Addr <types.BytePtr> {fe.StringData(s.(string))}
      (SB))
-    (Const64 <fe.TypeInt()> [int64(len(s.(string)))]))
+    (Const64 <types.Int> [int64(len(s.(string)))]))

 // slice ops
 // Only a few slice rules are provided here. See dec.rules for
@@ -890,19 +890,19 @@
 (ConstSlice) && config.PtrSize == 4 ->
   (SliceMake
     (ConstNil <v.Type.ElemType().PtrTo()>)
-    (Const32 <fe.TypeInt()> [0])
-    (Const32 <fe.TypeInt()> [0]))
+    (Const32 <types.Int> [0])
+    (Const32 <types.Int> [0]))
 (ConstSlice) && config.PtrSize == 8 ->
   (SliceMake
     (ConstNil <v.Type.ElemType().PtrTo()>)
-    (Const64 <fe.TypeInt()> [0])
-    (Const64 <fe.TypeInt()> [0]))
+    (Const64 <types.Int> [0])
+    (Const64 <types.Int> [0]))

 // interface ops
 (ConstInterface) ->
   (IMake
-    (ConstNil <fe.TypeBytePtr()>)
-    (ConstNil <fe.TypeBytePtr()>))
+    (ConstNil <types.BytePtr>)
+    (ConstNil <types.BytePtr>))

 (NilCheck (GetG mem) mem) -> mem
@@ -918,29 +918,29 @@
 // Decompose compound argument values
 (Arg {n} [off]) && v.Type.IsString() ->
   (StringMake
-    (Arg <fe.TypeBytePtr()> {n} [off])
-    (Arg <fe.TypeInt()> {n} [off+config.PtrSize]))
+    (Arg <types.BytePtr> {n} [off])
+    (Arg <types.Int> {n} [off+config.PtrSize]))

 (Arg {n} [off]) && v.Type.IsSlice() ->
   (SliceMake
     (Arg <v.Type.ElemType().PtrTo()> {n} [off])
-    (Arg <fe.TypeInt()> {n} [off+config.PtrSize])
-    (Arg <fe.TypeInt()> {n} [off+2*config.PtrSize]))
+    (Arg <types.Int> {n} [off+config.PtrSize])
+    (Arg <types.Int> {n} [off+2*config.PtrSize]))

 (Arg {n} [off]) && v.Type.IsInterface() ->
   (IMake
-    (Arg <fe.TypeBytePtr()> {n} [off])
-    (Arg <fe.TypeBytePtr()> {n} [off+config.PtrSize]))
+    (Arg <types.BytePtr> {n} [off])
+    (Arg <types.BytePtr> {n} [off+config.PtrSize]))

 (Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 16 ->
   (ComplexMake
-    (Arg <fe.TypeFloat64()> {n} [off])
-    (Arg <fe.TypeFloat64()> {n} [off+8]))
+    (Arg <types.Float64> {n} [off])
+    (Arg <types.Float64> {n} [off+8]))

 (Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 8 ->
   (ComplexMake
-    (Arg <fe.TypeFloat32()> {n} [off])
-    (Arg <fe.TypeFloat32()> {n} [off+4]))
+    (Arg <types.Float32> {n} [off])
+    (Arg <types.Float32> {n} [off+4]))

 (Arg <t>) && t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) ->
   (StructMake0)
@@ -972,125 +972,125 @@
 // See ../magic.go for a detailed description of these algorithms.

 // Unsigned divide by power of 2. Strength reduce to a shift.
-(Div8u n (Const8 [c])) && isPowerOfTwo(c&0xff) -> (Rsh8Ux64 n (Const64 <fe.TypeUInt64()> [log2(c&0xff)]))
-(Div16u n (Const16 [c])) && isPowerOfTwo(c&0xffff) -> (Rsh16Ux64 n (Const64 <fe.TypeUInt64()> [log2(c&0xffff)]))
-(Div32u n (Const32 [c])) && isPowerOfTwo(c&0xffffffff) -> (Rsh32Ux64 n (Const64 <fe.TypeUInt64()> [log2(c&0xffffffff)]))
-(Div64u n (Const64 [c])) && isPowerOfTwo(c) -> (Rsh64Ux64 n (Const64 <fe.TypeUInt64()> [log2(c)]))
+(Div8u n (Const8 [c])) && isPowerOfTwo(c&0xff) -> (Rsh8Ux64 n (Const64 <types.UInt64> [log2(c&0xff)]))
+(Div16u n (Const16 [c])) && isPowerOfTwo(c&0xffff) -> (Rsh16Ux64 n (Const64 <types.UInt64> [log2(c&0xffff)]))
+(Div32u n (Const32 [c])) && isPowerOfTwo(c&0xffffffff) -> (Rsh32Ux64 n (Const64 <types.UInt64> [log2(c&0xffffffff)]))
+(Div64u n (Const64 [c])) && isPowerOfTwo(c) -> (Rsh64Ux64 n (Const64 <types.UInt64> [log2(c)]))

 // Unsigned divide, not a power of 2. Strength reduce to a multiply.
 // For 8-bit divides, we just do a direct 9-bit by 8-bit multiply.
 (Div8u x (Const8 [c])) && umagicOK(8, c) ->
   (Trunc32to8
-    (Rsh32Ux64 <fe.TypeUInt32()>
-      (Mul32 <fe.TypeUInt32()>
-        (Const32 <fe.TypeUInt32()> [int64(1<<8+umagic(8,c).m)])
+    (Rsh32Ux64 <types.UInt32>
+      (Mul32 <types.UInt32>
+        (Const32 <types.UInt32> [int64(1<<8+umagic(8,c).m)])
         (ZeroExt8to32 x))
-      (Const64 <fe.TypeUInt64()> [8+umagic(8,c).s])))
+      (Const64 <types.UInt64> [8+umagic(8,c).s])))
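The 8-bit rule above is easy to verify exhaustively for one divisor. Assuming umagic(8,7) yields m=37 and s=3 -- that is, multiplier 1<<8+37 = 293 and shift 8+3 = 11; magic.go is the authority for the real constants -- the identity holds for all 256 inputs:

func div8uBy7(x uint8) uint8 {
	return uint8((293 * uint32(x)) >> 11) // (M * x) >> (8 + s)
}

func checkDiv8uBy7() {
	for x := 0; x < 256; x++ {
		if div8uBy7(uint8(x)) != uint8(x)/7 {
			panic("magic constants wrong")
		}
	}
}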

 // For 16-bit divides on 64-bit machines, we do a direct 17-bit by 16-bit multiply.
 (Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 8 ->
   (Trunc64to16
-    (Rsh64Ux64 <fe.TypeUInt64()>
-      (Mul64 <fe.TypeUInt64()>
-        (Const64 <fe.TypeUInt64()> [int64(1<<16+umagic(16,c).m)])
+    (Rsh64Ux64 <types.UInt64>
+      (Mul64 <types.UInt64>
+        (Const64 <types.UInt64> [int64(1<<16+umagic(16,c).m)])
         (ZeroExt16to64 x))
-      (Const64 <fe.TypeUInt64()> [16+umagic(16,c).s])))
+      (Const64 <types.UInt64> [16+umagic(16,c).s])))

 // For 16-bit divides on 32-bit machines
 (Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 4 && umagic(16,c).m&1 == 0 ->
   (Trunc32to16
-    (Rsh32Ux64 <fe.TypeUInt32()>
-      (Mul32 <fe.TypeUInt32()>
-        (Const32 <fe.TypeUInt32()> [int64(1<<15+umagic(16,c).m/2)])
+    (Rsh32Ux64 <types.UInt32>
+      (Mul32 <types.UInt32>
+        (Const32 <types.UInt32> [int64(1<<15+umagic(16,c).m/2)])
         (ZeroExt16to32 x))
-      (Const64 <fe.TypeUInt64()> [16+umagic(16,c).s-1])))
+      (Const64 <types.UInt64> [16+umagic(16,c).s-1])))
 (Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 4 && c&1 == 0 ->
   (Trunc32to16
-    (Rsh32Ux64 <fe.TypeUInt32()>
-      (Mul32 <fe.TypeUInt32()>
-        (Const32 <fe.TypeUInt32()> [int64(1<<15+(umagic(16,c).m+1)/2)])
-        (Rsh32Ux64 <fe.TypeUInt32()> (ZeroExt16to32 x) (Const64 <fe.TypeUInt64()> [1])))
-      (Const64 <fe.TypeUInt64()> [16+umagic(16,c).s-2])))
+    (Rsh32Ux64 <types.UInt32>
+      (Mul32 <types.UInt32>
+        (Const32 <types.UInt32> [int64(1<<15+(umagic(16,c).m+1)/2)])
+        (Rsh32Ux64 <types.UInt32> (ZeroExt16to32 x) (Const64 <types.UInt64> [1])))
+      (Const64 <types.UInt64> [16+umagic(16,c).s-2])))
 (Div16u x (Const16 [c])) && umagicOK(16, c) && config.RegSize == 4 ->
   (Trunc32to16
-    (Rsh32Ux64 <fe.TypeUInt32()>
+    (Rsh32Ux64 <types.UInt32>
       (Avg32u
-        (Lsh32x64 <fe.TypeUInt32()> (ZeroExt16to32 x) (Const64 <fe.TypeUInt64()> [16]))
-        (Mul32 <fe.TypeUInt32()>
-          (Const32 <fe.TypeUInt32()> [int64(umagic(16,c).m)])
+        (Lsh32x64 <types.UInt32> (ZeroExt16to32 x) (Const64 <types.UInt64> [16]))
+        (Mul32 <types.UInt32>
+          (Const32 <types.UInt32> [int64(umagic(16,c).m)])
           (ZeroExt16to32 x)))
-      (Const64 <fe.TypeUInt64()> [16+umagic(16,c).s-1])))
+      (Const64 <types.UInt64> [16+umagic(16,c).s-1])))

 // For 32-bit divides on 32-bit machines
 (Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 4 && umagic(32,c).m&1 == 0 ->
-  (Rsh32Ux64 <fe.TypeUInt32()>
-    (Hmul32u <fe.TypeUInt32()>
-      (Const32 <fe.TypeUInt32()> [int64(int32(1<<31+umagic(32,c).m/2))])
+  (Rsh32Ux64 <types.UInt32>
+    (Hmul32u <types.UInt32>
+      (Const32 <types.UInt32> [int64(int32(1<<31+umagic(32,c).m/2))])
       x)
-    (Const64 <fe.TypeUInt64()> [umagic(32,c).s-1]))
+    (Const64 <types.UInt64> [umagic(32,c).s-1]))
 (Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 4 && c&1 == 0 ->
-  (Rsh32Ux64 <fe.TypeUInt32()>
-    (Hmul32u <fe.TypeUInt32()>
-      (Const32 <fe.TypeUInt32()> [int64(int32(1<<31+(umagic(32,c).m+1)/2))])
-      (Rsh32Ux64 <fe.TypeUInt32()> x (Const64 <fe.TypeUInt64()> [1])))
-    (Const64 <fe.TypeUInt64()> [umagic(32,c).s-2]))
+  (Rsh32Ux64 <types.UInt32>
+    (Hmul32u <types.UInt32>
+      (Const32 <types.UInt32> [int64(int32(1<<31+(umagic(32,c).m+1)/2))])
+      (Rsh32Ux64 <types.UInt32> x (Const64 <types.UInt64> [1])))
+    (Const64 <types.UInt64> [umagic(32,c).s-2]))
 (Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 4 ->
-  (Rsh32Ux64 <fe.TypeUInt32()>
+  (Rsh32Ux64 <types.UInt32>
     (Avg32u
       x
-      (Hmul32u <fe.TypeUInt32()>
-        (Const32 <fe.TypeUInt32()> [int64(int32(umagic(32,c).m))])
+      (Hmul32u <types.UInt32>
+        (Const32 <types.UInt32> [int64(int32(umagic(32,c).m))])
         x))
-    (Const64 <fe.TypeUInt64()> [umagic(32,c).s-1]))
+    (Const64 <types.UInt64> [umagic(32,c).s-1]))

 // For 32-bit divides on 64-bit machines
 // We'll use a regular (non-hi) multiply for this case.
 (Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 8 && umagic(32,c).m&1 == 0 ->
   (Trunc64to32
-    (Rsh64Ux64 <fe.TypeUInt64()>
-      (Mul64 <fe.TypeUInt64()>
-        (Const64 <fe.TypeUInt64()> [int64(1<<31+umagic(32,c).m/2)])
+    (Rsh64Ux64 <types.UInt64>
+      (Mul64 <types.UInt64>
+        (Const64 <types.UInt64> [int64(1<<31+umagic(32,c).m/2)])
         (ZeroExt32to64 x))
-      (Const64 <fe.TypeUInt64()> [32+umagic(32,c).s-1])))
+      (Const64 <types.UInt64> [32+umagic(32,c).s-1])))
 (Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 8 && c&1 == 0 ->
   (Trunc64to32
-    (Rsh64Ux64 <fe.TypeUInt64()>
-      (Mul64 <fe.TypeUInt64()>
-        (Const64 <fe.TypeUInt64()> [int64(1<<31+(umagic(32,c).m+1)/2)])
-        (Rsh64Ux64 <fe.TypeUInt64()> (ZeroExt32to64 x) (Const64 <fe.TypeUInt64()> [1])))
-      (Const64 <fe.TypeUInt64()> [32+umagic(32,c).s-2])))
+    (Rsh64Ux64 <types.UInt64>
+      (Mul64 <types.UInt64>
+        (Const64 <types.UInt64> [int64(1<<31+(umagic(32,c).m+1)/2)])
+        (Rsh64Ux64 <types.UInt64> (ZeroExt32to64 x) (Const64 <types.UInt64> [1])))
+      (Const64 <types.UInt64> [32+umagic(32,c).s-2])))
 (Div32u x (Const32 [c])) && umagicOK(32, c) && config.RegSize == 8 ->
   (Trunc64to32
-    (Rsh64Ux64 <fe.TypeUInt64()>
+    (Rsh64Ux64 <types.UInt64>
       (Avg64u
-        (Lsh64x64 <fe.TypeUInt64()> (ZeroExt32to64 x) (Const64 <fe.TypeUInt64()> [32]))
-        (Mul64 <fe.TypeUInt64()>
-          (Const64 <fe.TypeUInt32()> [int64(umagic(32,c).m)])
+        (Lsh64x64 <types.UInt64> (ZeroExt32to64 x) (Const64 <types.UInt64> [32]))
+        (Mul64 <types.UInt64>
+          (Const64 <types.UInt32> [int64(umagic(32,c).m)])
           (ZeroExt32to64 x)))
-      (Const64 <fe.TypeUInt64()> [32+umagic(32,c).s-1])))
+      (Const64 <types.UInt64> [32+umagic(32,c).s-1])))

 // For 64-bit divides on 64-bit machines
 // (64-bit divides on 32-bit machines are lowered to a runtime call by the walk pass.)
 (Div64u x (Const64 [c])) && umagicOK(64, c) && config.RegSize == 8 && umagic(64,c).m&1 == 0 ->
-  (Rsh64Ux64 <fe.TypeUInt64()>
-    (Hmul64u <fe.TypeUInt64()>
-      (Const64 <fe.TypeUInt64()> [int64(1<<63+umagic(64,c).m/2)])
+  (Rsh64Ux64 <types.UInt64>
+    (Hmul64u <types.UInt64>
+      (Const64 <types.UInt64> [int64(1<<63+umagic(64,c).m/2)])
       x)
-    (Const64 <fe.TypeUInt64()> [umagic(64,c).s-1]))
+    (Const64 <types.UInt64> [umagic(64,c).s-1]))
 (Div64u x (Const64 [c])) && umagicOK(64, c) && config.RegSize == 8 && c&1 == 0 ->
-  (Rsh64Ux64 <fe.TypeUInt64()>
-    (Hmul64u <fe.TypeUInt64()>
-      (Const64 <fe.TypeUInt64()> [int64(1<<63+(umagic(64,c).m+1)/2)])
-      (Rsh64Ux64 <fe.TypeUInt64()> x (Const64 <fe.TypeUInt64()> [1])))
-    (Const64 <fe.TypeUInt64()> [umagic(64,c).s-2]))
+  (Rsh64Ux64 <types.UInt64>
+    (Hmul64u <types.UInt64>
+      (Const64 <types.UInt64> [int64(1<<63+(umagic(64,c).m+1)/2)])
+      (Rsh64Ux64 <types.UInt64> x (Const64 <types.UInt64> [1])))
+    (Const64 <types.UInt64> [umagic(64,c).s-2]))
 (Div64u x (Const64 [c])) && umagicOK(64, c) && config.RegSize == 8 ->
-  (Rsh64Ux64 <fe.TypeUInt64()>
+  (Rsh64Ux64 <types.UInt64>
     (Avg64u
       x
-      (Hmul64u <fe.TypeUInt64()>
-        (Const64 <fe.TypeUInt64()> [int64(umagic(64,c).m)])
+      (Hmul64u <types.UInt64>
+        (Const64 <types.UInt64> [int64(umagic(64,c).m)])
         x))
-    (Const64 <fe.TypeUInt64()> [umagic(64,c).s-1]))
+    (Const64 <types.UInt64> [umagic(64,c).s-1]))

 // Signed divide by a negative constant. Rewrite to divide by a positive constant.
 (Div8 <t> n (Const8 [c])) && c < 0 && c != -1<<7 -> (Neg8 (Div8 <t> n (Const8 <t> [-c])))
@@ -1101,10 +1101,10 @@
 // Dividing by the most-negative number. Result is always 0 except
 // if the input is also the most-negative number.
 // We can detect that using the sign bit of x & -x.
-(Div8 <t> x (Const8 [-1<<7 ])) -> (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <fe.TypeUInt64()> [7 ]))
-(Div16 <t> x (Const16 [-1<<15])) -> (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <fe.TypeUInt64()> [15]))
-(Div32 <t> x (Const32 [-1<<31])) -> (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <fe.TypeUInt64()> [31]))
-(Div64 <t> x (Const64 [-1<<63])) -> (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <fe.TypeUInt64()> [63]))
+(Div8 <t> x (Const8 [-1<<7 ])) -> (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <types.UInt64> [7 ]))
+(Div16 <t> x (Const16 [-1<<15])) -> (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <types.UInt64> [15]))
+(Div32 <t> x (Const32 [-1<<31])) -> (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <types.UInt64> [31]))
+(Div64 <t> x (Const64 [-1<<63])) -> (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <types.UInt64> [63]))
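The int8 instance of the trick above, in plain Go: x & -x isolates the lowest set bit, and that bit is bit 7 exactly when x is -128, the only input for which x / -128 is nonzero:

func divByMinInt8(x int8) int8 {
	return int8(uint8(x&-x) >> 7) // 1 iff x == -128, else 0 == x / -128
}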

 // Signed divide by power of 2.
 // n / c = n >> log(c) if n >= 0
@@ -1112,96 +1112,96 @@
 // We conditionally add c-1 by adding n>>63>>(64-log(c)) (first shift signed, second shift unsigned).
 (Div8 <t> n (Const8 [c])) && isPowerOfTwo(c) ->
   (Rsh8x64
-    (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <fe.TypeUInt64()> [ 7])) (Const64 <fe.TypeUInt64()> [ 8-log2(c)])))
-    (Const64 <fe.TypeUInt64()> [log2(c)]))
+    (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <types.UInt64> [ 7])) (Const64 <types.UInt64> [ 8-log2(c)])))
+    (Const64 <types.UInt64> [log2(c)]))
 (Div16 <t> n (Const16 [c])) && isPowerOfTwo(c) ->
   (Rsh16x64
-    (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <fe.TypeUInt64()> [15])) (Const64 <fe.TypeUInt64()> [16-log2(c)])))
-    (Const64 <fe.TypeUInt64()> [log2(c)]))
+    (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <types.UInt64> [15])) (Const64 <types.UInt64> [16-log2(c)])))
+    (Const64 <types.UInt64> [log2(c)]))
 (Div32 <t> n (Const32 [c])) && isPowerOfTwo(c) ->
   (Rsh32x64
-    (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <fe.TypeUInt64()> [31])) (Const64 <fe.TypeUInt64()> [32-log2(c)])))
-    (Const64 <fe.TypeUInt64()> [log2(c)]))
+    (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <types.UInt64> [31])) (Const64 <types.UInt64> [32-log2(c)])))
+    (Const64 <types.UInt64> [log2(c)]))
 (Div64 <t> n (Const64 [c])) && isPowerOfTwo(c) ->
   (Rsh64x64
-    (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <fe.TypeUInt64()> [63])) (Const64 <fe.TypeUInt64()> [64-log2(c)])))
-    (Const64 <fe.TypeUInt64()> [log2(c)]))
+    (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <types.UInt64> [63])) (Const64 <types.UInt64> [64-log2(c)])))
+    (Const64 <types.UInt64> [log2(c)]))
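Written out for n/8 on int32, the fix-up above adds c-1 = 7 to negative inputs so the arithmetic shift rounds toward zero rather than toward negative infinity (a sketch of one instance; the rules emit the same shape for every width and power):

func div8(n int32) int32 {
	bias := int32(uint32(n>>31) >> (32 - 3)) // 7 when n < 0, else 0
	return (n + bias) >> 3                   // e.g. -9/8 == -1, not -2
}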
|
||||
// Signed divide, not a power of 2. Strength reduce to a multiply.
(Div8 <t> x (Const8 [c])) && smagicOK(8,c) ->
(Sub8 <t>
(Rsh32x64 <t>
(Mul32 <fe.TypeUInt32()>
(Const32 <fe.TypeUInt32()> [int64(smagic(8,c).m)])
(Mul32 <types.UInt32>
(Const32 <types.UInt32> [int64(smagic(8,c).m)])
(SignExt8to32 x))
(Const64 <fe.TypeUInt64()> [8+smagic(8,c).s]))
(Const64 <types.UInt64> [8+smagic(8,c).s]))
(Rsh32x64 <t>
(SignExt8to32 x)
(Const64 <fe.TypeUInt64()> [31])))
(Const64 <types.UInt64> [31])))
(Div16 <t> x (Const16 [c])) && smagicOK(16,c) ->
(Sub16 <t>
(Rsh32x64 <t>
(Mul32 <fe.TypeUInt32()>
(Const32 <fe.TypeUInt32()> [int64(smagic(16,c).m)])
(Mul32 <types.UInt32>
(Const32 <types.UInt32> [int64(smagic(16,c).m)])
(SignExt16to32 x))
(Const64 <fe.TypeUInt64()> [16+smagic(16,c).s]))
(Const64 <types.UInt64> [16+smagic(16,c).s]))
(Rsh32x64 <t>
(SignExt16to32 x)
(Const64 <fe.TypeUInt64()> [31])))
(Const64 <types.UInt64> [31])))
(Div32 <t> x (Const32 [c])) && smagicOK(32,c) && config.RegSize == 8 ->
(Sub32 <t>
(Rsh64x64 <t>
(Mul64 <fe.TypeUInt64()>
(Const64 <fe.TypeUInt64()> [int64(smagic(32,c).m)])
(Mul64 <types.UInt64>
(Const64 <types.UInt64> [int64(smagic(32,c).m)])
(SignExt32to64 x))
(Const64 <fe.TypeUInt64()> [32+smagic(32,c).s]))
(Const64 <types.UInt64> [32+smagic(32,c).s]))
(Rsh64x64 <t>
(SignExt32to64 x)
(Const64 <fe.TypeUInt64()> [63])))
(Const64 <types.UInt64> [63])))
(Div32 <t> x (Const32 [c])) && smagicOK(32,c) && config.RegSize == 4 && smagic(32,c).m&1 == 0 ->
(Sub32 <t>
(Rsh32x64 <t>
(Hmul32 <t>
(Const32 <fe.TypeUInt32()> [int64(int32(smagic(32,c).m/2))])
(Const32 <types.UInt32> [int64(int32(smagic(32,c).m/2))])
x)
(Const64 <fe.TypeUInt64()> [smagic(32,c).s-1]))
(Const64 <types.UInt64> [smagic(32,c).s-1]))
(Rsh32x64 <t>
x
(Const64 <fe.TypeUInt64()> [31])))
(Const64 <types.UInt64> [31])))
(Div32 <t> x (Const32 [c])) && smagicOK(32,c) && config.RegSize == 4 && smagic(32,c).m&1 != 0 ->
(Sub32 <t>
(Rsh32x64 <t>
(Add32 <t>
(Hmul32 <t>
(Const32 <fe.TypeUInt32()> [int64(int32(smagic(32,c).m))])
(Const32 <types.UInt32> [int64(int32(smagic(32,c).m))])
x)
x)
(Const64 <fe.TypeUInt64()> [smagic(32,c).s]))
(Const64 <types.UInt64> [smagic(32,c).s]))
(Rsh32x64 <t>
x
(Const64 <fe.TypeUInt64()> [31])))
(Const64 <types.UInt64> [31])))
(Div64 <t> x (Const64 [c])) && smagicOK(64,c) && smagic(64,c).m&1 == 0 ->
(Sub64 <t>
(Rsh64x64 <t>
(Hmul64 <t>
(Const64 <fe.TypeUInt64()> [int64(smagic(64,c).m/2)])
(Const64 <types.UInt64> [int64(smagic(64,c).m/2)])
x)
(Const64 <fe.TypeUInt64()> [smagic(64,c).s-1]))
(Const64 <types.UInt64> [smagic(64,c).s-1]))
(Rsh64x64 <t>
x
(Const64 <fe.TypeUInt64()> [63])))
(Const64 <types.UInt64> [63])))
(Div64 <t> x (Const64 [c])) && smagicOK(64,c) && smagic(64,c).m&1 != 0 ->
(Sub64 <t>
(Rsh64x64 <t>
(Add64 <t>
(Hmul64 <t>
(Const64 <fe.TypeUInt64()> [int64(smagic(64,c).m)])
(Const64 <types.UInt64> [int64(smagic(64,c).m)])
x)
x)
(Const64 <fe.TypeUInt64()> [smagic(64,c).s]))
(Const64 <types.UInt64> [smagic(64,c).s]))
(Rsh64x64 <t>
x
(Const64 <fe.TypeUInt64()> [63])))
(Const64 <types.UInt64> [63])))
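These magic-number rules follow the classic strength reduction q = (m*x) >> (w+s), with a final correction that subtracts x's sign (x>>31, that is, -1 or 0) so the quotient rounds toward zero. A hedged sketch of the 8-bit case, with m and s standing in for smagic(8,c).m and smagic(8,c).s:

// div8ByConst mimics the Div8 rule: widen to 32 bits, multiply by the
// magic constant, arithmetic-shift right by 8+s, then subtract wx>>31
// (-1 for negative x, 0 otherwise) to round toward zero.
func div8ByConst(x int8, m uint32, s uint) int8 {
	wx := int32(x)
	q := int32(m) * wx >> (8 + s)
	return int8(q - wx>>31)
}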
// Unsigned mod by power of 2 constant.
(Mod8u <t> n (Const8 [c])) && isPowerOfTwo(c&0xff) -> (And8 n (Const8 <t> [(c&0xff)-1]))
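The unsigned-mod rule is the familiar mask identity n mod 2^k = n & (2^k - 1); as a one-line sketch (valid only when c is a power of two, as the rule's condition guarantees):

// mod8u mirrors (Mod8u n (Const8 [c])) -> (And8 n (Const8 [(c)-1])).
func mod8u(n, c uint8) uint8 { return n & (c - 1) }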
@@ -204,12 +204,13 @@ func genRules(arch arch) {
}

body := buf.String()
// Do a rough match to predict whether we need b, config, and/or fe.
// Do a rough match to predict whether we need b, config, fe, and/or types.
// It's not precise--thus the blank assignments--but it's good enough
// to avoid generating needless code and doing pointless nil checks.
hasb := strings.Contains(body, "b.")
hasconfig := strings.Contains(body, "config.") || strings.Contains(body, "config)")
hasfe := strings.Contains(body, "fe.")
hasts := strings.Contains(body, "types.")
fmt.Fprintf(w, "func rewriteValue%s_%s(v *Value) bool {\n", arch.name, op)
if hasb || hasconfig || hasfe {
fmt.Fprintln(w, "b := v.Block")

@@ -223,6 +224,10 @@ func genRules(arch arch) {
fmt.Fprintln(w, "fe := b.Func.fe")
fmt.Fprintln(w, "_ = fe")
}
if hasts {
fmt.Fprintln(w, "types := &b.Func.Config.Types")
fmt.Fprintln(w, "_ = types")
}
fmt.Fprint(w, body)
fmt.Fprintf(w, "}\n")
}

@@ -234,6 +239,8 @@ func genRules(arch arch) {
fmt.Fprintln(w, "_ = config")
fmt.Fprintln(w, "fe := b.Func.fe")
fmt.Fprintln(w, "_ = fe")
fmt.Fprintln(w, "types := &config.Types")
fmt.Fprintln(w, "_ = types")
fmt.Fprintf(w, "switch b.Kind {\n")
ops = nil
for op := range blockrules {

@@ -719,7 +726,7 @@ func typeName(typ string) string {
case "Flags", "Mem", "Void", "Int128":
return "Type" + typ
default:
return "fe.Type" + typ + "()"
return "types." + typ
}
}
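The typeName change is what turns a <UInt64> annotation in a rule into types.UInt64 in the generated files instead of a fe.TypeUInt64() call. A self-contained sketch of the mapping and what it prints:

package main

import "fmt"

// typeName mirrors the generator's mapping after this CL: SSA-internal
// types keep their Type* globals; everything else becomes a field of
// the shared Types struct hung off the Config.
func typeName(typ string) string {
	switch typ {
	case "Flags", "Mem", "Void", "Int128":
		return "Type" + typ
	default:
		return "types." + typ
	}
}

func main() {
	fmt.Println(typeName("UInt64")) // types.UInt64 (was fe.TypeUInt64())
	fmt.Println(typeName("Mem"))    // TypeMem (unchanged)
}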
@@ -197,7 +197,8 @@ func insertLoopReschedChecks(f *Func) {
// if sp < g.limit { goto sched }
// goto header

pt := f.fe.TypeUintptr()
types := &f.Config.Types
pt := types.Uintptr
g := test.NewValue1(bb.Pos, OpGetG, pt, mem0)
sp := test.NewValue0(bb.Pos, OpSP, pt)
cmpOp := OpLess64U

@@ -206,7 +207,7 @@ func insertLoopReschedChecks(f *Func) {
}
limaddr := test.NewValue1I(bb.Pos, OpOffPtr, pt, 2*pt.Size(), g)
lim := test.NewValue2(bb.Pos, OpLoad, pt, limaddr, mem0)
cmp := test.NewValue2(bb.Pos, cmpOp, f.fe.TypeBool(), sp, lim)
cmp := test.NewValue2(bb.Pos, cmpOp, types.Bool, sp, lim)
test.SetControl(cmp)

// if true, goto sched
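For context, the inserted check loads the stack limit from the g struct (stored two pointer-sized words in, hence the 2*pt.Size() offset) and branches to the reschedule block when sp has dropped below it. Roughly, as a sketch using unsafe pointer arithmetic (the layout reading is our paraphrase, not the CL's code):

// needResched is the scalar shape of the inserted test:
// if sp < g.limit { goto sched }. Requires importing "unsafe".
func needResched(sp, g uintptr) bool {
	const ptrSize = unsafe.Sizeof(uintptr(0))
	limit := *(*uintptr)(unsafe.Pointer(g + 2*ptrSize)) // g.limit
	return sp < limit
}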
@@ -2055,10 +2055,11 @@ func (e *edgeState) erase(loc Location) {
func (e *edgeState) findRegFor(typ Type) Location {
// Which registers are possibilities.
var m regMask
types := &e.s.f.Config.Types
if typ.IsFloat() {
m = e.s.compatRegs(e.s.f.fe.TypeFloat64())
m = e.s.compatRegs(types.Float64)
} else {
m = e.s.compatRegs(e.s.f.fe.TypeInt64())
m = e.s.compatRegs(types.Int64)
}

// Pick a register. In priority order:

@@ -2082,7 +2083,7 @@ func (e *edgeState) findRegFor(typ Type) Location {
// No register is available. Allocate a temp location to spill a register to.
// The type of the slot is immaterial - it will not be live across
// any safepoint. Just use a type big enough to hold any register.
typ = e.s.f.fe.TypeInt64()
typ = types.Int64
t := LocalSlot{e.s.f.fe.Auto(typ), typ, 0}
// TODO: reuse these slots.
@@ -4354,8 +4354,8 @@ func rewriteValue386_Op386MOVSDconst(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (MOVSDconst [c])
// cond: config.ctxt.Flag_shared
// result: (MOVSDconst2 (MOVSDconst1 [c]))

@@ -4365,7 +4365,7 @@ func rewriteValue386_Op386MOVSDconst(v *Value) bool {
break
}
v.reset(Op386MOVSDconst2)
v0 := b.NewValue0(v.Pos, Op386MOVSDconst1, fe.TypeUInt32())
v0 := b.NewValue0(v.Pos, Op386MOVSDconst1, types.UInt32)
v0.AuxInt = c
v.AddArg(v0)
return true

@@ -4843,8 +4843,8 @@ func rewriteValue386_Op386MOVSSconst(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (MOVSSconst [c])
// cond: config.ctxt.Flag_shared
// result: (MOVSSconst2 (MOVSSconst1 [c]))

@@ -4854,7 +4854,7 @@ func rewriteValue386_Op386MOVSSconst(v *Value) bool {
break
}
v.reset(Op386MOVSSconst2)
v0 := b.NewValue0(v.Pos, Op386MOVSSconst1, fe.TypeUInt32())
v0 := b.NewValue0(v.Pos, Op386MOVSSconst1, types.UInt32)
v0.AuxInt = c
v.AddArg(v0)
return true

@@ -7212,8 +7212,8 @@ func rewriteValue386_Op386NOTL(v *Value) bool {
func rewriteValue386_Op386ORL(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (ORL x (MOVLconst [c]))
// cond:
// result: (ORLconst [c] x)

@@ -7479,7 +7479,7 @@ func rewriteValue386_Op386ORL(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(v.Pos, Op386MOVWload, fe.TypeUInt16())
v0 := b.NewValue0(v.Pos, Op386MOVWload, types.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i

@@ -7554,7 +7554,7 @@ func rewriteValue386_Op386ORL(v *Value) bool {
break
}
b = mergePoint(b, x0, x1, x2)
v0 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v0 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i

@@ -9890,8 +9890,8 @@ func rewriteValue386_OpDiv64F(v *Value) bool {
func rewriteValue386_OpDiv8(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Div8 x y)
// cond:
// result: (DIVW (SignExt8to16 x) (SignExt8to16 y))

@@ -9899,10 +9899,10 @@ func rewriteValue386_OpDiv8(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386DIVW)
v0 := b.NewValue0(v.Pos, OpSignExt8to16, fe.TypeInt16())
v0 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpSignExt8to16, fe.TypeInt16())
v1 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
v1.AddArg(y)
v.AddArg(v1)
return true

@@ -9911,8 +9911,8 @@ func rewriteValue386_OpDiv8(v *Value) bool {
func rewriteValue386_OpDiv8u(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Div8u x y)
// cond:
// result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))

@@ -9920,10 +9920,10 @@ func rewriteValue386_OpDiv8u(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386DIVWU)
v0 := b.NewValue0(v.Pos, OpZeroExt8to16, fe.TypeUInt16())
v0 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpZeroExt8to16, fe.TypeUInt16())
v1 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
v1.AddArg(y)
v.AddArg(v1)
return true
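386 has no 8-bit divide instruction, so Div8 and Div8u widen both operands to 16 bits and use DIVW/DIVWU. The scalar equivalent of the signed rule, as a sketch:

// div8 matches (DIVW (SignExt8to16 x) (SignExt8to16 y)): sign-extend
// both operands and divide at 16 bits; the quotient fits back in 8.
func div8(x, y int8) int8 { return int8(int16(x) / int16(y)) }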
@@ -11163,8 +11163,8 @@ func rewriteValue386_OpMod32u(v *Value) bool {
func rewriteValue386_OpMod8(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Mod8 x y)
// cond:
// result: (MODW (SignExt8to16 x) (SignExt8to16 y))

@@ -11172,10 +11172,10 @@ func rewriteValue386_OpMod8(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386MODW)
v0 := b.NewValue0(v.Pos, OpSignExt8to16, fe.TypeInt16())
v0 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpSignExt8to16, fe.TypeInt16())
v1 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
v1.AddArg(y)
v.AddArg(v1)
return true

@@ -11184,8 +11184,8 @@ func rewriteValue386_OpMod8(v *Value) bool {
func rewriteValue386_OpMod8u(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Mod8u x y)
// cond:
// result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))

@@ -11193,10 +11193,10 @@ func rewriteValue386_OpMod8u(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(Op386MODWU)
v0 := b.NewValue0(v.Pos, OpZeroExt8to16, fe.TypeUInt16())
v0 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
v0.AddArg(x)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpZeroExt8to16, fe.TypeUInt16())
v1 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
v1.AddArg(y)
v.AddArg(v1)
return true

@@ -11207,8 +11207,8 @@ func rewriteValue386_OpMove(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Move [0] _ _ mem)
// cond:
// result: mem

@@ -11234,7 +11234,7 @@ func rewriteValue386_OpMove(v *Value) bool {
mem := v.Args[2]
v.reset(Op386MOVBstore)
v.AddArg(dst)
v0 := b.NewValue0(v.Pos, Op386MOVBload, fe.TypeUInt8())
v0 := b.NewValue0(v.Pos, Op386MOVBload, types.UInt8)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)

@@ -11253,7 +11253,7 @@ func rewriteValue386_OpMove(v *Value) bool {
mem := v.Args[2]
v.reset(Op386MOVWstore)
v.AddArg(dst)
v0 := b.NewValue0(v.Pos, Op386MOVWload, fe.TypeUInt16())
v0 := b.NewValue0(v.Pos, Op386MOVWload, types.UInt16)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)

@@ -11272,7 +11272,7 @@ func rewriteValue386_OpMove(v *Value) bool {
mem := v.Args[2]
v.reset(Op386MOVLstore)
v.AddArg(dst)
v0 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v0 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)

@@ -11292,14 +11292,14 @@ func rewriteValue386_OpMove(v *Value) bool {
v.reset(Op386MOVBstore)
v.AuxInt = 2
v.AddArg(dst)
v0 := b.NewValue0(v.Pos, Op386MOVBload, fe.TypeUInt8())
v0 := b.NewValue0(v.Pos, Op386MOVBload, types.UInt8)
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386MOVWstore, TypeMem)
v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, Op386MOVWload, fe.TypeUInt16())
v2 := b.NewValue0(v.Pos, Op386MOVWload, types.UInt16)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)

@@ -11320,14 +11320,14 @@ func rewriteValue386_OpMove(v *Value) bool {
v.reset(Op386MOVBstore)
v.AuxInt = 4
v.AddArg(dst)
v0 := b.NewValue0(v.Pos, Op386MOVBload, fe.TypeUInt8())
v0 := b.NewValue0(v.Pos, Op386MOVBload, types.UInt8)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386MOVLstore, TypeMem)
v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v2 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)

@@ -11348,14 +11348,14 @@ func rewriteValue386_OpMove(v *Value) bool {
v.reset(Op386MOVWstore)
v.AuxInt = 4
v.AddArg(dst)
v0 := b.NewValue0(v.Pos, Op386MOVWload, fe.TypeUInt16())
v0 := b.NewValue0(v.Pos, Op386MOVWload, types.UInt16)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386MOVLstore, TypeMem)
v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v2 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)

@@ -11376,14 +11376,14 @@ func rewriteValue386_OpMove(v *Value) bool {
v.reset(Op386MOVLstore)
v.AuxInt = 3
v.AddArg(dst)
v0 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v0 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
v0.AuxInt = 3
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386MOVLstore, TypeMem)
v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v2 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)

@@ -11404,14 +11404,14 @@ func rewriteValue386_OpMove(v *Value) bool {
v.reset(Op386MOVLstore)
v.AuxInt = 4
v.AddArg(dst)
v0 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v0 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386MOVLstore, TypeMem)
v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v2 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)

@@ -11442,7 +11442,7 @@ func rewriteValue386_OpMove(v *Value) bool {
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, Op386MOVLstore, TypeMem)
v2.AddArg(dst)
v3 := b.NewValue0(v.Pos, Op386MOVLload, fe.TypeUInt32())
v3 := b.NewValue0(v.Pos, Op386MOVLload, types.UInt32)
v3.AddArg(src)
v3.AddArg(mem)
v2.AddArg(v3)

@@ -11482,7 +11482,7 @@ func rewriteValue386_OpMove(v *Value) bool {
v.reset(Op386REPMOVSL)
v.AddArg(dst)
v.AddArg(src)
v0 := b.NewValue0(v.Pos, Op386MOVLconst, fe.TypeUInt32())
v0 := b.NewValue0(v.Pos, Op386MOVLconst, types.UInt32)
v0.AuxInt = s / 4
v.AddArg(v0)
v.AddArg(mem)

@@ -11595,11 +11595,11 @@ func rewriteValue386_OpNeg32F(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Neg32F x)
// cond: !config.use387
// result: (PXOR x (MOVSSconst <fe.TypeFloat32()> [f2i(math.Copysign(0, -1))]))
// result: (PXOR x (MOVSSconst <types.Float32> [f2i(math.Copysign(0, -1))]))
for {
x := v.Args[0]
if !(!config.use387) {

@@ -11607,7 +11607,7 @@ func rewriteValue386_OpNeg32F(v *Value) bool {
}
v.reset(Op386PXOR)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386MOVSSconst, fe.TypeFloat32())
v0 := b.NewValue0(v.Pos, Op386MOVSSconst, types.Float32)
v0.AuxInt = f2i(math.Copysign(0, -1))
v.AddArg(v0)
return true

@@ -11631,11 +11631,11 @@ func rewriteValue386_OpNeg64F(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Neg64F x)
// cond: !config.use387
// result: (PXOR x (MOVSDconst <fe.TypeFloat64()> [f2i(math.Copysign(0, -1))]))
// result: (PXOR x (MOVSDconst <types.Float64> [f2i(math.Copysign(0, -1))]))
for {
x := v.Args[0]
if !(!config.use387) {

@@ -11643,7 +11643,7 @@ func rewriteValue386_OpNeg64F(v *Value) bool {
}
v.reset(Op386PXOR)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, Op386MOVSDconst, fe.TypeFloat64())
v0 := b.NewValue0(v.Pos, Op386MOVSDconst, types.Float64)
v0.AuxInt = f2i(math.Copysign(0, -1))
v.AddArg(v0)
return true
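The Neg32F/Neg64F rules negate by XORing the sign bit; f2i(math.Copysign(0, -1)) is just negative zero's bit pattern, in other words the sign mask. The same trick in plain Go, as a sketch (requires importing "math"):

// neg64F flips the IEEE-754 sign bit, which is exactly what PXOR does
// with the Copysign(0, -1) constant in the rule above.
func neg64F(x float64) float64 {
	return math.Float64frombits(math.Float64bits(x) ^ (1 << 63))
}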
@@ -12955,8 +12955,8 @@ func rewriteValue386_OpZero(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Zero [0] _ mem)
// cond:
// result: mem

@@ -13103,7 +13103,7 @@ func rewriteValue386_OpZero(v *Value) bool {
}
v.reset(OpZero)
v.AuxInt = s - s%4
v0 := b.NewValue0(v.Pos, Op386ADDLconst, fe.TypeUInt32())
v0 := b.NewValue0(v.Pos, Op386ADDLconst, types.UInt32)
v0.AuxInt = s % 4
v0.AddArg(destptr)
v.AddArg(v0)

@@ -13196,7 +13196,7 @@ func rewriteValue386_OpZero(v *Value) bool {
v.reset(Op386DUFFZERO)
v.AuxInt = 1 * (128 - s/4)
v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, Op386MOVLconst, fe.TypeUInt32())
v0 := b.NewValue0(v.Pos, Op386MOVLconst, types.UInt32)
v0.AuxInt = 0
v.AddArg(v0)
v.AddArg(mem)

@@ -13214,10 +13214,10 @@ func rewriteValue386_OpZero(v *Value) bool {
}
v.reset(Op386REPSTOSL)
v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, Op386MOVLconst, fe.TypeUInt32())
v0 := b.NewValue0(v.Pos, Op386MOVLconst, types.UInt32)
v0.AuxInt = s / 4
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, Op386MOVLconst, fe.TypeUInt32())
v1 := b.NewValue0(v.Pos, Op386MOVLconst, types.UInt32)
v1.AuxInt = 0
v.AddArg(v1)
v.AddArg(mem)

@@ -13283,6 +13283,8 @@ func rewriteBlock386(b *Block) bool {
_ = config
fe := b.Func.fe
_ = fe
types := &config.Types
_ = types
switch b.Kind {
case Block386EQ:
// match: (EQ (InvertFlags cmp) yes no)
@@ -6786,8 +6786,8 @@ func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
// cond: ValAndOff(sc).canAdd(off)
// result: (MOVLstoreconst [ValAndOff(sc).add(off)] {s} ptr mem)

@@ -6934,7 +6934,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
v.AuxInt = ValAndOff(a).Off()
v.Aux = s
v.AddArg(p)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
v.AddArg(v0)
v.AddArg(mem)

@@ -6992,8 +6992,8 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (MOVLstoreconstidx1 [c] {sym} ptr (SHLQconst [2] idx) mem)
// cond:
// result: (MOVLstoreconstidx4 [c] {sym} ptr idx mem)

@@ -7093,7 +7093,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool {
v.Aux = s
v.AddArg(p)
v.AddArg(i)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
v0.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
v.AddArg(v0)
v.AddArg(mem)

@@ -7104,8 +7104,8 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1(v *Value) bool {
func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem)
// cond:
// result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem)

@@ -7184,7 +7184,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4(v *Value) bool {
v0.AuxInt = 2
v0.AddArg(i)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
v1.AuxInt = ValAndOff(a).Val()&0xffffffff | ValAndOff(c).Val()<<32
v.AddArg(v1)
v.AddArg(mem)

@@ -12037,8 +12037,8 @@ func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (ORL x (MOVLconst [c]))
// cond:
// result: (ORLconst [c] x)

@@ -12304,7 +12304,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
break
}
b = mergePoint(b, x0, x1)
v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, fe.TypeUInt16())
v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i

@@ -12379,7 +12379,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
break
}
b = mergePoint(b, x0, x1, x2)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, fe.TypeUInt32())
v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i

@@ -12567,7 +12567,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = 8
v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, fe.TypeUInt16())
v1 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
v1.AuxInt = i - 1
v1.Aux = s
v1.AddArg(p)

@@ -12707,7 +12707,7 @@ func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPL, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, fe.TypeUInt32())
v1 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
v1.AuxInt = i1 - 2
v1.Aux = s
v1.AddArg(p)

@@ -12903,8 +12903,8 @@ func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (ORQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (ORQconst [c] x)

@@ -13222,7 +13222,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
break
}
b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, fe.TypeUInt64())
v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
v.reset(OpCopy)
v.AddArg(v0)
v0.AuxInt = i

@@ -13668,7 +13668,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
v0 := b.NewValue0(v.Pos, OpAMD64BSWAPQ, v.Type)
v.reset(OpCopy)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, fe.TypeUInt64())
v1 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
v1.AuxInt = i - 7
v1.Aux = s
v1.AddArg(p)

@@ -17355,8 +17355,8 @@ func rewriteValueAMD64_OpAndB(v *Value) bool {
func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (AtomicAdd32 ptr val mem)
// cond:
// result: (AddTupleFirst32 (XADDLlock val ptr mem) val)

@@ -17365,7 +17365,7 @@ func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
val := v.Args[1]
mem := v.Args[2]
v.reset(OpAMD64AddTupleFirst32)
v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, MakeTuple(fe.TypeUInt32(), TypeMem))
v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, MakeTuple(types.UInt32, TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)

@@ -17377,8 +17377,8 @@ func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (AtomicAdd64 ptr val mem)
// cond:
// result: (AddTupleFirst64 (XADDQlock val ptr mem) val)

@@ -17387,7 +17387,7 @@ func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
val := v.Args[1]
mem := v.Args[2]
v.reset(OpAMD64AddTupleFirst64)
v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, MakeTuple(fe.TypeUInt64(), TypeMem))
v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, MakeTuple(types.UInt64, TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)

@@ -17554,17 +17554,17 @@ func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (AtomicStore32 ptr val mem)
// cond:
// result: (Select1 (XCHGL <MakeTuple(fe.TypeUInt32(),TypeMem)> val ptr mem))
// result: (Select1 (XCHGL <MakeTuple(types.UInt32,TypeMem)> val ptr mem))
for {
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(fe.TypeUInt32(), TypeMem))
v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(types.UInt32, TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)

@@ -17575,17 +17575,17 @@ func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (AtomicStore64 ptr val mem)
// cond:
// result: (Select1 (XCHGQ <MakeTuple(fe.TypeUInt64(),TypeMem)> val ptr mem))
// result: (Select1 (XCHGQ <MakeTuple(types.UInt64,TypeMem)> val ptr mem))
for {
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(fe.TypeUInt64(), TypeMem))
v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(types.UInt64, TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)

@@ -17598,11 +17598,11 @@ func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (AtomicStorePtrNoWB ptr val mem)
// cond: config.PtrSize == 8
// result: (Select1 (XCHGQ <MakeTuple(fe.TypeBytePtr(),TypeMem)> val ptr mem))
// result: (Select1 (XCHGQ <MakeTuple(types.BytePtr,TypeMem)> val ptr mem))
for {
ptr := v.Args[0]
val := v.Args[1]

@@ -17611,7 +17611,7 @@ func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
break
}
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(fe.TypeBytePtr(), TypeMem))
v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, MakeTuple(types.BytePtr, TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)

@@ -17620,7 +17620,7 @@ func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
}
// match: (AtomicStorePtrNoWB ptr val mem)
// cond: config.PtrSize == 4
// result: (Select1 (XCHGL <MakeTuple(fe.TypeBytePtr(),TypeMem)> val ptr mem))
// result: (Select1 (XCHGL <MakeTuple(types.BytePtr,TypeMem)> val ptr mem))
for {
ptr := v.Args[0]
val := v.Args[1]

@@ -17629,7 +17629,7 @@ func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
break
}
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(fe.TypeBytePtr(), TypeMem))
v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, MakeTuple(types.BytePtr, TypeMem))
v0.AddArg(val)
v0.AddArg(ptr)
v0.AddArg(mem)
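These atomic stores lower to XCHG rather than a plain MOV because on x86 an exchange is implicitly locked and so provides the required full barrier; Select1 then keeps only the memory result and discards the swapped-out value. A rough language-level analogy in Go (requires importing "sync/atomic"):

// atomicStore32 is the shape of the lowering: an atomic swap whose old
// value is ignored, much as Select1 drops the value half of the tuple.
func atomicStore32(p *uint32, v uint32) {
	_ = atomic.SwapUint32(p, v)
}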
@@ -17654,15 +17654,15 @@ func rewriteValueAMD64_OpAvg64u(v *Value) bool {
func rewriteValueAMD64_OpBitLen32(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (BitLen32 x)
// cond:
// result: (BitLen64 (MOVLQZX <fe.TypeUInt64()> x))
// result: (BitLen64 (MOVLQZX <types.UInt64> x))
for {
x := v.Args[0]
v.reset(OpBitLen64)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, fe.TypeUInt64())
v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, types.UInt64)
v0.AddArg(x)
v.AddArg(v0)
return true

@@ -17671,8 +17671,8 @@ func rewriteValueAMD64_OpBitLen32(v *Value) bool {
func rewriteValueAMD64_OpBitLen64(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (BitLen64 <t> x)
// cond:
// result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <TypeFlags> (BSRQ x))))

@@ -17683,7 +17683,7 @@ func rewriteValueAMD64_OpBitLen64(v *Value) bool {
v.AuxInt = 1
v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
v1 := b.NewValue0(v.Pos, OpSelect0, t)
v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, MakeTuple(fe.TypeUInt64(), TypeFlags))
v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, MakeTuple(types.UInt64, TypeFlags))
v2.AddArg(x)
v1.AddArg(v2)
v0.AddArg(v1)

@@ -17691,7 +17691,7 @@ func rewriteValueAMD64_OpBitLen64(v *Value) bool {
v3.AuxInt = -1
v0.AddArg(v3)
v4 := b.NewValue0(v.Pos, OpSelect1, TypeFlags)
v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, MakeTuple(fe.TypeUInt64(), TypeFlags))
v5 := b.NewValue0(v.Pos, OpAMD64BSRQ, MakeTuple(types.UInt64, TypeFlags))
v5.AddArg(x)
v4.AddArg(v5)
v0.AddArg(v4)

@@ -17930,17 +17930,17 @@ func rewriteValueAMD64_OpConvert(v *Value) bool {
func rewriteValueAMD64_OpCtz32(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Ctz32 x)
// cond:
// result: (Select0 (BSFQ (ORQ <fe.TypeUInt64()> (MOVQconst [1<<32]) x)))
// result: (Select0 (BSFQ (ORQ <types.UInt64> (MOVQconst [1<<32]) x)))
for {
x := v.Args[0]
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(fe.TypeUInt64(), TypeFlags))
v1 := b.NewValue0(v.Pos, OpAMD64ORQ, fe.TypeUInt64())
v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(types.UInt64, TypeFlags))
v1 := b.NewValue0(v.Pos, OpAMD64ORQ, types.UInt64)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
v2.AuxInt = 1 << 32
v1.AddArg(v2)
v1.AddArg(x)

@@ -17952,8 +17952,8 @@ func rewriteValueAMD64_OpCtz32(v *Value) bool {
func rewriteValueAMD64_OpCtz64(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Ctz64 <t> x)
// cond:
// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <TypeFlags> (BSFQ x)))

@@ -17962,7 +17962,7 @@ func rewriteValueAMD64_OpCtz64(v *Value) bool {
x := v.Args[0]
v.reset(OpAMD64CMOVQEQ)
v0 := b.NewValue0(v.Pos, OpSelect0, t)
v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(fe.TypeUInt64(), TypeFlags))
v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(types.UInt64, TypeFlags))
v1.AddArg(x)
v0.AddArg(v1)
v.AddArg(v0)

@@ -17970,7 +17970,7 @@ func rewriteValueAMD64_OpCtz64(v *Value) bool {
v2.AuxInt = 64
v.AddArg(v2)
v3 := b.NewValue0(v.Pos, OpSelect1, TypeFlags)
v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(fe.TypeUInt64(), TypeFlags))
v4 := b.NewValue0(v.Pos, OpAMD64BSFQ, MakeTuple(types.UInt64, TypeFlags))
v4.AddArg(x)
v3.AddArg(v4)
v.AddArg(v3)
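Both bit-count rules defend against BSF/BSR leaving the destination undefined on zero input: Ctz32 ORs in bit 32 so the 64-bit source can never be zero, while Ctz64 and BitLen64 use CMOVQEQ on the Z flag to substitute the zero-input answer. A sketch of the Ctz32 trick (requires importing "math/bits"):

// ctz32 mirrors (Select0 (BSFQ (ORQ (MOVQconst [1<<32]) x))): with bit
// 32 set the scan never sees zero, and x == 0 correctly yields 32.
func ctz32(x uint32) int {
	return bits.TrailingZeros64(uint64(x) | 1<<32)
}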
@@ -18105,8 +18105,8 @@ func rewriteValueAMD64_OpDiv128u(v *Value) bool {
func rewriteValueAMD64_OpDiv16(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Div16 x y)
// cond:
// result: (Select0 (DIVW x y))

@@ -18114,7 +18114,7 @@ func rewriteValueAMD64_OpDiv16(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(fe.TypeInt16(), fe.TypeInt16()))
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(types.Int16, types.Int16))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)

@@ -18124,8 +18124,8 @@ func rewriteValueAMD64_OpDiv16(v *Value) bool {
func rewriteValueAMD64_OpDiv16u(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Div16u x y)
// cond:
// result: (Select0 (DIVWU x y))

@@ -18133,7 +18133,7 @@ func rewriteValueAMD64_OpDiv16u(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(fe.TypeUInt16(), fe.TypeUInt16()))
v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(types.UInt16, types.UInt16))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)

@@ -18143,8 +18143,8 @@ func rewriteValueAMD64_OpDiv16u(v *Value) bool {
func rewriteValueAMD64_OpDiv32(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Div32 x y)
// cond:
// result: (Select0 (DIVL x y))

@@ -18152,7 +18152,7 @@ func rewriteValueAMD64_OpDiv32(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(fe.TypeInt32(), fe.TypeInt32()))
v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(types.Int32, types.Int32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)

@@ -18175,8 +18175,8 @@ func rewriteValueAMD64_OpDiv32F(v *Value) bool {
func rewriteValueAMD64_OpDiv32u(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Div32u x y)
// cond:
// result: (Select0 (DIVLU x y))

@@ -18184,7 +18184,7 @@ func rewriteValueAMD64_OpDiv32u(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(types.UInt32, types.UInt32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)

@@ -18194,8 +18194,8 @@ func rewriteValueAMD64_OpDiv32u(v *Value) bool {
func rewriteValueAMD64_OpDiv64(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Div64 x y)
// cond:
// result: (Select0 (DIVQ x y))

@@ -18203,7 +18203,7 @@ func rewriteValueAMD64_OpDiv64(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(fe.TypeInt64(), fe.TypeInt64()))
v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(types.Int64, types.Int64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)

@@ -18226,8 +18226,8 @@ func rewriteValueAMD64_OpDiv64F(v *Value) bool {
func rewriteValueAMD64_OpDiv64u(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Div64u x y)
// cond:
// result: (Select0 (DIVQU x y))

@@ -18235,7 +18235,7 @@ func rewriteValueAMD64_OpDiv64u(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(types.UInt64, types.UInt64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)

@@ -18245,8 +18245,8 @@ func rewriteValueAMD64_OpDiv64u(v *Value) bool {
func rewriteValueAMD64_OpDiv8(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Div8 x y)
// cond:
// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))

@@ -18254,11 +18254,11 @@ func rewriteValueAMD64_OpDiv8(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(fe.TypeInt16(), fe.TypeInt16()))
v1 := b.NewValue0(v.Pos, OpSignExt8to16, fe.TypeInt16())
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(types.Int16, types.Int16))
v1 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpSignExt8to16, fe.TypeInt16())
v2 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)

@@ -18268,8 +18268,8 @@ func rewriteValueAMD64_OpDiv8(v *Value) bool {
func rewriteValueAMD64_OpDiv8u(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Div8u x y)
// cond:
// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))

@@ -18277,11 +18277,11 @@ func rewriteValueAMD64_OpDiv8u(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect0)
v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(fe.TypeUInt16(), fe.TypeUInt16()))
v1 := b.NewValue0(v.Pos, OpZeroExt8to16, fe.TypeUInt16())
v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(types.UInt16, types.UInt16))
v1 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpZeroExt8to16, fe.TypeUInt16())
v2 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)

@@ -19780,8 +19780,8 @@ func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
func rewriteValueAMD64_OpMod16(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Mod16 x y)
// cond:
// result: (Select1 (DIVW x y))

@@ -19789,7 +19789,7 @@ func rewriteValueAMD64_OpMod16(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(fe.TypeInt16(), fe.TypeInt16()))
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(types.Int16, types.Int16))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)

@@ -19799,8 +19799,8 @@ func rewriteValueAMD64_OpMod16(v *Value) bool {
func rewriteValueAMD64_OpMod16u(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Mod16u x y)
// cond:
// result: (Select1 (DIVWU x y))

@@ -19808,7 +19808,7 @@ func rewriteValueAMD64_OpMod16u(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(fe.TypeUInt16(), fe.TypeUInt16()))
v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(types.UInt16, types.UInt16))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)

@@ -19818,8 +19818,8 @@ func rewriteValueAMD64_OpMod16u(v *Value) bool {
func rewriteValueAMD64_OpMod32(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Mod32 x y)
// cond:
// result: (Select1 (DIVL x y))

@@ -19827,7 +19827,7 @@ func rewriteValueAMD64_OpMod32(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(fe.TypeInt32(), fe.TypeInt32()))
v0 := b.NewValue0(v.Pos, OpAMD64DIVL, MakeTuple(types.Int32, types.Int32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)

@@ -19837,8 +19837,8 @@ func rewriteValueAMD64_OpMod32(v *Value) bool {
func rewriteValueAMD64_OpMod32u(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Mod32u x y)
// cond:
// result: (Select1 (DIVLU x y))

@@ -19846,7 +19846,7 @@ func rewriteValueAMD64_OpMod32u(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(fe.TypeUInt32(), fe.TypeUInt32()))
v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, MakeTuple(types.UInt32, types.UInt32))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)

@@ -19856,8 +19856,8 @@ func rewriteValueAMD64_OpMod32u(v *Value) bool {
func rewriteValueAMD64_OpMod64(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Mod64 x y)
// cond:
// result: (Select1 (DIVQ x y))

@@ -19865,7 +19865,7 @@ func rewriteValueAMD64_OpMod64(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(fe.TypeInt64(), fe.TypeInt64()))
v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, MakeTuple(types.Int64, types.Int64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)

@@ -19875,8 +19875,8 @@ func rewriteValueAMD64_OpMod64(v *Value) bool {
func rewriteValueAMD64_OpMod64u(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Mod64u x y)
// cond:
// result: (Select1 (DIVQU x y))

@@ -19884,7 +19884,7 @@ func rewriteValueAMD64_OpMod64u(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(fe.TypeUInt64(), fe.TypeUInt64()))
v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, MakeTuple(types.UInt64, types.UInt64))
v0.AddArg(x)
v0.AddArg(y)
v.AddArg(v0)

@@ -19894,8 +19894,8 @@ func rewriteValueAMD64_OpMod64u(v *Value) bool {
func rewriteValueAMD64_OpMod8(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Mod8 x y)
// cond:
// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))

@@ -19903,11 +19903,11 @@ func rewriteValueAMD64_OpMod8(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(fe.TypeInt16(), fe.TypeInt16()))
v1 := b.NewValue0(v.Pos, OpSignExt8to16, fe.TypeInt16())
v0 := b.NewValue0(v.Pos, OpAMD64DIVW, MakeTuple(types.Int16, types.Int16))
v1 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpSignExt8to16, fe.TypeInt16())
v2 := b.NewValue0(v.Pos, OpSignExt8to16, types.Int16)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)

@@ -19917,8 +19917,8 @@ func rewriteValueAMD64_OpMod8(v *Value) bool {
func rewriteValueAMD64_OpMod8u(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Mod8u x y)
// cond:
// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))

@@ -19926,11 +19926,11 @@ func rewriteValueAMD64_OpMod8u(v *Value) bool {
x := v.Args[0]
y := v.Args[1]
v.reset(OpSelect1)
v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(fe.TypeUInt16(), fe.TypeUInt16()))
v1 := b.NewValue0(v.Pos, OpZeroExt8to16, fe.TypeUInt16())
v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, MakeTuple(types.UInt16, types.UInt16))
v1 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
v1.AddArg(x)
v0.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpZeroExt8to16, fe.TypeUInt16())
v2 := b.NewValue0(v.Pos, OpZeroExt8to16, types.UInt16)
v2.AddArg(y)
v0.AddArg(v2)
v.AddArg(v0)

@@ -19942,8 +19942,8 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Move [0] _ _ mem)
// cond:
// result: mem

@@ -19969,7 +19969,7 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
mem := v.Args[2]
v.reset(OpAMD64MOVBstore)
v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, fe.TypeUInt8())
v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, types.UInt8)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)

@@ -19988,7 +19988,7 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
mem := v.Args[2]
v.reset(OpAMD64MOVWstore)
v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, fe.TypeUInt16())
v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)

@@ -20007,7 +20007,7 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
mem := v.Args[2]
v.reset(OpAMD64MOVLstore)
v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, fe.TypeUInt32())
v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)

@@ -20026,7 +20026,7 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
mem := v.Args[2]
v.reset(OpAMD64MOVQstore)
v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, fe.TypeUInt64())
v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)

@@ -20065,14 +20065,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = 2
v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, fe.TypeUInt8())
v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, types.UInt8)
v0.AuxInt = 2
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, TypeMem)
v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, fe.TypeUInt16())
v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)

@@ -20093,14 +20093,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
v.reset(OpAMD64MOVBstore)
v.AuxInt = 4
v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, fe.TypeUInt8())
v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, types.UInt8)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem)
v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, fe.TypeUInt32())
v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)

@@ -20121,14 +20121,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
v.reset(OpAMD64MOVWstore)
v.AuxInt = 4
v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, fe.TypeUInt16())
v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, types.UInt16)
v0.AuxInt = 4
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem)
v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, fe.TypeUInt32())
v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)

@@ -20149,14 +20149,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
v.reset(OpAMD64MOVLstore)
v.AuxInt = 3
v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, fe.TypeUInt32())
v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
v0.AuxInt = 3
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, TypeMem)
v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, fe.TypeUInt32())
v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, types.UInt32)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)

@@ -20178,14 +20178,14 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
v.reset(OpAMD64MOVQstore)
v.AuxInt = s - 8
v.AddArg(dst)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, fe.TypeUInt64())
v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
v0.AuxInt = s - 8
v0.AddArg(src)
v0.AddArg(mem)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem)
v1.AddArg(dst)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, fe.TypeUInt64())
v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
v2.AddArg(src)
v2.AddArg(mem)
v1.AddArg(v2)

@@ -20216,7 +20216,7 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
v.AddArg(v1)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem)
v2.AddArg(dst)
v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, fe.TypeUInt64())
v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, types.UInt64)
v3.AddArg(src)
v3.AddArg(mem)
v2.AddArg(v3)

@@ -20287,7 +20287,7 @@ func rewriteValueAMD64_OpMove(v *Value) bool {
v.reset(OpAMD64REPMOVSQ)
v.AddArg(dst)
v.AddArg(src)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
v0.AuxInt = s / 8
v.AddArg(v0)
v.AddArg(mem)

@@ -20411,16 +20411,16 @@ func rewriteValueAMD64_OpNeg32(v *Value) bool {
func rewriteValueAMD64_OpNeg32F(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Neg32F x)
// cond:
// result: (PXOR x (MOVSSconst <fe.TypeFloat32()> [f2i(math.Copysign(0, -1))]))
// result: (PXOR x (MOVSSconst <types.Float32> [f2i(math.Copysign(0, -1))]))
for {
x := v.Args[0]
v.reset(OpAMD64PXOR)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, fe.TypeFloat32())
v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, types.Float32)
v0.AuxInt = f2i(math.Copysign(0, -1))
v.AddArg(v0)
return true

@@ -20440,16 +20440,16 @@ func rewriteValueAMD64_OpNeg64(v *Value) bool {
func rewriteValueAMD64_OpNeg64F(v *Value) bool {
b := v.Block
_ = b
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Neg64F x)
// cond:
// result: (PXOR x (MOVSDconst <fe.TypeFloat64()> [f2i(math.Copysign(0, -1))]))
// result: (PXOR x (MOVSDconst <types.Float64> [f2i(math.Copysign(0, -1))]))
for {
x := v.Args[0]
v.reset(OpAMD64PXOR)
v.AddArg(x)
v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, fe.TypeFloat64())
v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, types.Float64)
v0.AuxInt = f2i(math.Copysign(0, -1))
v.AddArg(v0)
return true

@@ -20654,8 +20654,8 @@ func rewriteValueAMD64_OpOffPtr(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (OffPtr [off] ptr)
// cond: config.PtrSize == 8 && is32Bit(off)
// result: (ADDQconst [off] ptr)

@@ -20680,7 +20680,7 @@ func rewriteValueAMD64_OpOffPtr(v *Value) bool {
break
}
v.reset(OpAMD64ADDQ)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
v0.AuxInt = off
v.AddArg(v0)
v.AddArg(ptr)

@@ -22133,8 +22133,8 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
_ = b
config := b.Func.Config
_ = config
fe := b.Func.fe
_ = fe
types := &b.Func.Config.Types
_ = types
// match: (Zero [0] _ mem)
// cond:
// result: mem

@@ -22394,7 +22394,7 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, TypeMem)
v1.AddArg(destptr)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
v2.AuxInt = 0
v1.AddArg(v2)
v1.AddArg(mem)

@@ -22432,10 +22432,10 @@ func rewriteValueAMD64_OpZero(v *Value) bool {
}
v.reset(OpAMD64REPSTOSQ)
v.AddArg(destptr)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
v0.AuxInt = s / 8
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, fe.TypeUInt64())
v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, types.UInt64)
v1.AuxInt = 0
v.AddArg(v1)
v.AddArg(mem)

@@ -22514,6 +22514,8 @@ func rewriteBlockAMD64(b *Block) bool {
_ = config
fe := b.Func.fe
_ = fe
types := &config.Types
_ = types
switch b.Kind {
case BlockAMD64EQ:
// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
6 file diffs suppressed because they are too large
|
|
@@ -108,11 +108,11 @@ func rewriteValuedec_OpLoad(v *Value) bool {
 	_ = b
 	config := b.Func.Config
 	_ = config
-	fe := b.Func.fe
-	_ = fe
+	types := &b.Func.Config.Types
+	_ = types
 	// match: (Load <t> ptr mem)
 	// cond: t.IsComplex() && t.Size() == 8
-	// result: (ComplexMake (Load <fe.TypeFloat32()> ptr mem) (Load <fe.TypeFloat32()> (OffPtr <fe.TypeFloat32().PtrTo()> [4] ptr) mem) )
+	// result: (ComplexMake (Load <types.Float32> ptr mem) (Load <types.Float32> (OffPtr <types.Float32.PtrTo()> [4] ptr) mem) )
 	for {
 		t := v.Type
 		ptr := v.Args[0]
@@ -121,12 +121,12 @@ func rewriteValuedec_OpLoad(v *Value) bool {
 			break
 		}
 		v.reset(OpComplexMake)
-		v0 := b.NewValue0(v.Pos, OpLoad, fe.TypeFloat32())
+		v0 := b.NewValue0(v.Pos, OpLoad, types.Float32)
 		v0.AddArg(ptr)
 		v0.AddArg(mem)
 		v.AddArg(v0)
-		v1 := b.NewValue0(v.Pos, OpLoad, fe.TypeFloat32())
-		v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeFloat32().PtrTo())
+		v1 := b.NewValue0(v.Pos, OpLoad, types.Float32)
+		v2 := b.NewValue0(v.Pos, OpOffPtr, types.Float32.PtrTo())
 		v2.AuxInt = 4
 		v2.AddArg(ptr)
 		v1.AddArg(v2)
@@ -136,7 +136,7 @@ func rewriteValuedec_OpLoad(v *Value) bool {
 	}
 	// match: (Load <t> ptr mem)
 	// cond: t.IsComplex() && t.Size() == 16
-	// result: (ComplexMake (Load <fe.TypeFloat64()> ptr mem) (Load <fe.TypeFloat64()> (OffPtr <fe.TypeFloat64().PtrTo()> [8] ptr) mem) )
+	// result: (ComplexMake (Load <types.Float64> ptr mem) (Load <types.Float64> (OffPtr <types.Float64.PtrTo()> [8] ptr) mem) )
 	for {
 		t := v.Type
 		ptr := v.Args[0]
@@ -145,12 +145,12 @@ func rewriteValuedec_OpLoad(v *Value) bool {
 			break
 		}
 		v.reset(OpComplexMake)
-		v0 := b.NewValue0(v.Pos, OpLoad, fe.TypeFloat64())
+		v0 := b.NewValue0(v.Pos, OpLoad, types.Float64)
 		v0.AddArg(ptr)
 		v0.AddArg(mem)
 		v.AddArg(v0)
-		v1 := b.NewValue0(v.Pos, OpLoad, fe.TypeFloat64())
-		v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeFloat64().PtrTo())
+		v1 := b.NewValue0(v.Pos, OpLoad, types.Float64)
+		v2 := b.NewValue0(v.Pos, OpOffPtr, types.Float64.PtrTo())
 		v2.AuxInt = 8
 		v2.AddArg(ptr)
 		v1.AddArg(v2)
@@ -160,7 +160,7 @@ func rewriteValuedec_OpLoad(v *Value) bool {
 	}
 	// match: (Load <t> ptr mem)
 	// cond: t.IsString()
-	// result: (StringMake (Load <fe.TypeBytePtr()> ptr mem) (Load <fe.TypeInt()> (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] ptr) mem))
+	// result: (StringMake (Load <types.BytePtr> ptr mem) (Load <types.Int> (OffPtr <types.Int.PtrTo()> [config.PtrSize] ptr) mem))
 	for {
 		t := v.Type
 		ptr := v.Args[0]
@@ -169,12 +169,12 @@ func rewriteValuedec_OpLoad(v *Value) bool {
 			break
 		}
 		v.reset(OpStringMake)
-		v0 := b.NewValue0(v.Pos, OpLoad, fe.TypeBytePtr())
+		v0 := b.NewValue0(v.Pos, OpLoad, types.BytePtr)
 		v0.AddArg(ptr)
 		v0.AddArg(mem)
 		v.AddArg(v0)
-		v1 := b.NewValue0(v.Pos, OpLoad, fe.TypeInt())
-		v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
+		v1 := b.NewValue0(v.Pos, OpLoad, types.Int)
+		v2 := b.NewValue0(v.Pos, OpOffPtr, types.Int.PtrTo())
 		v2.AuxInt = config.PtrSize
 		v2.AddArg(ptr)
 		v1.AddArg(v2)
@@ -184,7 +184,7 @@ func rewriteValuedec_OpLoad(v *Value) bool {
 	}
 	// match: (Load <t> ptr mem)
 	// cond: t.IsSlice()
-	// result: (SliceMake (Load <t.ElemType().PtrTo()> ptr mem) (Load <fe.TypeInt()> (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] ptr) mem) (Load <fe.TypeInt()> (OffPtr <fe.TypeInt().PtrTo()> [2*config.PtrSize] ptr) mem))
+	// result: (SliceMake (Load <t.ElemType().PtrTo()> ptr mem) (Load <types.Int> (OffPtr <types.Int.PtrTo()> [config.PtrSize] ptr) mem) (Load <types.Int> (OffPtr <types.Int.PtrTo()> [2*config.PtrSize] ptr) mem))
 	for {
 		t := v.Type
 		ptr := v.Args[0]
@@ -197,15 +197,15 @@ func rewriteValuedec_OpLoad(v *Value) bool {
 		v0.AddArg(ptr)
 		v0.AddArg(mem)
 		v.AddArg(v0)
-		v1 := b.NewValue0(v.Pos, OpLoad, fe.TypeInt())
-		v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
+		v1 := b.NewValue0(v.Pos, OpLoad, types.Int)
+		v2 := b.NewValue0(v.Pos, OpOffPtr, types.Int.PtrTo())
 		v2.AuxInt = config.PtrSize
 		v2.AddArg(ptr)
 		v1.AddArg(v2)
 		v1.AddArg(mem)
 		v.AddArg(v1)
-		v3 := b.NewValue0(v.Pos, OpLoad, fe.TypeInt())
-		v4 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
+		v3 := b.NewValue0(v.Pos, OpLoad, types.Int)
+		v4 := b.NewValue0(v.Pos, OpOffPtr, types.Int.PtrTo())
 		v4.AuxInt = 2 * config.PtrSize
 		v4.AddArg(ptr)
 		v3.AddArg(v4)
@@ -215,7 +215,7 @@ func rewriteValuedec_OpLoad(v *Value) bool {
 	}
 	// match: (Load <t> ptr mem)
 	// cond: t.IsInterface()
-	// result: (IMake (Load <fe.TypeBytePtr()> ptr mem) (Load <fe.TypeBytePtr()> (OffPtr <fe.TypeBytePtr().PtrTo()> [config.PtrSize] ptr) mem))
+	// result: (IMake (Load <types.BytePtr> ptr mem) (Load <types.BytePtr> (OffPtr <types.BytePtr.PtrTo()> [config.PtrSize] ptr) mem))
 	for {
 		t := v.Type
 		ptr := v.Args[0]
@@ -224,12 +224,12 @@ func rewriteValuedec_OpLoad(v *Value) bool {
 			break
 		}
 		v.reset(OpIMake)
-		v0 := b.NewValue0(v.Pos, OpLoad, fe.TypeBytePtr())
+		v0 := b.NewValue0(v.Pos, OpLoad, types.BytePtr)
 		v0.AddArg(ptr)
 		v0.AddArg(mem)
 		v.AddArg(v0)
-		v1 := b.NewValue0(v.Pos, OpLoad, fe.TypeBytePtr())
-		v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeBytePtr().PtrTo())
+		v1 := b.NewValue0(v.Pos, OpLoad, types.BytePtr)
+		v2 := b.NewValue0(v.Pos, OpOffPtr, types.BytePtr.PtrTo())
 		v2.AuxInt = config.PtrSize
 		v2.AddArg(ptr)
 		v1.AddArg(v2)
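The Load decomposition rules above rely on the standard Go header layouts; an illustrative sketch, with field names invented here and offsets in units of config.PtrSize:

	package sketch

	// What the Load rules above split an aggregate into:
	type stringHeader struct {
		ptr *byte // types.BytePtr at offset 0
		len int   // types.Int at offset PtrSize
	}

	type sliceHeader struct {
		ptr      *byte // element pointer at offset 0
		len, cap int   // types.Int at PtrSize and 2*PtrSize
	}

	type ifaceHeader struct {
		itab uintptr // first word at offset 0
		data *byte   // types.BytePtr at offset PtrSize
	}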
@@ -295,11 +295,11 @@ func rewriteValuedec_OpStore(v *Value) bool {
 	_ = b
 	config := b.Func.Config
 	_ = config
-	fe := b.Func.fe
-	_ = fe
+	types := &b.Func.Config.Types
+	_ = types
 	// match: (Store {t} dst (ComplexMake real imag) mem)
 	// cond: t.(Type).Size() == 8
-	// result: (Store {fe.TypeFloat32()} (OffPtr <fe.TypeFloat32().PtrTo()> [4] dst) imag (Store {fe.TypeFloat32()} dst real mem))
+	// result: (Store {types.Float32} (OffPtr <types.Float32.PtrTo()> [4] dst) imag (Store {types.Float32} dst real mem))
 	for {
 		t := v.Aux
 		dst := v.Args[0]
@@ -314,14 +314,14 @@ func rewriteValuedec_OpStore(v *Value) bool {
 			break
 		}
 		v.reset(OpStore)
-		v.Aux = fe.TypeFloat32()
-		v0 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeFloat32().PtrTo())
+		v.Aux = types.Float32
+		v0 := b.NewValue0(v.Pos, OpOffPtr, types.Float32.PtrTo())
 		v0.AuxInt = 4
 		v0.AddArg(dst)
 		v.AddArg(v0)
 		v.AddArg(imag)
 		v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
-		v1.Aux = fe.TypeFloat32()
+		v1.Aux = types.Float32
 		v1.AddArg(dst)
 		v1.AddArg(real)
 		v1.AddArg(mem)
@@ -330,7 +330,7 @@ func rewriteValuedec_OpStore(v *Value) bool {
 	}
 	// match: (Store {t} dst (ComplexMake real imag) mem)
 	// cond: t.(Type).Size() == 16
-	// result: (Store {fe.TypeFloat64()} (OffPtr <fe.TypeFloat64().PtrTo()> [8] dst) imag (Store {fe.TypeFloat64()} dst real mem))
+	// result: (Store {types.Float64} (OffPtr <types.Float64.PtrTo()> [8] dst) imag (Store {types.Float64} dst real mem))
 	for {
 		t := v.Aux
 		dst := v.Args[0]
@@ -345,14 +345,14 @@ func rewriteValuedec_OpStore(v *Value) bool {
 			break
 		}
 		v.reset(OpStore)
-		v.Aux = fe.TypeFloat64()
-		v0 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeFloat64().PtrTo())
+		v.Aux = types.Float64
+		v0 := b.NewValue0(v.Pos, OpOffPtr, types.Float64.PtrTo())
 		v0.AuxInt = 8
 		v0.AddArg(dst)
 		v.AddArg(v0)
 		v.AddArg(imag)
 		v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
-		v1.Aux = fe.TypeFloat64()
+		v1.Aux = types.Float64
 		v1.AddArg(dst)
 		v1.AddArg(real)
 		v1.AddArg(mem)
@@ -361,7 +361,7 @@ func rewriteValuedec_OpStore(v *Value) bool {
 	}
 	// match: (Store dst (StringMake ptr len) mem)
 	// cond:
-	// result: (Store {fe.TypeInt()} (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] dst) len (Store {fe.TypeBytePtr()} dst ptr mem))
+	// result: (Store {types.Int} (OffPtr <types.Int.PtrTo()> [config.PtrSize] dst) len (Store {types.BytePtr} dst ptr mem))
 	for {
 		dst := v.Args[0]
 		v_1 := v.Args[1]
@@ -372,14 +372,14 @@ func rewriteValuedec_OpStore(v *Value) bool {
 		len := v_1.Args[1]
 		mem := v.Args[2]
 		v.reset(OpStore)
-		v.Aux = fe.TypeInt()
-		v0 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
+		v.Aux = types.Int
+		v0 := b.NewValue0(v.Pos, OpOffPtr, types.Int.PtrTo())
 		v0.AuxInt = config.PtrSize
 		v0.AddArg(dst)
 		v.AddArg(v0)
 		v.AddArg(len)
 		v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
-		v1.Aux = fe.TypeBytePtr()
+		v1.Aux = types.BytePtr
 		v1.AddArg(dst)
 		v1.AddArg(ptr)
 		v1.AddArg(mem)
@@ -388,7 +388,7 @@ func rewriteValuedec_OpStore(v *Value) bool {
 	}
 	// match: (Store dst (SliceMake ptr len cap) mem)
 	// cond:
-	// result: (Store {fe.TypeInt()} (OffPtr <fe.TypeInt().PtrTo()> [2*config.PtrSize] dst) cap (Store {fe.TypeInt()} (OffPtr <fe.TypeInt().PtrTo()> [config.PtrSize] dst) len (Store {fe.TypeBytePtr()} dst ptr mem)))
+	// result: (Store {types.Int} (OffPtr <types.Int.PtrTo()> [2*config.PtrSize] dst) cap (Store {types.Int} (OffPtr <types.Int.PtrTo()> [config.PtrSize] dst) len (Store {types.BytePtr} dst ptr mem)))
 	for {
 		dst := v.Args[0]
 		v_1 := v.Args[1]
@@ -400,21 +400,21 @@ func rewriteValuedec_OpStore(v *Value) bool {
 		cap := v_1.Args[2]
 		mem := v.Args[2]
 		v.reset(OpStore)
-		v.Aux = fe.TypeInt()
-		v0 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
+		v.Aux = types.Int
+		v0 := b.NewValue0(v.Pos, OpOffPtr, types.Int.PtrTo())
 		v0.AuxInt = 2 * config.PtrSize
 		v0.AddArg(dst)
 		v.AddArg(v0)
 		v.AddArg(cap)
 		v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
-		v1.Aux = fe.TypeInt()
-		v2 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeInt().PtrTo())
+		v1.Aux = types.Int
+		v2 := b.NewValue0(v.Pos, OpOffPtr, types.Int.PtrTo())
 		v2.AuxInt = config.PtrSize
 		v2.AddArg(dst)
 		v1.AddArg(v2)
 		v1.AddArg(len)
 		v3 := b.NewValue0(v.Pos, OpStore, TypeMem)
-		v3.Aux = fe.TypeBytePtr()
+		v3.Aux = types.BytePtr
 		v3.AddArg(dst)
 		v3.AddArg(ptr)
 		v3.AddArg(mem)
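The SliceMake store rule chains three stores through memory, with the innermost store (the element pointer) taking the incoming mem, so the fields land in ptr, len, cap order. Field by field it amounts to this sketch, which uses unsafe only to make the offsets explicit:

	package sketch

	import "unsafe"

	// storeSliceHeader mirrors the Store rule above: ptr at offset 0,
	// len at PtrSize, cap at 2*PtrSize, written innermost-first.
	func storeSliceHeader(dst, ptr unsafe.Pointer, length, capacity int) {
		ps := unsafe.Sizeof(uintptr(0)) // plays the role of config.PtrSize
		*(*unsafe.Pointer)(dst) = ptr
		*(*int)(unsafe.Pointer(uintptr(dst) + ps)) = length
		*(*int)(unsafe.Pointer(uintptr(dst) + 2*ps)) = capacity
	}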
@@ -424,7 +424,7 @@ func rewriteValuedec_OpStore(v *Value) bool {
 	}
 	// match: (Store dst (IMake itab data) mem)
 	// cond:
-	// result: (Store {fe.TypeBytePtr()} (OffPtr <fe.TypeBytePtr().PtrTo()> [config.PtrSize] dst) data (Store {fe.TypeUintptr()} dst itab mem))
+	// result: (Store {types.BytePtr} (OffPtr <types.BytePtr.PtrTo()> [config.PtrSize] dst) data (Store {types.Uintptr} dst itab mem))
 	for {
 		dst := v.Args[0]
 		v_1 := v.Args[1]
@@ -435,14 +435,14 @@ func rewriteValuedec_OpStore(v *Value) bool {
 		data := v_1.Args[1]
 		mem := v.Args[2]
 		v.reset(OpStore)
-		v.Aux = fe.TypeBytePtr()
-		v0 := b.NewValue0(v.Pos, OpOffPtr, fe.TypeBytePtr().PtrTo())
+		v.Aux = types.BytePtr
+		v0 := b.NewValue0(v.Pos, OpOffPtr, types.BytePtr.PtrTo())
 		v0.AuxInt = config.PtrSize
 		v0.AddArg(dst)
 		v.AddArg(v0)
 		v.AddArg(data)
 		v1 := b.NewValue0(v.Pos, OpStore, TypeMem)
-		v1.Aux = fe.TypeUintptr()
+		v1.Aux = types.Uintptr
 		v1.AddArg(dst)
 		v1.AddArg(itab)
 		v1.AddArg(mem)
@@ -490,6 +490,8 @@ func rewriteBlockdec(b *Block) bool {
 	_ = config
 	fe := b.Func.fe
 	_ = fe
+	types := &config.Types
+	_ = types
 	switch b.Kind {
 	}
 	return false
@@ -17,8 +17,8 @@ func shortcircuit(f *Func) {
 	// x = phi(a, ...)
 	//
 	// We can replace the "a" in the phi with the constant true.
-	ct := f.ConstBool(f.Entry.Pos, f.fe.TypeBool(), true)
-	cf := f.ConstBool(f.Entry.Pos, f.fe.TypeBool(), false)
+	ct := f.ConstBool(f.Entry.Pos, f.Config.Types.Bool, true)
+	cf := f.ConstBool(f.Entry.Pos, f.Config.Types.Bool, false)
 	for _, b := range f.Blocks {
 		for _, v := range b.Values {
 			if v.Op != OpPhi {
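The two constants built at the top of shortcircuit are the whole pass: on an edge where the branch condition is already decided, a phi argument can be replaced by ConstBool true or false. The source-level shape it exploits, illustratively:

	// or shows the control flow shortcircuit exploits: on the edge where
	// a is true, the merged value is exactly the constant true (ct above),
	// so nothing needs to be re-evaluated at the merge point.
	func or(a, b bool) bool {
		if a {
			return true
		}
		return b
	}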
@@ -88,17 +88,17 @@ func writebarrier(f *Func) {
 			}
 		}
 		if sb == nil {
-			sb = f.Entry.NewValue0(initpos, OpSB, f.fe.TypeUintptr())
+			sb = f.Entry.NewValue0(initpos, OpSB, f.Config.Types.Uintptr)
 		}
 		if sp == nil {
-			sp = f.Entry.NewValue0(initpos, OpSP, f.fe.TypeUintptr())
+			sp = f.Entry.NewValue0(initpos, OpSP, f.Config.Types.Uintptr)
 		}
-		wbsym := &ExternSymbol{Typ: f.fe.TypeBool(), Sym: f.fe.Syslook("writeBarrier")}
-		wbaddr = f.Entry.NewValue1A(initpos, OpAddr, f.fe.TypeUInt32().PtrTo(), wbsym, sb)
+		wbsym := &ExternSymbol{Typ: f.Config.Types.Bool, Sym: f.fe.Syslook("writeBarrier")}
+		wbaddr = f.Entry.NewValue1A(initpos, OpAddr, f.Config.Types.UInt32.PtrTo(), wbsym, sb)
 		writebarrierptr = f.fe.Syslook("writebarrierptr")
 		typedmemmove = f.fe.Syslook("typedmemmove")
 		typedmemclr = f.fe.Syslook("typedmemclr")
-		const0 = f.ConstInt32(initpos, f.fe.TypeUInt32(), 0)
+		const0 = f.ConstInt32(initpos, f.Config.Types.UInt32, 0)
 
 		// allocate auxiliary data structures for computing store order
 		sset = f.newSparseSet(f.NumValues())
@@ -155,8 +155,9 @@ func writebarrier(f *Func) {
 
 		// set up control flow for write barrier test
 		// load word, test word, avoiding partial register write from load byte.
-		flag := b.NewValue2(pos, OpLoad, f.fe.TypeUInt32(), wbaddr, mem)
-		flag = b.NewValue2(pos, OpNeq32, f.fe.TypeBool(), flag, const0)
+		types := &f.Config.Types
+		flag := b.NewValue2(pos, OpLoad, types.UInt32, wbaddr, mem)
+		flag = b.NewValue2(pos, OpNeq32, types.Bool, flag, const0)
 		b.Kind = BlockIf
 		b.SetControl(flag)
 		b.Likely = BranchUnlikely
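This hunk builds the guard around every write-barrier store: load the flag as a full 32-bit word, compare against const0 with Neq32, and branch. A mock of the resulting control flow, with a simplified flag and helper standing in for the real runtime symbols:

	package sketch

	import (
		"sync/atomic"
		"unsafe"
	)

	// writeBarrier stands in for the runtime's writeBarrier flag; it is
	// read as a uint32 to avoid a partial-register write (the OpLoad
	// with types.UInt32 above).
	var writeBarrier uint32

	// barrieredStore stands in for the runtime's write-barrier helper.
	func barrieredStore(dst *unsafe.Pointer, val unsafe.Pointer) { *dst = val }

	// guardedStore is the shape the pass emits around each OpStoreWB:
	// flag != 0 (Neq32 against const0) selects the slow path.
	func guardedStore(dst *unsafe.Pointer, val unsafe.Pointer) {
		if atomic.LoadUint32(&writeBarrier) != 0 {
			barrieredStore(dst, val)
		} else {
			*dst = val
		}
	}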
@@ -175,7 +176,7 @@ func writebarrier(f *Func) {
 		ptr := w.Args[0]
 		var typ interface{}
 		if w.Op != OpStoreWB {
-			typ = &ExternSymbol{Typ: f.fe.TypeUintptr(), Sym: w.Aux.(Type).Symbol()}
+			typ = &ExternSymbol{Typ: types.Uintptr, Sym: w.Aux.(Type).Symbol()}
 		}
 		pos = w.Pos
 
@@ -280,7 +281,7 @@ func wbcall(pos src.XPos, b *Block, fn *obj.LSym, typ interface{}, ptr, val, mem
 	off := config.ctxt.FixedFrameSize()
 
 	if typ != nil { // for typedmemmove
-		taddr := b.NewValue1A(pos, OpAddr, b.Func.fe.TypeUintptr(), typ, sb)
+		taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
 		off = round(off, taddr.Type.Alignment())
 		arg := b.NewValue1I(pos, OpOffPtr, taddr.Type.PtrTo(), off, sp)
 		mem = b.NewValue3A(pos, OpStore, TypeMem, ptr.Type, arg, taddr, mem)
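wbcall lays call arguments out at SP offsets rounded up to each argument type's alignment. A sketch of the rounding helper's likely shape, assuming r is a power of two as alignments are:

	// round rounds o up to the next multiple of r, r a power of 2.
	func round(o, r int64) int64 {
		return (o + r - 1) &^ (r - 1)
	}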