cmd/compile: ensure ops have the expected argument widths
The generic SSA representation uses explicit extension and truncation
operations to change widths of values. The map intrinsics were playing
somewhat fast and loose with this requirement. Fix that, and add a check
to make sure we don't regress.

I don't think there is a triggerable bug here, but I ran into this with
some prove pass modifications, where cmd/compile/internal/ssa/prove.go:isCleanExt
(and/or its uses) is actually wrong when this invariant is not maintained.

Change-Id: Idb7be6e691e2dbf6d7af6584641c3227c5c64bf5
Reviewed-on: https://go-review.googlesource.com/c/go/+/731300
Reviewed-by: Carlos Amedee <carlos@golang.org>
Reviewed-by: Cuong Manh Le <cuong.manhle.vn@gmail.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
parent c7258178cd
commit 819c53c25f

5 changed files with 56 additions and 15 deletions
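For background, generic SSA makes every width change explicit: widening a
value is only legal through a SignExt/ZeroExt op, and narrowing only through
a Trunc op, so an op like Mul64 must see arguments that are already 64 bits
wide. A minimal sketch of how that looks at the source level (the SSA in the
comment is illustrative, not actual compiler output):

package main

import "fmt"

// widen multiplies a 32-bit value in 64-bit arithmetic. In generic SSA the
// conversion is an explicit op, roughly (value names made up for illustration):
//
//	v1 = Arg <uint32> {x}
//	v2 = ZeroExt32to64 <uint64> v1   // the only legal way to widen v1
//	v3 = Const64 <uint64> [3]
//	v4 = Mul64 <uint64> v2 v3        // both arguments are 8 bytes wide
func widen(x uint32) uint64 {
	return uint64(x) * 3
}

func main() {
	fmt.Println(widen(7)) // 21
}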
@@ -223,7 +223,7 @@
 	(Rsh64Ux64 <typ.UInt64>
 		(Avg64u
 			(Lsh64x64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [32]))
-			(Mul64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt32> [int64(umagic32(c).m)])))
+			(Mul64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [int64(umagic32(c).m)])))
 		(Const64 <typ.UInt64> [32 + umagic32(c).s - 1])))
 (Div32u <t> x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 =>
 	(Rsh32Ux64 <t>
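The rule above lowers unsigned 32-bit division by a constant to a 64-bit
magic multiply; the fix gives the magic constant the 64-bit type its Mul64
consumer expects. For intuition, here is a hedged Go sketch of the arithmetic
the rewritten form computes. umagic32 below is an illustrative
reimplementation of the identity documented in
cmd/compile/internal/ssa/magic.go, not the compiler's own code:

package main

import "fmt"

// umagic32 returns m and s such that, for all 32-bit unsigned x,
// x/c == (x * (1<<32 + m)) >> (32 + s). Illustrative only; the sketch
// assumes c <= 1<<31 so the intermediate 1<<(32+s) fits in a uint64.
func umagic32(c uint32) (m uint64, s uint) {
	for 1<<s < uint64(c) { // s = ceil(log2(c))
		s++
	}
	m = (1<<(32+s)+uint64(c)-1)/uint64(c) - 1<<32 // ceil(2^(32+s)/c) - 2^32
	return
}

// avg64u mirrors the generic Avg64u op: (a+b)>>1 without losing the carry.
func avg64u(a, b uint64) uint64 {
	return a>>1 + b>>1 + a&b&1
}

// div32u evaluates the rewritten form:
// Trunc64to32(Rsh64Ux64(Avg64u(x<<32, x*m), 32+s-1)).
func div32u(x, c uint32) uint32 {
	m, s := umagic32(c)
	return uint32(avg64u(uint64(x)<<32, uint64(x)*m) >> (32 + s - 1))
}

func main() {
	for _, x := range []uint32{0, 6, 7, 100, 1<<32 - 1} {
		fmt.Println(div32u(x, 7) == x/7) // all true
	}
}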
@@ -1090,7 +1090,7 @@
 	(Neg64 (Lsh64x64 <t> x (Const64 <typ.UInt64> [log64(-c)])))
 
 // Strength reduction of mod to div.
-// Strength reduction of div to mul is delayed to genericlateopt.rules.
+// Strength reduction of div to mul is delayed to divmod.rules.
 
 // Unsigned mod by power of 2 constant.
 (Mod8u <t> n (Const8 [c])) && isUnsignedPowerOfTwo(uint8(c)) => (And8 n (Const8 <t> [c-1]))
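The (Mod8u ...) rule above relies on the standard power-of-two identity
n % c == n & (c-1). A quick Go check of that arithmetic (illustrative, not
compiler code):

package main

import "fmt"

func main() {
	// For a power-of-two modulus c, n % c == n & (c-1): the low bits of n
	// are exactly the remainder. This is what (Mod8u ...) => (And8 ...) encodes.
	const c uint8 = 16
	for _, n := range []uint8{0, 5, 16, 17, 255} {
		fmt.Println(n, n%c == n&(c-1)) // all true
	}
}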
@@ -345,6 +345,47 @@ func checkFunc(f *Func) {
 					v.Op, v.Args[1].Type.String())
 				}
 			}
+			// Check size of args.
+			// This list isn't exhaustive, just the common ops.
+			// It also can't handle ops with args of different types, like shifts.
+			var argSize int64
+			switch v.Op {
+			case OpAdd8, OpSub8, OpMul8, OpDiv8, OpDiv8u, OpMod8, OpMod8u,
+				OpAnd8, OpOr8, OpXor8,
+				OpEq8, OpNeq8, OpLess8, OpLeq8,
+				OpNeg8, OpCom8,
+				OpSignExt8to16, OpSignExt8to32, OpSignExt8to64,
+				OpZeroExt8to16, OpZeroExt8to32, OpZeroExt8to64:
+				argSize = 1
+			case OpAdd16, OpSub16, OpMul16, OpDiv16, OpDiv16u, OpMod16, OpMod16u,
+				OpAnd16, OpOr16, OpXor16,
+				OpEq16, OpNeq16, OpLess16, OpLeq16,
+				OpNeg16, OpCom16,
+				OpSignExt16to32, OpSignExt16to64,
+				OpZeroExt16to32, OpZeroExt16to64,
+				OpTrunc16to8:
+				argSize = 2
+			case OpAdd32, OpSub32, OpMul32, OpDiv32, OpDiv32u, OpMod32, OpMod32u,
+				OpAnd32, OpOr32, OpXor32,
+				OpEq32, OpNeq32, OpLess32, OpLeq32,
+				OpNeg32, OpCom32,
+				OpSignExt32to64, OpZeroExt32to64,
+				OpTrunc32to8, OpTrunc32to16:
+				argSize = 4
+			case OpAdd64, OpSub64, OpMul64, OpDiv64, OpDiv64u, OpMod64, OpMod64u,
+				OpAnd64, OpOr64, OpXor64,
+				OpEq64, OpNeq64, OpLess64, OpLeq64,
+				OpNeg64, OpCom64,
+				OpTrunc64to8, OpTrunc64to16, OpTrunc64to32:
+				argSize = 8
+			}
+			if argSize != 0 {
+				for i, arg := range v.Args {
+					if arg.Type.Size() != argSize {
+						f.Fatalf("arg %d to %s (%v) should be %d bytes in size, it is %s", i, v.Op, v, argSize, arg.Type.String())
+					}
+				}
+			}
 
 			// TODO: check for cycles in values
 		}
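The check deliberately skips ops whose arguments legitimately differ in width
(the comment calls out shifts: Lsh64x8, say, takes an 8-byte value and a
1-byte shift count). For intuition, here is the same classify-then-verify
shape in miniature over a toy IR. Every type and name below is made up for
illustration; none of it is compiler API:

package main

import "fmt"

// value is a toy stand-in for *ssa.Value.
type value struct {
	op   string
	size int64 // result size in bytes
	args []*value
}

// checkArgWidths mirrors the shape of the new checkFunc logic: map the op
// to its expected argument size, then verify every argument has that size.
func checkArgWidths(v *value) error {
	var argSize int64
	switch v.op {
	case "Add64", "Mul64", "Neg64":
		argSize = 8
	case "Add32", "Mul32", "Trunc32to16":
		argSize = 4
	}
	if argSize == 0 {
		return nil // op not covered by the check
	}
	for i, a := range v.args {
		if a.size != argSize {
			return fmt.Errorf("arg %d to %s should be %d bytes, is %d", i, v.op, argSize, a.size)
		}
	}
	return nil
}

func main() {
	x32 := &value{op: "Arg", size: 4}
	// A 32-bit value fed straight into Mul64, missing its ZeroExt32to64.
	bad := &value{op: "Mul64", size: 8, args: []*value{x32, x32}}
	fmt.Println(checkArgWidths(bad))
}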
@@ -548,7 +548,7 @@ func rewriteValuedivmod_OpDiv32u(v *Value) bool {
 	}
 	// match: (Div32u <t> x (Const32 [c]))
 	// cond: umagicOK32(c) && config.RegSize == 8
-	// result: (Trunc64to32 <t> (Rsh64Ux64 <typ.UInt64> (Avg64u (Lsh64x64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [32])) (Mul64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt32> [int64(umagic32(c).m)]))) (Const64 <typ.UInt64> [32 + umagic32(c).s - 1])))
+	// result: (Trunc64to32 <t> (Rsh64Ux64 <typ.UInt64> (Avg64u (Lsh64x64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [32])) (Mul64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [int64(umagic32(c).m)]))) (Const64 <typ.UInt64> [32 + umagic32(c).s - 1])))
 	for {
 		t := v.Type
 		x := v_0
@@ -570,7 +570,7 @@ func rewriteValuedivmod_OpDiv32u(v *Value) bool {
 		v4.AuxInt = int64ToAuxInt(32)
 		v2.AddArg2(v3, v4)
 		v5 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
-		v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt32)
+		v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
 		v6.AuxInt = int64ToAuxInt(int64(umagic32(c).m))
 		v5.AddArg2(v3, v6)
 		v1.AddArg2(v2, v5)
@@ -1028,27 +1028,27 @@ func initIntrinsics(cfg *intrinsicBuildConfig) {
 	// LeadingZeros is handled because it trivially calls Len.
 	addF("math/bits", "Reverse64",
 		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
+			return s.newValue1(ssa.OpBitRev64, types.Types[types.TUINT64], args[0])
 		},
 		sys.ARM64, sys.Loong64)
 	addF("math/bits", "Reverse32",
 		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
+			return s.newValue1(ssa.OpBitRev32, types.Types[types.TUINT32], args[0])
 		},
 		sys.ARM64, sys.Loong64)
 	addF("math/bits", "Reverse16",
 		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0])
+			return s.newValue1(ssa.OpBitRev16, types.Types[types.TUINT16], args[0])
 		},
 		sys.ARM64, sys.Loong64)
 	addF("math/bits", "Reverse8",
 		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0])
+			return s.newValue1(ssa.OpBitRev8, types.Types[types.TUINT8], args[0])
 		},
 		sys.ARM64, sys.Loong64)
 	addF("math/bits", "Reverse",
 		func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
-			return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
+			return s.newValue1(ssa.OpBitRev64, types.Types[types.TUINT], args[0])
 		},
 		sys.ARM64, sys.Loong64)
 	addF("math/bits", "RotateLeft8",
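Each math/bits Reverse variant returns the same unsigned width it takes,
which is what the SSA result types above now say. A quick demonstration of
those signatures (plain Go, for reference):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Reverse32(uint32) uint32, Reverse16(uint16) uint16, Reverse8(uint8) uint8:
	// the result width matches the argument width, as types.TUINT32 etc. now reflect.
	fmt.Printf("%#x\n", bits.Reverse32(1)) // 0x80000000
	fmt.Printf("%#x\n", bits.Reverse16(1)) // 0x8000
	fmt.Printf("%#x\n", bits.Reverse8(1))  // 0x80
}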
@@ -1441,7 +1441,7 @@ func initIntrinsics(cfg *intrinsicBuildConfig) {
 			// byte N matched).
 			//
 			// NOTE: See comment above on bitsetFirst.
-			out := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT16], eq)
+			out := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT8], eq)
 
 			// g is only 64-bits so the upper 64-bits of the
 			// 128-bit register will be zero. If h2 is also zero,
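PMOVMSKB packs the top bit of each byte lane of the 128-bit register into one
mask bit; since the group word g fills only the low 64 bits, at most the low
8 mask bits can ever be set, so an 8-bit SSA type is wide enough for this
result. A software sketch of that lane-to-bit packing (illustrative only, not
the intrinsic itself):

package main

import "fmt"

// pmovmskb8 emulates PMOVMSKB over the 8 byte lanes of a 64-bit group word:
// bit i of the result is the top bit of byte i. With the upper lanes of the
// 128-bit register known zero, the whole mask fits in 8 bits.
func pmovmskb8(g uint64) uint8 {
	var mask uint8
	for i := 0; i < 8; i++ {
		b := uint8(g >> (8 * i))
		mask |= b >> 7 << i
	}
	return mask
}

func main() {
	// Byte lanes 1 and 3 have their high bit set.
	fmt.Printf("%#x\n", pmovmskb8(0x80008000)) // 0xa
}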
@@ -1501,7 +1501,7 @@ func initIntrinsics(cfg *intrinsicBuildConfig) {
 			// means byte N matched).
 			//
 			// NOTE: See comment above on bitsetFirst.
-			ret := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT16], sign)
+			ret := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT64], sign)
 
 			// g is only 64-bits so the upper 64-bits of
 			// the 128-bit register will be zero. PSIGNB
@@ -1531,7 +1531,7 @@ func initIntrinsics(cfg *intrinsicBuildConfig) {
 			// byte N matched).
 			//
 			// NOTE: See comment above on bitsetFirst.
-			out := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT16], eq)
+			out := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT8], eq)
 
 			// g is only 64-bits so the upper 64-bits of the
 			// 128-bit register will be zero. The upper 64-bits of
@@ -1565,7 +1565,7 @@ func initIntrinsics(cfg *intrinsicBuildConfig) {
 			// byte N matched).
 			//
 			// NOTE: See comment above on bitsetFirst.
-			ret := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT16], gfp)
+			ret := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT64], gfp)
 
 			// g is only 64-bits so the upper 64-bits of the
 			// 128-bit register will be zero. Zero will never match
@@ -1597,10 +1597,10 @@ func initIntrinsics(cfg *intrinsicBuildConfig) {
 			// byte N matched).
 			//
 			// NOTE: See comment above on bitsetFirst.
-			mask := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT16], gfp)
+			mask := s.newValue1(ssa.OpAMD64PMOVMSKB, types.Types[types.TUINT8], gfp)
 
 			// Invert the mask to set the bits for the full slots.
-			out := s.newValue1(ssa.OpCom16, types.Types[types.TUINT16], mask)
+			out := s.newValue1(ssa.OpCom8, types.Types[types.TUINT8], mask)
 
 			// g is only 64-bits so the upper 64-bits of the
 			// 128-bit register will be zero, with bit 7 unset.
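With the mask held in 8 bits, its complement is also an 8-bit op (Com8), so
the argument width matches what PMOVMSKB produced. The bookkeeping in
miniature (toy values, plain Go, for illustration only):

package main

import "fmt"

func main() {
	// Suppose the 8-lane mask reported empty slots 0 and 5.
	var mask uint8 = 1<<0 | 1<<5
	// Complementing within the same 8-bit width yields the full slots,
	// mirroring Com8 applied to the 8-bit PMOVMSKB result above.
	out := ^mask
	fmt.Printf("%#x %#x\n", mask, out) // 0x21 0xde
}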