diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules
index 23ce21a8b2c..af9c24f53fd 100644
--- a/src/cmd/compile/internal/ssa/_gen/generic.rules
+++ b/src/cmd/compile/internal/ssa/_gen/generic.rules
@@ -50,10 +50,10 @@
 (Cvt32to64F (Const32 [c])) => (Const64F [float64(c)])
 (Cvt64to32F (Const64 [c])) => (Const32F [float32(c)])
 (Cvt64to64F (Const64 [c])) => (Const64F [float64(c)])
-(Cvt32Fto32 (Const32F [c])) => (Const32 [int32(c)])
-(Cvt32Fto64 (Const32F [c])) => (Const64 [int64(c)])
-(Cvt64Fto32 (Const64F [c])) => (Const32 [int32(c)])
-(Cvt64Fto64 (Const64F [c])) => (Const64 [int64(c)])
+(Cvt32Fto32 (Const32F [c])) && c >= -1<<31 && c < 1<<31 => (Const32 [int32(c)])
+(Cvt32Fto64 (Const32F [c])) && c >= -1<<63 && c < 1<<63 => (Const64 [int64(c)])
+(Cvt64Fto32 (Const64F [c])) && c >= -1<<31 && c < 1<<31 => (Const32 [int32(c)])
+(Cvt64Fto64 (Const64F [c])) && c >= -1<<63 && c < 1<<63 => (Const64 [int64(c)])
 (Round32F x:(Const32F)) => x
 (Round64F x:(Const64F)) => x
 (CvtBoolToUint8 (ConstBool [false])) => (Const8 [0])
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
index c36ecc1cc60..79c444a86b2 100644
--- a/src/cmd/compile/internal/ssa/rewritegeneric.go
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -6607,12 +6607,16 @@ func rewriteValuegeneric_OpCtz8(v *Value) bool {
 func rewriteValuegeneric_OpCvt32Fto32(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (Cvt32Fto32 (Const32F [c]))
+	// cond: c >= -1<<31 && c < 1<<31
 	// result: (Const32 [int32(c)])
 	for {
 		if v_0.Op != OpConst32F {
 			break
 		}
 		c := auxIntToFloat32(v_0.AuxInt)
+		if !(c >= -1<<31 && c < 1<<31) {
+			break
+		}
 		v.reset(OpConst32)
 		v.AuxInt = int32ToAuxInt(int32(c))
 		return true
@@ -6622,12 +6626,16 @@ func rewriteValuegeneric_OpCvt32Fto32(v *Value) bool {
 func rewriteValuegeneric_OpCvt32Fto64(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (Cvt32Fto64 (Const32F [c]))
+	// cond: c >= -1<<63 && c < 1<<63
 	// result: (Const64 [int64(c)])
 	for {
 		if v_0.Op != OpConst32F {
 			break
 		}
 		c := auxIntToFloat32(v_0.AuxInt)
+		if !(c >= -1<<63 && c < 1<<63) {
+			break
+		}
 		v.reset(OpConst64)
 		v.AuxInt = int64ToAuxInt(int64(c))
 		return true
@@ -6682,12 +6690,16 @@ func rewriteValuegeneric_OpCvt32to64F(v *Value) bool {
 func rewriteValuegeneric_OpCvt64Fto32(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (Cvt64Fto32 (Const64F [c]))
+	// cond: c >= -1<<31 && c < 1<<31
 	// result: (Const32 [int32(c)])
 	for {
 		if v_0.Op != OpConst64F {
 			break
 		}
 		c := auxIntToFloat64(v_0.AuxInt)
+		if !(c >= -1<<31 && c < 1<<31) {
+			break
+		}
 		v.reset(OpConst32)
 		v.AuxInt = int32ToAuxInt(int32(c))
 		return true
@@ -6732,12 +6744,16 @@ func rewriteValuegeneric_OpCvt64Fto32F(v *Value) bool {
 func rewriteValuegeneric_OpCvt64Fto64(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (Cvt64Fto64 (Const64F [c]))
+	// cond: c >= -1<<63 && c < 1<<63
 	// result: (Const64 [int64(c)])
 	for {
 		if v_0.Op != OpConst64F {
 			break
 		}
 		c := auxIntToFloat64(v_0.AuxInt)
+		if !(c >= -1<<63 && c < 1<<63) {
+			break
+		}
 		v.reset(OpConst64)
 		v.AuxInt = int64ToAuxInt(int64(c))
 		return true
diff --git a/test/codegen/math.go b/test/codegen/math.go
index eadf9d7d055..5787657d2bc 100644
--- a/test/codegen/math.go
+++ b/test/codegen/math.go
@@ -330,3 +330,48 @@ func nanGenerate32() float32 {
 	// amd64/v3:"VFMADD231SS"
 	return z0 + z1
 }
+
+func outOfBoundsConv(i32 *[2]int32, u32 *[2]uint32, i64 *[2]int64, u64 *[2]uint64) {
+	// arm64: "FCVTZSDW"
+	// amd64: "CVTTSD2SL", "CVTSD2SS"
+	i32[0] = int32(two40())
+	// arm64: "FCVTZSDW"
+	// amd64: "CVTTSD2SL", "CVTSD2SS"
+	i32[1] = int32(-two40())
+	// arm64: "FCVTZSDW"
+	// amd64: "CVTTSD2SL", "CVTSD2SS"
+	u32[0] = uint32(two41())
+	// on arm64, this uses an explicit <0 comparison, so it constant folds.
+	// on amd64, this uses an explicit <0 comparison, so it constant folds.
+	// amd64: "MOVL\t[$]0,"
+	u32[1] = uint32(minus1())
+	// arm64: "FCVTZSD"
+	// amd64: "CVTTSD2SQ"
+	i64[0] = int64(two80())
+	// arm64: "FCVTZSD"
+	// amd64: "CVTTSD2SQ"
+	i64[1] = int64(-two80())
+	// arm64: "FCVTZUD"
+	// amd64: "CVTTSD2SQ"
+	u64[0] = uint64(two81())
+	// arm64: "FCVTZUD"
+	// on amd64, this uses an explicit <0 comparison, so it constant folds.
+	// amd64: "MOVQ\t[$]0,"
+	u64[1] = uint64(minus1())
+}
+
+func two40() float64 {
+	return 1 << 40
+}
+func two41() float64 {
+	return 1 << 41
+}
+func two80() float64 {
+	return 1 << 80
+}
+func two81() float64 {
+	return 1 << 81
+}
+func minus1() float64 {
+	return -1
+}
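A quick illustration of why these folds now need a range guard (not part of the change; the helper id below is hypothetical and exists only to keep the value out of the constant folder): per the Go spec, a non-constant float-to-integer conversion whose value does not fit in the target type succeeds but yields an implementation-dependent result, so the run-time answer can differ between architectures. Folding such a conversion at compile time would bake one of those answers in.

	package main

	import "fmt"

	// id hides its argument from the compiler so the conversion in main
	// cannot be constant folded and is performed at run time.
	//
	//go:noinline
	func id(x float64) float64 { return x }

	func main() {
		// 1<<40 does not fit in an int32, so the result of this run-time
		// conversion is implementation-dependent: the amd64 CVTTSD2SL and
		// arm64 FCVTZSDW paths exercised by the new codegen test may
		// legitimately produce different values here.
		fmt.Println(int32(id(1 << 40)))
	}

The added guards (e.g. c >= -1<<31 && c < 1<<31) restrict the rules to constants the target integer type can represent, so in-range constants still fold exactly as before, and out-of-range ones are left to the run-time conversion the codegen test checks for.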