cmd/compile: don't allow NaNs in floating-point constant ops

We store 32-bit floating point constants in a 64-bit field, by
converting the 32-bit float to a 64-bit float to store it and
converting it back when we use it.

That works for *almost* all floating-point constants. The exception is
signaling NaNs: the round trip described above can't represent a 32-bit
signaling NaN, because the conversions quiet the NaN (they set the
quiet bit), so the signaling property is lost.
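
As an aside (this snippet is not part of the CL), the quieting is easy
to observe directly: 0x7f800001 is a float32 signaling NaN (quiet bit,
bit 22, clear), and the 32->64->32 round trip sets that bit:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		s := math.Float32frombits(0x7f800001) // float32 signaling NaN
		r := float32(float64(s))              // the 32->64->32 round trip
		// The conversion sets the quiet bit, so this prints true.
		fmt.Println(math.Float32bits(r)>>22&1 == 1)
	}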

To fix this issue, just forbid NaNs as floating-point constants in SSA
form. This shouldn't affect any real-world code, as people seldom
constant-propagate NaNs (except in test code).

Additionally, NaNs are somewhat underspecified (which of the many NaNs
do you get when dividing 0/0?), so when cross-compiling there's a
danger of using the compiler machine's NaN regime for some math, and
the target machine's NaN regime for other math. Better to use the
target machine's NaN regime always.

This has been a bug since 1.10, and there's an easy workaround
(declare a global variable containing the signaling NaN bit pattern,
and pass that variable to math.Float32frombits), so we'll fix it in
1.15.
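
A minimal sketch of that workaround (the names are illustrative, not
part of this CL):

	package main

	import (
		"fmt"
		"math"
	)

	// Keeping the bit pattern in a variable (not a constant) prevents the
	// compiler from constant-folding the Float32frombits call, so the
	// signaling NaN pattern survives.
	var snan32bits uint32 = 0x7f800001

	func main() {
		f := math.Float32frombits(snan32bits)
		fmt.Printf("%#x\n", math.Float32bits(f)) // prints 0x7f800001
	}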

Fixes #36400
Update #36399

Change-Id: Icf155e743281560eda2eed953d19a829552ccfda
Reviewed-on: https://go-review.googlesource.com/c/go/+/213477
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
Author: Keith Randall
Date:   2020-01-06 11:23:08 -08:00
parent 0fb1a49c1a
commit 2aa7c6c548
11 changed files with 200 additions and 26 deletions


@@ -483,6 +483,66 @@ func TestFloat32StoreToLoadConstantFold(t *testing.T) {
 	}
 }
 
+// Signaling NaN values as constants.
+const (
+	snan32bits uint32 = 0x7f800001
+	snan64bits uint64 = 0x7ff0000000000001
+)
+
+// Signaling NaNs as variables.
+var snan32bitsVar uint32 = snan32bits
+var snan64bitsVar uint64 = snan64bits
+
+func TestFloatSignalingNaN(t *testing.T) {
+	// Make sure we generate a signaling NaN from a constant properly.
+	// See issue 36400.
+	f32 := math.Float32frombits(snan32bits)
+	g32 := math.Float32frombits(snan32bitsVar)
+	x32 := math.Float32bits(f32)
+	y32 := math.Float32bits(g32)
+	if x32 != y32 {
+		t.Errorf("got %x, want %x (diff=%x)", x32, y32, x32^y32)
+	}
+
+	f64 := math.Float64frombits(snan64bits)
+	g64 := math.Float64frombits(snan64bitsVar)
+	x64 := math.Float64bits(f64)
+	y64 := math.Float64bits(g64)
+	if x64 != y64 {
+		t.Errorf("got %x, want %x (diff=%x)", x64, y64, x64^y64)
+	}
+}
+
+func TestFloatSignalingNaNConversion(t *testing.T) {
+	// Test to make sure that when we convert a signaling NaN, it converts to a quiet NaN.
+	// See issue 36399.
+	s32 := math.Float32frombits(snan32bitsVar)
+	q64 := float64(s32)
+	if math.Float64bits(q64)>>51&1 == 0 { // the quiet bit of a float64 is bit 51
+		t.Errorf("got signaling NaN, want quiet NaN")
+	}
+	s64 := math.Float64frombits(snan64bitsVar)
+	q32 := float32(s64)
+	if math.Float32bits(q32)>>22&1 == 0 { // the quiet bit of a float32 is bit 22
+		t.Errorf("got signaling NaN, want quiet NaN")
+	}
+}
+
+func TestFloatSignalingNaNConversionConst(t *testing.T) {
+	// Test to make sure that when we convert a signaling NaN, it converts to a quiet NaN.
+	// See issues 36399 and 36400.
+	s32 := math.Float32frombits(snan32bits)
+	q64 := float64(s32)
+	if math.Float64bits(q64)>>51&1 == 0 {
+		t.Errorf("got signaling NaN, want quiet NaN")
+	}
+	s64 := math.Float64frombits(snan64bits)
+	q32 := float32(s64)
+	if math.Float32bits(q32)>>22&1 == 0 {
+		t.Errorf("got signaling NaN, want quiet NaN")
+	}
+}
+
 var sinkFloat float64
 
 func BenchmarkMul2(b *testing.B) {


@@ -141,15 +141,23 @@ func checkFunc(f *Func) {
 					f.Fatalf("bad int32 AuxInt value for %v", v)
 				}
 				canHaveAuxInt = true
-			case auxInt64, auxFloat64:
+			case auxInt64:
 				canHaveAuxInt = true
 			case auxInt128:
 				// AuxInt must be zero, so leave canHaveAuxInt set to false.
 			case auxFloat32:
 				canHaveAuxInt = true
+				if math.IsNaN(v.AuxFloat()) {
+					f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
+				}
 				if !isExactFloat32(v.AuxFloat()) {
 					f.Fatalf("value %v has an AuxInt value that is not an exact float32", v)
 				}
+			case auxFloat64:
+				canHaveAuxInt = true
+				if math.IsNaN(v.AuxFloat()) {
+					f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
+				}
 			case auxString, auxSym, auxTyp, auxArchSpecific:
 				canHaveAux = true
 			case auxSymOff, auxSymValAndOff, auxTypSize:


@@ -78,7 +78,7 @@
 // Constant folding
 (FABS (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Abs(auxTo64F(x)))])
-(FSQRT (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
+(FSQRT (FMOVDconst [x])) && auxTo64F(x) >= 0 -> (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
 (FFLOOR (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Floor(auxTo64F(x)))])
 (FCEIL (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Ceil(auxTo64F(x)))])
 (FTRUNC (FMOVDconst [x])) -> (FMOVDconst [auxFrom64F(math.Trunc(auxTo64F(x)))])


@@ -372,7 +372,7 @@
 (I64Or (I64Const [x]) (I64Const [y])) -> (I64Const [x | y])
 (I64Xor (I64Const [x]) (I64Const [y])) -> (I64Const [x ^ y])
 (F64Add (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) + auxTo64F(y))])
-(F64Mul (F64Const [x]) (F64Const [y])) -> (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
+(F64Mul (F64Const [x]) (F64Const [y])) && !math.IsNaN(auxTo64F(x) * auxTo64F(y)) -> (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
 (I64Eq (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [1])
 (I64Eq (I64Const [x]) (I64Const [y])) && x != y -> (I64Const [0])
 (I64Ne (I64Const [x]) (I64Const [y])) && x == y -> (I64Const [0])
@@ -382,15 +382,16 @@
 (I64ShrU (I64Const [x]) (I64Const [y])) -> (I64Const [int64(uint64(x) >> uint64(y))])
 (I64ShrS (I64Const [x]) (I64Const [y])) -> (I64Const [x >> uint64(y)])
 
-(I64Add (I64Const [x]) y) -> (I64Add y (I64Const [x]))
-(I64Mul (I64Const [x]) y) -> (I64Mul y (I64Const [x]))
-(I64And (I64Const [x]) y) -> (I64And y (I64Const [x]))
-(I64Or (I64Const [x]) y) -> (I64Or y (I64Const [x]))
-(I64Xor (I64Const [x]) y) -> (I64Xor y (I64Const [x]))
-(F64Add (F64Const [x]) y) -> (F64Add y (F64Const [x]))
-(F64Mul (F64Const [x]) y) -> (F64Mul y (F64Const [x]))
-(I64Eq (I64Const [x]) y) -> (I64Eq y (I64Const [x]))
-(I64Ne (I64Const [x]) y) -> (I64Ne y (I64Const [x]))
+// TODO: declare these operations as commutative and get rid of these rules?
+(I64Add (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Add y (I64Const [x]))
+(I64Mul (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Mul y (I64Const [x]))
+(I64And (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64And y (I64Const [x]))
+(I64Or (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Or y (I64Const [x]))
+(I64Xor (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Xor y (I64Const [x]))
+(F64Add (F64Const [x]) y) && y.Op != OpWasmF64Const -> (F64Add y (F64Const [x]))
+(F64Mul (F64Const [x]) y) && y.Op != OpWasmF64Const -> (F64Mul y (F64Const [x]))
+(I64Eq (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Eq y (I64Const [x]))
+(I64Ne (I64Const [x]) y) && y.Op != OpWasmI64Const -> (I64Ne y (I64Const [x]))
 
 (I64Eq x (I64Const [0])) -> (I64Eqz x)
 (I64Ne x (I64Const [0])) -> (I64Eqz (I64Eqz x))


@@ -118,8 +118,8 @@
 (Mul16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c*d))])
 (Mul32 (Const32 [c]) (Const32 [d])) -> (Const32 [int64(int32(c*d))])
 (Mul64 (Const64 [c]) (Const64 [d])) -> (Const64 [c*d])
-(Mul32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
-(Mul64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
+(Mul32F (Const32F [c]) (Const32F [d])) && !math.IsNaN(float64(auxTo32F(c) * auxTo32F(d))) -> (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
+(Mul64F (Const64F [c]) (Const64F [d])) && !math.IsNaN(auxTo64F(c) * auxTo64F(d)) -> (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
 
 (And8 (Const8 [c]) (Const8 [d])) -> (Const8 [int64(int8(c&d))])
 (And16 (Const16 [c]) (Const16 [d])) -> (Const16 [int64(int16(c&d))])
@@ -144,8 +144,8 @@
 (Div16u (Const16 [c]) (Const16 [d])) && d != 0 -> (Const16 [int64(int16(uint16(c)/uint16(d)))])
 (Div32u (Const32 [c]) (Const32 [d])) && d != 0 -> (Const32 [int64(int32(uint32(c)/uint32(d)))])
 (Div64u (Const64 [c]) (Const64 [d])) && d != 0 -> (Const64 [int64(uint64(c)/uint64(d))])
-(Div32F (Const32F [c]) (Const32F [d])) -> (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
-(Div64F (Const64F [c]) (Const64F [d])) -> (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
+(Div32F (Const32F [c]) (Const32F [d])) && !math.IsNaN(float64(auxTo32F(c) / auxTo32F(d))) -> (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
+(Div64F (Const64F [c]) (Const64F [d])) && !math.IsNaN(auxTo64F(c) / auxTo64F(d)) -> (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
 
 (Select0 (Div128u (Const64 [0]) lo y)) -> (Div64u lo y)
 (Select1 (Div128u (Const64 [0]) lo y)) -> (Mod64u lo y)
@@ -588,8 +588,8 @@
   -> x
 
 // Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits
-(Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) -> (Const64F [x])
-(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) -> (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
+(Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) -> (Const64F [x])
+(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) -> (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
 (Load <t1> p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) -> (Const64 [x])
 (Load <t1> p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) -> (Const32 [int64(int32(math.Float32bits(auxTo32F(x))))])
 
@@ -1858,7 +1858,7 @@
 (Div32F x (Const32F <t> [c])) && reciprocalExact32(auxTo32F(c)) -> (Mul32F x (Const32F <t> [auxFrom32F(1/auxTo32F(c))]))
 (Div64F x (Const64F <t> [c])) && reciprocalExact64(auxTo64F(c)) -> (Mul64F x (Const64F <t> [auxFrom64F(1/auxTo64F(c))]))
 
-(Sqrt (Const64F [c])) -> (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
+(Sqrt (Const64F [c])) && !math.IsNaN(math.Sqrt(auxTo64F(c))) -> (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
 
 // recognize runtime.newobject and don't Zero/Nilcheck it
 (Zero (Load (OffPtr [c] (SP)) mem) mem)


@@ -340,6 +340,11 @@ var genericOps = []opData{
 	// Note: ConstX are sign-extended even when the type of the value is unsigned.
 	// For instance, uint8(0xaa) is stored as auxint=0xffffffffffffffaa.
 	{name: "Const64", aux: "Int64"}, // value is auxint
+	// Note: for both Const32F and Const64F, we disallow encoding NaNs.
+	// Signaling NaNs are tricky because if you do anything with them, they become quiet.
+	// Particularly, converting a 32 bit sNaN to 64 bit and back converts it to a qNaN.
+	// See issue 36399 and 36400.
+	// Encodings of +inf, -inf, and -0 are fine.
 	{name: "Const32F", aux: "Float32"}, // value is math.Float64frombits(uint64(auxint)) and is exactly representable as float 32
 	{name: "Const64F", aux: "Float64"}, // value is math.Float64frombits(uint64(auxint))
 	{name: "ConstInterface"}, // nil interface


@@ -487,11 +487,17 @@ func DivisionNeedsFixUp(v *Value) bool {
 
 // auxFrom64F encodes a float64 value so it can be stored in an AuxInt.
 func auxFrom64F(f float64) int64 {
+	if f != f {
+		panic("can't encode a NaN in AuxInt field")
+	}
 	return int64(math.Float64bits(f))
 }
 
 // auxFrom32F encodes a float32 value so it can be stored in an AuxInt.
 func auxFrom32F(f float32) int64 {
+	if f != f {
+		panic("can't encode a NaN in AuxInt field")
+	}
 	return int64(math.Float64bits(extend32Fto64F(f)))
 }
 


@@ -5828,12 +5828,16 @@ func rewriteValuePPC64_OpPPC64FNEG(v *Value) bool {
 func rewriteValuePPC64_OpPPC64FSQRT(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (FSQRT (FMOVDconst [x]))
+	// cond: auxTo64F(x) >= 0
 	// result: (FMOVDconst [auxFrom64F(math.Sqrt(auxTo64F(x)))])
 	for {
 		if v_0.Op != OpPPC64FMOVDconst {
 			break
 		}
 		x := v_0.AuxInt
+		if !(auxTo64F(x) >= 0) {
+			break
+		}
 		v.reset(OpPPC64FMOVDconst)
 		v.AuxInt = auxFrom64F(math.Sqrt(auxTo64F(x)))
 		return true


@@ -3,6 +3,7 @@
 
 package ssa
 
+import "math"
 import "cmd/internal/objabi"
 import "cmd/compile/internal/types"
 
@@ -3993,6 +3994,7 @@ func rewriteValueWasm_OpWasmF64Add(v *Value) bool {
 		return true
 	}
 	// match: (F64Add (F64Const [x]) y)
+	// cond: y.Op != OpWasmF64Const
 	// result: (F64Add y (F64Const [x]))
 	for {
 		if v_0.Op != OpWasmF64Const {
@@ -4000,6 +4002,9 @@ func rewriteValueWasm_OpWasmF64Add(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmF64Const) {
+			break
+		}
 		v.reset(OpWasmF64Add)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64)
@@ -4015,6 +4020,7 @@ func rewriteValueWasm_OpWasmF64Mul(v *Value) bool {
 	b := v.Block
 	typ := &b.Func.Config.Types
 	// match: (F64Mul (F64Const [x]) (F64Const [y]))
+	// cond: !math.IsNaN(auxTo64F(x) * auxTo64F(y))
 	// result: (F64Const [auxFrom64F(auxTo64F(x) * auxTo64F(y))])
 	for {
 		if v_0.Op != OpWasmF64Const {
@@ -4025,11 +4031,15 @@ func rewriteValueWasm_OpWasmF64Mul(v *Value) bool {
 			break
 		}
 		y := v_1.AuxInt
+		if !(!math.IsNaN(auxTo64F(x) * auxTo64F(y))) {
+			break
+		}
 		v.reset(OpWasmF64Const)
 		v.AuxInt = auxFrom64F(auxTo64F(x) * auxTo64F(y))
 		return true
 	}
 	// match: (F64Mul (F64Const [x]) y)
+	// cond: y.Op != OpWasmF64Const
 	// result: (F64Mul y (F64Const [x]))
 	for {
 		if v_0.Op != OpWasmF64Const {
@@ -4037,6 +4047,9 @@ func rewriteValueWasm_OpWasmF64Mul(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmF64Const) {
+			break
+		}
 		v.reset(OpWasmF64Mul)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64)
@@ -4067,6 +4080,7 @@ func rewriteValueWasm_OpWasmI64Add(v *Value) bool {
 		return true
 	}
 	// match: (I64Add (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Add y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4074,6 +4088,9 @@ func rewriteValueWasm_OpWasmI64Add(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Add)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
@@ -4153,6 +4170,7 @@ func rewriteValueWasm_OpWasmI64And(v *Value) bool {
 		return true
 	}
 	// match: (I64And (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64And y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4160,6 +4178,9 @@ func rewriteValueWasm_OpWasmI64And(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64And)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
@@ -4213,6 +4234,7 @@ func rewriteValueWasm_OpWasmI64Eq(v *Value) bool {
 		return true
 	}
 	// match: (I64Eq (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Eq y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4220,6 +4242,9 @@ func rewriteValueWasm_OpWasmI64Eq(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Eq)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
@@ -4533,6 +4558,7 @@ func rewriteValueWasm_OpWasmI64Mul(v *Value) bool {
 		return true
 	}
 	// match: (I64Mul (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Mul y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4540,6 +4566,9 @@ func rewriteValueWasm_OpWasmI64Mul(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Mul)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
@@ -4593,6 +4622,7 @@ func rewriteValueWasm_OpWasmI64Ne(v *Value) bool {
 		return true
 	}
 	// match: (I64Ne (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Ne y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4600,6 +4630,9 @@ func rewriteValueWasm_OpWasmI64Ne(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Ne)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
@@ -4643,6 +4676,7 @@ func rewriteValueWasm_OpWasmI64Or(v *Value) bool {
 		return true
 	}
 	// match: (I64Or (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Or y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4650,6 +4684,9 @@ func rewriteValueWasm_OpWasmI64Or(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Or)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
@@ -4852,6 +4889,7 @@ func rewriteValueWasm_OpWasmI64Xor(v *Value) bool {
 		return true
 	}
 	// match: (I64Xor (I64Const [x]) y)
+	// cond: y.Op != OpWasmI64Const
 	// result: (I64Xor y (I64Const [x]))
 	for {
 		if v_0.Op != OpWasmI64Const {
@@ -4859,6 +4897,9 @@ func rewriteValueWasm_OpWasmI64Xor(v *Value) bool {
 		}
 		x := v_0.AuxInt
 		y := v_1
+		if !(y.Op != OpWasmI64Const) {
+			break
+		}
 		v.reset(OpWasmI64Xor)
 		v.AddArg(y)
 		v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)


@@ -3579,6 +3579,7 @@ func rewriteValuegeneric_OpDiv32F(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Div32F (Const32F [c]) (Const32F [d]))
+	// cond: !math.IsNaN(float64(auxTo32F(c) / auxTo32F(d)))
 	// result: (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
 	for {
 		if v_0.Op != OpConst32F {
@@ -3589,6 +3590,9 @@ func rewriteValuegeneric_OpDiv32F(v *Value) bool {
 			break
 		}
 		d := v_1.AuxInt
+		if !(!math.IsNaN(float64(auxTo32F(c) / auxTo32F(d)))) {
+			break
+		}
 		v.reset(OpConst32F)
 		v.AuxInt = auxFrom32F(auxTo32F(c) / auxTo32F(d))
 		return true
@@ -4052,6 +4056,7 @@ func rewriteValuegeneric_OpDiv64F(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Div64F (Const64F [c]) (Const64F [d]))
+	// cond: !math.IsNaN(auxTo64F(c) / auxTo64F(d))
 	// result: (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
 	for {
 		if v_0.Op != OpConst64F {
@@ -4062,6 +4067,9 @@ func rewriteValuegeneric_OpDiv64F(v *Value) bool {
 			break
 		}
 		d := v_1.AuxInt
+		if !(!math.IsNaN(auxTo64F(c) / auxTo64F(d))) {
+			break
+		}
 		v.reset(OpConst64F)
 		v.AuxInt = auxFrom64F(auxTo64F(c) / auxTo64F(d))
 		return true
@@ -9564,7 +9572,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 		return true
 	}
 	// match: (Load <t1> p1 (Store {t2} p2 (Const64 [x]) _))
-	// cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1)
+	// cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))
 	// result: (Const64F [x])
 	for {
 		t1 := v.Type
@@ -9580,7 +9588,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 			break
 		}
 		x := v_1_1.AuxInt
-		if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1)) {
+		if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))) {
 			break
 		}
 		v.reset(OpConst64F)
@@ -9588,7 +9596,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 		return true
 	}
 	// match: (Load <t1> p1 (Store {t2} p2 (Const32 [x]) _))
-	// cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1)
+	// cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))
 	// result: (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
 	for {
 		t1 := v.Type
@@ -9604,7 +9612,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
 			break
 		}
 		x := v_1_1.AuxInt
-		if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1)) {
+		if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))) {
 			break
 		}
 		v.reset(OpConst32F)
@@ -13529,6 +13537,7 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	// match: (Mul32F (Const32F [c]) (Const32F [d]))
+	// cond: !math.IsNaN(float64(auxTo32F(c) * auxTo32F(d)))
 	// result: (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -13540,6 +13549,9 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool {
 				continue
 			}
 			d := v_1.AuxInt
+			if !(!math.IsNaN(float64(auxTo32F(c) * auxTo32F(d)))) {
+				continue
+			}
 			v.reset(OpConst32F)
 			v.AuxInt = auxFrom32F(auxTo32F(c) * auxTo32F(d))
 			return true
@@ -13779,6 +13791,7 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	// match: (Mul64F (Const64F [c]) (Const64F [d]))
+	// cond: !math.IsNaN(auxTo64F(c) * auxTo64F(d))
 	// result: (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -13790,6 +13803,9 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool {
 				continue
 			}
 			d := v_1.AuxInt
+			if !(!math.IsNaN(auxTo64F(c) * auxTo64F(d))) {
+				continue
+			}
 			v.reset(OpConst64F)
 			v.AuxInt = auxFrom64F(auxTo64F(c) * auxTo64F(d))
 			return true
@@ -19663,12 +19679,16 @@ func rewriteValuegeneric_OpSlicemask(v *Value) bool {
 func rewriteValuegeneric_OpSqrt(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (Sqrt (Const64F [c]))
+	// cond: !math.IsNaN(math.Sqrt(auxTo64F(c)))
 	// result: (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
 	for {
 		if v_0.Op != OpConst64F {
 			break
 		}
 		c := v_0.AuxInt
+		if !(!math.IsNaN(math.Sqrt(auxTo64F(c)))) {
+			break
+		}
 		v.reset(OpConst64F)
 		v.AuxInt = auxFrom64F(math.Sqrt(auxTo64F(c)))
 		return true


@@ -151,13 +151,13 @@ func toFloat32(u32 uint32) float32 {
 func constantCheck64() bool {
 	// amd64:"MOVB\t[$]0",-"FCMP",-"MOVB\t[$]1"
 	// s390x:"MOV(B|BZ|D)\t[$]0,",-"FCMPU",-"MOV(B|BZ|D)\t[$]1,"
-	return 0.5 == float64(uint32(1)) || 1.5 > float64(uint64(1<<63)) || math.NaN() == math.NaN()
+	return 0.5 == float64(uint32(1)) || 1.5 > float64(uint64(1<<63))
 }
 
 func constantCheck32() bool {
 	// amd64:"MOVB\t[$]1",-"FCMP",-"MOVB\t[$]0"
 	// s390x:"MOV(B|BZ|D)\t[$]1,",-"FCMPU",-"MOV(B|BZ|D)\t[$]0,"
-	return float32(0.5) <= float32(int64(1)) && float32(1.5) >= float32(int32(-1<<31)) && float32(math.NaN()) != float32(math.NaN())
+	return float32(0.5) <= float32(int64(1)) && float32(1.5) >= float32(int32(-1<<31))
 }
 
 // Test that integer constants are converted to floating point constants
@@ -186,3 +186,32 @@ func constantConvertInt32(x uint32) uint32 {
 	}
 	return x
 }
+
+func nanGenerate64() float64 {
+	// Test to make sure we don't generate a NaN while constant propagating.
+	// See issue 36400.
+	zero := 0.0
+	// amd64:-"DIVSD"
+	inf := 1 / zero // +inf. We can constant propagate this one.
+	negone := -1.0
+
+	// amd64:"DIVSD"
+	z0 := zero / zero
+	// amd64:"MULSD"
+	z1 := zero * inf
+	// amd64:"SQRTSD"
+	z2 := math.Sqrt(negone)
+	return z0 + z1 + z2
+}
+
+func nanGenerate32() float32 {
+	zero := float32(0.0)
+	// amd64:-"DIVSS"
+	inf := 1 / zero // +inf. We can constant propagate this one.
+
+	// amd64:"DIVSS"
+	z0 := zero / zero
+	// amd64:"MULSS"
+	z1 := zero * inf
+	return z0 + z1
+}