cmd/compile: don't allow NaNs in floating-point constant ops

We store 32-bit floating-point constants in a 64-bit field by converting
the 32-bit float to a 64-bit float for storage and converting it back
when the constant is used.

That works for *almost* all floating-point constants. The exception is
signaling NaNs: the round trip described above can't represent a 32-bit
signaling NaN, because the conversions quiet the NaN, so the signaling
bit doesn't survive.
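
For concreteness, here is a small standalone program (mine, not part of
this CL) that performs the same float32 -> float64 -> float32 round trip
on a signaling NaN; 0x7f800001 is one possible float32 signaling-NaN
pattern, chosen only for illustration:

package main

import (
	"fmt"
	"math"
)

func main() {
	bits := uint32(0x7f800001) // exponent all ones, quiet bit clear: a signaling NaN
	f32 := math.Float32frombits(bits)

	widened := float64(f32)  // what storing the constant in the 64-bit aux field does
	back := float32(widened) // what using the constant does

	fmt.Printf("before round trip: %#x\n", bits)
	fmt.Printf("after round trip:  %#x\n", math.Float32bits(back))
	// On typical hardware the widening conversion quiets the NaN, so the
	// quiet bit (0x00400000) is set afterwards and the original pattern is lost.
}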

To fix this issue, just forbid NaNs as floating-point constants in SSA
form. This shouldn't affect any real-world code, as people seldom
constant-propagate NaNs (except in test code).

Additionally, NaNs are somewhat underspecified (which of the many NaNs
do you get when dividing 0/0?), so when cross-compiling there's a
danger of using the compiler machine's NaN regime for some math, and
the target machine's NaN regime for other math. Better to use the
target machine's NaN regime always.
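
For instance, a toy program like the following (mine, not from this CL)
shows that the particular NaN produced by 0/0 is whatever the machine
executing the division chooses; if the compiler folded the division
itself, the host's choice would leak into the target binary:

package main

import (
	"fmt"
	"math"
)

func main() {
	zero := 0.0 // a variable, so the division happens at run time
	nan := zero / zero
	// The exact bit pattern (sign and payload) is chosen by the hardware
	// doing the division; different architectures may print different values.
	fmt.Printf("%#x\n", math.Float64bits(nan))
}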

This has been a bug since 1.10, and there's an easy workaround (declare a
global variable containing the signaling NaN pattern and use that as the
argument to math.Float32frombits), so we'll fix it in 1.15.
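
A sketch of that workaround (variable name and bit pattern are mine, not
from the CL):

package main

import (
	"fmt"
	"math"
)

// Keeping the bit pattern in a global variable hides it from the compiler's
// constant folding, so the signaling NaN reaches math.Float32frombits intact.
// 0x7f800001 is one possible float32 signaling-NaN pattern, used as an example.
var snanBits uint32 = 0x7f800001

func main() {
	f := math.Float32frombits(snanBits) // not constant-folded: snanBits is a variable
	fmt.Printf("%#x\n", math.Float32bits(f))
}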

Fixes #36400
Update #36399

Change-Id: Icf155e743281560eda2eed953d19a829552ccfda
Reviewed-on: https://go-review.googlesource.com/c/go/+/213477
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
Author: Keith Randall
Date:   2020-01-06 11:23:08 -08:00
Commit: 2aa7c6c548 (parent 0fb1a49c1a)
11 changed files with 200 additions and 26 deletions

@@ -3579,6 +3579,7 @@ func rewriteValuegeneric_OpDiv32F(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
// match: (Div32F (Const32F [c]) (Const32F [d]))
// cond: !math.IsNaN(float64(auxTo32F(c) / auxTo32F(d)))
// result: (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])
for {
if v_0.Op != OpConst32F {
@@ -3589,6 +3590,9 @@ func rewriteValuegeneric_OpDiv32F(v *Value) bool {
break
}
d := v_1.AuxInt
if !(!math.IsNaN(float64(auxTo32F(c) / auxTo32F(d)))) {
break
}
v.reset(OpConst32F)
v.AuxInt = auxFrom32F(auxTo32F(c) / auxTo32F(d))
return true
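
These hunks are in the generated file rewritegeneric.go; the hand-written
source of each rule lives in the compiler's gen/generic.rules file. The
Div32F change above presumably corresponds to adding a condition clause to
the existing constant-folding rule, roughly as follows (reconstructed from
the generated code, not quoted from the CL):

(Div32F (Const32F [c]) (Const32F [d])) && !math.IsNaN(float64(auxTo32F(c) / auxTo32F(d))) -> (Const32F [auxFrom32F(auxTo32F(c) / auxTo32F(d))])

The other Div, Mul, Sqrt, and Load rules below follow the same pattern:
keep the fold, but bail out whenever the folded result would be a NaN.
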
@@ -4052,6 +4056,7 @@ func rewriteValuegeneric_OpDiv64F(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
// match: (Div64F (Const64F [c]) (Const64F [d]))
// cond: !math.IsNaN(auxTo64F(c) / auxTo64F(d))
// result: (Const64F [auxFrom64F(auxTo64F(c) / auxTo64F(d))])
for {
if v_0.Op != OpConst64F {
@@ -4062,6 +4067,9 @@ func rewriteValuegeneric_OpDiv64F(v *Value) bool {
break
}
d := v_1.AuxInt
if !(!math.IsNaN(auxTo64F(c) / auxTo64F(d))) {
break
}
v.reset(OpConst64F)
v.AuxInt = auxFrom64F(auxTo64F(c) / auxTo64F(d))
return true
@@ -9564,7 +9572,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
return true
}
// match: (Load <t1> p1 (Store {t2} p2 (Const64 [x]) _))
// cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1)
// cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))
// result: (Const64F [x])
for {
t1 := v.Type
@@ -9580,7 +9588,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
break
}
x := v_1_1.AuxInt
if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1)) {
if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))) {
break
}
v.reset(OpConst64F)
@@ -9588,7 +9596,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
return true
}
// match: (Load <t1> p1 (Store {t2} p2 (Const32 [x]) _))
// cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1)
// cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))
// result: (Const32F [auxFrom32F(math.Float32frombits(uint32(x)))])
for {
t1 := v.Type
@@ -9604,7 +9612,7 @@ func rewriteValuegeneric_OpLoad(v *Value) bool {
break
}
x := v_1_1.AuxInt
if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1)) {
if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))) {
break
}
v.reset(OpConst32F)
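
The two Load-of-Store rules above are what let the compiler fold
math.Float64frombits and math.Float32frombits of constant arguments: after
inlining, those functions amount to a store of the integer bits followed by
a float load of the same slot. Something along these lines (my example, not
the test added in this CL) is the kind of code that used to lose the
signaling bit when the second rule fired:

package main

import (
	"fmt"
	"math"
)

func main() {
	// With a constant argument the call below inlines to "store 0x7f800001,
	// load it back as float32", which the Load rule used to fold into a
	// Const32F, quieting the NaN. The rule now declines to fold NaNs.
	// 0x7f800001 is one possible float32 signaling-NaN pattern (illustrative).
	f := math.Float32frombits(0x7f800001)
	fmt.Printf("%#x\n", math.Float32bits(f))
}
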
@@ -13529,6 +13537,7 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (Mul32F (Const32F [c]) (Const32F [d]))
// cond: !math.IsNaN(float64(auxTo32F(c) * auxTo32F(d)))
// result: (Const32F [auxFrom32F(auxTo32F(c) * auxTo32F(d))])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -13540,6 +13549,9 @@ func rewriteValuegeneric_OpMul32F(v *Value) bool {
continue
}
d := v_1.AuxInt
if !(!math.IsNaN(float64(auxTo32F(c) * auxTo32F(d)))) {
continue
}
v.reset(OpConst32F)
v.AuxInt = auxFrom32F(auxTo32F(c) * auxTo32F(d))
return true
@@ -13779,6 +13791,7 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (Mul64F (Const64F [c]) (Const64F [d]))
// cond: !math.IsNaN(auxTo64F(c) * auxTo64F(d))
// result: (Const64F [auxFrom64F(auxTo64F(c) * auxTo64F(d))])
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -13790,6 +13803,9 @@ func rewriteValuegeneric_OpMul64F(v *Value) bool {
continue
}
d := v_1.AuxInt
if !(!math.IsNaN(auxTo64F(c) * auxTo64F(d))) {
continue
}
v.reset(OpConst64F)
v.AuxInt = auxFrom64F(auxTo64F(c) * auxTo64F(d))
return true
@@ -19663,12 +19679,16 @@ func rewriteValuegeneric_OpSlicemask(v *Value) bool {
func rewriteValuegeneric_OpSqrt(v *Value) bool {
v_0 := v.Args[0]
// match: (Sqrt (Const64F [c]))
// cond: !math.IsNaN(math.Sqrt(auxTo64F(c)))
// result: (Const64F [auxFrom64F(math.Sqrt(auxTo64F(c)))])
for {
if v_0.Op != OpConst64F {
break
}
c := v_0.AuxInt
if !(!math.IsNaN(math.Sqrt(auxTo64F(c)))) {
break
}
v.reset(OpConst64F)
v.AuxInt = auxFrom64F(math.Sqrt(auxTo64F(c)))
return true