cmd/compile: don't depend on arch-dependent conversions in the compiler

Leave those constant foldings for runtime, similar to how we do it for
NaN generation.

These are the only instances I could find in cmd/compile/..., using

    objdump -d ../pkg/tool/darwin_arm64/compile| egrep "(fcvtz|>:)" | grep -B1 fcvt

(There are instances in other places, like runtime and reflect, but I
don't think those places would affect compiler output.)

Change-Id: I4113fe4570115e4765825cf442cb1fde97cf2f27
Reviewed-on: https://go-review.googlesource.com/c/go/+/711281
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: David Chase <drchase@google.com>
Reviewed-by: Keith Randall <khr@google.com>
parent 0e64ee1286
commit 9b8742f2e7
3 changed files with 65 additions and 4 deletions
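The guard added by this change can be read as a small, self-contained sketch in plain Go (a hypothetical helper, not the compiler's actual code): a constant float-to-integer conversion is only folded when the value is in range for the result type, so the folded result is the same on every architecture; out-of-range values and NaN are left to the target's runtime conversion instead.

func foldFloat64ToInt32(c float64) (int32, bool) {
	// In-range values convert identically on all architectures, so the
	// compiler may fold them. NaN fails both comparisons and falls through.
	if c >= -1<<31 && c < 1<<31 {
		return int32(c), true
	}
	// Out of range (or NaN): leave the conversion to the target's runtime
	// instruction, whose result is architecture-dependent.
	return 0, false
}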
@@ -50,10 +50,10 @@
 (Cvt32to64F (Const32 [c])) => (Const64F [float64(c)])
 (Cvt64to32F (Const64 [c])) => (Const32F [float32(c)])
 (Cvt64to64F (Const64 [c])) => (Const64F [float64(c)])
-(Cvt32Fto32 (Const32F [c])) => (Const32 [int32(c)])
-(Cvt32Fto64 (Const32F [c])) => (Const64 [int64(c)])
-(Cvt64Fto32 (Const64F [c])) => (Const32 [int32(c)])
-(Cvt64Fto64 (Const64F [c])) => (Const64 [int64(c)])
+(Cvt32Fto32 (Const32F [c])) && c >= -1<<31 && c < 1<<31 => (Const32 [int32(c)])
+(Cvt32Fto64 (Const32F [c])) && c >= -1<<63 && c < 1<<63 => (Const64 [int64(c)])
+(Cvt64Fto32 (Const64F [c])) && c >= -1<<31 && c < 1<<31 => (Const32 [int32(c)])
+(Cvt64Fto64 (Const64F [c])) && c >= -1<<63 && c < 1<<63 => (Const64 [int64(c)])
 (Round32F x:(Const32F)) => x
 (Round64F x:(Const64F)) => x
 (CvtBoolToUint8 (ConstBool [false])) => (Const8 [0])
@@ -6607,12 +6607,16 @@ func rewriteValuegeneric_OpCtz8(v *Value) bool {
 func rewriteValuegeneric_OpCvt32Fto32(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (Cvt32Fto32 (Const32F [c]))
+	// cond: c >= -1<<31 && c < 1<<31
 	// result: (Const32 [int32(c)])
 	for {
 		if v_0.Op != OpConst32F {
 			break
 		}
 		c := auxIntToFloat32(v_0.AuxInt)
+		if !(c >= -1<<31 && c < 1<<31) {
+			break
+		}
 		v.reset(OpConst32)
 		v.AuxInt = int32ToAuxInt(int32(c))
 		return true
@@ -6622,12 +6626,16 @@ func rewriteValuegeneric_OpCvt32Fto32(v *Value) bool {
 func rewriteValuegeneric_OpCvt32Fto64(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (Cvt32Fto64 (Const32F [c]))
+	// cond: c >= -1<<63 && c < 1<<63
 	// result: (Const64 [int64(c)])
 	for {
 		if v_0.Op != OpConst32F {
 			break
 		}
 		c := auxIntToFloat32(v_0.AuxInt)
+		if !(c >= -1<<63 && c < 1<<63) {
+			break
+		}
 		v.reset(OpConst64)
 		v.AuxInt = int64ToAuxInt(int64(c))
 		return true
@@ -6682,12 +6690,16 @@ func rewriteValuegeneric_OpCvt32to64F(v *Value) bool {
 func rewriteValuegeneric_OpCvt64Fto32(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (Cvt64Fto32 (Const64F [c]))
+	// cond: c >= -1<<31 && c < 1<<31
 	// result: (Const32 [int32(c)])
 	for {
 		if v_0.Op != OpConst64F {
 			break
 		}
 		c := auxIntToFloat64(v_0.AuxInt)
+		if !(c >= -1<<31 && c < 1<<31) {
+			break
+		}
 		v.reset(OpConst32)
 		v.AuxInt = int32ToAuxInt(int32(c))
 		return true
@@ -6732,12 +6744,16 @@ func rewriteValuegeneric_OpCvt64Fto32F(v *Value) bool {
 func rewriteValuegeneric_OpCvt64Fto64(v *Value) bool {
 	v_0 := v.Args[0]
 	// match: (Cvt64Fto64 (Const64F [c]))
+	// cond: c >= -1<<63 && c < 1<<63
 	// result: (Const64 [int64(c)])
 	for {
 		if v_0.Op != OpConst64F {
 			break
 		}
 		c := auxIntToFloat64(v_0.AuxInt)
+		if !(c >= -1<<63 && c < 1<<63) {
+			break
+		}
 		v.reset(OpConst64)
 		v.AuxInt = int64ToAuxInt(int64(c))
 		return true
@@ -330,3 +330,48 @@ func nanGenerate32() float32 {
 	// amd64/v3:"VFMADD231SS"
 	return z0 + z1
 }
+
+func outOfBoundsConv(i32 *[2]int32, u32 *[2]uint32, i64 *[2]int64, u64 *[2]uint64) {
+	// arm64: "FCVTZSDW"
+	// amd64: "CVTTSD2SL", "CVTSD2SS"
+	i32[0] = int32(two40())
+	// arm64: "FCVTZSDW"
+	// amd64: "CVTTSD2SL", "CVTSD2SS"
+	i32[1] = int32(-two40())
+	// arm64: "FCVTZSDW"
+	// amd64: "CVTTSD2SL", "CVTSD2SS"
+	u32[0] = uint32(two41())
+	// on arm64, this uses an explicit <0 comparison, so it constant folds.
+	// on amd64, this uses an explicit <0 comparison, so it constant folds.
+	// amd64: "MOVL\t[$]0,"
+	u32[1] = uint32(minus1())
+	// arm64: "FCVTZSD"
+	// amd64: "CVTTSD2SQ"
+	i64[0] = int64(two80())
+	// arm64: "FCVTZSD"
+	// amd64: "CVTTSD2SQ"
+	i64[1] = int64(-two80())
+	// arm64: "FCVTZUD"
+	// amd64: "CVTTSD2SQ"
+	u64[0] = uint64(two81())
+	// arm64: "FCVTZUD"
+	// on amd64, this uses an explicit <0 comparison, so it constant folds.
+	// amd64: "MOVQ\t[$]0,"
+	u64[1] = uint64(minus1())
+}
+
+func two40() float64 {
+	return 1 << 40
+}
+func two41() float64 {
+	return 1 << 41
+}
+func two80() float64 {
+	return 1 << 80
+}
+func two81() float64 {
+	return 1 << 81
+}
+func minus1() float64 {
+	return -1
+}
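For context rather than as part of the change itself, here is a hypothetical standalone program (not from the CL) illustrating why these folds are architecture-dependent; the expected outputs assume typical amd64 (CVTTSD2SQ yields the integer-indefinite value) and arm64 (FCVTZS saturates) behavior.

package main

import "fmt"

//go:noinline
func two80() float64 { return 1 << 80 } // far outside int64's range

func main() {
	// The Go spec makes the result of an out-of-range, non-constant
	// float-to-integer conversion implementation-defined, so this prints
	// a different value per target: typically -9223372036854775808 on
	// amd64 and 9223372036854775807 on arm64.
	fmt.Println(int64(two80()))
}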